Dataset schema (column summary):

| Column            | Type          | Stats         |
|-------------------|---------------|---------------|
| problem_id        | stringlengths | 18 – 22       |
| source            | stringclasses | 1 value       |
| task_type         | stringclasses | 1 value       |
| in_source_id      | stringlengths | 13 – 58       |
| prompt            | stringlengths | 1.1k – 25.4k  |
| golden_diff       | stringlengths | 145 – 5.13k   |
| verification_info | stringlengths | 582 – 39.1k   |
| num_tokens        | int64         | 271 – 4.1k    |
| num_tokens_diff   | int64         | 47 – 1.02k    |
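Each row below pairs a GitHub issue and the suspect source files (`prompt`) with a reference patch (`golden_diff`) and the data needed to check it (`verification_info`). A minimal sketch of consuming rows with this schema via the `datasets` library; the repo id is inferred from the `source` column and the split name is an assumption, so substitute the real dataset id and split if this preview belongs to a derived subset:

```python
from datasets import load_dataset

# Repo id and split are assumptions inferred from the `source` column above.
ds = load_dataset("rasdani/github-patches", split="train")

row = ds[0]
print(row["problem_id"], row["in_source_id"])   # e.g. gh_patches_debug_9815 pex-tool__pex-264
print(row["prompt"][:300])                      # issue text + buggy file snippets + instructions
print(row["golden_diff"])                       # reference fix in `git diff` format
print(row["num_tokens"], row["num_tokens_diff"])
```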
problem_id: gh_patches_debug_9815
source: rasdani/github-patches
task_type: git_diff
in_source_id: pex-tool__pex-264
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Python 2: SyntaxError when installing distributions with coding hints in setup.py I think this is a regression introduced in #231, which was released in pex 1.1.7. I cannot reproduce this problem on pex 1.1.6. Affects Python 2 only. Reproduce with: ``` wget -q 'https://github.com/pantsbuild/pex/releases/download/v1.1.7/pex27' chmod a+x pex27 ./pex27 backoff ``` Stack trace: ``` **** Failed to install backoff-1.1.0. stdout: **** Failed to install backoff-1.1.0. stderr: Traceback (most recent call last): File "<stdin>", line 7, in <module> File "setup.py", line 0 SyntaxError: encoding declaration in Unicode string Traceback (most recent call last): File "/private/tmp/pex/pex27/.bootstrap/_pex/pex.py", line 328, in execute File "/private/tmp/pex/pex27/.bootstrap/_pex/pex.py", line 260, in _wrap_coverage File "/private/tmp/pex/pex27/.bootstrap/_pex/pex.py", line 292, in _wrap_profiling File "/private/tmp/pex/pex27/.bootstrap/_pex/pex.py", line 371, in _execute File "/private/tmp/pex/pex27/.bootstrap/_pex/pex.py", line 429, in execute_entry File "/private/tmp/pex/pex27/.bootstrap/_pex/pex.py", line 447, in execute_pkg_resources File "/Users/saj/.pex/install/pex-1.1.7-py2.py3-none-any.whl.1089a1bbad10037ad29421de8a162a77c90c8133/pex-1.1.7-py2.py3-none-any.whl/pex/bin/pex.py", line 538, in main pex_builder = build_pex(reqs, options, resolver_options_builder) File "/Users/saj/.pex/install/pex-1.1.7-py2.py3-none-any.whl.1089a1bbad10037ad29421de8a162a77c90c8133/pex-1.1.7-py2.py3-none-any.whl/pex/bin/pex.py", line 489, in build_pex resolveds = resolver.resolve(resolvables) File "/Users/saj/.pex/install/pex-1.1.7-py2.py3-none-any.whl.1089a1bbad10037ad29421de8a162a77c90c8133/pex-1.1.7-py2.py3-none-any.whl/pex/resolver.py", line 200, in resolve dist = self.build(package, resolvable.options) File "/Users/saj/.pex/install/pex-1.1.7-py2.py3-none-any.whl.1089a1bbad10037ad29421de8a162a77c90c8133/pex-1.1.7-py2.py3-none-any.whl/pex/resolver.py", line 257, in build dist = super(CachingResolver, self).build(package, options) File "/Users/saj/.pex/install/pex-1.1.7-py2.py3-none-any.whl.1089a1bbad10037ad29421de8a162a77c90c8133/pex-1.1.7-py2.py3-none-any.whl/pex/resolver.py", line 168, in build raise Untranslateable('Package %s is not translateable by %s' % (package, translator)) pex.resolver.Untranslateable: Package SourcePackage(u'file:///Users/saj/.pex/build/backoff-1.1.0.tar.gz') is not translateable by ChainedTranslator(WheelTranslator, EggTranslator, SourceTranslator) ``` Even if we were to ignore the problem above, I don't think it is right to assume a UTF-8 encoding on all files. From [PEP 263](https://www.python.org/dev/peps/pep-0263/): > If a Unicode string with a coding declaration is passed to compile(), a SyntaxError will be raised. From the same PEP: > The builtin compile() API will be enhanced to accept Unicode as input. 8-bit string input is subject to the standard procedure for encoding detection as described above. Instead of opening `setup.py` as a text file and decoding it ourselves, it sounds like it should be possible to hand a binary stream directly to the `compile()` builtin and let Python deal with this muck. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pex/installer.py` Content: ``` 1 # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md). 
2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 4 from __future__ import absolute_import, print_function 5 6 import os 7 import subprocess 8 import sys 9 import tempfile 10 11 from pkg_resources import Distribution, PathMetadata 12 13 from .common import safe_mkdtemp, safe_rmtree 14 from .compatibility import WINDOWS 15 from .interpreter import PythonInterpreter 16 from .tracer import TRACER 17 from .version import SETUPTOOLS_REQUIREMENT, WHEEL_REQUIREMENT 18 19 __all__ = ( 20 'Installer', 21 'Packager' 22 ) 23 24 25 def after_installation(function): 26 def function_wrapper(self, *args, **kw): 27 self._installed = self.run() 28 if not self._installed: 29 raise Installer.InstallFailure('Failed to install %s' % self._source_dir) 30 return function(self, *args, **kw) 31 return function_wrapper 32 33 34 class InstallerBase(object): 35 SETUP_BOOTSTRAP_HEADER = "import io, sys" 36 SETUP_BOOTSTRAP_MODULE = "sys.path.insert(0, %(path)r); import %(module)s" 37 SETUP_BOOTSTRAP_FOOTER = """ 38 __file__ = 'setup.py' 39 sys.argv[0] = 'setup.py' 40 exec(compile(io.open(__file__, encoding='utf-8').read().replace('\\r\\n', '\\n'), __file__, 'exec')) 41 """ 42 43 class Error(Exception): pass 44 class InstallFailure(Error): pass 45 class IncapableInterpreter(Error): pass 46 47 def __init__(self, source_dir, strict=True, interpreter=None, install_dir=None): 48 """ 49 Create an installer from an unpacked source distribution in source_dir. 50 51 If strict=True, fail if any installation dependencies (e.g. distribute) 52 are missing. 53 """ 54 self._source_dir = source_dir 55 self._install_tmp = install_dir or safe_mkdtemp() 56 self._installed = None 57 self._strict = strict 58 self._interpreter = interpreter or PythonInterpreter.get() 59 if not self._interpreter.satisfies(self.capability) and strict: 60 raise self.IncapableInterpreter('Interpreter %s not capable of running %s' % ( 61 self._interpreter.binary, self.__class__.__name__)) 62 63 def mixins(self): 64 """Return a map from import name to requirement to load into setup script prior to invocation. 65 66 May be subclassed. 
67 """ 68 return {} 69 70 @property 71 def install_tmp(self): 72 return self._install_tmp 73 74 def _setup_command(self): 75 """the setup command-line to run, to be implemented by subclasses.""" 76 raise NotImplementedError 77 78 def _postprocess(self): 79 """a post-processing function to run following setup.py invocation.""" 80 81 @property 82 def capability(self): 83 """returns the list of requirements for the interpreter to run this installer.""" 84 return list(self.mixins().values()) 85 86 @property 87 def bootstrap_script(self): 88 bootstrap_modules = [] 89 for module, requirement in self.mixins().items(): 90 path = self._interpreter.get_location(requirement) 91 if not path: 92 assert not self._strict # This should be caught by validation 93 continue 94 bootstrap_modules.append(self.SETUP_BOOTSTRAP_MODULE % {'path': path, 'module': module}) 95 return '\n'.join( 96 [self.SETUP_BOOTSTRAP_HEADER] + bootstrap_modules + [self.SETUP_BOOTSTRAP_FOOTER]) 97 98 def run(self): 99 if self._installed is not None: 100 return self._installed 101 102 with TRACER.timed('Installing %s' % self._install_tmp, V=2): 103 command = [self._interpreter.binary, '-'] 104 command.extend(self._setup_command()) 105 po = subprocess.Popen(command, 106 stdin=subprocess.PIPE, 107 stdout=subprocess.PIPE, 108 stderr=subprocess.PIPE, 109 env=self._interpreter.sanitized_environment(), 110 cwd=self._source_dir) 111 so, se = po.communicate(self.bootstrap_script.encode('ascii')) 112 self._installed = po.returncode == 0 113 114 if not self._installed: 115 name = os.path.basename(self._source_dir) 116 print('**** Failed to install %s. stdout:\n%s' % (name, so.decode('utf-8')), file=sys.stderr) 117 print('**** Failed to install %s. stderr:\n%s' % (name, se.decode('utf-8')), file=sys.stderr) 118 return self._installed 119 120 self._postprocess() 121 return self._installed 122 123 def cleanup(self): 124 safe_rmtree(self._install_tmp) 125 126 127 class Installer(InstallerBase): 128 """Install an unpacked distribution with a setup.py.""" 129 130 def __init__(self, source_dir, strict=True, interpreter=None): 131 """ 132 Create an installer from an unpacked source distribution in source_dir. 133 134 If strict=True, fail if any installation dependencies (e.g. setuptools) 135 are missing. 136 """ 137 super(Installer, self).__init__(source_dir, strict=strict, interpreter=interpreter) 138 self._egg_info = None 139 fd, self._install_record = tempfile.mkstemp() 140 os.close(fd) 141 142 def _setup_command(self): 143 return ['install', 144 '--root=%s' % self._install_tmp, 145 '--prefix=', 146 '--single-version-externally-managed', 147 '--record', self._install_record] 148 149 def _postprocess(self): 150 installed_files = [] 151 egg_info = None 152 with open(self._install_record) as fp: 153 installed_files = fp.read().splitlines() 154 for line in installed_files: 155 if line.endswith('.egg-info'): 156 assert line.startswith('/'), 'Expect .egg-info to be within install_tmp!' 
157 egg_info = line 158 break 159 160 if not egg_info: 161 self._installed = False 162 return self._installed 163 164 installed_files = [os.path.relpath(fn, egg_info) for fn in installed_files if fn != egg_info] 165 166 self._egg_info = os.path.join(self._install_tmp, egg_info[1:]) 167 with open(os.path.join(self._egg_info, 'installed-files.txt'), 'w') as fp: 168 fp.write('\n'.join(installed_files)) 169 fp.write('\n') 170 171 return self._installed 172 173 @after_installation 174 def egg_info(self): 175 return self._egg_info 176 177 @after_installation 178 def root(self): 179 egg_info = self.egg_info() 180 assert egg_info 181 return os.path.realpath(os.path.dirname(egg_info)) 182 183 @after_installation 184 def distribution(self): 185 base_dir = self.root() 186 egg_info = self.egg_info() 187 metadata = PathMetadata(base_dir, egg_info) 188 return Distribution.from_location(base_dir, os.path.basename(egg_info), metadata=metadata) 189 190 191 class DistributionPackager(InstallerBase): 192 def mixins(self): 193 mixins = super(DistributionPackager, self).mixins().copy() 194 mixins.update(setuptools=SETUPTOOLS_REQUIREMENT) 195 return mixins 196 197 def find_distribution(self): 198 dists = os.listdir(self.install_tmp) 199 if len(dists) == 0: 200 raise self.InstallFailure('No distributions were produced!') 201 elif len(dists) > 1: 202 raise self.InstallFailure('Ambiguous source distributions found: %s' % (' '.join(dists))) 203 else: 204 return os.path.join(self.install_tmp, dists[0]) 205 206 207 class Packager(DistributionPackager): 208 """ 209 Create a source distribution from an unpacked setup.py-based project. 210 """ 211 212 def _setup_command(self): 213 if WINDOWS: 214 return ['sdist', '--formats=zip', '--dist-dir=%s' % self._install_tmp] 215 else: 216 return ['sdist', '--formats=gztar', '--dist-dir=%s' % self._install_tmp] 217 218 @after_installation 219 def sdist(self): 220 return self.find_distribution() 221 222 223 class EggInstaller(DistributionPackager): 224 """ 225 Create a source distribution from an unpacked setup.py-based project. 226 """ 227 228 def _setup_command(self): 229 return ['bdist_egg', '--dist-dir=%s' % self._install_tmp] 230 231 @after_installation 232 def bdist(self): 233 return self.find_distribution() 234 235 236 class WheelInstaller(DistributionPackager): 237 """ 238 Create a source distribution from an unpacked setup.py-based project. 239 """ 240 MIXINS = { 241 'setuptools': SETUPTOOLS_REQUIREMENT, 242 'wheel': WHEEL_REQUIREMENT, 243 } 244 245 def mixins(self): 246 mixins = super(WheelInstaller, self).mixins().copy() 247 mixins.update(self.MIXINS) 248 return mixins 249 250 def _setup_command(self): 251 return ['bdist_wheel', '--dist-dir=%s' % self._install_tmp] 252 253 @after_installation 254 def bdist(self): 255 return self.find_distribution() 256 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pex/installer.py b/pex/installer.py --- a/pex/installer.py +++ b/pex/installer.py @@ -32,12 +32,12 @@ class InstallerBase(object): - SETUP_BOOTSTRAP_HEADER = "import io, sys" + SETUP_BOOTSTRAP_HEADER = "import sys" SETUP_BOOTSTRAP_MODULE = "sys.path.insert(0, %(path)r); import %(module)s" SETUP_BOOTSTRAP_FOOTER = """ __file__ = 'setup.py' sys.argv[0] = 'setup.py' -exec(compile(io.open(__file__, encoding='utf-8').read().replace('\\r\\n', '\\n'), __file__, 'exec')) +exec(compile(open(__file__, 'rb').read(), __file__, 'exec')) """ class Error(Exception): pass
{"golden_diff": "diff --git a/pex/installer.py b/pex/installer.py\n--- a/pex/installer.py\n+++ b/pex/installer.py\n@@ -32,12 +32,12 @@\n \n \n class InstallerBase(object):\n- SETUP_BOOTSTRAP_HEADER = \"import io, sys\"\n+ SETUP_BOOTSTRAP_HEADER = \"import sys\"\n SETUP_BOOTSTRAP_MODULE = \"sys.path.insert(0, %(path)r); import %(module)s\"\n SETUP_BOOTSTRAP_FOOTER = \"\"\"\n __file__ = 'setup.py'\n sys.argv[0] = 'setup.py'\n-exec(compile(io.open(__file__, encoding='utf-8').read().replace('\\\\r\\\\n', '\\\\n'), __file__, 'exec'))\n+exec(compile(open(__file__, 'rb').read(), __file__, 'exec'))\n \"\"\"\n \n class Error(Exception): pass\n", "issue": "Python 2: SyntaxError when installing distributions with coding hints in setup.py\nI think this is a regression introduced in #231, which was released in pex 1.1.7. I cannot reproduce this problem on pex 1.1.6. Affects Python 2 only.\n\nReproduce with:\n\n```\nwget -q 'https://github.com/pantsbuild/pex/releases/download/v1.1.7/pex27'\nchmod a+x pex27\n./pex27 backoff\n```\n\nStack trace:\n\n```\n**** Failed to install backoff-1.1.0. stdout:\n\n**** Failed to install backoff-1.1.0. stderr:\nTraceback (most recent call last):\n File \"<stdin>\", line 7, in <module>\n File \"setup.py\", line 0\nSyntaxError: encoding declaration in Unicode string\n\nTraceback (most recent call last):\n File \"/private/tmp/pex/pex27/.bootstrap/_pex/pex.py\", line 328, in execute\n File \"/private/tmp/pex/pex27/.bootstrap/_pex/pex.py\", line 260, in _wrap_coverage\n File \"/private/tmp/pex/pex27/.bootstrap/_pex/pex.py\", line 292, in _wrap_profiling\n File \"/private/tmp/pex/pex27/.bootstrap/_pex/pex.py\", line 371, in _execute\n File \"/private/tmp/pex/pex27/.bootstrap/_pex/pex.py\", line 429, in execute_entry\n File \"/private/tmp/pex/pex27/.bootstrap/_pex/pex.py\", line 447, in execute_pkg_resources\n File \"/Users/saj/.pex/install/pex-1.1.7-py2.py3-none-any.whl.1089a1bbad10037ad29421de8a162a77c90c8133/pex-1.1.7-py2.py3-none-any.whl/pex/bin/pex.py\", line 538, in main\n pex_builder = build_pex(reqs, options, resolver_options_builder)\n File \"/Users/saj/.pex/install/pex-1.1.7-py2.py3-none-any.whl.1089a1bbad10037ad29421de8a162a77c90c8133/pex-1.1.7-py2.py3-none-any.whl/pex/bin/pex.py\", line 489, in build_pex\n resolveds = resolver.resolve(resolvables)\n File \"/Users/saj/.pex/install/pex-1.1.7-py2.py3-none-any.whl.1089a1bbad10037ad29421de8a162a77c90c8133/pex-1.1.7-py2.py3-none-any.whl/pex/resolver.py\", line 200, in resolve\n dist = self.build(package, resolvable.options)\n File \"/Users/saj/.pex/install/pex-1.1.7-py2.py3-none-any.whl.1089a1bbad10037ad29421de8a162a77c90c8133/pex-1.1.7-py2.py3-none-any.whl/pex/resolver.py\", line 257, in build\n dist = super(CachingResolver, self).build(package, options)\n File \"/Users/saj/.pex/install/pex-1.1.7-py2.py3-none-any.whl.1089a1bbad10037ad29421de8a162a77c90c8133/pex-1.1.7-py2.py3-none-any.whl/pex/resolver.py\", line 168, in build\n raise Untranslateable('Package %s is not translateable by %s' % (package, translator))\npex.resolver.Untranslateable: Package SourcePackage(u'file:///Users/saj/.pex/build/backoff-1.1.0.tar.gz') is not translateable by ChainedTranslator(WheelTranslator, EggTranslator, SourceTranslator)\n```\n\nEven if we were to ignore the problem above, I don't think it is right to assume a UTF-8 encoding on all files.\n\nFrom [PEP 263](https://www.python.org/dev/peps/pep-0263/):\n\n> If a Unicode string with a coding declaration is passed to compile(), a SyntaxError will be raised.\n\nFrom the same PEP:\n\n> The 
builtin compile() API will be enhanced to accept Unicode as input. 8-bit string input is subject to the standard procedure for encoding detection as described above.\n\nInstead of opening `setup.py` as a text file and decoding it ourselves, it sounds like it should be possible to hand a binary stream directly to the `compile()` builtin and let Python deal with this muck.\n\n", "before_files": [{"content": "# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import absolute_import, print_function\n\nimport os\nimport subprocess\nimport sys\nimport tempfile\n\nfrom pkg_resources import Distribution, PathMetadata\n\nfrom .common import safe_mkdtemp, safe_rmtree\nfrom .compatibility import WINDOWS\nfrom .interpreter import PythonInterpreter\nfrom .tracer import TRACER\nfrom .version import SETUPTOOLS_REQUIREMENT, WHEEL_REQUIREMENT\n\n__all__ = (\n 'Installer',\n 'Packager'\n)\n\n\ndef after_installation(function):\n def function_wrapper(self, *args, **kw):\n self._installed = self.run()\n if not self._installed:\n raise Installer.InstallFailure('Failed to install %s' % self._source_dir)\n return function(self, *args, **kw)\n return function_wrapper\n\n\nclass InstallerBase(object):\n SETUP_BOOTSTRAP_HEADER = \"import io, sys\"\n SETUP_BOOTSTRAP_MODULE = \"sys.path.insert(0, %(path)r); import %(module)s\"\n SETUP_BOOTSTRAP_FOOTER = \"\"\"\n__file__ = 'setup.py'\nsys.argv[0] = 'setup.py'\nexec(compile(io.open(__file__, encoding='utf-8').read().replace('\\\\r\\\\n', '\\\\n'), __file__, 'exec'))\n\"\"\"\n\n class Error(Exception): pass\n class InstallFailure(Error): pass\n class IncapableInterpreter(Error): pass\n\n def __init__(self, source_dir, strict=True, interpreter=None, install_dir=None):\n \"\"\"\n Create an installer from an unpacked source distribution in source_dir.\n\n If strict=True, fail if any installation dependencies (e.g. 
distribute)\n are missing.\n \"\"\"\n self._source_dir = source_dir\n self._install_tmp = install_dir or safe_mkdtemp()\n self._installed = None\n self._strict = strict\n self._interpreter = interpreter or PythonInterpreter.get()\n if not self._interpreter.satisfies(self.capability) and strict:\n raise self.IncapableInterpreter('Interpreter %s not capable of running %s' % (\n self._interpreter.binary, self.__class__.__name__))\n\n def mixins(self):\n \"\"\"Return a map from import name to requirement to load into setup script prior to invocation.\n\n May be subclassed.\n \"\"\"\n return {}\n\n @property\n def install_tmp(self):\n return self._install_tmp\n\n def _setup_command(self):\n \"\"\"the setup command-line to run, to be implemented by subclasses.\"\"\"\n raise NotImplementedError\n\n def _postprocess(self):\n \"\"\"a post-processing function to run following setup.py invocation.\"\"\"\n\n @property\n def capability(self):\n \"\"\"returns the list of requirements for the interpreter to run this installer.\"\"\"\n return list(self.mixins().values())\n\n @property\n def bootstrap_script(self):\n bootstrap_modules = []\n for module, requirement in self.mixins().items():\n path = self._interpreter.get_location(requirement)\n if not path:\n assert not self._strict # This should be caught by validation\n continue\n bootstrap_modules.append(self.SETUP_BOOTSTRAP_MODULE % {'path': path, 'module': module})\n return '\\n'.join(\n [self.SETUP_BOOTSTRAP_HEADER] + bootstrap_modules + [self.SETUP_BOOTSTRAP_FOOTER])\n\n def run(self):\n if self._installed is not None:\n return self._installed\n\n with TRACER.timed('Installing %s' % self._install_tmp, V=2):\n command = [self._interpreter.binary, '-']\n command.extend(self._setup_command())\n po = subprocess.Popen(command,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n env=self._interpreter.sanitized_environment(),\n cwd=self._source_dir)\n so, se = po.communicate(self.bootstrap_script.encode('ascii'))\n self._installed = po.returncode == 0\n\n if not self._installed:\n name = os.path.basename(self._source_dir)\n print('**** Failed to install %s. stdout:\\n%s' % (name, so.decode('utf-8')), file=sys.stderr)\n print('**** Failed to install %s. stderr:\\n%s' % (name, se.decode('utf-8')), file=sys.stderr)\n return self._installed\n\n self._postprocess()\n return self._installed\n\n def cleanup(self):\n safe_rmtree(self._install_tmp)\n\n\nclass Installer(InstallerBase):\n \"\"\"Install an unpacked distribution with a setup.py.\"\"\"\n\n def __init__(self, source_dir, strict=True, interpreter=None):\n \"\"\"\n Create an installer from an unpacked source distribution in source_dir.\n\n If strict=True, fail if any installation dependencies (e.g. 
setuptools)\n are missing.\n \"\"\"\n super(Installer, self).__init__(source_dir, strict=strict, interpreter=interpreter)\n self._egg_info = None\n fd, self._install_record = tempfile.mkstemp()\n os.close(fd)\n\n def _setup_command(self):\n return ['install',\n '--root=%s' % self._install_tmp,\n '--prefix=',\n '--single-version-externally-managed',\n '--record', self._install_record]\n\n def _postprocess(self):\n installed_files = []\n egg_info = None\n with open(self._install_record) as fp:\n installed_files = fp.read().splitlines()\n for line in installed_files:\n if line.endswith('.egg-info'):\n assert line.startswith('/'), 'Expect .egg-info to be within install_tmp!'\n egg_info = line\n break\n\n if not egg_info:\n self._installed = False\n return self._installed\n\n installed_files = [os.path.relpath(fn, egg_info) for fn in installed_files if fn != egg_info]\n\n self._egg_info = os.path.join(self._install_tmp, egg_info[1:])\n with open(os.path.join(self._egg_info, 'installed-files.txt'), 'w') as fp:\n fp.write('\\n'.join(installed_files))\n fp.write('\\n')\n\n return self._installed\n\n @after_installation\n def egg_info(self):\n return self._egg_info\n\n @after_installation\n def root(self):\n egg_info = self.egg_info()\n assert egg_info\n return os.path.realpath(os.path.dirname(egg_info))\n\n @after_installation\n def distribution(self):\n base_dir = self.root()\n egg_info = self.egg_info()\n metadata = PathMetadata(base_dir, egg_info)\n return Distribution.from_location(base_dir, os.path.basename(egg_info), metadata=metadata)\n\n\nclass DistributionPackager(InstallerBase):\n def mixins(self):\n mixins = super(DistributionPackager, self).mixins().copy()\n mixins.update(setuptools=SETUPTOOLS_REQUIREMENT)\n return mixins\n\n def find_distribution(self):\n dists = os.listdir(self.install_tmp)\n if len(dists) == 0:\n raise self.InstallFailure('No distributions were produced!')\n elif len(dists) > 1:\n raise self.InstallFailure('Ambiguous source distributions found: %s' % (' '.join(dists)))\n else:\n return os.path.join(self.install_tmp, dists[0])\n\n\nclass Packager(DistributionPackager):\n \"\"\"\n Create a source distribution from an unpacked setup.py-based project.\n \"\"\"\n\n def _setup_command(self):\n if WINDOWS:\n return ['sdist', '--formats=zip', '--dist-dir=%s' % self._install_tmp]\n else:\n return ['sdist', '--formats=gztar', '--dist-dir=%s' % self._install_tmp]\n\n @after_installation\n def sdist(self):\n return self.find_distribution()\n\n\nclass EggInstaller(DistributionPackager):\n \"\"\"\n Create a source distribution from an unpacked setup.py-based project.\n \"\"\"\n\n def _setup_command(self):\n return ['bdist_egg', '--dist-dir=%s' % self._install_tmp]\n\n @after_installation\n def bdist(self):\n return self.find_distribution()\n\n\nclass WheelInstaller(DistributionPackager):\n \"\"\"\n Create a source distribution from an unpacked setup.py-based project.\n \"\"\"\n MIXINS = {\n 'setuptools': SETUPTOOLS_REQUIREMENT,\n 'wheel': WHEEL_REQUIREMENT,\n }\n\n def mixins(self):\n mixins = super(WheelInstaller, self).mixins().copy()\n mixins.update(self.MIXINS)\n return mixins\n\n def _setup_command(self):\n return ['bdist_wheel', '--dist-dir=%s' % self._install_tmp]\n\n @after_installation\n def bdist(self):\n return self.find_distribution()\n", "path": "pex/installer.py"}], "after_files": [{"content": "# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import absolute_import, 
print_function\n\nimport os\nimport subprocess\nimport sys\nimport tempfile\n\nfrom pkg_resources import Distribution, PathMetadata\n\nfrom .common import safe_mkdtemp, safe_rmtree\nfrom .compatibility import WINDOWS\nfrom .interpreter import PythonInterpreter\nfrom .tracer import TRACER\nfrom .version import SETUPTOOLS_REQUIREMENT, WHEEL_REQUIREMENT\n\n__all__ = (\n 'Installer',\n 'Packager'\n)\n\n\ndef after_installation(function):\n def function_wrapper(self, *args, **kw):\n self._installed = self.run()\n if not self._installed:\n raise Installer.InstallFailure('Failed to install %s' % self._source_dir)\n return function(self, *args, **kw)\n return function_wrapper\n\n\nclass InstallerBase(object):\n SETUP_BOOTSTRAP_HEADER = \"import sys\"\n SETUP_BOOTSTRAP_MODULE = \"sys.path.insert(0, %(path)r); import %(module)s\"\n SETUP_BOOTSTRAP_FOOTER = \"\"\"\n__file__ = 'setup.py'\nsys.argv[0] = 'setup.py'\nexec(compile(open(__file__, 'rb').read(), __file__, 'exec'))\n\"\"\"\n\n class Error(Exception): pass\n class InstallFailure(Error): pass\n class IncapableInterpreter(Error): pass\n\n def __init__(self, source_dir, strict=True, interpreter=None, install_dir=None):\n \"\"\"\n Create an installer from an unpacked source distribution in source_dir.\n\n If strict=True, fail if any installation dependencies (e.g. distribute)\n are missing.\n \"\"\"\n self._source_dir = source_dir\n self._install_tmp = install_dir or safe_mkdtemp()\n self._installed = None\n self._strict = strict\n self._interpreter = interpreter or PythonInterpreter.get()\n if not self._interpreter.satisfies(self.capability) and strict:\n raise self.IncapableInterpreter('Interpreter %s not capable of running %s' % (\n self._interpreter.binary, self.__class__.__name__))\n\n def mixins(self):\n \"\"\"Return a map from import name to requirement to load into setup script prior to invocation.\n\n May be subclassed.\n \"\"\"\n return {}\n\n @property\n def install_tmp(self):\n return self._install_tmp\n\n def _setup_command(self):\n \"\"\"the setup command-line to run, to be implemented by subclasses.\"\"\"\n raise NotImplementedError\n\n def _postprocess(self):\n \"\"\"a post-processing function to run following setup.py invocation.\"\"\"\n\n @property\n def capability(self):\n \"\"\"returns the list of requirements for the interpreter to run this installer.\"\"\"\n return list(self.mixins().values())\n\n @property\n def bootstrap_script(self):\n bootstrap_modules = []\n for module, requirement in self.mixins().items():\n path = self._interpreter.get_location(requirement)\n if not path:\n assert not self._strict # This should be caught by validation\n continue\n bootstrap_modules.append(self.SETUP_BOOTSTRAP_MODULE % {'path': path, 'module': module})\n return '\\n'.join(\n [self.SETUP_BOOTSTRAP_HEADER] + bootstrap_modules + [self.SETUP_BOOTSTRAP_FOOTER])\n\n def run(self):\n if self._installed is not None:\n return self._installed\n\n with TRACER.timed('Installing %s' % self._install_tmp, V=2):\n command = [self._interpreter.binary, '-']\n command.extend(self._setup_command())\n po = subprocess.Popen(command,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n env=self._interpreter.sanitized_environment(),\n cwd=self._source_dir)\n so, se = po.communicate(self.bootstrap_script.encode('ascii'))\n self._installed = po.returncode == 0\n\n if not self._installed:\n name = os.path.basename(self._source_dir)\n print('**** Failed to install %s. 
stdout:\\n%s' % (name, so.decode('utf-8')), file=sys.stderr)\n print('**** Failed to install %s. stderr:\\n%s' % (name, se.decode('utf-8')), file=sys.stderr)\n return self._installed\n\n self._postprocess()\n return self._installed\n\n def cleanup(self):\n safe_rmtree(self._install_tmp)\n\n\nclass Installer(InstallerBase):\n \"\"\"Install an unpacked distribution with a setup.py.\"\"\"\n\n def __init__(self, source_dir, strict=True, interpreter=None):\n \"\"\"\n Create an installer from an unpacked source distribution in source_dir.\n\n If strict=True, fail if any installation dependencies (e.g. setuptools)\n are missing.\n \"\"\"\n super(Installer, self).__init__(source_dir, strict=strict, interpreter=interpreter)\n self._egg_info = None\n fd, self._install_record = tempfile.mkstemp()\n os.close(fd)\n\n def _setup_command(self):\n return ['install',\n '--root=%s' % self._install_tmp,\n '--prefix=',\n '--single-version-externally-managed',\n '--record', self._install_record]\n\n def _postprocess(self):\n installed_files = []\n egg_info = None\n with open(self._install_record) as fp:\n installed_files = fp.read().splitlines()\n for line in installed_files:\n if line.endswith('.egg-info'):\n assert line.startswith('/'), 'Expect .egg-info to be within install_tmp!'\n egg_info = line\n break\n\n if not egg_info:\n self._installed = False\n return self._installed\n\n installed_files = [os.path.relpath(fn, egg_info) for fn in installed_files if fn != egg_info]\n\n self._egg_info = os.path.join(self._install_tmp, egg_info[1:])\n with open(os.path.join(self._egg_info, 'installed-files.txt'), 'w') as fp:\n fp.write('\\n'.join(installed_files))\n fp.write('\\n')\n\n return self._installed\n\n @after_installation\n def egg_info(self):\n return self._egg_info\n\n @after_installation\n def root(self):\n egg_info = self.egg_info()\n assert egg_info\n return os.path.realpath(os.path.dirname(egg_info))\n\n @after_installation\n def distribution(self):\n base_dir = self.root()\n egg_info = self.egg_info()\n metadata = PathMetadata(base_dir, egg_info)\n return Distribution.from_location(base_dir, os.path.basename(egg_info), metadata=metadata)\n\n\nclass DistributionPackager(InstallerBase):\n def mixins(self):\n mixins = super(DistributionPackager, self).mixins().copy()\n mixins.update(setuptools=SETUPTOOLS_REQUIREMENT)\n return mixins\n\n def find_distribution(self):\n dists = os.listdir(self.install_tmp)\n if len(dists) == 0:\n raise self.InstallFailure('No distributions were produced!')\n elif len(dists) > 1:\n raise self.InstallFailure('Ambiguous source distributions found: %s' % (' '.join(dists)))\n else:\n return os.path.join(self.install_tmp, dists[0])\n\n\nclass Packager(DistributionPackager):\n \"\"\"\n Create a source distribution from an unpacked setup.py-based project.\n \"\"\"\n\n def _setup_command(self):\n if WINDOWS:\n return ['sdist', '--formats=zip', '--dist-dir=%s' % self._install_tmp]\n else:\n return ['sdist', '--formats=gztar', '--dist-dir=%s' % self._install_tmp]\n\n @after_installation\n def sdist(self):\n return self.find_distribution()\n\n\nclass EggInstaller(DistributionPackager):\n \"\"\"\n Create a source distribution from an unpacked setup.py-based project.\n \"\"\"\n\n def _setup_command(self):\n return ['bdist_egg', '--dist-dir=%s' % self._install_tmp]\n\n @after_installation\n def bdist(self):\n return self.find_distribution()\n\n\nclass WheelInstaller(DistributionPackager):\n \"\"\"\n Create a source distribution from an unpacked setup.py-based project.\n \"\"\"\n MIXINS = 
{\n 'setuptools': SETUPTOOLS_REQUIREMENT,\n 'wheel': WHEEL_REQUIREMENT,\n }\n\n def mixins(self):\n mixins = super(WheelInstaller, self).mixins().copy()\n mixins.update(self.MIXINS)\n return mixins\n\n def _setup_command(self):\n return ['bdist_wheel', '--dist-dir=%s' % self._install_tmp]\n\n @after_installation\n def bdist(self):\n return self.find_distribution()\n", "path": "pex/installer.py"}]}
num_tokens: 3,997
num_tokens_diff: 188
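The pex-tool__pex-264 row above hinges on a PEP 263 detail: on Python 2, compile() rejects a unicode string that still carries a coding declaration, while byte-string input goes through the interpreter's own encoding detection. A minimal repro sketch, assuming a hypothetical setup.py that begins with a coding declaration (illustration only, not part of the dataset):

```python
import io

SETUP_PY = "setup.py"  # hypothetical file starting with "# -*- coding: utf-8 -*-"

# pex 1.1.7 decoded the file itself and handed unicode to compile().
# On Python 2 that raises: SyntaxError: encoding declaration in Unicode string
unicode_src = io.open(SETUP_PY, encoding="utf-8").read()
# compile(unicode_src, SETUP_PY, "exec")   # -> SyntaxError under Python 2

# The golden diff instead passes the raw bytes, letting the interpreter run
# its own PEP 263 encoding detection on the coding declaration.
byte_src = open(SETUP_PY, "rb").read()
code = compile(byte_src, SETUP_PY, "exec")  # pex then exec()s this code object
```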
problem_id: gh_patches_debug_59251
source: rasdani/github-patches
task_type: git_diff
in_source_id: ephios-dev__ephios-639
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- PWA does not respect orientation lock on Android --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ephios/core/views/pwa.py` Content: ``` 1 import functools 2 3 from django.conf import settings 4 from django.contrib.staticfiles import finders 5 from django.http import HttpResponse, JsonResponse 6 from django.shortcuts import render 7 from django.utils.translation import get_language 8 9 10 def manifest(request): 11 manifest_json = { 12 "name": "ephios", 13 "short_name": "ephios", 14 "description": "ephios manages events for medical services", 15 "start_url": "/", 16 "display": "standalone", 17 "scope": "/", 18 "orientation": "any", 19 "background_color": "#fff", 20 "theme_color": "#000", 21 "status_bar": "default", 22 "dir": "auto", 23 "icons": settings.PWA_APP_ICONS, 24 "lang": get_language(), 25 } 26 response = JsonResponse(manifest_json) 27 response["Service-Worker-Allowed"] = "/" 28 return response 29 30 31 @functools.lru_cache 32 def serviceworker_js(): 33 with open(finders.find("ephios/js/serviceworker.js"), "rb") as sw_js: 34 return sw_js.read() 35 36 37 def serviceworker(request): 38 return HttpResponse( 39 serviceworker_js(), 40 content_type="application/javascript", 41 ) 42 43 44 def offline(request): 45 return render(request, "offline.html") 46 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ephios/core/views/pwa.py b/ephios/core/views/pwa.py --- a/ephios/core/views/pwa.py +++ b/ephios/core/views/pwa.py @@ -15,7 +15,6 @@ "start_url": "/", "display": "standalone", "scope": "/", - "orientation": "any", "background_color": "#fff", "theme_color": "#000", "status_bar": "default",
{"golden_diff": "diff --git a/ephios/core/views/pwa.py b/ephios/core/views/pwa.py\n--- a/ephios/core/views/pwa.py\n+++ b/ephios/core/views/pwa.py\n@@ -15,7 +15,6 @@\n \"start_url\": \"/\",\n \"display\": \"standalone\",\n \"scope\": \"/\",\n- \"orientation\": \"any\",\n \"background_color\": \"#fff\",\n \"theme_color\": \"#000\",\n \"status_bar\": \"default\",\n", "issue": "PWA does not respect orientation lock on Android\n\n", "before_files": [{"content": "import functools\n\nfrom django.conf import settings\nfrom django.contrib.staticfiles import finders\nfrom django.http import HttpResponse, JsonResponse\nfrom django.shortcuts import render\nfrom django.utils.translation import get_language\n\n\ndef manifest(request):\n manifest_json = {\n \"name\": \"ephios\",\n \"short_name\": \"ephios\",\n \"description\": \"ephios manages events for medical services\",\n \"start_url\": \"/\",\n \"display\": \"standalone\",\n \"scope\": \"/\",\n \"orientation\": \"any\",\n \"background_color\": \"#fff\",\n \"theme_color\": \"#000\",\n \"status_bar\": \"default\",\n \"dir\": \"auto\",\n \"icons\": settings.PWA_APP_ICONS,\n \"lang\": get_language(),\n }\n response = JsonResponse(manifest_json)\n response[\"Service-Worker-Allowed\"] = \"/\"\n return response\n\n\[email protected]_cache\ndef serviceworker_js():\n with open(finders.find(\"ephios/js/serviceworker.js\"), \"rb\") as sw_js:\n return sw_js.read()\n\n\ndef serviceworker(request):\n return HttpResponse(\n serviceworker_js(),\n content_type=\"application/javascript\",\n )\n\n\ndef offline(request):\n return render(request, \"offline.html\")\n", "path": "ephios/core/views/pwa.py"}], "after_files": [{"content": "import functools\n\nfrom django.conf import settings\nfrom django.contrib.staticfiles import finders\nfrom django.http import HttpResponse, JsonResponse\nfrom django.shortcuts import render\nfrom django.utils.translation import get_language\n\n\ndef manifest(request):\n manifest_json = {\n \"name\": \"ephios\",\n \"short_name\": \"ephios\",\n \"description\": \"ephios manages events for medical services\",\n \"start_url\": \"/\",\n \"display\": \"standalone\",\n \"scope\": \"/\",\n \"background_color\": \"#fff\",\n \"theme_color\": \"#000\",\n \"status_bar\": \"default\",\n \"dir\": \"auto\",\n \"icons\": settings.PWA_APP_ICONS,\n \"lang\": get_language(),\n }\n response = JsonResponse(manifest_json)\n response[\"Service-Worker-Allowed\"] = \"/\"\n return response\n\n\[email protected]_cache\ndef serviceworker_js():\n with open(finders.find(\"ephios/js/serviceworker.js\"), \"rb\") as sw_js:\n return sw_js.read()\n\n\ndef serviceworker(request):\n return HttpResponse(\n serviceworker_js(),\n content_type=\"application/javascript\",\n )\n\n\ndef offline(request):\n return render(request, \"offline.html\")\n", "path": "ephios/core/views/pwa.py"}]}
num_tokens: 628
num_tokens_diff: 111
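For the ephios-dev__ephios-639 row above, the whole fix is dropping the `orientation` member from the generated manifest so the installed PWA stops overriding the device's rotation lock. A hedged regression-test sketch; the manifest URL and the pytest-django `client` fixture are assumptions, not taken from the ephios repo:

```python
# Assumed route for ephios.core.views.pwa.manifest; adjust to the project's urlconf.
MANIFEST_URL = "/manifest.json"


def test_manifest_does_not_pin_orientation(client):
    data = client.get(MANIFEST_URL).json()
    assert "orientation" not in data          # the removed member stays removed
    assert data["display"] == "standalone"    # untouched fields from the view remain
```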
problem_id: gh_patches_debug_31130
source: rasdani/github-patches
task_type: git_diff
in_source_id: kedro-org__kedro-1789
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add parameters to `%reload_kedro` line magic ## Description Currently you cannot pass things like `env` or `extra_params` via the line magic, but you can by importing the function. https://github.com/kedro-org/kedro/blob/5ae97cfb70e5b0d4490132847977d482f13c840f/kedro/extras/extensions/ipython.py#L38 Why don't we introduce feature parity here? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `kedro/extras/extensions/ipython.py` Content: ``` 1 # pylint: disable=import-outside-toplevel,global-statement,invalid-name,too-many-locals 2 """ 3 This script creates an IPython extension to load Kedro-related variables in 4 local scope. 5 """ 6 import logging 7 import sys 8 from pathlib import Path 9 from typing import Any, Dict 10 11 logger = logging.getLogger(__name__) 12 default_project_path = Path.cwd() 13 14 15 def _remove_cached_modules(package_name): 16 to_remove = [mod for mod in sys.modules if mod.startswith(package_name)] 17 # `del` is used instead of `reload()` because: If the new version of a module does not 18 # define a name that was defined by the old version, the old definition remains. 19 for module in to_remove: 20 del sys.modules[module] # pragma: no cover 21 22 23 def _find_kedro_project(current_dir: Path): # pragma: no cover 24 from kedro.framework.startup import _is_project 25 26 while current_dir != current_dir.parent: 27 if _is_project(current_dir): 28 return current_dir 29 current_dir = current_dir.parent 30 31 return None 32 33 34 def reload_kedro( 35 path: str = None, env: str = None, extra_params: Dict[str, Any] = None 36 ): 37 """Line magic which reloads all Kedro default variables. 38 Setting the path will also make it default for subsequent calls. 39 """ 40 from IPython import get_ipython 41 from IPython.core.magic import needs_local_scope, register_line_magic 42 43 from kedro.framework.cli import load_entry_points 44 from kedro.framework.project import LOGGING # noqa # pylint:disable=unused-import 45 from kedro.framework.project import configure_project, pipelines 46 from kedro.framework.session import KedroSession 47 from kedro.framework.startup import bootstrap_project 48 49 # If a path is provided, set it as default for subsequent calls 50 global default_project_path 51 if path: 52 default_project_path = Path(path).expanduser().resolve() 53 logger.info("Updated path to Kedro project: %s", default_project_path) 54 else: 55 logger.info("No path argument was provided. 
Using: %s", default_project_path) 56 57 metadata = bootstrap_project(default_project_path) 58 _remove_cached_modules(metadata.package_name) 59 configure_project(metadata.package_name) 60 61 session = KedroSession.create( 62 metadata.package_name, default_project_path, env=env, extra_params=extra_params 63 ) 64 context = session.load_context() 65 catalog = context.catalog 66 67 get_ipython().push( 68 variables={ 69 "context": context, 70 "catalog": catalog, 71 "session": session, 72 "pipelines": pipelines, 73 } 74 ) 75 76 logger.info("Kedro project %s", str(metadata.project_name)) 77 logger.info( 78 "Defined global variable 'context', 'session', 'catalog' and 'pipelines'" 79 ) 80 81 for line_magic in load_entry_points("line_magic"): 82 register_line_magic(needs_local_scope(line_magic)) 83 logger.info("Registered line magic '%s'", line_magic.__name__) # type: ignore 84 85 86 def load_ipython_extension(ipython): 87 """Main entry point when %load_ext is executed""" 88 89 global default_project_path 90 91 ipython.register_magic_function(reload_kedro, "line", "reload_kedro") 92 93 default_project_path = _find_kedro_project(Path.cwd()) 94 95 if default_project_path is None: 96 logger.warning( 97 "Kedro extension was registered but couldn't find a Kedro project. " 98 "Make sure you run '%reload_kedro <project_root>'." 99 ) 100 return 101 102 reload_kedro(default_project_path) 103 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/kedro/extras/extensions/ipython.py b/kedro/extras/extensions/ipython.py --- a/kedro/extras/extensions/ipython.py +++ b/kedro/extras/extensions/ipython.py @@ -8,6 +8,9 @@ from pathlib import Path from typing import Any, Dict +from kedro.framework.cli.project import PARAMS_ARG_HELP +from kedro.framework.cli.utils import ENV_HELP, _split_params + logger = logging.getLogger(__name__) default_project_path = Path.cwd() @@ -84,12 +87,46 @@ def load_ipython_extension(ipython): - """Main entry point when %load_ext is executed""" + """ + Main entry point when %load_ext is executed. + IPython will look for this function specifically. + See https://ipython.readthedocs.io/en/stable/config/extensions/index.html - global default_project_path + This function is called when users do `%load_ext kedro.extras.extensions.ipython`. + When user use `kedro jupyter notebook` or `jupyter ipython`, this extension is + loaded automatically. + """ + from IPython.core.magic_arguments import argument, magic_arguments, parse_argstring + + @magic_arguments() + @argument( + "path", + type=str, + help=( + "Path to the project root directory. If not given, use the previously set" + "project root." + ), + nargs="?", + default=None, + ) + @argument("-e", "--env", type=str, default=None, help=ENV_HELP) + @argument( + "--params", + type=lambda value: _split_params(None, None, value), + default=None, + help=PARAMS_ARG_HELP, + ) + def magic_reload_kedro(line: str): + """ + The `%reload_kedro` IPython line magic. See + https://kedro.readthedocs.io/en/stable/tools_integration/ipython.html for more. + """ + args = parse_argstring(magic_reload_kedro, line) + reload_kedro(args.path, args.env, args.params) - ipython.register_magic_function(reload_kedro, "line", "reload_kedro") + global default_project_path + ipython.register_magic_function(magic_reload_kedro, magic_name="reload_kedro") default_project_path = _find_kedro_project(Path.cwd()) if default_project_path is None:
{"golden_diff": "diff --git a/kedro/extras/extensions/ipython.py b/kedro/extras/extensions/ipython.py\n--- a/kedro/extras/extensions/ipython.py\n+++ b/kedro/extras/extensions/ipython.py\n@@ -8,6 +8,9 @@\n from pathlib import Path\n from typing import Any, Dict\n \n+from kedro.framework.cli.project import PARAMS_ARG_HELP\n+from kedro.framework.cli.utils import ENV_HELP, _split_params\n+\n logger = logging.getLogger(__name__)\n default_project_path = Path.cwd()\n \n@@ -84,12 +87,46 @@\n \n \n def load_ipython_extension(ipython):\n- \"\"\"Main entry point when %load_ext is executed\"\"\"\n+ \"\"\"\n+ Main entry point when %load_ext is executed.\n+ IPython will look for this function specifically.\n+ See https://ipython.readthedocs.io/en/stable/config/extensions/index.html\n \n- global default_project_path\n+ This function is called when users do `%load_ext kedro.extras.extensions.ipython`.\n+ When user use `kedro jupyter notebook` or `jupyter ipython`, this extension is\n+ loaded automatically.\n+ \"\"\"\n+ from IPython.core.magic_arguments import argument, magic_arguments, parse_argstring\n+\n+ @magic_arguments()\n+ @argument(\n+ \"path\",\n+ type=str,\n+ help=(\n+ \"Path to the project root directory. If not given, use the previously set\"\n+ \"project root.\"\n+ ),\n+ nargs=\"?\",\n+ default=None,\n+ )\n+ @argument(\"-e\", \"--env\", type=str, default=None, help=ENV_HELP)\n+ @argument(\n+ \"--params\",\n+ type=lambda value: _split_params(None, None, value),\n+ default=None,\n+ help=PARAMS_ARG_HELP,\n+ )\n+ def magic_reload_kedro(line: str):\n+ \"\"\"\n+ The `%reload_kedro` IPython line magic. See\n+ https://kedro.readthedocs.io/en/stable/tools_integration/ipython.html for more.\n+ \"\"\"\n+ args = parse_argstring(magic_reload_kedro, line)\n+ reload_kedro(args.path, args.env, args.params)\n \n- ipython.register_magic_function(reload_kedro, \"line\", \"reload_kedro\")\n+ global default_project_path\n \n+ ipython.register_magic_function(magic_reload_kedro, magic_name=\"reload_kedro\")\n default_project_path = _find_kedro_project(Path.cwd())\n \n if default_project_path is None:\n", "issue": "Add parameters to `%reload_kedro` line magic \n## Description\r\n\r\nCurrently you cannot pass things like `env` or `extra_params` via the line magic, but you can by importing the function.\r\n\r\nhttps://github.com/kedro-org/kedro/blob/5ae97cfb70e5b0d4490132847977d482f13c840f/kedro/extras/extensions/ipython.py#L38\r\n\r\nWhy don't we introduce feature parity here? 
\n", "before_files": [{"content": "# pylint: disable=import-outside-toplevel,global-statement,invalid-name,too-many-locals\n\"\"\"\nThis script creates an IPython extension to load Kedro-related variables in\nlocal scope.\n\"\"\"\nimport logging\nimport sys\nfrom pathlib import Path\nfrom typing import Any, Dict\n\nlogger = logging.getLogger(__name__)\ndefault_project_path = Path.cwd()\n\n\ndef _remove_cached_modules(package_name):\n to_remove = [mod for mod in sys.modules if mod.startswith(package_name)]\n # `del` is used instead of `reload()` because: If the new version of a module does not\n # define a name that was defined by the old version, the old definition remains.\n for module in to_remove:\n del sys.modules[module] # pragma: no cover\n\n\ndef _find_kedro_project(current_dir: Path): # pragma: no cover\n from kedro.framework.startup import _is_project\n\n while current_dir != current_dir.parent:\n if _is_project(current_dir):\n return current_dir\n current_dir = current_dir.parent\n\n return None\n\n\ndef reload_kedro(\n path: str = None, env: str = None, extra_params: Dict[str, Any] = None\n):\n \"\"\"Line magic which reloads all Kedro default variables.\n Setting the path will also make it default for subsequent calls.\n \"\"\"\n from IPython import get_ipython\n from IPython.core.magic import needs_local_scope, register_line_magic\n\n from kedro.framework.cli import load_entry_points\n from kedro.framework.project import LOGGING # noqa # pylint:disable=unused-import\n from kedro.framework.project import configure_project, pipelines\n from kedro.framework.session import KedroSession\n from kedro.framework.startup import bootstrap_project\n\n # If a path is provided, set it as default for subsequent calls\n global default_project_path\n if path:\n default_project_path = Path(path).expanduser().resolve()\n logger.info(\"Updated path to Kedro project: %s\", default_project_path)\n else:\n logger.info(\"No path argument was provided. Using: %s\", default_project_path)\n\n metadata = bootstrap_project(default_project_path)\n _remove_cached_modules(metadata.package_name)\n configure_project(metadata.package_name)\n\n session = KedroSession.create(\n metadata.package_name, default_project_path, env=env, extra_params=extra_params\n )\n context = session.load_context()\n catalog = context.catalog\n\n get_ipython().push(\n variables={\n \"context\": context,\n \"catalog\": catalog,\n \"session\": session,\n \"pipelines\": pipelines,\n }\n )\n\n logger.info(\"Kedro project %s\", str(metadata.project_name))\n logger.info(\n \"Defined global variable 'context', 'session', 'catalog' and 'pipelines'\"\n )\n\n for line_magic in load_entry_points(\"line_magic\"):\n register_line_magic(needs_local_scope(line_magic))\n logger.info(\"Registered line magic '%s'\", line_magic.__name__) # type: ignore\n\n\ndef load_ipython_extension(ipython):\n \"\"\"Main entry point when %load_ext is executed\"\"\"\n\n global default_project_path\n\n ipython.register_magic_function(reload_kedro, \"line\", \"reload_kedro\")\n\n default_project_path = _find_kedro_project(Path.cwd())\n\n if default_project_path is None:\n logger.warning(\n \"Kedro extension was registered but couldn't find a Kedro project. 
\"\n \"Make sure you run '%reload_kedro <project_root>'.\"\n )\n return\n\n reload_kedro(default_project_path)\n", "path": "kedro/extras/extensions/ipython.py"}], "after_files": [{"content": "# pylint: disable=import-outside-toplevel,global-statement,invalid-name,too-many-locals\n\"\"\"\nThis script creates an IPython extension to load Kedro-related variables in\nlocal scope.\n\"\"\"\nimport logging\nimport sys\nfrom pathlib import Path\nfrom typing import Any, Dict\n\nfrom kedro.framework.cli.project import PARAMS_ARG_HELP\nfrom kedro.framework.cli.utils import ENV_HELP, _split_params\n\nlogger = logging.getLogger(__name__)\ndefault_project_path = Path.cwd()\n\n\ndef _remove_cached_modules(package_name):\n to_remove = [mod for mod in sys.modules if mod.startswith(package_name)]\n # `del` is used instead of `reload()` because: If the new version of a module does not\n # define a name that was defined by the old version, the old definition remains.\n for module in to_remove:\n del sys.modules[module] # pragma: no cover\n\n\ndef _find_kedro_project(current_dir: Path): # pragma: no cover\n from kedro.framework.startup import _is_project\n\n while current_dir != current_dir.parent:\n if _is_project(current_dir):\n return current_dir\n current_dir = current_dir.parent\n\n return None\n\n\ndef reload_kedro(\n path: str = None, env: str = None, extra_params: Dict[str, Any] = None\n):\n \"\"\"Line magic which reloads all Kedro default variables.\n Setting the path will also make it default for subsequent calls.\n \"\"\"\n from IPython import get_ipython\n from IPython.core.magic import needs_local_scope, register_line_magic\n\n from kedro.framework.cli import load_entry_points\n from kedro.framework.project import LOGGING # noqa # pylint:disable=unused-import\n from kedro.framework.project import configure_project, pipelines\n from kedro.framework.session import KedroSession\n from kedro.framework.startup import bootstrap_project\n\n # If a path is provided, set it as default for subsequent calls\n global default_project_path\n if path:\n default_project_path = Path(path).expanduser().resolve()\n logger.info(\"Updated path to Kedro project: %s\", default_project_path)\n else:\n logger.info(\"No path argument was provided. 
Using: %s\", default_project_path)\n\n metadata = bootstrap_project(default_project_path)\n _remove_cached_modules(metadata.package_name)\n configure_project(metadata.package_name)\n\n session = KedroSession.create(\n metadata.package_name, default_project_path, env=env, extra_params=extra_params\n )\n context = session.load_context()\n catalog = context.catalog\n\n get_ipython().push(\n variables={\n \"context\": context,\n \"catalog\": catalog,\n \"session\": session,\n \"pipelines\": pipelines,\n }\n )\n\n logger.info(\"Kedro project %s\", str(metadata.project_name))\n logger.info(\n \"Defined global variable 'context', 'session', 'catalog' and 'pipelines'\"\n )\n\n for line_magic in load_entry_points(\"line_magic\"):\n register_line_magic(needs_local_scope(line_magic))\n logger.info(\"Registered line magic '%s'\", line_magic.__name__) # type: ignore\n\n\ndef load_ipython_extension(ipython):\n \"\"\"\n Main entry point when %load_ext is executed.\n IPython will look for this function specifically.\n See https://ipython.readthedocs.io/en/stable/config/extensions/index.html\n\n This function is called when users do `%load_ext kedro.extras.extensions.ipython`.\n When user use `kedro jupyter notebook` or `jupyter ipython`, this extension is\n loaded automatically.\n \"\"\"\n from IPython.core.magic_arguments import argument, magic_arguments, parse_argstring\n\n @magic_arguments()\n @argument(\n \"path\",\n type=str,\n help=(\n \"Path to the project root directory. If not given, use the previously set\"\n \"project root.\"\n ),\n nargs=\"?\",\n default=None,\n )\n @argument(\"-e\", \"--env\", type=str, default=None, help=ENV_HELP)\n @argument(\n \"--params\",\n type=lambda value: _split_params(None, None, value),\n default=None,\n help=PARAMS_ARG_HELP,\n )\n def magic_reload_kedro(line: str):\n \"\"\"\n The `%reload_kedro` IPython line magic. See\n https://kedro.readthedocs.io/en/stable/tools_integration/ipython.html for more.\n \"\"\"\n args = parse_argstring(magic_reload_kedro, line)\n reload_kedro(args.path, args.env, args.params)\n\n global default_project_path\n\n ipython.register_magic_function(magic_reload_kedro, magic_name=\"reload_kedro\")\n default_project_path = _find_kedro_project(Path.cwd())\n\n if default_project_path is None:\n logger.warning(\n \"Kedro extension was registered but couldn't find a Kedro project. \"\n \"Make sure you run '%reload_kedro <project_root>'.\"\n )\n return\n\n reload_kedro(default_project_path)\n", "path": "kedro/extras/extensions/ipython.py"}]}
num_tokens: 1,378
num_tokens_diff: 569
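The kedro-org__kedro-1789 patch above adds `path`, `-e/--env` and `--params` to the `%reload_kedro` magic via IPython's `magic_arguments`, giving the line magic parity with the importable function. A usage sketch with placeholder project path, environment and parameter values; the `--params` string follows the CLI convention parsed by `_split_params`, and the exact separator may vary by kedro version:

```python
# In an IPython session, the magic now mirrors the function signature:
#
#   %reload_kedro                                   # reuse the previously set project root
#   %reload_kedro /work/my-project -e staging       # pick a config environment
#   %reload_kedro --params example_param:42         # extra params, CLI-style key:value
#
# The same call expressed through the underlying helper (placeholder values):
from kedro.extras.extensions.ipython import reload_kedro

reload_kedro("/work/my-project", env="staging", extra_params={"example_param": 42})
```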
problem_id: gh_patches_debug_19036
source: rasdani/github-patches
task_type: git_diff
in_source_id: lutris__lutris-2998
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- After the Lutris update, no games start. On the previous version 0.5.6, all games were launched. After updating Lutris to version 0.5.7-1, **not a single game starts if "Disable desktop effects" is enabled in preference** Here is what the console writes. **FileNotFoundError: [Errno 2] No such file or directory: 'qdbus'** Archlinux. python-dbus is installed ![image](https://user-images.githubusercontent.com/67700024/86579374-e878ae00-bf85-11ea-9528-60aeeb460fca.png) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `lutris/util/display.py` Content: ``` 1 """Module to deal with various aspects of displays""" 2 # isort:skip_file 3 import enum 4 import os 5 import subprocess 6 7 try: 8 from dbus.exceptions import DBusException 9 DBUS_AVAILABLE = True 10 except ImportError: 11 DBUS_AVAILABLE = False 12 13 from gi.repository import Gdk, GLib, GnomeDesktop 14 15 from lutris.util import system 16 from lutris.util.graphics.displayconfig import MutterDisplayManager 17 from lutris.util.graphics.xrandr import LegacyDisplayManager, change_resolution, get_outputs 18 from lutris.util.log import logger 19 20 21 class NoScreenDetected(Exception): 22 23 """Raise this when unable to detect screens""" 24 25 26 def restore_gamma(): 27 """Restores gamma to a normal level.""" 28 xgamma_path = system.find_executable("xgamma") 29 try: 30 subprocess.Popen([xgamma_path, "-gamma", "1.0"]) 31 except (FileNotFoundError, TypeError): 32 logger.warning("xgamma is not available on your system") 33 except PermissionError: 34 logger.warning("you do not have permission to call xgamma") 35 36 37 def _get_graphics_adapters(): 38 """Return the list of graphics cards available on a system 39 40 Returns: 41 list: list of tuples containing PCI ID and description of the display controller 42 """ 43 lspci_path = system.find_executable("lspci") 44 dev_subclasses = ["VGA", "XGA", "3D controller", "Display controller"] 45 if not lspci_path: 46 logger.warning("lspci is not available. 
List of graphics cards not available") 47 return [] 48 return [ 49 (pci_id, device_desc.split(": ")[1]) for pci_id, device_desc in [ 50 line.split(maxsplit=1) for line in system.execute(lspci_path, timeout=3).split("\n") 51 if any(subclass in line for subclass in dev_subclasses) 52 ] 53 ] 54 55 56 class DisplayManager: 57 58 """Get display and resolution using GnomeDesktop""" 59 60 def __init__(self): 61 screen = Gdk.Screen.get_default() 62 if not screen: 63 raise NoScreenDetected 64 self.rr_screen = GnomeDesktop.RRScreen.new(screen) 65 self.rr_config = GnomeDesktop.RRConfig.new_current(self.rr_screen) 66 self.rr_config.load_current() 67 68 def get_display_names(self): 69 """Return names of connected displays""" 70 return [output_info.get_display_name() for output_info in self.rr_config.get_outputs()] 71 72 def get_resolutions(self): 73 """Return available resolutions""" 74 resolutions = ["%sx%s" % (mode.get_width(), mode.get_height()) for mode in self.rr_screen.list_modes()] 75 return sorted(set(resolutions), key=lambda x: int(x.split("x")[0]), reverse=True) 76 77 def _get_primary_output(self): 78 """Return the RROutput used as a primary display""" 79 for output in self.rr_screen.list_outputs(): 80 if output.get_is_primary(): 81 return output 82 return 83 84 def get_current_resolution(self): 85 """Return the current resolution for the primary display""" 86 output = self._get_primary_output() 87 if not output: 88 logger.error("Failed to get a default output") 89 return "", "" 90 current_mode = output.get_current_mode() 91 return str(current_mode.get_width()), str(current_mode.get_height()) 92 93 @staticmethod 94 def set_resolution(resolution): 95 """Set the resolution of one or more displays. 96 The resolution can either be a string, which will be applied to the 97 primary display or a list of configurations as returned by `get_config`. 98 This method uses XrandR and will not work on Wayland. 99 """ 100 return change_resolution(resolution) 101 102 @staticmethod 103 def get_config(): 104 """Return the current display resolution 105 This method uses XrandR and will not work on wayland 106 The output can be fed in `set_resolution` 107 """ 108 return get_outputs() 109 110 111 def get_display_manager(): 112 """Return the appropriate display manager instance. 113 Defaults to Mutter if available. This is the only one to support Wayland. 114 """ 115 if DBUS_AVAILABLE: 116 try: 117 return MutterDisplayManager() 118 except DBusException as ex: 119 logger.debug("Mutter DBus service not reachable: %s", ex) 120 except Exception as ex: # pylint: disable=broad-except 121 logger.exception("Failed to instanciate MutterDisplayConfig. Please report with exception: %s", ex) 122 else: 123 logger.error("DBus is not available, lutris was not properly installed.") 124 try: 125 return DisplayManager() 126 except (GLib.Error, NoScreenDetected): 127 return LegacyDisplayManager() 128 129 130 DISPLAY_MANAGER = get_display_manager() 131 USE_DRI_PRIME = len(_get_graphics_adapters()) > 1 132 133 134 class DesktopEnvironment(enum.Enum): 135 136 """Enum of desktop environments.""" 137 138 PLASMA = 0 139 MATE = 1 140 XFCE = 2 141 DEEPIN = 3 142 UNKNOWN = 999 143 144 145 def get_desktop_environment(): 146 """Converts the value of the DESKTOP_SESSION environment variable 147 to one of the constants in the DesktopEnvironment class. 148 Returns None if DESKTOP_SESSION is empty or unset. 
149 """ 150 desktop_session = os.environ.get("DESKTOP_SESSION", "").lower() 151 if not desktop_session: 152 return None 153 if desktop_session.endswith("plasma"): 154 return DesktopEnvironment.PLASMA 155 if desktop_session.endswith("mate"): 156 return DesktopEnvironment.MATE 157 if desktop_session.endswith("xfce"): 158 return DesktopEnvironment.XFCE 159 if desktop_session.endswith("deepin"): 160 return DesktopEnvironment.DEEPIN 161 return DesktopEnvironment.UNKNOWN 162 163 164 def _get_command_output(*command): 165 return subprocess.Popen(command, stdin=subprocess.DEVNULL, stdout=subprocess.PIPE, close_fds=True).communicate()[0] 166 167 168 def is_compositing_enabled(): 169 """Checks whether compositing is currently disabled or enabled. 170 Returns True for enabled, False for disabled, and None if unknown. 171 """ 172 desktop_environment = get_desktop_environment() 173 if desktop_environment is DesktopEnvironment.PLASMA: 174 return _get_command_output( 175 "qdbus", "org.kde.KWin", "/Compositor", "org.kde.kwin.Compositing.active" 176 ) == b"true\n" 177 if desktop_environment is DesktopEnvironment.MATE: 178 return _get_command_output("gsettings", "get org.mate.Marco.general", "compositing-manager") == b"true\n" 179 if desktop_environment is DesktopEnvironment.XFCE: 180 return _get_command_output( 181 "xfconf-query", "--channel=xfwm4", "--property=/general/use_compositing" 182 ) == b"true\n" 183 if desktop_environment is DesktopEnvironment.DEEPIN: 184 return _get_command_output( 185 "dbus-send", "--session", "--dest=com.deepin.WMSwitcher", "--type=method_call", 186 "--print-reply=literal", "/com/deepin/WMSwitcher", "com.deepin.WMSwitcher.CurrentWM" 187 ) == b"deepin wm\n" 188 return None 189 190 191 # One element is appended to this for every invocation of disable_compositing: 192 # True if compositing has been disabled, False if not. enable_compositing 193 # removes the last element, and only re-enables compositing if that element 194 # was True. 195 _COMPOSITING_DISABLED_STACK = [] 196 197 198 def _get_compositor_commands(): 199 """Returns the commands to enable/disable compositing on the current 200 desktop environment as a 2-tuple. 
201 """ 202 start_compositor = None 203 stop_compositor = None 204 desktop_environment = get_desktop_environment() 205 if desktop_environment is DesktopEnvironment.PLASMA: 206 stop_compositor = ("qdbus", "org.kde.KWin", "/Compositor", "org.kde.kwin.Compositing.suspend") 207 start_compositor = ("qdbus", "org.kde.KWin", "/Compositor", "org.kde.kwin.Compositing.resume") 208 elif desktop_environment is DesktopEnvironment.MATE: 209 stop_compositor = ("gsettings", "set org.mate.Marco.general", "compositing-manager", "false") 210 start_compositor = ("gsettings", "set org.mate.Marco.general", "compositing-manager", "true") 211 elif desktop_environment is DesktopEnvironment.XFCE: 212 stop_compositor = ("xfconf-query", "--channel=xfwm4", "--property=/general/use_compositing", "--set=false") 213 start_compositor = ("xfconf-query", "--channel=xfwm4", "--property=/general/use_compositing", "--set=true") 214 elif desktop_environment is DesktopEnvironment.DEEPIN: 215 start_compositor = ( 216 "dbus-send", "--session", "--dest=com.deepin.WMSwitcher", "--type=method_call", 217 "/com/deepin/WMSwitcher", "com.deepin.WMSwitcher.RequestSwitchWM", 218 ) 219 stop_compositor = start_compositor 220 return start_compositor, stop_compositor 221 222 223 def _run_command(*command): 224 return subprocess.Popen(command, stdin=subprocess.DEVNULL, close_fds=True) 225 226 227 def disable_compositing(): 228 """Disable compositing if not already disabled.""" 229 compositing_enabled = is_compositing_enabled() 230 if compositing_enabled is None: 231 compositing_enabled = True 232 if any(_COMPOSITING_DISABLED_STACK): 233 compositing_enabled = False 234 _COMPOSITING_DISABLED_STACK.append(compositing_enabled) 235 if not compositing_enabled: 236 return 237 _, stop_compositor = _get_compositor_commands() 238 if stop_compositor: 239 _run_command(*stop_compositor) 240 241 242 def enable_compositing(): 243 """Re-enable compositing if the corresponding call to disable_compositing 244 disabled it.""" 245 compositing_disabled = _COMPOSITING_DISABLED_STACK.pop() 246 if not compositing_disabled: 247 return 248 start_compositor, _ = _get_compositor_commands() 249 if start_compositor: 250 _run_command(*start_compositor) 251 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/lutris/util/display.py b/lutris/util/display.py --- a/lutris/util/display.py +++ b/lutris/util/display.py @@ -162,7 +162,16 @@ def _get_command_output(*command): - return subprocess.Popen(command, stdin=subprocess.DEVNULL, stdout=subprocess.PIPE, close_fds=True).communicate()[0] + """Some rogue function that gives no shit about residing in the correct module""" + try: + return subprocess.Popen( + command, + stdin=subprocess.DEVNULL, + stdout=subprocess.PIPE, + close_fds=True + ).communicate()[0] + except FileNotFoundError: + logger.error("Unable to run command, %s not found", command[0]) def is_compositing_enabled(): @@ -221,7 +230,13 @@ def _run_command(*command): - return subprocess.Popen(command, stdin=subprocess.DEVNULL, close_fds=True) + """Random _run_command lost in the middle of the project, + are you lost little _run_command? + """ + try: + return subprocess.Popen(command, stdin=subprocess.DEVNULL, close_fds=True) + except FileNotFoundError: + logger.error("Oh no") def disable_compositing():
{"golden_diff": "diff --git a/lutris/util/display.py b/lutris/util/display.py\n--- a/lutris/util/display.py\n+++ b/lutris/util/display.py\n@@ -162,7 +162,16 @@\n \n \n def _get_command_output(*command):\n- return subprocess.Popen(command, stdin=subprocess.DEVNULL, stdout=subprocess.PIPE, close_fds=True).communicate()[0]\n+ \"\"\"Some rogue function that gives no shit about residing in the correct module\"\"\"\n+ try:\n+ return subprocess.Popen(\n+ command,\n+ stdin=subprocess.DEVNULL,\n+ stdout=subprocess.PIPE,\n+ close_fds=True\n+ ).communicate()[0]\n+ except FileNotFoundError:\n+ logger.error(\"Unable to run command, %s not found\", command[0])\n \n \n def is_compositing_enabled():\n@@ -221,7 +230,13 @@\n \n \n def _run_command(*command):\n- return subprocess.Popen(command, stdin=subprocess.DEVNULL, close_fds=True)\n+ \"\"\"Random _run_command lost in the middle of the project,\n+ are you lost little _run_command?\n+ \"\"\"\n+ try:\n+ return subprocess.Popen(command, stdin=subprocess.DEVNULL, close_fds=True)\n+ except FileNotFoundError:\n+ logger.error(\"Oh no\")\n \n \n def disable_compositing():\n", "issue": "After the Lutris update, no games start.\nOn the previous version 0.5.6, all games were launched.\r\nAfter updating Lutris to version 0.5.7-1, **not a single game starts if \"Disable desktop effects\" is enabled in preference**\r\nHere is what the console writes. **FileNotFoundError: [Errno 2] No such file or directory: 'qdbus'**\r\nArchlinux.\r\npython-dbus is installed\r\n![image](https://user-images.githubusercontent.com/67700024/86579374-e878ae00-bf85-11ea-9528-60aeeb460fca.png)\r\n\n", "before_files": [{"content": "\"\"\"Module to deal with various aspects of displays\"\"\"\n# isort:skip_file\nimport enum\nimport os\nimport subprocess\n\ntry:\n from dbus.exceptions import DBusException\n DBUS_AVAILABLE = True\nexcept ImportError:\n DBUS_AVAILABLE = False\n\nfrom gi.repository import Gdk, GLib, GnomeDesktop\n\nfrom lutris.util import system\nfrom lutris.util.graphics.displayconfig import MutterDisplayManager\nfrom lutris.util.graphics.xrandr import LegacyDisplayManager, change_resolution, get_outputs\nfrom lutris.util.log import logger\n\n\nclass NoScreenDetected(Exception):\n\n \"\"\"Raise this when unable to detect screens\"\"\"\n\n\ndef restore_gamma():\n \"\"\"Restores gamma to a normal level.\"\"\"\n xgamma_path = system.find_executable(\"xgamma\")\n try:\n subprocess.Popen([xgamma_path, \"-gamma\", \"1.0\"])\n except (FileNotFoundError, TypeError):\n logger.warning(\"xgamma is not available on your system\")\n except PermissionError:\n logger.warning(\"you do not have permission to call xgamma\")\n\n\ndef _get_graphics_adapters():\n \"\"\"Return the list of graphics cards available on a system\n\n Returns:\n list: list of tuples containing PCI ID and description of the display controller\n \"\"\"\n lspci_path = system.find_executable(\"lspci\")\n dev_subclasses = [\"VGA\", \"XGA\", \"3D controller\", \"Display controller\"]\n if not lspci_path:\n logger.warning(\"lspci is not available. 
List of graphics cards not available\")\n return []\n return [\n (pci_id, device_desc.split(\": \")[1]) for pci_id, device_desc in [\n line.split(maxsplit=1) for line in system.execute(lspci_path, timeout=3).split(\"\\n\")\n if any(subclass in line for subclass in dev_subclasses)\n ]\n ]\n\n\nclass DisplayManager:\n\n \"\"\"Get display and resolution using GnomeDesktop\"\"\"\n\n def __init__(self):\n screen = Gdk.Screen.get_default()\n if not screen:\n raise NoScreenDetected\n self.rr_screen = GnomeDesktop.RRScreen.new(screen)\n self.rr_config = GnomeDesktop.RRConfig.new_current(self.rr_screen)\n self.rr_config.load_current()\n\n def get_display_names(self):\n \"\"\"Return names of connected displays\"\"\"\n return [output_info.get_display_name() for output_info in self.rr_config.get_outputs()]\n\n def get_resolutions(self):\n \"\"\"Return available resolutions\"\"\"\n resolutions = [\"%sx%s\" % (mode.get_width(), mode.get_height()) for mode in self.rr_screen.list_modes()]\n return sorted(set(resolutions), key=lambda x: int(x.split(\"x\")[0]), reverse=True)\n\n def _get_primary_output(self):\n \"\"\"Return the RROutput used as a primary display\"\"\"\n for output in self.rr_screen.list_outputs():\n if output.get_is_primary():\n return output\n return\n\n def get_current_resolution(self):\n \"\"\"Return the current resolution for the primary display\"\"\"\n output = self._get_primary_output()\n if not output:\n logger.error(\"Failed to get a default output\")\n return \"\", \"\"\n current_mode = output.get_current_mode()\n return str(current_mode.get_width()), str(current_mode.get_height())\n\n @staticmethod\n def set_resolution(resolution):\n \"\"\"Set the resolution of one or more displays.\n The resolution can either be a string, which will be applied to the\n primary display or a list of configurations as returned by `get_config`.\n This method uses XrandR and will not work on Wayland.\n \"\"\"\n return change_resolution(resolution)\n\n @staticmethod\n def get_config():\n \"\"\"Return the current display resolution\n This method uses XrandR and will not work on wayland\n The output can be fed in `set_resolution`\n \"\"\"\n return get_outputs()\n\n\ndef get_display_manager():\n \"\"\"Return the appropriate display manager instance.\n Defaults to Mutter if available. This is the only one to support Wayland.\n \"\"\"\n if DBUS_AVAILABLE:\n try:\n return MutterDisplayManager()\n except DBusException as ex:\n logger.debug(\"Mutter DBus service not reachable: %s\", ex)\n except Exception as ex: # pylint: disable=broad-except\n logger.exception(\"Failed to instanciate MutterDisplayConfig. 
Please report with exception: %s\", ex)\n else:\n logger.error(\"DBus is not available, lutris was not properly installed.\")\n try:\n return DisplayManager()\n except (GLib.Error, NoScreenDetected):\n return LegacyDisplayManager()\n\n\nDISPLAY_MANAGER = get_display_manager()\nUSE_DRI_PRIME = len(_get_graphics_adapters()) > 1\n\n\nclass DesktopEnvironment(enum.Enum):\n\n \"\"\"Enum of desktop environments.\"\"\"\n\n PLASMA = 0\n MATE = 1\n XFCE = 2\n DEEPIN = 3\n UNKNOWN = 999\n\n\ndef get_desktop_environment():\n \"\"\"Converts the value of the DESKTOP_SESSION environment variable\n to one of the constants in the DesktopEnvironment class.\n Returns None if DESKTOP_SESSION is empty or unset.\n \"\"\"\n desktop_session = os.environ.get(\"DESKTOP_SESSION\", \"\").lower()\n if not desktop_session:\n return None\n if desktop_session.endswith(\"plasma\"):\n return DesktopEnvironment.PLASMA\n if desktop_session.endswith(\"mate\"):\n return DesktopEnvironment.MATE\n if desktop_session.endswith(\"xfce\"):\n return DesktopEnvironment.XFCE\n if desktop_session.endswith(\"deepin\"):\n return DesktopEnvironment.DEEPIN\n return DesktopEnvironment.UNKNOWN\n\n\ndef _get_command_output(*command):\n return subprocess.Popen(command, stdin=subprocess.DEVNULL, stdout=subprocess.PIPE, close_fds=True).communicate()[0]\n\n\ndef is_compositing_enabled():\n \"\"\"Checks whether compositing is currently disabled or enabled.\n Returns True for enabled, False for disabled, and None if unknown.\n \"\"\"\n desktop_environment = get_desktop_environment()\n if desktop_environment is DesktopEnvironment.PLASMA:\n return _get_command_output(\n \"qdbus\", \"org.kde.KWin\", \"/Compositor\", \"org.kde.kwin.Compositing.active\"\n ) == b\"true\\n\"\n if desktop_environment is DesktopEnvironment.MATE:\n return _get_command_output(\"gsettings\", \"get org.mate.Marco.general\", \"compositing-manager\") == b\"true\\n\"\n if desktop_environment is DesktopEnvironment.XFCE:\n return _get_command_output(\n \"xfconf-query\", \"--channel=xfwm4\", \"--property=/general/use_compositing\"\n ) == b\"true\\n\"\n if desktop_environment is DesktopEnvironment.DEEPIN:\n return _get_command_output(\n \"dbus-send\", \"--session\", \"--dest=com.deepin.WMSwitcher\", \"--type=method_call\",\n \"--print-reply=literal\", \"/com/deepin/WMSwitcher\", \"com.deepin.WMSwitcher.CurrentWM\"\n ) == b\"deepin wm\\n\"\n return None\n\n\n# One element is appended to this for every invocation of disable_compositing:\n# True if compositing has been disabled, False if not. 
enable_compositing\n# removes the last element, and only re-enables compositing if that element\n# was True.\n_COMPOSITING_DISABLED_STACK = []\n\n\ndef _get_compositor_commands():\n \"\"\"Returns the commands to enable/disable compositing on the current\n desktop environment as a 2-tuple.\n \"\"\"\n start_compositor = None\n stop_compositor = None\n desktop_environment = get_desktop_environment()\n if desktop_environment is DesktopEnvironment.PLASMA:\n stop_compositor = (\"qdbus\", \"org.kde.KWin\", \"/Compositor\", \"org.kde.kwin.Compositing.suspend\")\n start_compositor = (\"qdbus\", \"org.kde.KWin\", \"/Compositor\", \"org.kde.kwin.Compositing.resume\")\n elif desktop_environment is DesktopEnvironment.MATE:\n stop_compositor = (\"gsettings\", \"set org.mate.Marco.general\", \"compositing-manager\", \"false\")\n start_compositor = (\"gsettings\", \"set org.mate.Marco.general\", \"compositing-manager\", \"true\")\n elif desktop_environment is DesktopEnvironment.XFCE:\n stop_compositor = (\"xfconf-query\", \"--channel=xfwm4\", \"--property=/general/use_compositing\", \"--set=false\")\n start_compositor = (\"xfconf-query\", \"--channel=xfwm4\", \"--property=/general/use_compositing\", \"--set=true\")\n elif desktop_environment is DesktopEnvironment.DEEPIN:\n start_compositor = (\n \"dbus-send\", \"--session\", \"--dest=com.deepin.WMSwitcher\", \"--type=method_call\",\n \"/com/deepin/WMSwitcher\", \"com.deepin.WMSwitcher.RequestSwitchWM\",\n )\n stop_compositor = start_compositor\n return start_compositor, stop_compositor\n\n\ndef _run_command(*command):\n return subprocess.Popen(command, stdin=subprocess.DEVNULL, close_fds=True)\n\n\ndef disable_compositing():\n \"\"\"Disable compositing if not already disabled.\"\"\"\n compositing_enabled = is_compositing_enabled()\n if compositing_enabled is None:\n compositing_enabled = True\n if any(_COMPOSITING_DISABLED_STACK):\n compositing_enabled = False\n _COMPOSITING_DISABLED_STACK.append(compositing_enabled)\n if not compositing_enabled:\n return\n _, stop_compositor = _get_compositor_commands()\n if stop_compositor:\n _run_command(*stop_compositor)\n\n\ndef enable_compositing():\n \"\"\"Re-enable compositing if the corresponding call to disable_compositing\n disabled it.\"\"\"\n compositing_disabled = _COMPOSITING_DISABLED_STACK.pop()\n if not compositing_disabled:\n return\n start_compositor, _ = _get_compositor_commands()\n if start_compositor:\n _run_command(*start_compositor)\n", "path": "lutris/util/display.py"}], "after_files": [{"content": "\"\"\"Module to deal with various aspects of displays\"\"\"\n# isort:skip_file\nimport enum\nimport os\nimport subprocess\n\ntry:\n from dbus.exceptions import DBusException\n DBUS_AVAILABLE = True\nexcept ImportError:\n DBUS_AVAILABLE = False\n\nfrom gi.repository import Gdk, GLib, GnomeDesktop\n\nfrom lutris.util import system\nfrom lutris.util.graphics.displayconfig import MutterDisplayManager\nfrom lutris.util.graphics.xrandr import LegacyDisplayManager, change_resolution, get_outputs\nfrom lutris.util.log import logger\n\n\nclass NoScreenDetected(Exception):\n\n \"\"\"Raise this when unable to detect screens\"\"\"\n\n\ndef restore_gamma():\n \"\"\"Restores gamma to a normal level.\"\"\"\n xgamma_path = system.find_executable(\"xgamma\")\n try:\n subprocess.Popen([xgamma_path, \"-gamma\", \"1.0\"])\n except (FileNotFoundError, TypeError):\n logger.warning(\"xgamma is not available on your system\")\n except PermissionError:\n logger.warning(\"you do not have permission to call xgamma\")\n\n\ndef 
_get_graphics_adapters():\n \"\"\"Return the list of graphics cards available on a system\n\n Returns:\n list: list of tuples containing PCI ID and description of the display controller\n \"\"\"\n lspci_path = system.find_executable(\"lspci\")\n dev_subclasses = [\"VGA\", \"XGA\", \"3D controller\", \"Display controller\"]\n if not lspci_path:\n logger.warning(\"lspci is not available. List of graphics cards not available\")\n return []\n return [\n (pci_id, device_desc.split(\": \")[1]) for pci_id, device_desc in [\n line.split(maxsplit=1) for line in system.execute(lspci_path, timeout=3).split(\"\\n\")\n if any(subclass in line for subclass in dev_subclasses)\n ]\n ]\n\n\nclass DisplayManager:\n\n \"\"\"Get display and resolution using GnomeDesktop\"\"\"\n\n def __init__(self):\n screen = Gdk.Screen.get_default()\n if not screen:\n raise NoScreenDetected\n self.rr_screen = GnomeDesktop.RRScreen.new(screen)\n self.rr_config = GnomeDesktop.RRConfig.new_current(self.rr_screen)\n self.rr_config.load_current()\n\n def get_display_names(self):\n \"\"\"Return names of connected displays\"\"\"\n return [output_info.get_display_name() for output_info in self.rr_config.get_outputs()]\n\n def get_resolutions(self):\n \"\"\"Return available resolutions\"\"\"\n resolutions = [\"%sx%s\" % (mode.get_width(), mode.get_height()) for mode in self.rr_screen.list_modes()]\n return sorted(set(resolutions), key=lambda x: int(x.split(\"x\")[0]), reverse=True)\n\n def _get_primary_output(self):\n \"\"\"Return the RROutput used as a primary display\"\"\"\n for output in self.rr_screen.list_outputs():\n if output.get_is_primary():\n return output\n return\n\n def get_current_resolution(self):\n \"\"\"Return the current resolution for the primary display\"\"\"\n output = self._get_primary_output()\n if not output:\n logger.error(\"Failed to get a default output\")\n return \"\", \"\"\n current_mode = output.get_current_mode()\n return str(current_mode.get_width()), str(current_mode.get_height())\n\n @staticmethod\n def set_resolution(resolution):\n \"\"\"Set the resolution of one or more displays.\n The resolution can either be a string, which will be applied to the\n primary display or a list of configurations as returned by `get_config`.\n This method uses XrandR and will not work on Wayland.\n \"\"\"\n return change_resolution(resolution)\n\n @staticmethod\n def get_config():\n \"\"\"Return the current display resolution\n This method uses XrandR and will not work on wayland\n The output can be fed in `set_resolution`\n \"\"\"\n return get_outputs()\n\n\ndef get_display_manager():\n \"\"\"Return the appropriate display manager instance.\n Defaults to Mutter if available. This is the only one to support Wayland.\n \"\"\"\n if DBUS_AVAILABLE:\n try:\n return MutterDisplayManager()\n except DBusException as ex:\n logger.debug(\"Mutter DBus service not reachable: %s\", ex)\n except Exception as ex: # pylint: disable=broad-except\n logger.exception(\"Failed to instanciate MutterDisplayConfig. 
Please report with exception: %s\", ex)\n else:\n logger.error(\"DBus is not available, lutris was not properly installed.\")\n try:\n return DisplayManager()\n except (GLib.Error, NoScreenDetected):\n return LegacyDisplayManager()\n\n\nDISPLAY_MANAGER = get_display_manager()\nUSE_DRI_PRIME = len(_get_graphics_adapters()) > 1\n\n\nclass DesktopEnvironment(enum.Enum):\n\n \"\"\"Enum of desktop environments.\"\"\"\n\n PLASMA = 0\n MATE = 1\n XFCE = 2\n DEEPIN = 3\n UNKNOWN = 999\n\n\ndef get_desktop_environment():\n \"\"\"Converts the value of the DESKTOP_SESSION environment variable\n to one of the constants in the DesktopEnvironment class.\n Returns None if DESKTOP_SESSION is empty or unset.\n \"\"\"\n desktop_session = os.environ.get(\"DESKTOP_SESSION\", \"\").lower()\n if not desktop_session:\n return None\n if desktop_session.endswith(\"plasma\"):\n return DesktopEnvironment.PLASMA\n if desktop_session.endswith(\"mate\"):\n return DesktopEnvironment.MATE\n if desktop_session.endswith(\"xfce\"):\n return DesktopEnvironment.XFCE\n if desktop_session.endswith(\"deepin\"):\n return DesktopEnvironment.DEEPIN\n return DesktopEnvironment.UNKNOWN\n\n\ndef _get_command_output(*command):\n \"\"\"Some rogue function that gives no shit about residing in the correct module\"\"\"\n try:\n return subprocess.Popen(\n command,\n stdin=subprocess.DEVNULL,\n stdout=subprocess.PIPE,\n close_fds=True\n ).communicate()[0]\n except FileNotFoundError:\n logger.error(\"Unable to run command, %s not found\", command[0])\n\n\ndef is_compositing_enabled():\n \"\"\"Checks whether compositing is currently disabled or enabled.\n Returns True for enabled, False for disabled, and None if unknown.\n \"\"\"\n desktop_environment = get_desktop_environment()\n if desktop_environment is DesktopEnvironment.PLASMA:\n return _get_command_output(\n \"qdbus\", \"org.kde.KWin\", \"/Compositor\", \"org.kde.kwin.Compositing.active\"\n ) == b\"true\\n\"\n if desktop_environment is DesktopEnvironment.MATE:\n return _get_command_output(\"gsettings\", \"get org.mate.Marco.general\", \"compositing-manager\") == b\"true\\n\"\n if desktop_environment is DesktopEnvironment.XFCE:\n return _get_command_output(\n \"xfconf-query\", \"--channel=xfwm4\", \"--property=/general/use_compositing\"\n ) == b\"true\\n\"\n if desktop_environment is DesktopEnvironment.DEEPIN:\n return _get_command_output(\n \"dbus-send\", \"--session\", \"--dest=com.deepin.WMSwitcher\", \"--type=method_call\",\n \"--print-reply=literal\", \"/com/deepin/WMSwitcher\", \"com.deepin.WMSwitcher.CurrentWM\"\n ) == b\"deepin wm\\n\"\n return None\n\n\n# One element is appended to this for every invocation of disable_compositing:\n# True if compositing has been disabled, False if not. 
enable_compositing\n# removes the last element, and only re-enables compositing if that element\n# was True.\n_COMPOSITING_DISABLED_STACK = []\n\n\ndef _get_compositor_commands():\n \"\"\"Returns the commands to enable/disable compositing on the current\n desktop environment as a 2-tuple.\n \"\"\"\n start_compositor = None\n stop_compositor = None\n desktop_environment = get_desktop_environment()\n if desktop_environment is DesktopEnvironment.PLASMA:\n stop_compositor = (\"qdbus\", \"org.kde.KWin\", \"/Compositor\", \"org.kde.kwin.Compositing.suspend\")\n start_compositor = (\"qdbus\", \"org.kde.KWin\", \"/Compositor\", \"org.kde.kwin.Compositing.resume\")\n elif desktop_environment is DesktopEnvironment.MATE:\n stop_compositor = (\"gsettings\", \"set org.mate.Marco.general\", \"compositing-manager\", \"false\")\n start_compositor = (\"gsettings\", \"set org.mate.Marco.general\", \"compositing-manager\", \"true\")\n elif desktop_environment is DesktopEnvironment.XFCE:\n stop_compositor = (\"xfconf-query\", \"--channel=xfwm4\", \"--property=/general/use_compositing\", \"--set=false\")\n start_compositor = (\"xfconf-query\", \"--channel=xfwm4\", \"--property=/general/use_compositing\", \"--set=true\")\n elif desktop_environment is DesktopEnvironment.DEEPIN:\n start_compositor = (\n \"dbus-send\", \"--session\", \"--dest=com.deepin.WMSwitcher\", \"--type=method_call\",\n \"/com/deepin/WMSwitcher\", \"com.deepin.WMSwitcher.RequestSwitchWM\",\n )\n stop_compositor = start_compositor\n return start_compositor, stop_compositor\n\n\ndef _run_command(*command):\n \"\"\"Random _run_command lost in the middle of the project,\n are you lost little _run_command?\n \"\"\"\n try:\n return subprocess.Popen(command, stdin=subprocess.DEVNULL, close_fds=True)\n except FileNotFoundError:\n logger.error(\"Oh no\")\n\n\ndef disable_compositing():\n \"\"\"Disable compositing if not already disabled.\"\"\"\n compositing_enabled = is_compositing_enabled()\n if compositing_enabled is None:\n compositing_enabled = True\n if any(_COMPOSITING_DISABLED_STACK):\n compositing_enabled = False\n _COMPOSITING_DISABLED_STACK.append(compositing_enabled)\n if not compositing_enabled:\n return\n _, stop_compositor = _get_compositor_commands()\n if stop_compositor:\n _run_command(*stop_compositor)\n\n\ndef enable_compositing():\n \"\"\"Re-enable compositing if the corresponding call to disable_compositing\n disabled it.\"\"\"\n compositing_disabled = _COMPOSITING_DISABLED_STACK.pop()\n if not compositing_disabled:\n return\n start_compositor, _ = _get_compositor_commands()\n if start_compositor:\n _run_command(*start_compositor)\n", "path": "lutris/util/display.py"}]}
3,242
286
gh_patches_debug_19054
rasdani/github-patches
git_diff
cupy__cupy-6948
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- python primitive scalar float fails as jit.rawkernel argument ### Description Passing python primitive float as a jit.rawkernel argument seems not to work. Numpy.float32 works, python float does not. ### To Reproduce ```py @jit.rawkernel() def scalar_multiply(a, m, size): tid = jit.blockIdx.x * jit.blockDim.x + jit.threadIdx.x ntid = jit.gridDim.x * jit.blockDim.x for i in range(tid, size, ntid): a[i] = a[i] * m size = 5 a = cp.random.random(size, dtype=np.float32) print(a) scalar_multiply((128,),(1024,),(a, np.float32(5.0), size)) print(a) scalar_multiply((128,),(1024,),(a, 5.0, size)) print(a) ``` output: [0.17959814 0.42873758 0.77541053 0.8213136 0.8284943 ] [0.8979907 2.143688 3.8770528 4.1065683 4.1424713] [0. 0. 0. 0. 0.] ### Installation Wheel ### Environment ``` OS : Linux-5.15.0-41-generic-x86_64-with-glibc2.29 Python Version : 3.8.10 CuPy Version : 11.0.0 CuPy Platform : NVIDIA CUDA NumPy Version : 1.22.0 SciPy Version : 1.4.1 Cython Build Version : 0.29.24 Cython Runtime Version : 0.29.28 CUDA Root : /usr/local/cuda nvcc PATH : /usr/local/cuda/bin/nvcc CUDA Build Version : 11070 CUDA Driver Version : 11070 CUDA Runtime Version : 11070 cuBLAS Version : (available) cuFFT Version : 10702 cuRAND Version : 10210 cuSOLVER Version : (11, 3, 5) cuSPARSE Version : (available) NVRTC Version : (11, 7) Thrust Version : 101500 CUB Build Version : 101500 Jitify Build Version : 4a37de0 cuDNN Build Version : 8400 cuDNN Version : 8401 NCCL Build Version : None NCCL Runtime Version : None cuTENSOR Version : None cuSPARSELt Build Version : None Device 0 Name : NVIDIA GeForce GTX 950 Device 0 Compute Capability : 52 Device 0 PCI Bus ID : 0000:01:00.0 ``` ### Additional Information Thanks! --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `cupyx/jit/_interface.py` Content: ``` 1 import functools 2 import warnings 3 4 import numpy 5 6 from cupy_backends.cuda.api import runtime 7 import cupy 8 from cupy._core import core 9 from cupyx.jit import _compile 10 from cupyx.jit import _cuda_typerules 11 from cupyx.jit import _cuda_types 12 from cupyx.jit import _internal_types 13 14 15 class _CudaFunction: 16 """JIT cupy function object 17 """ 18 19 def __init__(self, func, mode, device=False, inline=False): 20 self.attributes = [] 21 22 if device: 23 self.attributes.append('__device__') 24 else: 25 self.attributes.append('__global__') 26 27 if inline: 28 self.attributes.append('inline') 29 30 self.name = getattr(func, 'name', func.__name__) 31 self.func = func 32 self.mode = mode 33 34 def __call__(self, *args, **kwargs): 35 raise NotImplementedError 36 37 def _emit_code_from_types(self, in_types, ret_type=None): 38 return _compile.transpile( 39 self.func, self.attributes, self.mode, in_types, ret_type) 40 41 42 class _JitRawKernel: 43 """JIT CUDA kernel object. 44 45 The decorator :func:``cupyx.jit.rawkernel`` converts the target function 46 to an object of this class. This class is not inteded to be instantiated 47 by users. 48 """ 49 50 def __init__(self, func, mode, device): 51 self._func = func 52 self._mode = mode 53 self._device = device 54 self._cache = {} 55 self._cached_codes = {} 56 57 def __call__( 58 self, grid, block, args, shared_mem=0, stream=None): 59 """Calls the CUDA kernel. 60 61 The compilation will be deferred until the first function call. 
62 CuPy's JIT compiler infers the types of arguments at the call 63 time, and will cache the compiled kernels for speeding up any 64 subsequent calls. 65 66 Args: 67 grid (tuple of int): Size of grid in blocks. 68 block (tuple of int): Dimensions of each thread block. 69 args (tuple): 70 Arguments of the kernel. The type of all elements must be 71 ``bool``, ``int``, ``float``, ``complex``, NumPy scalar or 72 ``cupy.ndarray``. 73 shared_mem (int): 74 Dynamic shared-memory size per thread block in bytes. 75 stream (cupy.cuda.Stream): CUDA stream. 76 77 .. seealso:: :ref:`jit_kernel_definition` 78 """ 79 in_types = [] 80 for x in args: 81 if isinstance(x, cupy.ndarray): 82 t = _cuda_types.CArray.from_ndarray(x) 83 elif numpy.isscalar(x): 84 t = _cuda_typerules.get_ctype_from_scalar(self._mode, x) 85 else: 86 raise TypeError(f'{type(x)} is not supported for RawKernel') 87 in_types.append(t) 88 in_types = tuple(in_types) 89 device_id = cupy.cuda.get_device_id() 90 91 kern, enable_cg = self._cache.get((in_types, device_id), (None, None)) 92 if kern is None: 93 result = self._cached_codes.get(in_types) 94 if result is None: 95 result = _compile.transpile( 96 self._func, 97 ['extern "C"', '__global__'], 98 self._mode, 99 in_types, 100 _cuda_types.void, 101 ) 102 self._cached_codes[in_types] = result 103 104 fname = result.func_name 105 enable_cg = result.enable_cooperative_groups 106 # workaround for hipRTC: as of ROCm 4.1.0 hipRTC still does not 107 # recognize "-D", so we have to compile using hipcc... 108 backend = 'nvcc' if runtime.is_hip else 'nvrtc' 109 module = core.compile_with_cache( 110 source=result.code, 111 options=('-DCUPY_JIT_MODE', '--std=c++14'), 112 backend=backend) 113 kern = module.get_function(fname) 114 self._cache[(in_types, device_id)] = (kern, enable_cg) 115 116 kern(grid, block, args, shared_mem, stream, enable_cg) 117 118 def __getitem__(self, grid_and_block): 119 """Numba-style kernel call. 120 121 .. seealso:: :ref:`jit_kernel_definition` 122 """ 123 grid, block = grid_and_block 124 if not isinstance(grid, tuple): 125 grid = (grid, 1, 1) 126 if not isinstance(block, tuple): 127 block = (block, 1, 1) 128 return lambda *args, **kwargs: self(grid, block, args, **kwargs) 129 130 @property 131 def cached_codes(self): 132 """Returns a dict that has input types as keys and codes values. 133 134 This proprety method is for debugging purpose. 135 The return value is not guaranteed to keep backward compatibility. 136 """ 137 if len(self._cached_codes) == 0: 138 warnings.warn( 139 'No codes are cached because compilation is deferred until ' 140 'the first function call.') 141 return dict([(k, v.code) for k, v in self._cached_codes.items()]) 142 143 @property 144 def cached_code(self): 145 """Returns `next(iter(self.cached_codes.values()))`. 146 147 This proprety method is for debugging purpose. 148 The return value is not guaranteed to keep backward compatibility. 149 """ 150 codes = self.cached_codes 151 if len(codes) > 1: 152 warnings.warn( 153 'The input types of the kernel could not be inferred. ' 154 'Please use `.cached_codes` instead.') 155 return next(iter(codes.values())) 156 157 158 def rawkernel(*, mode='cuda', device=False): 159 """A decorator compiles a Python function into CUDA kernel. 
160 """ 161 cupy._util.experimental('cupyx.jit.rawkernel') 162 163 def wrapper(func): 164 return functools.update_wrapper( 165 _JitRawKernel(func, mode, device), func) 166 return wrapper 167 168 169 threadIdx = _internal_types.Data('threadIdx', _cuda_types.dim3) 170 blockDim = _internal_types.Data('blockDim', _cuda_types.dim3) 171 blockIdx = _internal_types.Data('blockIdx', _cuda_types.dim3) 172 gridDim = _internal_types.Data('gridDim', _cuda_types.dim3) 173 174 warpsize = _internal_types.Data( 175 '64' if runtime.is_hip else '32', _cuda_types.uint32) 176 warpsize.__doc__ = r"""Returns the number of threads in a warp. 177 178 In CUDA this is always 32, and in ROCm/HIP always 64. 179 180 .. seealso:: :obj:`numba.cuda.warpsize` 181 """ 182 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/cupyx/jit/_interface.py b/cupyx/jit/_interface.py --- a/cupyx/jit/_interface.py +++ b/cupyx/jit/_interface.py @@ -10,6 +10,7 @@ from cupyx.jit import _cuda_typerules from cupyx.jit import _cuda_types from cupyx.jit import _internal_types +from cupyx.jit._cuda_types import Scalar class _CudaFunction: @@ -113,7 +114,16 @@ kern = module.get_function(fname) self._cache[(in_types, device_id)] = (kern, enable_cg) - kern(grid, block, args, shared_mem, stream, enable_cg) + new_args = [] + for a, t in zip(args, in_types): + if isinstance(t, Scalar): + if t.dtype.char == 'e': + a = numpy.float32(a) + else: + a = t.dtype.type(a) + new_args.append(a) + + kern(grid, block, tuple(new_args), shared_mem, stream, enable_cg) def __getitem__(self, grid_and_block): """Numba-style kernel call.
{"golden_diff": "diff --git a/cupyx/jit/_interface.py b/cupyx/jit/_interface.py\n--- a/cupyx/jit/_interface.py\n+++ b/cupyx/jit/_interface.py\n@@ -10,6 +10,7 @@\n from cupyx.jit import _cuda_typerules\n from cupyx.jit import _cuda_types\n from cupyx.jit import _internal_types\n+from cupyx.jit._cuda_types import Scalar\n \n \n class _CudaFunction:\n@@ -113,7 +114,16 @@\n kern = module.get_function(fname)\n self._cache[(in_types, device_id)] = (kern, enable_cg)\n \n- kern(grid, block, args, shared_mem, stream, enable_cg)\n+ new_args = []\n+ for a, t in zip(args, in_types):\n+ if isinstance(t, Scalar):\n+ if t.dtype.char == 'e':\n+ a = numpy.float32(a)\n+ else:\n+ a = t.dtype.type(a)\n+ new_args.append(a)\n+\n+ kern(grid, block, tuple(new_args), shared_mem, stream, enable_cg)\n \n def __getitem__(self, grid_and_block):\n \"\"\"Numba-style kernel call.\n", "issue": "python primitive scalar float fails as jit.rawkernel argument\n### Description\r\n\r\nPassing python primitive float as a jit.rawkernel argument seems not to work. Numpy.float32 works, python float does not.\r\n\r\n### To Reproduce\r\n\r\n```py\r\[email protected]()\r\ndef scalar_multiply(a, m, size):\r\n tid = jit.blockIdx.x * jit.blockDim.x + jit.threadIdx.x\r\n ntid = jit.gridDim.x * jit.blockDim.x\r\n for i in range(tid, size, ntid):\r\n a[i] = a[i] * m\r\n\r\nsize = 5\r\na = cp.random.random(size, dtype=np.float32)\r\n\r\nprint(a)\r\nscalar_multiply((128,),(1024,),(a, np.float32(5.0), size))\r\nprint(a)\r\nscalar_multiply((128,),(1024,),(a, 5.0, size))\r\nprint(a)\r\n```\r\noutput:\r\n\r\n[0.17959814 0.42873758 0.77541053 0.8213136 0.8284943 ]\r\n[0.8979907 2.143688 3.8770528 4.1065683 4.1424713]\r\n[0. 0. 0. 0. 0.]\r\n\r\n### Installation\r\n\r\nWheel\r\n\r\n### Environment\r\n\r\n```\r\nOS : Linux-5.15.0-41-generic-x86_64-with-glibc2.29\r\nPython Version : 3.8.10\r\nCuPy Version : 11.0.0\r\nCuPy Platform : NVIDIA CUDA\r\nNumPy Version : 1.22.0\r\nSciPy Version : 1.4.1\r\nCython Build Version : 0.29.24\r\nCython Runtime Version : 0.29.28\r\nCUDA Root : /usr/local/cuda\r\nnvcc PATH : /usr/local/cuda/bin/nvcc\r\nCUDA Build Version : 11070\r\nCUDA Driver Version : 11070\r\nCUDA Runtime Version : 11070\r\ncuBLAS Version : (available)\r\ncuFFT Version : 10702\r\ncuRAND Version : 10210\r\ncuSOLVER Version : (11, 3, 5)\r\ncuSPARSE Version : (available)\r\nNVRTC Version : (11, 7)\r\nThrust Version : 101500\r\nCUB Build Version : 101500\r\nJitify Build Version : 4a37de0\r\ncuDNN Build Version : 8400\r\ncuDNN Version : 8401\r\nNCCL Build Version : None\r\nNCCL Runtime Version : None\r\ncuTENSOR Version : None\r\ncuSPARSELt Build Version : None\r\nDevice 0 Name : NVIDIA GeForce GTX 950\r\nDevice 0 Compute Capability : 52\r\nDevice 0 PCI Bus ID : 0000:01:00.0\r\n```\r\n\r\n\r\n### Additional Information\r\n\r\nThanks!\n", "before_files": [{"content": "import functools\nimport warnings\n\nimport numpy\n\nfrom cupy_backends.cuda.api import runtime\nimport cupy\nfrom cupy._core import core\nfrom cupyx.jit import _compile\nfrom cupyx.jit import _cuda_typerules\nfrom cupyx.jit import _cuda_types\nfrom cupyx.jit import _internal_types\n\n\nclass _CudaFunction:\n \"\"\"JIT cupy function object\n \"\"\"\n\n def __init__(self, func, mode, device=False, inline=False):\n self.attributes = []\n\n if device:\n self.attributes.append('__device__')\n else:\n self.attributes.append('__global__')\n\n if inline:\n self.attributes.append('inline')\n\n self.name = getattr(func, 'name', func.__name__)\n self.func = func\n self.mode = mode\n\n def 
__call__(self, *args, **kwargs):\n raise NotImplementedError\n\n def _emit_code_from_types(self, in_types, ret_type=None):\n return _compile.transpile(\n self.func, self.attributes, self.mode, in_types, ret_type)\n\n\nclass _JitRawKernel:\n \"\"\"JIT CUDA kernel object.\n\n The decorator :func:``cupyx.jit.rawkernel`` converts the target function\n to an object of this class. This class is not inteded to be instantiated\n by users.\n \"\"\"\n\n def __init__(self, func, mode, device):\n self._func = func\n self._mode = mode\n self._device = device\n self._cache = {}\n self._cached_codes = {}\n\n def __call__(\n self, grid, block, args, shared_mem=0, stream=None):\n \"\"\"Calls the CUDA kernel.\n\n The compilation will be deferred until the first function call.\n CuPy's JIT compiler infers the types of arguments at the call\n time, and will cache the compiled kernels for speeding up any\n subsequent calls.\n\n Args:\n grid (tuple of int): Size of grid in blocks.\n block (tuple of int): Dimensions of each thread block.\n args (tuple):\n Arguments of the kernel. The type of all elements must be\n ``bool``, ``int``, ``float``, ``complex``, NumPy scalar or\n ``cupy.ndarray``.\n shared_mem (int):\n Dynamic shared-memory size per thread block in bytes.\n stream (cupy.cuda.Stream): CUDA stream.\n\n .. seealso:: :ref:`jit_kernel_definition`\n \"\"\"\n in_types = []\n for x in args:\n if isinstance(x, cupy.ndarray):\n t = _cuda_types.CArray.from_ndarray(x)\n elif numpy.isscalar(x):\n t = _cuda_typerules.get_ctype_from_scalar(self._mode, x)\n else:\n raise TypeError(f'{type(x)} is not supported for RawKernel')\n in_types.append(t)\n in_types = tuple(in_types)\n device_id = cupy.cuda.get_device_id()\n\n kern, enable_cg = self._cache.get((in_types, device_id), (None, None))\n if kern is None:\n result = self._cached_codes.get(in_types)\n if result is None:\n result = _compile.transpile(\n self._func,\n ['extern \"C\"', '__global__'],\n self._mode,\n in_types,\n _cuda_types.void,\n )\n self._cached_codes[in_types] = result\n\n fname = result.func_name\n enable_cg = result.enable_cooperative_groups\n # workaround for hipRTC: as of ROCm 4.1.0 hipRTC still does not\n # recognize \"-D\", so we have to compile using hipcc...\n backend = 'nvcc' if runtime.is_hip else 'nvrtc'\n module = core.compile_with_cache(\n source=result.code,\n options=('-DCUPY_JIT_MODE', '--std=c++14'),\n backend=backend)\n kern = module.get_function(fname)\n self._cache[(in_types, device_id)] = (kern, enable_cg)\n\n kern(grid, block, args, shared_mem, stream, enable_cg)\n\n def __getitem__(self, grid_and_block):\n \"\"\"Numba-style kernel call.\n\n .. 
seealso:: :ref:`jit_kernel_definition`\n \"\"\"\n grid, block = grid_and_block\n if not isinstance(grid, tuple):\n grid = (grid, 1, 1)\n if not isinstance(block, tuple):\n block = (block, 1, 1)\n return lambda *args, **kwargs: self(grid, block, args, **kwargs)\n\n @property\n def cached_codes(self):\n \"\"\"Returns a dict that has input types as keys and codes values.\n\n This proprety method is for debugging purpose.\n The return value is not guaranteed to keep backward compatibility.\n \"\"\"\n if len(self._cached_codes) == 0:\n warnings.warn(\n 'No codes are cached because compilation is deferred until '\n 'the first function call.')\n return dict([(k, v.code) for k, v in self._cached_codes.items()])\n\n @property\n def cached_code(self):\n \"\"\"Returns `next(iter(self.cached_codes.values()))`.\n\n This proprety method is for debugging purpose.\n The return value is not guaranteed to keep backward compatibility.\n \"\"\"\n codes = self.cached_codes\n if len(codes) > 1:\n warnings.warn(\n 'The input types of the kernel could not be inferred. '\n 'Please use `.cached_codes` instead.')\n return next(iter(codes.values()))\n\n\ndef rawkernel(*, mode='cuda', device=False):\n \"\"\"A decorator compiles a Python function into CUDA kernel.\n \"\"\"\n cupy._util.experimental('cupyx.jit.rawkernel')\n\n def wrapper(func):\n return functools.update_wrapper(\n _JitRawKernel(func, mode, device), func)\n return wrapper\n\n\nthreadIdx = _internal_types.Data('threadIdx', _cuda_types.dim3)\nblockDim = _internal_types.Data('blockDim', _cuda_types.dim3)\nblockIdx = _internal_types.Data('blockIdx', _cuda_types.dim3)\ngridDim = _internal_types.Data('gridDim', _cuda_types.dim3)\n\nwarpsize = _internal_types.Data(\n '64' if runtime.is_hip else '32', _cuda_types.uint32)\nwarpsize.__doc__ = r\"\"\"Returns the number of threads in a warp.\n\nIn CUDA this is always 32, and in ROCm/HIP always 64.\n\n.. seealso:: :obj:`numba.cuda.warpsize`\n\"\"\"\n", "path": "cupyx/jit/_interface.py"}], "after_files": [{"content": "import functools\nimport warnings\n\nimport numpy\n\nfrom cupy_backends.cuda.api import runtime\nimport cupy\nfrom cupy._core import core\nfrom cupyx.jit import _compile\nfrom cupyx.jit import _cuda_typerules\nfrom cupyx.jit import _cuda_types\nfrom cupyx.jit import _internal_types\nfrom cupyx.jit._cuda_types import Scalar\n\n\nclass _CudaFunction:\n \"\"\"JIT cupy function object\n \"\"\"\n\n def __init__(self, func, mode, device=False, inline=False):\n self.attributes = []\n\n if device:\n self.attributes.append('__device__')\n else:\n self.attributes.append('__global__')\n\n if inline:\n self.attributes.append('inline')\n\n self.name = getattr(func, 'name', func.__name__)\n self.func = func\n self.mode = mode\n\n def __call__(self, *args, **kwargs):\n raise NotImplementedError\n\n def _emit_code_from_types(self, in_types, ret_type=None):\n return _compile.transpile(\n self.func, self.attributes, self.mode, in_types, ret_type)\n\n\nclass _JitRawKernel:\n \"\"\"JIT CUDA kernel object.\n\n The decorator :func:``cupyx.jit.rawkernel`` converts the target function\n to an object of this class. 
This class is not inteded to be instantiated\n by users.\n \"\"\"\n\n def __init__(self, func, mode, device):\n self._func = func\n self._mode = mode\n self._device = device\n self._cache = {}\n self._cached_codes = {}\n\n def __call__(\n self, grid, block, args, shared_mem=0, stream=None):\n \"\"\"Calls the CUDA kernel.\n\n The compilation will be deferred until the first function call.\n CuPy's JIT compiler infers the types of arguments at the call\n time, and will cache the compiled kernels for speeding up any\n subsequent calls.\n\n Args:\n grid (tuple of int): Size of grid in blocks.\n block (tuple of int): Dimensions of each thread block.\n args (tuple):\n Arguments of the kernel. The type of all elements must be\n ``bool``, ``int``, ``float``, ``complex``, NumPy scalar or\n ``cupy.ndarray``.\n shared_mem (int):\n Dynamic shared-memory size per thread block in bytes.\n stream (cupy.cuda.Stream): CUDA stream.\n\n .. seealso:: :ref:`jit_kernel_definition`\n \"\"\"\n in_types = []\n for x in args:\n if isinstance(x, cupy.ndarray):\n t = _cuda_types.CArray.from_ndarray(x)\n elif numpy.isscalar(x):\n t = _cuda_typerules.get_ctype_from_scalar(self._mode, x)\n else:\n raise TypeError(f'{type(x)} is not supported for RawKernel')\n in_types.append(t)\n in_types = tuple(in_types)\n device_id = cupy.cuda.get_device_id()\n\n kern, enable_cg = self._cache.get((in_types, device_id), (None, None))\n if kern is None:\n result = self._cached_codes.get(in_types)\n if result is None:\n result = _compile.transpile(\n self._func,\n ['extern \"C\"', '__global__'],\n self._mode,\n in_types,\n _cuda_types.void,\n )\n self._cached_codes[in_types] = result\n\n fname = result.func_name\n enable_cg = result.enable_cooperative_groups\n # workaround for hipRTC: as of ROCm 4.1.0 hipRTC still does not\n # recognize \"-D\", so we have to compile using hipcc...\n backend = 'nvcc' if runtime.is_hip else 'nvrtc'\n module = core.compile_with_cache(\n source=result.code,\n options=('-DCUPY_JIT_MODE', '--std=c++14'),\n backend=backend)\n kern = module.get_function(fname)\n self._cache[(in_types, device_id)] = (kern, enable_cg)\n\n new_args = []\n for a, t in zip(args, in_types):\n if isinstance(t, Scalar):\n if t.dtype.char == 'e':\n a = numpy.float32(a)\n else:\n a = t.dtype.type(a)\n new_args.append(a)\n\n kern(grid, block, tuple(new_args), shared_mem, stream, enable_cg)\n\n def __getitem__(self, grid_and_block):\n \"\"\"Numba-style kernel call.\n\n .. seealso:: :ref:`jit_kernel_definition`\n \"\"\"\n grid, block = grid_and_block\n if not isinstance(grid, tuple):\n grid = (grid, 1, 1)\n if not isinstance(block, tuple):\n block = (block, 1, 1)\n return lambda *args, **kwargs: self(grid, block, args, **kwargs)\n\n @property\n def cached_codes(self):\n \"\"\"Returns a dict that has input types as keys and codes values.\n\n This proprety method is for debugging purpose.\n The return value is not guaranteed to keep backward compatibility.\n \"\"\"\n if len(self._cached_codes) == 0:\n warnings.warn(\n 'No codes are cached because compilation is deferred until '\n 'the first function call.')\n return dict([(k, v.code) for k, v in self._cached_codes.items()])\n\n @property\n def cached_code(self):\n \"\"\"Returns `next(iter(self.cached_codes.values()))`.\n\n This proprety method is for debugging purpose.\n The return value is not guaranteed to keep backward compatibility.\n \"\"\"\n codes = self.cached_codes\n if len(codes) > 1:\n warnings.warn(\n 'The input types of the kernel could not be inferred. 
'\n 'Please use `.cached_codes` instead.')\n return next(iter(codes.values()))\n\n\ndef rawkernel(*, mode='cuda', device=False):\n \"\"\"A decorator compiles a Python function into CUDA kernel.\n \"\"\"\n cupy._util.experimental('cupyx.jit.rawkernel')\n\n def wrapper(func):\n return functools.update_wrapper(\n _JitRawKernel(func, mode, device), func)\n return wrapper\n\n\nthreadIdx = _internal_types.Data('threadIdx', _cuda_types.dim3)\nblockDim = _internal_types.Data('blockDim', _cuda_types.dim3)\nblockIdx = _internal_types.Data('blockIdx', _cuda_types.dim3)\ngridDim = _internal_types.Data('gridDim', _cuda_types.dim3)\n\nwarpsize = _internal_types.Data(\n '64' if runtime.is_hip else '32', _cuda_types.uint32)\nwarpsize.__doc__ = r\"\"\"Returns the number of threads in a warp.\n\nIn CUDA this is always 32, and in ROCm/HIP always 64.\n\n.. seealso:: :obj:`numba.cuda.warpsize`\n\"\"\"\n", "path": "cupyx/jit/_interface.py"}]}
2,873
274
gh_patches_debug_7545
rasdani/github-patches
git_diff
deeppavlov__DeepPavlov-861
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Python 3.7.0 support DeepPavlov has scikit-learn version fixed to v0.19.1, but its c-extensions build fails on python 3.7.0 (at least on macOS), please see [scikit-learn issue](https://github.com/scikit-learn/scikit-learn/issues/11320). This issue has been fixed in scikit-learn v0.19.2 release, so you have to up at least minor version to enable python 3.7.0 support. I can try python 3.7.0 compatibility of other packages and prepare a pull-request, if you want. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `deeppavlov/__init__.py` Content: ``` 1 # Copyright 2017 Neural Networks and Deep Learning lab, MIPT 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import sys 16 from pathlib import Path 17 18 from .core.common.log import init_logger 19 20 try: 21 from .configs import configs 22 # noinspection PyUnresolvedReferences 23 from .core.commands.infer import build_model 24 # noinspection PyUnresolvedReferences 25 from .core.commands.train import train_evaluate_model_from_config 26 from .download import deep_download 27 from .core.common.chainer import Chainer 28 29 # TODO: make better 30 def train_model(config: [str, Path, dict], download: bool = False, recursive: bool = False) -> Chainer: 31 train_evaluate_model_from_config(config, download=download, recursive=recursive) 32 return build_model(config, load_trained=True) 33 34 def evaluate_model(config: [str, Path, dict], download: bool = False, recursive: bool = False) -> dict: 35 return train_evaluate_model_from_config(config, to_train=False, download=download, recursive=recursive) 36 37 except ImportError: 38 'Assuming that requirements are not yet installed' 39 40 __version__ = '0.4.0' 41 __author__ = 'Neural Networks and Deep Learning lab, MIPT' 42 __description__ = 'An open source library for building end-to-end dialog systems and training chatbots.' 43 __keywords__ = ['NLP', 'NER', 'SQUAD', 'Intents', 'Chatbot'] 44 __license__ = 'Apache License, Version 2.0' 45 __email__ = '[email protected]' 46 47 # check version 48 assert sys.hexversion >= 0x3060000, 'Does not work in python3.5 or lower' 49 50 # resolve conflicts with previous DeepPavlov installations versioned up to 0.0.9 51 dot_dp_path = Path('~/.deeppavlov').expanduser().resolve() 52 if dot_dp_path.is_file(): 53 dot_dp_path.unlink() 54 55 # initiate logging 56 init_logger() 57 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/deeppavlov/__init__.py b/deeppavlov/__init__.py --- a/deeppavlov/__init__.py +++ b/deeppavlov/__init__.py @@ -37,7 +37,7 @@ except ImportError: 'Assuming that requirements are not yet installed' -__version__ = '0.4.0' +__version__ = '0.5.0' __author__ = 'Neural Networks and Deep Learning lab, MIPT' __description__ = 'An open source library for building end-to-end dialog systems and training chatbots.' __keywords__ = ['NLP', 'NER', 'SQUAD', 'Intents', 'Chatbot']
{"golden_diff": "diff --git a/deeppavlov/__init__.py b/deeppavlov/__init__.py\n--- a/deeppavlov/__init__.py\n+++ b/deeppavlov/__init__.py\n@@ -37,7 +37,7 @@\n except ImportError:\n 'Assuming that requirements are not yet installed'\n \n-__version__ = '0.4.0'\n+__version__ = '0.5.0'\n __author__ = 'Neural Networks and Deep Learning lab, MIPT'\n __description__ = 'An open source library for building end-to-end dialog systems and training chatbots.'\n __keywords__ = ['NLP', 'NER', 'SQUAD', 'Intents', 'Chatbot']\n", "issue": "Python 3.7.0 support\nDeepPavlov has scikit-learn version fixed to v0.19.1, but its c-extensions build fails on python 3.7.0 (at least on macOS), please see [scikit-learn issue](https://github.com/scikit-learn/scikit-learn/issues/11320).\r\n\r\nThis issue has been fixed in scikit-learn v0.19.2 release, so you have to up at least minor version to enable python 3.7.0 support.\r\n\r\nI can try python 3.7.0 compatibility of other packages and prepare a pull-request, if you want.\n", "before_files": [{"content": "# Copyright 2017 Neural Networks and Deep Learning lab, MIPT\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nfrom pathlib import Path\n\nfrom .core.common.log import init_logger\n\ntry:\n from .configs import configs\n # noinspection PyUnresolvedReferences\n from .core.commands.infer import build_model\n # noinspection PyUnresolvedReferences\n from .core.commands.train import train_evaluate_model_from_config\n from .download import deep_download\n from .core.common.chainer import Chainer\n\n # TODO: make better\n def train_model(config: [str, Path, dict], download: bool = False, recursive: bool = False) -> Chainer:\n train_evaluate_model_from_config(config, download=download, recursive=recursive)\n return build_model(config, load_trained=True)\n\n def evaluate_model(config: [str, Path, dict], download: bool = False, recursive: bool = False) -> dict:\n return train_evaluate_model_from_config(config, to_train=False, download=download, recursive=recursive)\n\nexcept ImportError:\n 'Assuming that requirements are not yet installed'\n\n__version__ = '0.4.0'\n__author__ = 'Neural Networks and Deep Learning lab, MIPT'\n__description__ = 'An open source library for building end-to-end dialog systems and training chatbots.'\n__keywords__ = ['NLP', 'NER', 'SQUAD', 'Intents', 'Chatbot']\n__license__ = 'Apache License, Version 2.0'\n__email__ = '[email protected]'\n\n# check version\nassert sys.hexversion >= 0x3060000, 'Does not work in python3.5 or lower'\n\n# resolve conflicts with previous DeepPavlov installations versioned up to 0.0.9\ndot_dp_path = Path('~/.deeppavlov').expanduser().resolve()\nif dot_dp_path.is_file():\n dot_dp_path.unlink()\n\n# initiate logging\ninit_logger()\n", "path": "deeppavlov/__init__.py"}], "after_files": [{"content": "# Copyright 2017 Neural Networks and Deep Learning lab, MIPT\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain 
a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\nfrom pathlib import Path\n\nfrom .core.common.log import init_logger\n\ntry:\n from .configs import configs\n # noinspection PyUnresolvedReferences\n from .core.commands.infer import build_model\n # noinspection PyUnresolvedReferences\n from .core.commands.train import train_evaluate_model_from_config\n from .download import deep_download\n from .core.common.chainer import Chainer\n\n # TODO: make better\n def train_model(config: [str, Path, dict], download: bool = False, recursive: bool = False) -> Chainer:\n train_evaluate_model_from_config(config, download=download, recursive=recursive)\n return build_model(config, load_trained=True)\n\n def evaluate_model(config: [str, Path, dict], download: bool = False, recursive: bool = False) -> dict:\n return train_evaluate_model_from_config(config, to_train=False, download=download, recursive=recursive)\n\nexcept ImportError:\n 'Assuming that requirements are not yet installed'\n\n__version__ = '0.5.0'\n__author__ = 'Neural Networks and Deep Learning lab, MIPT'\n__description__ = 'An open source library for building end-to-end dialog systems and training chatbots.'\n__keywords__ = ['NLP', 'NER', 'SQUAD', 'Intents', 'Chatbot']\n__license__ = 'Apache License, Version 2.0'\n__email__ = '[email protected]'\n\n# check version\nassert sys.hexversion >= 0x3060000, 'Does not work in python3.5 or lower'\n\n# resolve conflicts with previous DeepPavlov installations versioned up to 0.0.9\ndot_dp_path = Path('~/.deeppavlov').expanduser().resolve()\nif dot_dp_path.is_file():\n dot_dp_path.unlink()\n\n# initiate logging\ninit_logger()\n", "path": "deeppavlov/__init__.py"}]}
1,061
160
gh_patches_debug_30784
rasdani/github-patches
git_diff
mozilla__bugbug-3958
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [model:bugtype] ValueError: y should be a 1d array, got an array of shape (198635, 5) instead. > I'm wondering if the error encountered during the training of the `bugtype` model is something we should investigate. > > ```bash > Traceback (most recent call last): > File "/home/promisefru/anaconda3/envs/bugbug/lib/python3.10/runpy.py", line 196, in _run_module_as_main > return _run_code(code, main_globals, None, > File "/home/promisefru/anaconda3/envs/bugbug/lib/python3.10/runpy.py", line 86, in _run_code > exec(code, run_globals) > File "/home/promisefru/mozilla/bugbug/scripts/trainer.py", line 145, in <module> > main() > File "/home/promisefru/mozilla/bugbug/scripts/trainer.py", line 141, in main > retriever.go(args) > File "/home/promisefru/mozilla/bugbug/scripts/trainer.py", line 41, in go > metrics = model_obj.train(limit=args.limit) > File "/home/promisefru/mozilla/bugbug/bugbug/model.py", line 377, in train > self.le.fit(y) > File "/home/promisefru/anaconda3/envs/bugbug/lib/python3.10/site-packages/sklearn/preprocessing/_label.py", line 98, in fit > y = column_or_1d(y, warn=True) > File "/home/promisefru/anaconda3/envs/bugbug/lib/python3.10/site-packages/sklearn/utils/validation.py", line 1156, in column_or_1d > raise ValueError( > ValueError: y should be a 1d array, got an array of shape (198635, 5) instead. > ``` _Originally posted by @PromiseFru in https://github.com/mozilla/bugbug/issues/3928#issuecomment-1875673580_ See also: https://github.com/mozilla/bugbug/pull/3823#pullrequestreview-1746981626 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `bugbug/models/rcatype.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # This Source Code Form is subject to the terms of the Mozilla Public 3 # License, v. 2.0. If a copy of the MPL was not distributed with this file, 4 # You can obtain one at http://mozilla.org/MPL/2.0/. 5 6 import logging 7 import re 8 9 import numpy as np 10 import xgboost 11 from sklearn.compose import ColumnTransformer 12 from sklearn.feature_extraction import DictVectorizer 13 from sklearn.multiclass import OneVsRestClassifier 14 from sklearn.pipeline import Pipeline 15 16 from bugbug import bug_features, bugzilla, feature_cleanup, utils 17 from bugbug.model import BugModel 18 19 # For the moment, rca - XYZ is treated of bugtype XYZ, 20 # so we don't need to store it in a dictionary. 
21 RCA_CATEGORIES = [ 22 "requirementerror", 23 "poorarchitecture", 24 "designerror", 25 "codingerror", 26 "testingerror", 27 "externalsoftwareaffectingfirefox", 28 "performanceerror", 29 "standards", 30 "systemerror", 31 "localizationerror", 32 "memory", 33 "infrastructure/builderror", 34 "communicationissues", 35 "productdecision", 36 "undocumentedchange", 37 "cornercase", 38 ] 39 40 RCA_SUBCATEGORIES = [ 41 "codingerror-syntaxerror", 42 "codingerror-logicalerror", 43 "codingerror-semanticerror", 44 "codingerror-runtimeerror", 45 "codingerror-unhandledexceptions", 46 "codingerror-internalapiissue", 47 "codingerror-networkissue", 48 "codingerror-compatibilityissue", 49 "codingerror-other", 50 ] 51 52 logger = logging.getLogger(__name__) 53 54 55 class RCATypeModel(BugModel): 56 def __init__( 57 self, lemmatization=False, historical=False, rca_subcategories_enabled=False 58 ): 59 BugModel.__init__(self, lemmatization) 60 61 self.calculate_importance = False 62 self.rca_subcategories_enabled = rca_subcategories_enabled 63 64 # should we consider only the main category or all sub categories 65 self.RCA_TYPES = ( 66 RCA_SUBCATEGORIES + RCA_CATEGORIES 67 if rca_subcategories_enabled 68 else RCA_CATEGORIES 69 ) 70 71 self.RCA_LIST = sorted(set(self.RCA_TYPES)) 72 73 feature_extractors = [ 74 bug_features.HasSTR(), 75 bug_features.Severity(), 76 bug_features.IsCoverityIssue(), 77 bug_features.HasCrashSignature(), 78 bug_features.HasURL(), 79 bug_features.HasW3CURL(), 80 bug_features.HasGithubURL(), 81 # Ignore whiteboards that would make the ML completely skewed 82 # bug_features.whiteboard(), 83 bug_features.Patches(), 84 bug_features.Landings(), 85 bug_features.BlockedBugsNumber(), 86 bug_features.EverAffected(), 87 bug_features.AffectedThenUnaffected(), 88 bug_features.Product(), 89 bug_features.Component(), 90 ] 91 92 cleanup_functions = [ 93 feature_cleanup.url(), 94 feature_cleanup.fileref(), 95 feature_cleanup.synonyms(), 96 ] 97 98 self.extraction_pipeline = Pipeline( 99 [ 100 ( 101 "bug_extractor", 102 bug_features.BugExtractor(feature_extractors, cleanup_functions), 103 ), 104 ] 105 ) 106 107 self.clf = Pipeline( 108 [ 109 ( 110 "union", 111 ColumnTransformer( 112 [ 113 ("data", DictVectorizer(), "data"), 114 ("title", self.text_vectorizer(min_df=0.001), "title"), 115 ( 116 "first_comment", 117 self.text_vectorizer(min_df=0.001), 118 "first_comment", 119 ), 120 ( 121 "comments", 122 self.text_vectorizer(min_df=0.001), 123 "comments", 124 ), 125 ] 126 ), 127 ), 128 ( 129 "estimator", 130 OneVsRestClassifier( 131 xgboost.XGBClassifier(n_jobs=utils.get_physical_cpu_count()) 132 ), 133 ), 134 ] 135 ) 136 137 # return rca from a whiteboard string 138 def get_rca_from_whiteboard(self, whiteboard_data): 139 rca = [] 140 whiteboard_data = re.sub(" ", "", whiteboard_data).lower() 141 for whiteboard in whiteboard_data.split("["): 142 if not any(whiteboard.startswith(s) for s in ("rca-", "rca:")): 143 continue 144 145 rca_whiteboard = re.sub("]", "", whiteboard) 146 147 # Hybrid cases: rca:X-Y 148 rca_whiteboard = re.sub(":", "-", rca_whiteboard) 149 150 rca_whiteboard_split = ( 151 rca_whiteboard.split("-", 1) 152 if self.rca_subcategories_enabled 153 else rca_whiteboard.split("-") 154 ) 155 156 if rca_whiteboard_split[1] not in self.RCA_LIST: 157 logger.warning(rca_whiteboard_split[1] + " not in RCA_LIST") 158 else: 159 rca.append(rca_whiteboard_split[1]) 160 return rca 161 162 def get_labels(self): 163 classes = {} 164 for bug in bugzilla.get_bugs(): 165 target = np.zeros(len(self.RCA_LIST)) 
166 for rca in self.get_rca_from_whiteboard(bug["whiteboard"]): 167 target[self.RCA_LIST.index(rca)] = 1 168 classes[bug["id"]] = target 169 return classes, self.RCA_LIST 170 171 def get_feature_names(self): 172 return self.clf.named_steps["union"].get_feature_names_out() 173 174 def overwrite_classes(self, bugs, classes, probabilities): 175 rca_values = self.get_rca(bugs) 176 for i in len(classes): 177 for rca in rca_values[i]: 178 if rca in self.RCA_LIST: 179 if probabilities: 180 classes[i][self.RCA_LIST.index(rca)] = 1.0 181 else: 182 classes[i][self.RCA_LIST.index(rca)] = 1 183 184 return classes 185 ``` Path: `bugbug/models/bugtype.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # This Source Code Form is subject to the terms of the Mozilla Public 3 # License, v. 2.0. If a copy of the MPL was not distributed with this file, 4 # You can obtain one at http://mozilla.org/MPL/2.0/. 5 6 import logging 7 from typing import Iterable 8 9 import numpy as np 10 import xgboost 11 from sklearn.compose import ColumnTransformer 12 from sklearn.feature_extraction import DictVectorizer 13 from sklearn.multiclass import OneVsRestClassifier 14 from sklearn.pipeline import Pipeline 15 16 from bugbug import bug_features, bugzilla, feature_cleanup, utils 17 from bugbug.model import BugModel 18 19 logger = logging.getLogger(__name__) 20 21 22 class BugTypeModel(BugModel): 23 def __init__(self, lemmatization=False, historical=False): 24 BugModel.__init__(self, lemmatization) 25 26 self.calculate_importance = False 27 28 self.bug_type_extractors = bug_features.BugTypes.bug_type_extractors 29 30 label_keyword_prefixes = { 31 keyword 32 for extractor in self.bug_type_extractors 33 for keyword in extractor.keyword_prefixes 34 } 35 36 feature_extractors = [ 37 bug_features.HasSTR(), 38 bug_features.Severity(), 39 # Ignore keywords that would make the ML completely skewed 40 # (we are going to use them as 100% rules in the evaluation phase). 
41 bug_features.Keywords(label_keyword_prefixes), 42 bug_features.IsCoverityIssue(), 43 bug_features.HasCrashSignature(), 44 bug_features.HasURL(), 45 bug_features.HasW3CURL(), 46 bug_features.HasGithubURL(), 47 bug_features.Whiteboard(), 48 bug_features.Patches(), 49 bug_features.Landings(), 50 bug_features.BlockedBugsNumber(), 51 bug_features.EverAffected(), 52 bug_features.AffectedThenUnaffected(), 53 bug_features.Product(), 54 bug_features.Component(), 55 ] 56 57 cleanup_functions = [ 58 feature_cleanup.url(), 59 feature_cleanup.fileref(), 60 feature_cleanup.synonyms(), 61 ] 62 63 self.extraction_pipeline = Pipeline( 64 [ 65 ( 66 "bug_extractor", 67 bug_features.BugExtractor(feature_extractors, cleanup_functions), 68 ), 69 ] 70 ) 71 72 self.clf = Pipeline( 73 [ 74 ( 75 "union", 76 ColumnTransformer( 77 [ 78 ("data", DictVectorizer(), "data"), 79 ("title", self.text_vectorizer(min_df=0.001), "title"), 80 ( 81 "first_comment", 82 self.text_vectorizer(min_df=0.001), 83 "first_comment", 84 ), 85 ( 86 "comments", 87 self.text_vectorizer(min_df=0.001), 88 "comments", 89 ), 90 ] 91 ), 92 ), 93 ( 94 "estimator", 95 OneVsRestClassifier( 96 xgboost.XGBClassifier(n_jobs=utils.get_physical_cpu_count()) 97 ), 98 ), 99 ] 100 ) 101 102 def get_labels(self) -> tuple[dict[int, np.ndarray], list[str]]: 103 classes = {} 104 105 bug_map = {bug["id"]: bug for bug in bugzilla.get_bugs()} 106 107 for bug_data in bug_map.values(): 108 target = np.zeros(len(self.bug_type_extractors)) 109 for i, is_type in enumerate(self.bug_type_extractors): 110 if is_type(bug_data, bug_map): 111 target[i] = 1 112 113 classes[int(bug_data["id"])] = target 114 115 bug_types = [extractor.type_name for extractor in self.bug_type_extractors] 116 117 for i, bug_type in enumerate(bug_types): 118 logger.info( 119 "%d %s bugs", 120 sum(target[i] for target in classes.values()), 121 bug_type, 122 ) 123 124 return classes, bug_types 125 126 def get_feature_names(self): 127 return self.clf.named_steps["union"].get_feature_names_out() 128 129 def overwrite_classes( 130 self, 131 bugs: Iterable[bugzilla.BugDict], 132 classes: dict[int, np.ndarray], 133 probabilities: bool, 134 ): 135 bug_map = {bug["id"]: bug for bug in bugs} 136 137 for i, bug in enumerate(bugs): 138 for j, is_type_applicable in enumerate(self.bug_type_extractors): 139 if is_type_applicable(bug, bug_map): 140 if probabilities: 141 classes[i][j] = 1.0 142 else: 143 classes[i][j] = 1 144 145 return classes 146 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/bugbug/models/bugtype.py b/bugbug/models/bugtype.py --- a/bugbug/models/bugtype.py +++ b/bugbug/models/bugtype.py @@ -12,6 +12,7 @@ from sklearn.feature_extraction import DictVectorizer from sklearn.multiclass import OneVsRestClassifier from sklearn.pipeline import Pipeline +from sklearn.preprocessing import LabelBinarizer from bugbug import bug_features, bugzilla, feature_cleanup, utils from bugbug.model import BugModel @@ -25,6 +26,8 @@ self.calculate_importance = False + self.le = LabelBinarizer() + self.bug_type_extractors = bug_features.BugTypes.bug_type_extractors label_keyword_prefixes = { diff --git a/bugbug/models/rcatype.py b/bugbug/models/rcatype.py --- a/bugbug/models/rcatype.py +++ b/bugbug/models/rcatype.py @@ -12,6 +12,7 @@ from sklearn.feature_extraction import DictVectorizer from sklearn.multiclass import OneVsRestClassifier from sklearn.pipeline import Pipeline +from sklearn.preprocessing import LabelBinarizer from bugbug import bug_features, bugzilla, feature_cleanup, utils from bugbug.model import BugModel @@ -61,6 +62,8 @@ self.calculate_importance = False self.rca_subcategories_enabled = rca_subcategories_enabled + self.le = LabelBinarizer() + # should we consider only the main category or all sub categories self.RCA_TYPES = ( RCA_SUBCATEGORIES + RCA_CATEGORIES
{"golden_diff": "diff --git a/bugbug/models/bugtype.py b/bugbug/models/bugtype.py\n--- a/bugbug/models/bugtype.py\n+++ b/bugbug/models/bugtype.py\n@@ -12,6 +12,7 @@\n from sklearn.feature_extraction import DictVectorizer\n from sklearn.multiclass import OneVsRestClassifier\n from sklearn.pipeline import Pipeline\n+from sklearn.preprocessing import LabelBinarizer\n \n from bugbug import bug_features, bugzilla, feature_cleanup, utils\n from bugbug.model import BugModel\n@@ -25,6 +26,8 @@\n \n self.calculate_importance = False\n \n+ self.le = LabelBinarizer()\n+\n self.bug_type_extractors = bug_features.BugTypes.bug_type_extractors\n \n label_keyword_prefixes = {\ndiff --git a/bugbug/models/rcatype.py b/bugbug/models/rcatype.py\n--- a/bugbug/models/rcatype.py\n+++ b/bugbug/models/rcatype.py\n@@ -12,6 +12,7 @@\n from sklearn.feature_extraction import DictVectorizer\n from sklearn.multiclass import OneVsRestClassifier\n from sklearn.pipeline import Pipeline\n+from sklearn.preprocessing import LabelBinarizer\n \n from bugbug import bug_features, bugzilla, feature_cleanup, utils\n from bugbug.model import BugModel\n@@ -61,6 +62,8 @@\n self.calculate_importance = False\n self.rca_subcategories_enabled = rca_subcategories_enabled\n \n+ self.le = LabelBinarizer()\n+\n # should we consider only the main category or all sub categories\n self.RCA_TYPES = (\n RCA_SUBCATEGORIES + RCA_CATEGORIES\n", "issue": "[model:bugtype] ValueError: y should be a 1d array, got an array of shape (198635, 5) instead.\n> I'm wondering if the error encountered during the training of the `bugtype` model is something we should investigate.\r\n>\r\n> ```bash\r\n> Traceback (most recent call last):\r\n> File \"/home/promisefru/anaconda3/envs/bugbug/lib/python3.10/runpy.py\", line 196, in _run_module_as_main\r\n> return _run_code(code, main_globals, None,\r\n> File \"/home/promisefru/anaconda3/envs/bugbug/lib/python3.10/runpy.py\", line 86, in _run_code\r\n> exec(code, run_globals)\r\n> File \"/home/promisefru/mozilla/bugbug/scripts/trainer.py\", line 145, in <module>\r\n> main()\r\n> File \"/home/promisefru/mozilla/bugbug/scripts/trainer.py\", line 141, in main\r\n> retriever.go(args)\r\n> File \"/home/promisefru/mozilla/bugbug/scripts/trainer.py\", line 41, in go\r\n> metrics = model_obj.train(limit=args.limit)\r\n> File \"/home/promisefru/mozilla/bugbug/bugbug/model.py\", line 377, in train\r\n> self.le.fit(y)\r\n> File \"/home/promisefru/anaconda3/envs/bugbug/lib/python3.10/site-packages/sklearn/preprocessing/_label.py\", line 98, in fit\r\n> y = column_or_1d(y, warn=True)\r\n> File \"/home/promisefru/anaconda3/envs/bugbug/lib/python3.10/site-packages/sklearn/utils/validation.py\", line 1156, in column_or_1d\r\n> raise ValueError(\r\n> ValueError: y should be a 1d array, got an array of shape (198635, 5) instead.\r\n> ```\r\n\r\n_Originally posted by @PromiseFru in https://github.com/mozilla/bugbug/issues/3928#issuecomment-1875673580_\r\n\r\nSee also: https://github.com/mozilla/bugbug/pull/3823#pullrequestreview-1746981626\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. 
If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport logging\nimport re\n\nimport numpy as np\nimport xgboost\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.pipeline import Pipeline\n\nfrom bugbug import bug_features, bugzilla, feature_cleanup, utils\nfrom bugbug.model import BugModel\n\n# For the moment, rca - XYZ is treated of bugtype XYZ,\n# so we don't need to store it in a dictionary.\nRCA_CATEGORIES = [\n \"requirementerror\",\n \"poorarchitecture\",\n \"designerror\",\n \"codingerror\",\n \"testingerror\",\n \"externalsoftwareaffectingfirefox\",\n \"performanceerror\",\n \"standards\",\n \"systemerror\",\n \"localizationerror\",\n \"memory\",\n \"infrastructure/builderror\",\n \"communicationissues\",\n \"productdecision\",\n \"undocumentedchange\",\n \"cornercase\",\n]\n\nRCA_SUBCATEGORIES = [\n \"codingerror-syntaxerror\",\n \"codingerror-logicalerror\",\n \"codingerror-semanticerror\",\n \"codingerror-runtimeerror\",\n \"codingerror-unhandledexceptions\",\n \"codingerror-internalapiissue\",\n \"codingerror-networkissue\",\n \"codingerror-compatibilityissue\",\n \"codingerror-other\",\n]\n\nlogger = logging.getLogger(__name__)\n\n\nclass RCATypeModel(BugModel):\n def __init__(\n self, lemmatization=False, historical=False, rca_subcategories_enabled=False\n ):\n BugModel.__init__(self, lemmatization)\n\n self.calculate_importance = False\n self.rca_subcategories_enabled = rca_subcategories_enabled\n\n # should we consider only the main category or all sub categories\n self.RCA_TYPES = (\n RCA_SUBCATEGORIES + RCA_CATEGORIES\n if rca_subcategories_enabled\n else RCA_CATEGORIES\n )\n\n self.RCA_LIST = sorted(set(self.RCA_TYPES))\n\n feature_extractors = [\n bug_features.HasSTR(),\n bug_features.Severity(),\n bug_features.IsCoverityIssue(),\n bug_features.HasCrashSignature(),\n bug_features.HasURL(),\n bug_features.HasW3CURL(),\n bug_features.HasGithubURL(),\n # Ignore whiteboards that would make the ML completely skewed\n # bug_features.whiteboard(),\n bug_features.Patches(),\n bug_features.Landings(),\n bug_features.BlockedBugsNumber(),\n bug_features.EverAffected(),\n bug_features.AffectedThenUnaffected(),\n bug_features.Product(),\n bug_features.Component(),\n ]\n\n cleanup_functions = [\n feature_cleanup.url(),\n feature_cleanup.fileref(),\n feature_cleanup.synonyms(),\n ]\n\n self.extraction_pipeline = Pipeline(\n [\n (\n \"bug_extractor\",\n bug_features.BugExtractor(feature_extractors, cleanup_functions),\n ),\n ]\n )\n\n self.clf = Pipeline(\n [\n (\n \"union\",\n ColumnTransformer(\n [\n (\"data\", DictVectorizer(), \"data\"),\n (\"title\", self.text_vectorizer(min_df=0.001), \"title\"),\n (\n \"first_comment\",\n self.text_vectorizer(min_df=0.001),\n \"first_comment\",\n ),\n (\n \"comments\",\n self.text_vectorizer(min_df=0.001),\n \"comments\",\n ),\n ]\n ),\n ),\n (\n \"estimator\",\n OneVsRestClassifier(\n xgboost.XGBClassifier(n_jobs=utils.get_physical_cpu_count())\n ),\n ),\n ]\n )\n\n # return rca from a whiteboard string\n def get_rca_from_whiteboard(self, whiteboard_data):\n rca = []\n whiteboard_data = re.sub(\" \", \"\", whiteboard_data).lower()\n for whiteboard in whiteboard_data.split(\"[\"):\n if not any(whiteboard.startswith(s) for s in (\"rca-\", \"rca:\")):\n continue\n\n rca_whiteboard = re.sub(\"]\", \"\", whiteboard)\n\n # Hybrid cases: rca:X-Y\n rca_whiteboard = 
re.sub(\":\", \"-\", rca_whiteboard)\n\n rca_whiteboard_split = (\n rca_whiteboard.split(\"-\", 1)\n if self.rca_subcategories_enabled\n else rca_whiteboard.split(\"-\")\n )\n\n if rca_whiteboard_split[1] not in self.RCA_LIST:\n logger.warning(rca_whiteboard_split[1] + \" not in RCA_LIST\")\n else:\n rca.append(rca_whiteboard_split[1])\n return rca\n\n def get_labels(self):\n classes = {}\n for bug in bugzilla.get_bugs():\n target = np.zeros(len(self.RCA_LIST))\n for rca in self.get_rca_from_whiteboard(bug[\"whiteboard\"]):\n target[self.RCA_LIST.index(rca)] = 1\n classes[bug[\"id\"]] = target\n return classes, self.RCA_LIST\n\n def get_feature_names(self):\n return self.clf.named_steps[\"union\"].get_feature_names_out()\n\n def overwrite_classes(self, bugs, classes, probabilities):\n rca_values = self.get_rca(bugs)\n for i in len(classes):\n for rca in rca_values[i]:\n if rca in self.RCA_LIST:\n if probabilities:\n classes[i][self.RCA_LIST.index(rca)] = 1.0\n else:\n classes[i][self.RCA_LIST.index(rca)] = 1\n\n return classes\n", "path": "bugbug/models/rcatype.py"}, {"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport logging\nfrom typing import Iterable\n\nimport numpy as np\nimport xgboost\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.pipeline import Pipeline\n\nfrom bugbug import bug_features, bugzilla, feature_cleanup, utils\nfrom bugbug.model import BugModel\n\nlogger = logging.getLogger(__name__)\n\n\nclass BugTypeModel(BugModel):\n def __init__(self, lemmatization=False, historical=False):\n BugModel.__init__(self, lemmatization)\n\n self.calculate_importance = False\n\n self.bug_type_extractors = bug_features.BugTypes.bug_type_extractors\n\n label_keyword_prefixes = {\n keyword\n for extractor in self.bug_type_extractors\n for keyword in extractor.keyword_prefixes\n }\n\n feature_extractors = [\n bug_features.HasSTR(),\n bug_features.Severity(),\n # Ignore keywords that would make the ML completely skewed\n # (we are going to use them as 100% rules in the evaluation phase).\n bug_features.Keywords(label_keyword_prefixes),\n bug_features.IsCoverityIssue(),\n bug_features.HasCrashSignature(),\n bug_features.HasURL(),\n bug_features.HasW3CURL(),\n bug_features.HasGithubURL(),\n bug_features.Whiteboard(),\n bug_features.Patches(),\n bug_features.Landings(),\n bug_features.BlockedBugsNumber(),\n bug_features.EverAffected(),\n bug_features.AffectedThenUnaffected(),\n bug_features.Product(),\n bug_features.Component(),\n ]\n\n cleanup_functions = [\n feature_cleanup.url(),\n feature_cleanup.fileref(),\n feature_cleanup.synonyms(),\n ]\n\n self.extraction_pipeline = Pipeline(\n [\n (\n \"bug_extractor\",\n bug_features.BugExtractor(feature_extractors, cleanup_functions),\n ),\n ]\n )\n\n self.clf = Pipeline(\n [\n (\n \"union\",\n ColumnTransformer(\n [\n (\"data\", DictVectorizer(), \"data\"),\n (\"title\", self.text_vectorizer(min_df=0.001), \"title\"),\n (\n \"first_comment\",\n self.text_vectorizer(min_df=0.001),\n \"first_comment\",\n ),\n (\n \"comments\",\n self.text_vectorizer(min_df=0.001),\n \"comments\",\n ),\n ]\n ),\n ),\n (\n \"estimator\",\n OneVsRestClassifier(\n xgboost.XGBClassifier(n_jobs=utils.get_physical_cpu_count())\n ),\n ),\n ]\n )\n\n def 
get_labels(self) -> tuple[dict[int, np.ndarray], list[str]]:\n classes = {}\n\n bug_map = {bug[\"id\"]: bug for bug in bugzilla.get_bugs()}\n\n for bug_data in bug_map.values():\n target = np.zeros(len(self.bug_type_extractors))\n for i, is_type in enumerate(self.bug_type_extractors):\n if is_type(bug_data, bug_map):\n target[i] = 1\n\n classes[int(bug_data[\"id\"])] = target\n\n bug_types = [extractor.type_name for extractor in self.bug_type_extractors]\n\n for i, bug_type in enumerate(bug_types):\n logger.info(\n \"%d %s bugs\",\n sum(target[i] for target in classes.values()),\n bug_type,\n )\n\n return classes, bug_types\n\n def get_feature_names(self):\n return self.clf.named_steps[\"union\"].get_feature_names_out()\n\n def overwrite_classes(\n self,\n bugs: Iterable[bugzilla.BugDict],\n classes: dict[int, np.ndarray],\n probabilities: bool,\n ):\n bug_map = {bug[\"id\"]: bug for bug in bugs}\n\n for i, bug in enumerate(bugs):\n for j, is_type_applicable in enumerate(self.bug_type_extractors):\n if is_type_applicable(bug, bug_map):\n if probabilities:\n classes[i][j] = 1.0\n else:\n classes[i][j] = 1\n\n return classes\n", "path": "bugbug/models/bugtype.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport logging\nimport re\n\nimport numpy as np\nimport xgboost\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import LabelBinarizer\n\nfrom bugbug import bug_features, bugzilla, feature_cleanup, utils\nfrom bugbug.model import BugModel\n\n# For the moment, rca - XYZ is treated of bugtype XYZ,\n# so we don't need to store it in a dictionary.\nRCA_CATEGORIES = [\n \"requirementerror\",\n \"poorarchitecture\",\n \"designerror\",\n \"codingerror\",\n \"testingerror\",\n \"externalsoftwareaffectingfirefox\",\n \"performanceerror\",\n \"standards\",\n \"systemerror\",\n \"localizationerror\",\n \"memory\",\n \"infrastructure/builderror\",\n \"communicationissues\",\n \"productdecision\",\n \"undocumentedchange\",\n \"cornercase\",\n]\n\nRCA_SUBCATEGORIES = [\n \"codingerror-syntaxerror\",\n \"codingerror-logicalerror\",\n \"codingerror-semanticerror\",\n \"codingerror-runtimeerror\",\n \"codingerror-unhandledexceptions\",\n \"codingerror-internalapiissue\",\n \"codingerror-networkissue\",\n \"codingerror-compatibilityissue\",\n \"codingerror-other\",\n]\n\nlogger = logging.getLogger(__name__)\n\n\nclass RCATypeModel(BugModel):\n def __init__(\n self, lemmatization=False, historical=False, rca_subcategories_enabled=False\n ):\n BugModel.__init__(self, lemmatization)\n\n self.calculate_importance = False\n self.rca_subcategories_enabled = rca_subcategories_enabled\n\n self.le = LabelBinarizer()\n\n # should we consider only the main category or all sub categories\n self.RCA_TYPES = (\n RCA_SUBCATEGORIES + RCA_CATEGORIES\n if rca_subcategories_enabled\n else RCA_CATEGORIES\n )\n\n self.RCA_LIST = sorted(set(self.RCA_TYPES))\n\n feature_extractors = [\n bug_features.HasSTR(),\n bug_features.Severity(),\n bug_features.IsCoverityIssue(),\n bug_features.HasCrashSignature(),\n bug_features.HasURL(),\n bug_features.HasW3CURL(),\n bug_features.HasGithubURL(),\n # Ignore whiteboards that would make the ML completely 
skewed\n # bug_features.whiteboard(),\n bug_features.Patches(),\n bug_features.Landings(),\n bug_features.BlockedBugsNumber(),\n bug_features.EverAffected(),\n bug_features.AffectedThenUnaffected(),\n bug_features.Product(),\n bug_features.Component(),\n ]\n\n cleanup_functions = [\n feature_cleanup.url(),\n feature_cleanup.fileref(),\n feature_cleanup.synonyms(),\n ]\n\n self.extraction_pipeline = Pipeline(\n [\n (\n \"bug_extractor\",\n bug_features.BugExtractor(feature_extractors, cleanup_functions),\n ),\n ]\n )\n\n self.clf = Pipeline(\n [\n (\n \"union\",\n ColumnTransformer(\n [\n (\"data\", DictVectorizer(), \"data\"),\n (\"title\", self.text_vectorizer(min_df=0.001), \"title\"),\n (\n \"first_comment\",\n self.text_vectorizer(min_df=0.001),\n \"first_comment\",\n ),\n (\n \"comments\",\n self.text_vectorizer(min_df=0.001),\n \"comments\",\n ),\n ]\n ),\n ),\n (\n \"estimator\",\n OneVsRestClassifier(\n xgboost.XGBClassifier(n_jobs=utils.get_physical_cpu_count())\n ),\n ),\n ]\n )\n\n # return rca from a whiteboard string\n def get_rca_from_whiteboard(self, whiteboard_data):\n rca = []\n whiteboard_data = re.sub(\" \", \"\", whiteboard_data).lower()\n for whiteboard in whiteboard_data.split(\"[\"):\n if not any(whiteboard.startswith(s) for s in (\"rca-\", \"rca:\")):\n continue\n\n rca_whiteboard = re.sub(\"]\", \"\", whiteboard)\n\n # Hybrid cases: rca:X-Y\n rca_whiteboard = re.sub(\":\", \"-\", rca_whiteboard)\n\n rca_whiteboard_split = (\n rca_whiteboard.split(\"-\", 1)\n if self.rca_subcategories_enabled\n else rca_whiteboard.split(\"-\")\n )\n\n if rca_whiteboard_split[1] not in self.RCA_LIST:\n logger.warning(rca_whiteboard_split[1] + \" not in RCA_LIST\")\n else:\n rca.append(rca_whiteboard_split[1])\n return rca\n\n def get_labels(self):\n classes = {}\n for bug in bugzilla.get_bugs():\n target = np.zeros(len(self.RCA_LIST))\n for rca in self.get_rca_from_whiteboard(bug[\"whiteboard\"]):\n target[self.RCA_LIST.index(rca)] = 1\n classes[bug[\"id\"]] = target\n return classes, self.RCA_LIST\n\n def get_feature_names(self):\n return self.clf.named_steps[\"union\"].get_feature_names_out()\n\n def overwrite_classes(self, bugs, classes, probabilities):\n rca_values = self.get_rca(bugs)\n for i in len(classes):\n for rca in rca_values[i]:\n if rca in self.RCA_LIST:\n if probabilities:\n classes[i][self.RCA_LIST.index(rca)] = 1.0\n else:\n classes[i][self.RCA_LIST.index(rca)] = 1\n\n return classes\n", "path": "bugbug/models/rcatype.py"}, {"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. 
If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport logging\nfrom typing import Iterable\n\nimport numpy as np\nimport xgboost\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.multiclass import OneVsRestClassifier\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import LabelBinarizer\n\nfrom bugbug import bug_features, bugzilla, feature_cleanup, utils\nfrom bugbug.model import BugModel\n\nlogger = logging.getLogger(__name__)\n\n\nclass BugTypeModel(BugModel):\n def __init__(self, lemmatization=False, historical=False):\n BugModel.__init__(self, lemmatization)\n\n self.calculate_importance = False\n\n self.le = LabelBinarizer()\n\n self.bug_type_extractors = bug_features.BugTypes.bug_type_extractors\n\n label_keyword_prefixes = {\n keyword\n for extractor in self.bug_type_extractors\n for keyword in extractor.keyword_prefixes\n }\n\n feature_extractors = [\n bug_features.HasSTR(),\n bug_features.Severity(),\n # Ignore keywords that would make the ML completely skewed\n # (we are going to use them as 100% rules in the evaluation phase).\n bug_features.Keywords(label_keyword_prefixes),\n bug_features.IsCoverityIssue(),\n bug_features.HasCrashSignature(),\n bug_features.HasURL(),\n bug_features.HasW3CURL(),\n bug_features.HasGithubURL(),\n bug_features.Whiteboard(),\n bug_features.Patches(),\n bug_features.Landings(),\n bug_features.BlockedBugsNumber(),\n bug_features.EverAffected(),\n bug_features.AffectedThenUnaffected(),\n bug_features.Product(),\n bug_features.Component(),\n ]\n\n cleanup_functions = [\n feature_cleanup.url(),\n feature_cleanup.fileref(),\n feature_cleanup.synonyms(),\n ]\n\n self.extraction_pipeline = Pipeline(\n [\n (\n \"bug_extractor\",\n bug_features.BugExtractor(feature_extractors, cleanup_functions),\n ),\n ]\n )\n\n self.clf = Pipeline(\n [\n (\n \"union\",\n ColumnTransformer(\n [\n (\"data\", DictVectorizer(), \"data\"),\n (\"title\", self.text_vectorizer(min_df=0.001), \"title\"),\n (\n \"first_comment\",\n self.text_vectorizer(min_df=0.001),\n \"first_comment\",\n ),\n (\n \"comments\",\n self.text_vectorizer(min_df=0.001),\n \"comments\",\n ),\n ]\n ),\n ),\n (\n \"estimator\",\n OneVsRestClassifier(\n xgboost.XGBClassifier(n_jobs=utils.get_physical_cpu_count())\n ),\n ),\n ]\n )\n\n def get_labels(self) -> tuple[dict[int, np.ndarray], list[str]]:\n classes = {}\n\n bug_map = {bug[\"id\"]: bug for bug in bugzilla.get_bugs()}\n\n for bug_data in bug_map.values():\n target = np.zeros(len(self.bug_type_extractors))\n for i, is_type in enumerate(self.bug_type_extractors):\n if is_type(bug_data, bug_map):\n target[i] = 1\n\n classes[int(bug_data[\"id\"])] = target\n\n bug_types = [extractor.type_name for extractor in self.bug_type_extractors]\n\n for i, bug_type in enumerate(bug_types):\n logger.info(\n \"%d %s bugs\",\n sum(target[i] for target in classes.values()),\n bug_type,\n )\n\n return classes, bug_types\n\n def get_feature_names(self):\n return self.clf.named_steps[\"union\"].get_feature_names_out()\n\n def overwrite_classes(\n self,\n bugs: Iterable[bugzilla.BugDict],\n classes: dict[int, np.ndarray],\n probabilities: bool,\n ):\n bug_map = {bug[\"id\"]: bug for bug in bugs}\n\n for i, bug in enumerate(bugs):\n for j, is_type_applicable in enumerate(self.bug_type_extractors):\n if is_type_applicable(bug, bug_map):\n if probabilities:\n classes[i][j] = 1.0\n else:\n classes[i][j] = 1\n\n return 
classes\n", "path": "bugbug/models/bugtype.py"}]}
3,772
356
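To make the traceback in the row above concrete (this sketch is illustrative, not part of the dataset record): `LabelEncoder.fit` accepts only 1-D label arrays, while `get_labels` builds a 2-D multilabel indicator matrix, which `LabelBinarizer` (the class the golden diff assigns to `self.le`) does accept. A small self-contained reproduction, assuming NumPy and scikit-learn are installed:

```python
import numpy as np
from sklearn.preprocessing import LabelBinarizer, LabelEncoder

# Shape (3, 5) stands in for the (198635, 5) multilabel target in the traceback.
y_multilabel = np.array(
    [
        [1, 0, 0, 1, 0],
        [0, 1, 0, 0, 0],
        [0, 0, 1, 0, 1],
    ]
)

try:
    LabelEncoder().fit(y_multilabel)
except ValueError as exc:
    print("LabelEncoder:", exc)  # "y should be a 1d array, got an array of shape (3, 5) instead."

lb = LabelBinarizer()
lb.fit(y_multilabel)  # multilabel indicator matrices are accepted
print("LabelBinarizer classes:", lb.classes_)  # [0 1 2 3 4]
```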
gh_patches_debug_12274
rasdani/github-patches
git_diff
wagtail__wagtail-11223
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Report pages performance regression ### Issue Summary Various report pages have a performance regression in Wagtail 5.2, which I've tracked down to: https://github.com/wagtail/wagtail/commit/7ba1afb8a402a09be5838a026523be78f08ea877 https://github.com/wagtail/wagtail/pull/10822 On a few sites we've upgraded to Wagtail 5.2 - performance in the Site History report has been significantly reduced: Before: <img width="1717" alt="Screenshot 2023-11-11 at 21 12 02" src="https://github.com/wagtail/wagtail/assets/177332/79650e6b-9c96-4d21-bbdf-23b98c862bf4"> After: <img width="1716" alt="Screenshot 2023-11-11 at 21 13 09" src="https://github.com/wagtail/wagtail/assets/177332/e719e250-5c9c-4dc8-823b-1e1c3b40a74c"> <img width="900" alt="Screenshot 2023-11-11 at 21 13 19" src="https://github.com/wagtail/wagtail/assets/177332/5623467b-a0ca-4472-aa46-540ff568ac82"> ### Steps to Reproduce Find an existing Wagtail project with lots of pages, and log entries. Check http://127.0.0.1:9000/admin/reports/site-history/ with the project running Wagtail 5.2 - page will probably be slow to load. (Note: I did try and create a quick script to test this with Wagtail's starter project - but the performance of SQLite and a lack of a debug toolbar slowing things down made it a bit tricky!). - I have confirmed that this issue can be reproduced as described on a fresh Wagtail project: yes ### Technical details - Python version: 3.11 / any - Django version: 4.2 / any - Wagtail version: 5.2 / main - Browser version: n/a --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `wagtail/admin/views/reports/base.py` Content: ``` 1 from django.utils.translation import gettext_lazy as _ 2 3 from wagtail.admin.views.generic.models import IndexView 4 5 6 class ReportView(IndexView): 7 template_name = "wagtailadmin/reports/base_report.html" 8 title = "" 9 paginate_by = 50 10 11 def get_filtered_queryset(self): 12 return self.filter_queryset(self.get_queryset()) 13 14 def decorate_paginated_queryset(self, object_list): 15 # A hook point to allow rewriting the object list after pagination has been applied 16 return object_list 17 18 def get(self, request, *args, **kwargs): 19 self.filters, self.object_list = self.get_filtered_queryset() 20 self.object_list = self.decorate_paginated_queryset(self.object_list) 21 context = self.get_context_data() 22 return self.render_to_response(context) 23 24 def get_context_data(self, *args, **kwargs): 25 context = super().get_context_data(*args, **kwargs) 26 context["title"] = self.title 27 return context 28 29 30 class PageReportView(ReportView): 31 template_name = "wagtailadmin/reports/base_page_report.html" 32 export_headings = { 33 "latest_revision_created_at": _("Updated"), 34 "status_string": _("Status"), 35 "content_type.model_class._meta.verbose_name.title": _("Type"), 36 } 37 list_export = [ 38 "title", 39 "latest_revision_created_at", 40 "status_string", 41 "content_type.model_class._meta.verbose_name.title", 42 ] 43 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/wagtail/admin/views/reports/base.py b/wagtail/admin/views/reports/base.py --- a/wagtail/admin/views/reports/base.py +++ b/wagtail/admin/views/reports/base.py @@ -17,8 +17,12 @@ def get(self, request, *args, **kwargs): self.filters, self.object_list = self.get_filtered_queryset() - self.object_list = self.decorate_paginated_queryset(self.object_list) context = self.get_context_data() + # Decorate the queryset *after* Django's BaseListView has returned a paginated/reduced + # list of objects + context["object_list"] = self.decorate_paginated_queryset( + context["object_list"] + ) return self.render_to_response(context) def get_context_data(self, *args, **kwargs):
{"golden_diff": "diff --git a/wagtail/admin/views/reports/base.py b/wagtail/admin/views/reports/base.py\n--- a/wagtail/admin/views/reports/base.py\n+++ b/wagtail/admin/views/reports/base.py\n@@ -17,8 +17,12 @@\n \n def get(self, request, *args, **kwargs):\n self.filters, self.object_list = self.get_filtered_queryset()\n- self.object_list = self.decorate_paginated_queryset(self.object_list)\n context = self.get_context_data()\n+ # Decorate the queryset *after* Django's BaseListView has returned a paginated/reduced\n+ # list of objects\n+ context[\"object_list\"] = self.decorate_paginated_queryset(\n+ context[\"object_list\"]\n+ )\n return self.render_to_response(context)\n \n def get_context_data(self, *args, **kwargs):\n", "issue": "Report pages performance regression\n### Issue Summary\r\n\r\nVarious report pages have a performance regression in Wagtail 5.2, which I've tracked down to:\r\n\r\nhttps://github.com/wagtail/wagtail/commit/7ba1afb8a402a09be5838a026523be78f08ea877\r\nhttps://github.com/wagtail/wagtail/pull/10822\r\n\r\nOn a few sites we've upgraded to Wagtail 5.2 - performance in the Site History report has been significantly reduced:\r\n\r\nBefore:\r\n<img width=\"1717\" alt=\"Screenshot 2023-11-11 at 21 12 02\" src=\"https://github.com/wagtail/wagtail/assets/177332/79650e6b-9c96-4d21-bbdf-23b98c862bf4\">\r\n\r\nAfter:\r\n<img width=\"1716\" alt=\"Screenshot 2023-11-11 at 21 13 09\" src=\"https://github.com/wagtail/wagtail/assets/177332/e719e250-5c9c-4dc8-823b-1e1c3b40a74c\">\r\n<img width=\"900\" alt=\"Screenshot 2023-11-11 at 21 13 19\" src=\"https://github.com/wagtail/wagtail/assets/177332/5623467b-a0ca-4472-aa46-540ff568ac82\">\r\n\r\n### Steps to Reproduce\r\n\r\nFind an existing Wagtail project with lots of pages, and log entries.\r\n\r\nCheck http://127.0.0.1:9000/admin/reports/site-history/ with the project running Wagtail 5.2 - page will probably be slow to load.\r\n\r\n(Note: I did try and create a quick script to test this with Wagtail's starter project - but the performance of SQLite and a lack of a debug toolbar slowing things down made it a bit tricky!).\r\n\r\n- I have confirmed that this issue can be reproduced as described on a fresh Wagtail project: yes\r\n\r\n### Technical details\r\n\r\n- Python version: 3.11 / any\r\n- Django version: 4.2 / any\r\n- Wagtail version: 5.2 / main\r\n- Browser version: n/a\n", "before_files": [{"content": "from django.utils.translation import gettext_lazy as _\n\nfrom wagtail.admin.views.generic.models import IndexView\n\n\nclass ReportView(IndexView):\n template_name = \"wagtailadmin/reports/base_report.html\"\n title = \"\"\n paginate_by = 50\n\n def get_filtered_queryset(self):\n return self.filter_queryset(self.get_queryset())\n\n def decorate_paginated_queryset(self, object_list):\n # A hook point to allow rewriting the object list after pagination has been applied\n return object_list\n\n def get(self, request, *args, **kwargs):\n self.filters, self.object_list = self.get_filtered_queryset()\n self.object_list = self.decorate_paginated_queryset(self.object_list)\n context = self.get_context_data()\n return self.render_to_response(context)\n\n def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(*args, **kwargs)\n context[\"title\"] = self.title\n return context\n\n\nclass PageReportView(ReportView):\n template_name = \"wagtailadmin/reports/base_page_report.html\"\n export_headings = {\n \"latest_revision_created_at\": _(\"Updated\"),\n \"status_string\": _(\"Status\"),\n 
\"content_type.model_class._meta.verbose_name.title\": _(\"Type\"),\n }\n list_export = [\n \"title\",\n \"latest_revision_created_at\",\n \"status_string\",\n \"content_type.model_class._meta.verbose_name.title\",\n ]\n", "path": "wagtail/admin/views/reports/base.py"}], "after_files": [{"content": "from django.utils.translation import gettext_lazy as _\n\nfrom wagtail.admin.views.generic.models import IndexView\n\n\nclass ReportView(IndexView):\n template_name = \"wagtailadmin/reports/base_report.html\"\n title = \"\"\n paginate_by = 50\n\n def get_filtered_queryset(self):\n return self.filter_queryset(self.get_queryset())\n\n def decorate_paginated_queryset(self, object_list):\n # A hook point to allow rewriting the object list after pagination has been applied\n return object_list\n\n def get(self, request, *args, **kwargs):\n self.filters, self.object_list = self.get_filtered_queryset()\n context = self.get_context_data()\n # Decorate the queryset *after* Django's BaseListView has returned a paginated/reduced\n # list of objects\n context[\"object_list\"] = self.decorate_paginated_queryset(\n context[\"object_list\"]\n )\n return self.render_to_response(context)\n\n def get_context_data(self, *args, **kwargs):\n context = super().get_context_data(*args, **kwargs)\n context[\"title\"] = self.title\n return context\n\n\nclass PageReportView(ReportView):\n template_name = \"wagtailadmin/reports/base_page_report.html\"\n export_headings = {\n \"latest_revision_created_at\": _(\"Updated\"),\n \"status_string\": _(\"Status\"),\n \"content_type.model_class._meta.verbose_name.title\": _(\"Type\"),\n }\n list_export = [\n \"title\",\n \"latest_revision_created_at\",\n \"status_string\",\n \"content_type.model_class._meta.verbose_name.title\",\n ]\n", "path": "wagtail/admin/views/reports/base.py"}]}
1,214
186
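The regression in the row above is purely an ordering problem: decorating the full queryset does per-row work for every log entry, whereas decorating after `get_context_data()` has paginated only touches one page. A framework-free toy sketch of that cost difference (the row count and `decorate` body are made up for illustration and are not from the report in the issue):

```python
def decorate(rows):
    # Stand-in for decorate_paginated_queryset(); pretend each row needs an extra lookup.
    return [(row, f"decorated {row}") for row in rows]

all_rows = list(range(200_000))  # a large site-history table
page_size = 50

before_fix = decorate(all_rows)[:page_size]  # decorate everything, then take one page
after_fix = decorate(all_rows[:page_size])   # paginate first, then decorate one page
assert before_fix == after_fix               # same rendered page, ~4000x less decoration work
```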
gh_patches_debug_24294
rasdani/github-patches
git_diff
cupy__cupy-6989
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Exception raised in `TCPStore.__del__` upon process termination ### Description `__del__` should not perform syncronization when called during process termination. ``` Exception ignored in: <function TCPStore.__del__ at 0x7fb939be23a0> Traceback (most recent call last): File "/home/maehashi/Development/cupy/cupyx/distributed/_store.py", line 49, in __del__ File "/home/maehashi/Development/cupy/cupyx/distributed/_store.py", line 97, in stop File "/home/maehashi/Development/cupy/cupyx/distributed/_store.py", line 31, in join File "/home/maehashi/.pyenv/versions/3.8.1/lib/python3.8/multiprocessing/connection.py", line 251, in recv ModuleNotFoundError: import of builtins halted; None in sys.modules ``` ### To Reproduce _No response_ ### Installation _No response_ ### Environment _No response_ ### Additional Information _No response_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `cupyx/distributed/_store.py` Content: ``` 1 from ctypes import sizeof 2 import multiprocessing 3 import threading 4 import socket 5 import time 6 7 from cupyx.distributed import _klv_utils 8 from cupyx.distributed import _store_actions 9 10 11 _DEFAULT_HOST = '127.0.0.1' 12 _DEFAULT_PORT = 13333 13 14 15 class ExceptionAwareProcess(multiprocessing.Process): 16 def __init__(self, *args, **kwargs): 17 super().__init__(*args, **kwargs) 18 self._exception = None 19 self._parent_p, self._child_p = multiprocessing.Pipe() 20 21 def run(self): 22 try: 23 super().run() 24 self._child_p.send(None) 25 except Exception as e: 26 self._child_p.send(e) 27 28 def join(self): 29 super().join() 30 if self._parent_p.poll(): 31 exception = self._parent_p.recv() 32 if exception is not None: 33 raise exception 34 35 36 class TCPStore: 37 # This is only used for initialization of nccl so we don't care 38 # too much about peformance 39 def __init__(self, world_size): 40 self.storage = {} 41 self._process = None 42 self._world_size = world_size 43 self._run = multiprocessing.Value('b', 1) 44 # For implementing a barrier 45 self._lock = threading.Lock() 46 self._current_barrier = None 47 48 def __del__(self): 49 self.stop() 50 51 def _set_process(self, process): 52 self._process = process 53 54 def _process_request(self, c_socket): 55 with c_socket: 56 # Receive in KLV format 57 action_bytes = c_socket.recv(sizeof(_klv_utils.action_t)) 58 if len(action_bytes) > 0: 59 action_m = _klv_utils.action_t.from_buffer_copy(action_bytes) 60 if action_m.length > 256: 61 raise ValueError('Invalid length for message') 62 value = bytearray(action_m.value)[:action_m.length] 63 r = _store_actions.execute_action(action_m.action, value, self) 64 if r is not None: 65 c_socket.sendall(r.klv()) 66 67 def _server_loop(self, host, port): 68 # This is for minimum info exchange during initialization 69 # a single connection allows to implement locking mechanics easily 70 with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: 71 s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) 72 s.bind((host, port)) 73 s.listen() 74 s.settimeout(0.5) 75 while self._run.value == 1: 76 try: 77 c_socket, addr = s.accept() 78 except socket.timeout: 79 continue 80 81 t = threading.Thread( 82 target=self._process_request, 83 args=(c_socket,), daemon=True) 84 t.start() 85 86 def run(self, host=_DEFAULT_HOST, port=_DEFAULT_PORT): 87 # Run the TCP store in a different 
process 88 p = ExceptionAwareProcess( 89 target=self._server_loop, args=(host, port)) 90 p.start() 91 self._process = p 92 93 def stop(self): 94 if self._process is not None: 95 with self._run.get_lock(): 96 self._run.value = 0 97 self._process.join() 98 99 100 class TCPStoreProxy: 101 102 MAX_NUM_RETRIES = 50 103 DELAY_FOR_RETRY = 0.5 104 105 def __init__(self, host=_DEFAULT_HOST, port=_DEFAULT_PORT): 106 self.host = host 107 self.port = port 108 109 def _send_recv(self, action): 110 # Retry several times in case the rank 0 has not established the 111 # main store yet 112 for i in range(TCPStoreProxy.MAX_NUM_RETRIES): 113 try: 114 with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: 115 # TODO retry connects 116 s.connect((self.host, self.port)) 117 s.sendall(action.klv()) 118 result_bytes = s.recv(sizeof( 119 _klv_utils.result_action_t)) 120 if len(result_bytes) > 0: 121 result = _klv_utils.result_action_t.from_buffer_copy( 122 result_bytes) 123 value = bytearray(result.value)[:result.length] 124 if result.status == 0: 125 return action.decode_result(value) 126 else: 127 raise RuntimeError(value.decode('utf-8')) 128 except ConnectionRefusedError: 129 time.sleep(TCPStoreProxy.DELAY_FOR_RETRY) 130 raise RuntimeError('TCPStore is not available') 131 132 def __getitem__(self, key): 133 return self._send_recv(_store_actions.Get(key)) 134 135 def __setitem__(self, key, value): 136 self._send_recv(_store_actions.Set(key, value)) 137 138 def barrier(self): 139 # Barrier has special semantics 140 self._send_recv(_store_actions.Barrier()) 141 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/cupyx/distributed/_store.py b/cupyx/distributed/_store.py --- a/cupyx/distributed/_store.py +++ b/cupyx/distributed/_store.py @@ -1,3 +1,4 @@ +import atexit from ctypes import sizeof import multiprocessing import threading @@ -11,6 +12,14 @@ _DEFAULT_HOST = '127.0.0.1' _DEFAULT_PORT = 13333 +_exit_mode = False + + [email protected] +def _exit(): + global _exit_mode + _exit_mode = True + class ExceptionAwareProcess(multiprocessing.Process): def __init__(self, *args, **kwargs): @@ -46,7 +55,8 @@ self._current_barrier = None def __del__(self): - self.stop() + if not _exit_mode: + self.stop() def _set_process(self, process): self._process = process @@ -91,6 +101,8 @@ self._process = p def stop(self): + if _exit_mode: + return # Prevent shutdown errors if self._process is not None: with self._run.get_lock(): self._run.value = 0
{"golden_diff": "diff --git a/cupyx/distributed/_store.py b/cupyx/distributed/_store.py\n--- a/cupyx/distributed/_store.py\n+++ b/cupyx/distributed/_store.py\n@@ -1,3 +1,4 @@\n+import atexit\n from ctypes import sizeof\n import multiprocessing\n import threading\n@@ -11,6 +12,14 @@\n _DEFAULT_HOST = '127.0.0.1'\n _DEFAULT_PORT = 13333\n \n+_exit_mode = False\n+\n+\[email protected]\n+def _exit():\n+ global _exit_mode\n+ _exit_mode = True\n+\n \n class ExceptionAwareProcess(multiprocessing.Process):\n def __init__(self, *args, **kwargs):\n@@ -46,7 +55,8 @@\n self._current_barrier = None\n \n def __del__(self):\n- self.stop()\n+ if not _exit_mode:\n+ self.stop()\n \n def _set_process(self, process):\n self._process = process\n@@ -91,6 +101,8 @@\n self._process = p\n \n def stop(self):\n+ if _exit_mode:\n+ return # Prevent shutdown errors\n if self._process is not None:\n with self._run.get_lock():\n self._run.value = 0\n", "issue": "Exception raised in `TCPStore.__del__` upon process termination\n### Description\n\n`__del__` should not perform syncronization when called during process termination.\r\n\r\n```\r\nException ignored in: <function TCPStore.__del__ at 0x7fb939be23a0>\r\nTraceback (most recent call last):\r\n File \"/home/maehashi/Development/cupy/cupyx/distributed/_store.py\", line 49, in __del__\r\n File \"/home/maehashi/Development/cupy/cupyx/distributed/_store.py\", line 97, in stop\r\n File \"/home/maehashi/Development/cupy/cupyx/distributed/_store.py\", line 31, in join\r\n File \"/home/maehashi/.pyenv/versions/3.8.1/lib/python3.8/multiprocessing/connection.py\", line 251, in recv\r\nModuleNotFoundError: import of builtins halted; None in sys.modules\r\n```\n\n### To Reproduce\n\n_No response_\n\n### Installation\n\n_No response_\n\n### Environment\n\n_No response_\n\n### Additional Information\n\n_No response_\n", "before_files": [{"content": "from ctypes import sizeof\nimport multiprocessing\nimport threading\nimport socket\nimport time\n\nfrom cupyx.distributed import _klv_utils\nfrom cupyx.distributed import _store_actions\n\n\n_DEFAULT_HOST = '127.0.0.1'\n_DEFAULT_PORT = 13333\n\n\nclass ExceptionAwareProcess(multiprocessing.Process):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._exception = None\n self._parent_p, self._child_p = multiprocessing.Pipe()\n\n def run(self):\n try:\n super().run()\n self._child_p.send(None)\n except Exception as e:\n self._child_p.send(e)\n\n def join(self):\n super().join()\n if self._parent_p.poll():\n exception = self._parent_p.recv()\n if exception is not None:\n raise exception\n\n\nclass TCPStore:\n # This is only used for initialization of nccl so we don't care\n # too much about peformance\n def __init__(self, world_size):\n self.storage = {}\n self._process = None\n self._world_size = world_size\n self._run = multiprocessing.Value('b', 1)\n # For implementing a barrier\n self._lock = threading.Lock()\n self._current_barrier = None\n\n def __del__(self):\n self.stop()\n\n def _set_process(self, process):\n self._process = process\n\n def _process_request(self, c_socket):\n with c_socket:\n # Receive in KLV format\n action_bytes = c_socket.recv(sizeof(_klv_utils.action_t))\n if len(action_bytes) > 0:\n action_m = _klv_utils.action_t.from_buffer_copy(action_bytes)\n if action_m.length > 256:\n raise ValueError('Invalid length for message')\n value = bytearray(action_m.value)[:action_m.length]\n r = _store_actions.execute_action(action_m.action, value, self)\n if r is not None:\n 
c_socket.sendall(r.klv())\n\n def _server_loop(self, host, port):\n # This is for minimum info exchange during initialization\n # a single connection allows to implement locking mechanics easily\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n s.bind((host, port))\n s.listen()\n s.settimeout(0.5)\n while self._run.value == 1:\n try:\n c_socket, addr = s.accept()\n except socket.timeout:\n continue\n\n t = threading.Thread(\n target=self._process_request,\n args=(c_socket,), daemon=True)\n t.start()\n\n def run(self, host=_DEFAULT_HOST, port=_DEFAULT_PORT):\n # Run the TCP store in a different process\n p = ExceptionAwareProcess(\n target=self._server_loop, args=(host, port))\n p.start()\n self._process = p\n\n def stop(self):\n if self._process is not None:\n with self._run.get_lock():\n self._run.value = 0\n self._process.join()\n\n\nclass TCPStoreProxy:\n\n MAX_NUM_RETRIES = 50\n DELAY_FOR_RETRY = 0.5\n\n def __init__(self, host=_DEFAULT_HOST, port=_DEFAULT_PORT):\n self.host = host\n self.port = port\n\n def _send_recv(self, action):\n # Retry several times in case the rank 0 has not established the\n # main store yet\n for i in range(TCPStoreProxy.MAX_NUM_RETRIES):\n try:\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n # TODO retry connects\n s.connect((self.host, self.port))\n s.sendall(action.klv())\n result_bytes = s.recv(sizeof(\n _klv_utils.result_action_t))\n if len(result_bytes) > 0:\n result = _klv_utils.result_action_t.from_buffer_copy(\n result_bytes)\n value = bytearray(result.value)[:result.length]\n if result.status == 0:\n return action.decode_result(value)\n else:\n raise RuntimeError(value.decode('utf-8'))\n except ConnectionRefusedError:\n time.sleep(TCPStoreProxy.DELAY_FOR_RETRY)\n raise RuntimeError('TCPStore is not available')\n\n def __getitem__(self, key):\n return self._send_recv(_store_actions.Get(key))\n\n def __setitem__(self, key, value):\n self._send_recv(_store_actions.Set(key, value))\n\n def barrier(self):\n # Barrier has special semantics\n self._send_recv(_store_actions.Barrier())\n", "path": "cupyx/distributed/_store.py"}], "after_files": [{"content": "import atexit\nfrom ctypes import sizeof\nimport multiprocessing\nimport threading\nimport socket\nimport time\n\nfrom cupyx.distributed import _klv_utils\nfrom cupyx.distributed import _store_actions\n\n\n_DEFAULT_HOST = '127.0.0.1'\n_DEFAULT_PORT = 13333\n\n_exit_mode = False\n\n\[email protected]\ndef _exit():\n global _exit_mode\n _exit_mode = True\n\n\nclass ExceptionAwareProcess(multiprocessing.Process):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._exception = None\n self._parent_p, self._child_p = multiprocessing.Pipe()\n\n def run(self):\n try:\n super().run()\n self._child_p.send(None)\n except Exception as e:\n self._child_p.send(e)\n\n def join(self):\n super().join()\n if self._parent_p.poll():\n exception = self._parent_p.recv()\n if exception is not None:\n raise exception\n\n\nclass TCPStore:\n # This is only used for initialization of nccl so we don't care\n # too much about peformance\n def __init__(self, world_size):\n self.storage = {}\n self._process = None\n self._world_size = world_size\n self._run = multiprocessing.Value('b', 1)\n # For implementing a barrier\n self._lock = threading.Lock()\n self._current_barrier = None\n\n def __del__(self):\n if not _exit_mode:\n self.stop()\n\n def _set_process(self, process):\n self._process = process\n\n def 
_process_request(self, c_socket):\n with c_socket:\n # Receive in KLV format\n action_bytes = c_socket.recv(sizeof(_klv_utils.action_t))\n if len(action_bytes) > 0:\n action_m = _klv_utils.action_t.from_buffer_copy(action_bytes)\n if action_m.length > 256:\n raise ValueError('Invalid length for message')\n value = bytearray(action_m.value)[:action_m.length]\n r = _store_actions.execute_action(action_m.action, value, self)\n if r is not None:\n c_socket.sendall(r.klv())\n\n def _server_loop(self, host, port):\n # This is for minimum info exchange during initialization\n # a single connection allows to implement locking mechanics easily\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n s.bind((host, port))\n s.listen()\n s.settimeout(0.5)\n while self._run.value == 1:\n try:\n c_socket, addr = s.accept()\n except socket.timeout:\n continue\n\n t = threading.Thread(\n target=self._process_request,\n args=(c_socket,), daemon=True)\n t.start()\n\n def run(self, host=_DEFAULT_HOST, port=_DEFAULT_PORT):\n # Run the TCP store in a different process\n p = ExceptionAwareProcess(\n target=self._server_loop, args=(host, port))\n p.start()\n self._process = p\n\n def stop(self):\n if _exit_mode:\n return # Prevent shutdown errors\n if self._process is not None:\n with self._run.get_lock():\n self._run.value = 0\n self._process.join()\n\n\nclass TCPStoreProxy:\n\n MAX_NUM_RETRIES = 50\n DELAY_FOR_RETRY = 0.5\n\n def __init__(self, host=_DEFAULT_HOST, port=_DEFAULT_PORT):\n self.host = host\n self.port = port\n\n def _send_recv(self, action):\n # Retry several times in case the rank 0 has not established the\n # main store yet\n for i in range(TCPStoreProxy.MAX_NUM_RETRIES):\n try:\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n # TODO retry connects\n s.connect((self.host, self.port))\n s.sendall(action.klv())\n result_bytes = s.recv(sizeof(\n _klv_utils.result_action_t))\n if len(result_bytes) > 0:\n result = _klv_utils.result_action_t.from_buffer_copy(\n result_bytes)\n value = bytearray(result.value)[:result.length]\n if result.status == 0:\n return action.decode_result(value)\n else:\n raise RuntimeError(value.decode('utf-8'))\n except ConnectionRefusedError:\n time.sleep(TCPStoreProxy.DELAY_FOR_RETRY)\n raise RuntimeError('TCPStore is not available')\n\n def __getitem__(self, key):\n return self._send_recv(_store_actions.Get(key))\n\n def __setitem__(self, key, value):\n self._send_recv(_store_actions.Set(key, value))\n\n def barrier(self):\n # Barrier has special semantics\n self._send_recv(_store_actions.Barrier())\n", "path": "cupyx/distributed/_store.py"}]}
1,843
296
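The cupyx patch in the record above boils down to a reusable shutdown-guard idiom: an atexit hook flips a module-level flag, and `__del__`/`stop()` skip inter-process synchronization once the flag is set, so late finalizers no longer trip over a half-torn-down interpreter. The standalone sketch below (illustrative names only, not taken from the record) shows the same idiom in isolation:

```python
import atexit
import multiprocessing

# Module-level flag: one atexit registration covers every instance.
_exit_mode = False


@atexit.register
def _mark_exit():
    # atexit handlers run before final object teardown, so flipping the flag
    # here lets any __del__ that fires during shutdown know cleanup is unsafe.
    global _exit_mode
    _exit_mode = True


def _noop():
    pass


class Worker:
    """Owns a child process that must not be joined during interpreter exit."""

    def __init__(self):
        self._process = multiprocessing.Process(target=_noop)
        self._process.start()

    def stop(self):
        if _exit_mode:
            return  # interpreter is shutting down; skip cross-process cleanup
        if self._process is not None:
            self._process.join()
            self._process = None

    def __del__(self):
        if not _exit_mode:
            self.stop()


if __name__ == "__main__":
    w = Worker()
    w.stop()  # explicit cleanup; __del__ at exit becomes a no-op either way
```

Keeping the flag at module level rather than on the instance mirrors the patch: a single atexit registration protects every live object at once, instead of each object trying to detect shutdown on its own.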
gh_patches_debug_14928
rasdani/github-patches
git_diff
ansible-collections__community.vmware-1084
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- vmware_host_lockdown crashes on failure ##### SUMMARY Today, I wanted to enable lockdown mode on a host. This failed, although I didn't find out yet why. But that's not important. The bug is that the module includes `vim` from `pyvmomi` instead of `pyVmomi` and doesn't check that this works: https://github.com/ansible-collections/community.vmware/blob/f418bdaa6a678c09b6fb9115d927d8c44d50060f/plugins/modules/vmware_host_lockdown.py#L123-L126 I think nobody ran into this issue yet because enabling or disabling lockdown seldom fails (in my experience) and `vim` is only used in this case: https://github.com/ansible-collections/community.vmware/blob/f418bdaa6a678c09b6fb9115d927d8c44d50060f/plugins/modules/vmware_host_lockdown.py#L176-L182 ##### ISSUE TYPE - Bug Report ##### COMPONENT NAME vmware_host_lockdown ##### ANSIBLE VERSION ``` ansible [core 2.11.6] config file = None configured module search path = ['/root/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /usr/lib/python3.9/site-packages/ansible ansible collection location = /root/.ansible/collections:/usr/share/ansible/collections executable location = /usr/bin/ansible python version = 3.9.1 (default, Aug 19 2021, 02:58:42) [GCC 10.2.0] jinja version = 3.0.2 libyaml = True ``` ##### COLLECTION VERSION ``` # /usr/lib/python3.9/site-packages/ansible_collections Collection Version ---------------- ------- community.vmware 1.15.0 ``` ##### CONFIGURATION ``` ``` ##### OS / ENVIRONMENT VMware Photon OS 4.0 and vSphere 7.0U2, but this is irrelevant. ##### STEPS TO REPRODUCE Tricky. As I've said, enabling / disabling lockdown usually works. ##### EXPECTED RESULTS A failure. ##### ACTUAL RESULTS ``` An exception occurred during task execution. To see the full traceback, use -vvv. The error was: NameError: name 'vim' is not defined ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `plugins/modules/vmware_host_lockdown.py` Content: ``` 1 #!/usr/bin/python 2 # -*- coding: utf-8 -*- 3 4 # Copyright: (c) 2018, Abhijeet Kasurde <[email protected]> 5 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) 6 7 from __future__ import absolute_import, division, print_function 8 __metaclass__ = type 9 10 11 DOCUMENTATION = r''' 12 --- 13 module: vmware_host_lockdown 14 short_description: Manage administrator permission for the local administrative account for the ESXi host 15 description: 16 - This module can be used to manage administrator permission for the local administrative account for the host when ESXi hostname is given. 17 - All parameters and VMware objects values are case sensitive. 18 - This module is destructive as administrator permission are managed using APIs used, please read options carefully and proceed. 19 - Please specify C(hostname) as vCenter IP or hostname only, as lockdown operations are not possible from standalone ESXi server. 20 author: 21 - Abhijeet Kasurde (@Akasurde) 22 notes: 23 - Tested on vSphere 6.5 24 requirements: 25 - python >= 2.6 26 - PyVmomi 27 options: 28 cluster_name: 29 description: 30 - Name of cluster. 31 - All host systems from given cluster used to manage lockdown. 32 - Required parameter, if C(esxi_hostname) is not set. 33 type: str 34 esxi_hostname: 35 description: 36 - List of ESXi hostname to manage lockdown. 
37 - Required parameter, if C(cluster_name) is not set. 38 - See examples for specifications. 39 type: list 40 elements: str 41 state: 42 description: 43 - State of hosts system 44 - If set to C(present), all host systems will be set in lockdown mode. 45 - If host system is already in lockdown mode and set to C(present), no action will be taken. 46 - If set to C(absent), all host systems will be removed from lockdown mode. 47 - If host system is already out of lockdown mode and set to C(absent), no action will be taken. 48 default: present 49 choices: [ present, absent ] 50 type: str 51 extends_documentation_fragment: 52 - community.vmware.vmware.documentation 53 54 ''' 55 56 EXAMPLES = r''' 57 - name: Enter host system into lockdown mode 58 community.vmware.vmware_host_lockdown: 59 hostname: '{{ vcenter_hostname }}' 60 username: '{{ vcenter_username }}' 61 password: '{{ vcenter_password }}' 62 esxi_hostname: '{{ esxi_hostname }}' 63 state: present 64 delegate_to: localhost 65 66 - name: Exit host systems from lockdown mode 67 community.vmware.vmware_host_lockdown: 68 hostname: '{{ vcenter_hostname }}' 69 username: '{{ vcenter_username }}' 70 password: '{{ vcenter_password }}' 71 esxi_hostname: '{{ esxi_hostname }}' 72 state: absent 73 delegate_to: localhost 74 75 - name: Enter host systems into lockdown mode 76 community.vmware.vmware_host_lockdown: 77 hostname: '{{ vcenter_hostname }}' 78 username: '{{ vcenter_username }}' 79 password: '{{ vcenter_password }}' 80 esxi_hostname: 81 - '{{ esxi_hostname_1 }}' 82 - '{{ esxi_hostname_2 }}' 83 state: present 84 delegate_to: localhost 85 86 - name: Exit host systems from lockdown mode 87 community.vmware.vmware_host_lockdown: 88 hostname: '{{ vcenter_hostname }}' 89 username: '{{ vcenter_username }}' 90 password: '{{ vcenter_password }}' 91 esxi_hostname: 92 - '{{ esxi_hostname_1 }}' 93 - '{{ esxi_hostname_2 }}' 94 state: absent 95 delegate_to: localhost 96 97 - name: Enter all host system from cluster into lockdown mode 98 community.vmware.vmware_host_lockdown: 99 hostname: '{{ vcenter_hostname }}' 100 username: '{{ vcenter_username }}' 101 password: '{{ vcenter_password }}' 102 cluster_name: '{{ cluster_name }}' 103 state: present 104 delegate_to: localhost 105 ''' 106 107 RETURN = r''' 108 results: 109 description: metadata about state of Host system lock down 110 returned: always 111 type: dict 112 sample: { 113 "host_lockdown_state": { 114 "DC0_C0": { 115 "current_state": "present", 116 "previous_state": "absent", 117 "desired_state": "present", 118 }, 119 } 120 } 121 ''' 122 123 try: 124 from pyvmomi import vim 125 except ImportError: 126 pass 127 128 from ansible.module_utils.basic import AnsibleModule 129 from ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi 130 from ansible.module_utils._text import to_native 131 132 133 class VmwareLockdownManager(PyVmomi): 134 def __init__(self, module): 135 super(VmwareLockdownManager, self).__init__(module) 136 if not self.is_vcenter(): 137 self.module.fail_json(msg="Lockdown operations are performed from vCenter only. " 138 "hostname %s is an ESXi server. Please specify hostname " 139 "as vCenter server." 
% self.module.params['hostname']) 140 cluster_name = self.params.get('cluster_name', None) 141 esxi_host_name = self.params.get('esxi_hostname', None) 142 self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name) 143 144 def ensure(self): 145 """ 146 Function to manage internal state management 147 """ 148 results = dict(changed=False, host_lockdown_state=dict()) 149 change_list = [] 150 desired_state = self.params.get('state') 151 for host in self.hosts: 152 results['host_lockdown_state'][host.name] = dict(current_state='', 153 desired_state=desired_state, 154 previous_state='' 155 ) 156 changed = False 157 try: 158 if host.config.adminDisabled: 159 results['host_lockdown_state'][host.name]['previous_state'] = 'present' 160 if desired_state == 'absent': 161 if not self.module.check_mode: 162 host.ExitLockdownMode() 163 results['host_lockdown_state'][host.name]['current_state'] = 'absent' 164 changed = True 165 else: 166 results['host_lockdown_state'][host.name]['current_state'] = 'present' 167 elif not host.config.adminDisabled: 168 results['host_lockdown_state'][host.name]['previous_state'] = 'absent' 169 if desired_state == 'present': 170 if not self.module.check_mode: 171 host.EnterLockdownMode() 172 results['host_lockdown_state'][host.name]['current_state'] = 'present' 173 changed = True 174 else: 175 results['host_lockdown_state'][host.name]['current_state'] = 'absent' 176 except vim.fault.HostConfigFault as host_config_fault: 177 self.module.fail_json(msg="Failed to manage lockdown mode for esxi" 178 " hostname %s : %s" % (host.name, to_native(host_config_fault.msg))) 179 except vim.fault.AdminDisabled as admin_disabled: 180 self.module.fail_json(msg="Failed to manage lockdown mode as administrator " 181 "permission has been disabled for " 182 "esxi hostname %s : %s" % (host.name, to_native(admin_disabled.msg))) 183 except Exception as generic_exception: 184 self.module.fail_json(msg="Failed to manage lockdown mode due to generic exception for esxi " 185 "hostname %s : %s" % (host.name, to_native(generic_exception))) 186 change_list.append(changed) 187 188 if any(change_list): 189 results['changed'] = True 190 191 self.module.exit_json(**results) 192 193 194 def main(): 195 argument_spec = vmware_argument_spec() 196 argument_spec.update( 197 cluster_name=dict(type='str', required=False), 198 esxi_hostname=dict(type='list', required=False, elements='str'), 199 state=dict(type='str', default='present', choices=['present', 'absent'], required=False), 200 ) 201 202 module = AnsibleModule( 203 argument_spec=argument_spec, 204 supports_check_mode=True, 205 required_one_of=[ 206 ['cluster_name', 'esxi_hostname'], 207 ] 208 ) 209 210 vmware_lockdown_mgr = VmwareLockdownManager(module) 211 vmware_lockdown_mgr.ensure() 212 213 214 if __name__ == "__main__": 215 main() 216 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/plugins/modules/vmware_host_lockdown.py b/plugins/modules/vmware_host_lockdown.py --- a/plugins/modules/vmware_host_lockdown.py +++ b/plugins/modules/vmware_host_lockdown.py @@ -121,9 +121,10 @@ ''' try: - from pyvmomi import vim + from pyVmomi import vim + HAS_PYVMOMI = True except ImportError: - pass + HAS_PYVMOMI = False from ansible.module_utils.basic import AnsibleModule from ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi @@ -207,6 +208,9 @@ ] ) + if not HAS_PYVMOMI: + module.fail_json(msg='pyvmomi required for this module') + vmware_lockdown_mgr = VmwareLockdownManager(module) vmware_lockdown_mgr.ensure()
{"golden_diff": "diff --git a/plugins/modules/vmware_host_lockdown.py b/plugins/modules/vmware_host_lockdown.py\n--- a/plugins/modules/vmware_host_lockdown.py\n+++ b/plugins/modules/vmware_host_lockdown.py\n@@ -121,9 +121,10 @@\n '''\n \n try:\n- from pyvmomi import vim\n+ from pyVmomi import vim\n+ HAS_PYVMOMI = True\n except ImportError:\n- pass\n+ HAS_PYVMOMI = False\n \n from ansible.module_utils.basic import AnsibleModule\n from ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi\n@@ -207,6 +208,9 @@\n ]\n )\n \n+ if not HAS_PYVMOMI:\n+ module.fail_json(msg='pyvmomi required for this module')\n+\n vmware_lockdown_mgr = VmwareLockdownManager(module)\n vmware_lockdown_mgr.ensure()\n", "issue": "vmware_host_lockdown crashes on failure\n##### SUMMARY\r\nToday, I wanted to enable lockdown mode on a host. This failed, although I didn't find out yet why. But that's not important. The bug is that the module includes `vim` from `pyvmomi` instead of `pyVmomi` and doesn't check that this works:\r\n\r\nhttps://github.com/ansible-collections/community.vmware/blob/f418bdaa6a678c09b6fb9115d927d8c44d50060f/plugins/modules/vmware_host_lockdown.py#L123-L126\r\n\r\nI think nobody ran into this issue yet because enabling or disabling lockdown seldom fails (in my experience) and `vim` is only used in this case:\r\n\r\nhttps://github.com/ansible-collections/community.vmware/blob/f418bdaa6a678c09b6fb9115d927d8c44d50060f/plugins/modules/vmware_host_lockdown.py#L176-L182\r\n\r\n##### ISSUE TYPE\r\n- Bug Report\r\n\r\n##### COMPONENT NAME\r\nvmware_host_lockdown\r\n\r\n##### ANSIBLE VERSION\r\n```\r\nansible [core 2.11.6] \r\n config file = None\r\n configured module search path = ['/root/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']\r\n ansible python module location = /usr/lib/python3.9/site-packages/ansible\r\n ansible collection location = /root/.ansible/collections:/usr/share/ansible/collections\r\n executable location = /usr/bin/ansible\r\n python version = 3.9.1 (default, Aug 19 2021, 02:58:42) [GCC 10.2.0]\r\n jinja version = 3.0.2\r\n libyaml = True\r\n```\r\n\r\n##### COLLECTION VERSION\r\n```\r\n# /usr/lib/python3.9/site-packages/ansible_collections\r\nCollection Version\r\n---------------- -------\r\ncommunity.vmware 1.15.0\r\n```\r\n\r\n##### CONFIGURATION\r\n```\r\n\r\n```\r\n\r\n##### OS / ENVIRONMENT\r\nVMware Photon OS 4.0 and vSphere 7.0U2, but this is irrelevant.\r\n\r\n\r\n##### STEPS TO REPRODUCE\r\nTricky. As I've said, enabling / disabling lockdown usually works.\r\n\r\n##### EXPECTED RESULTS\r\nA failure.\r\n\r\n\r\n##### ACTUAL RESULTS\r\n```\r\nAn exception occurred during task execution. To see the full traceback, use -vvv. 
The error was: NameError: name 'vim' is not defined\r\n```\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright: (c) 2018, Abhijeet Kasurde <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = r'''\n---\nmodule: vmware_host_lockdown\nshort_description: Manage administrator permission for the local administrative account for the ESXi host\ndescription:\n- This module can be used to manage administrator permission for the local administrative account for the host when ESXi hostname is given.\n- All parameters and VMware objects values are case sensitive.\n- This module is destructive as administrator permission are managed using APIs used, please read options carefully and proceed.\n- Please specify C(hostname) as vCenter IP or hostname only, as lockdown operations are not possible from standalone ESXi server.\nauthor:\n- Abhijeet Kasurde (@Akasurde)\nnotes:\n- Tested on vSphere 6.5\nrequirements:\n- python >= 2.6\n- PyVmomi\noptions:\n cluster_name:\n description:\n - Name of cluster.\n - All host systems from given cluster used to manage lockdown.\n - Required parameter, if C(esxi_hostname) is not set.\n type: str\n esxi_hostname:\n description:\n - List of ESXi hostname to manage lockdown.\n - Required parameter, if C(cluster_name) is not set.\n - See examples for specifications.\n type: list\n elements: str\n state:\n description:\n - State of hosts system\n - If set to C(present), all host systems will be set in lockdown mode.\n - If host system is already in lockdown mode and set to C(present), no action will be taken.\n - If set to C(absent), all host systems will be removed from lockdown mode.\n - If host system is already out of lockdown mode and set to C(absent), no action will be taken.\n default: present\n choices: [ present, absent ]\n type: str\nextends_documentation_fragment:\n- community.vmware.vmware.documentation\n\n'''\n\nEXAMPLES = r'''\n- name: Enter host system into lockdown mode\n community.vmware.vmware_host_lockdown:\n hostname: '{{ vcenter_hostname }}'\n username: '{{ vcenter_username }}'\n password: '{{ vcenter_password }}'\n esxi_hostname: '{{ esxi_hostname }}'\n state: present\n delegate_to: localhost\n\n- name: Exit host systems from lockdown mode\n community.vmware.vmware_host_lockdown:\n hostname: '{{ vcenter_hostname }}'\n username: '{{ vcenter_username }}'\n password: '{{ vcenter_password }}'\n esxi_hostname: '{{ esxi_hostname }}'\n state: absent\n delegate_to: localhost\n\n- name: Enter host systems into lockdown mode\n community.vmware.vmware_host_lockdown:\n hostname: '{{ vcenter_hostname }}'\n username: '{{ vcenter_username }}'\n password: '{{ vcenter_password }}'\n esxi_hostname:\n - '{{ esxi_hostname_1 }}'\n - '{{ esxi_hostname_2 }}'\n state: present\n delegate_to: localhost\n\n- name: Exit host systems from lockdown mode\n community.vmware.vmware_host_lockdown:\n hostname: '{{ vcenter_hostname }}'\n username: '{{ vcenter_username }}'\n password: '{{ vcenter_password }}'\n esxi_hostname:\n - '{{ esxi_hostname_1 }}'\n - '{{ esxi_hostname_2 }}'\n state: absent\n delegate_to: localhost\n\n- name: Enter all host system from cluster into lockdown mode\n community.vmware.vmware_host_lockdown:\n hostname: '{{ vcenter_hostname }}'\n username: '{{ vcenter_username }}'\n password: '{{ vcenter_password }}'\n cluster_name: '{{ cluster_name }}'\n state: 
present\n delegate_to: localhost\n'''\n\nRETURN = r'''\nresults:\n description: metadata about state of Host system lock down\n returned: always\n type: dict\n sample: {\n \"host_lockdown_state\": {\n \"DC0_C0\": {\n \"current_state\": \"present\",\n \"previous_state\": \"absent\",\n \"desired_state\": \"present\",\n },\n }\n }\n'''\n\ntry:\n from pyvmomi import vim\nexcept ImportError:\n pass\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi\nfrom ansible.module_utils._text import to_native\n\n\nclass VmwareLockdownManager(PyVmomi):\n def __init__(self, module):\n super(VmwareLockdownManager, self).__init__(module)\n if not self.is_vcenter():\n self.module.fail_json(msg=\"Lockdown operations are performed from vCenter only. \"\n \"hostname %s is an ESXi server. Please specify hostname \"\n \"as vCenter server.\" % self.module.params['hostname'])\n cluster_name = self.params.get('cluster_name', None)\n esxi_host_name = self.params.get('esxi_hostname', None)\n self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)\n\n def ensure(self):\n \"\"\"\n Function to manage internal state management\n \"\"\"\n results = dict(changed=False, host_lockdown_state=dict())\n change_list = []\n desired_state = self.params.get('state')\n for host in self.hosts:\n results['host_lockdown_state'][host.name] = dict(current_state='',\n desired_state=desired_state,\n previous_state=''\n )\n changed = False\n try:\n if host.config.adminDisabled:\n results['host_lockdown_state'][host.name]['previous_state'] = 'present'\n if desired_state == 'absent':\n if not self.module.check_mode:\n host.ExitLockdownMode()\n results['host_lockdown_state'][host.name]['current_state'] = 'absent'\n changed = True\n else:\n results['host_lockdown_state'][host.name]['current_state'] = 'present'\n elif not host.config.adminDisabled:\n results['host_lockdown_state'][host.name]['previous_state'] = 'absent'\n if desired_state == 'present':\n if not self.module.check_mode:\n host.EnterLockdownMode()\n results['host_lockdown_state'][host.name]['current_state'] = 'present'\n changed = True\n else:\n results['host_lockdown_state'][host.name]['current_state'] = 'absent'\n except vim.fault.HostConfigFault as host_config_fault:\n self.module.fail_json(msg=\"Failed to manage lockdown mode for esxi\"\n \" hostname %s : %s\" % (host.name, to_native(host_config_fault.msg)))\n except vim.fault.AdminDisabled as admin_disabled:\n self.module.fail_json(msg=\"Failed to manage lockdown mode as administrator \"\n \"permission has been disabled for \"\n \"esxi hostname %s : %s\" % (host.name, to_native(admin_disabled.msg)))\n except Exception as generic_exception:\n self.module.fail_json(msg=\"Failed to manage lockdown mode due to generic exception for esxi \"\n \"hostname %s : %s\" % (host.name, to_native(generic_exception)))\n change_list.append(changed)\n\n if any(change_list):\n results['changed'] = True\n\n self.module.exit_json(**results)\n\n\ndef main():\n argument_spec = vmware_argument_spec()\n argument_spec.update(\n cluster_name=dict(type='str', required=False),\n esxi_hostname=dict(type='list', required=False, elements='str'),\n state=dict(type='str', default='present', choices=['present', 'absent'], required=False),\n )\n\n module = AnsibleModule(\n argument_spec=argument_spec,\n supports_check_mode=True,\n required_one_of=[\n ['cluster_name', 'esxi_hostname'],\n ]\n )\n\n vmware_lockdown_mgr = 
VmwareLockdownManager(module)\n vmware_lockdown_mgr.ensure()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "plugins/modules/vmware_host_lockdown.py"}], "after_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright: (c) 2018, Abhijeet Kasurde <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = r'''\n---\nmodule: vmware_host_lockdown\nshort_description: Manage administrator permission for the local administrative account for the ESXi host\ndescription:\n- This module can be used to manage administrator permission for the local administrative account for the host when ESXi hostname is given.\n- All parameters and VMware objects values are case sensitive.\n- This module is destructive as administrator permission are managed using APIs used, please read options carefully and proceed.\n- Please specify C(hostname) as vCenter IP or hostname only, as lockdown operations are not possible from standalone ESXi server.\nauthor:\n- Abhijeet Kasurde (@Akasurde)\nnotes:\n- Tested on vSphere 6.5\nrequirements:\n- python >= 2.6\n- PyVmomi\noptions:\n cluster_name:\n description:\n - Name of cluster.\n - All host systems from given cluster used to manage lockdown.\n - Required parameter, if C(esxi_hostname) is not set.\n type: str\n esxi_hostname:\n description:\n - List of ESXi hostname to manage lockdown.\n - Required parameter, if C(cluster_name) is not set.\n - See examples for specifications.\n type: list\n elements: str\n state:\n description:\n - State of hosts system\n - If set to C(present), all host systems will be set in lockdown mode.\n - If host system is already in lockdown mode and set to C(present), no action will be taken.\n - If set to C(absent), all host systems will be removed from lockdown mode.\n - If host system is already out of lockdown mode and set to C(absent), no action will be taken.\n default: present\n choices: [ present, absent ]\n type: str\nextends_documentation_fragment:\n- community.vmware.vmware.documentation\n\n'''\n\nEXAMPLES = r'''\n- name: Enter host system into lockdown mode\n community.vmware.vmware_host_lockdown:\n hostname: '{{ vcenter_hostname }}'\n username: '{{ vcenter_username }}'\n password: '{{ vcenter_password }}'\n esxi_hostname: '{{ esxi_hostname }}'\n state: present\n delegate_to: localhost\n\n- name: Exit host systems from lockdown mode\n community.vmware.vmware_host_lockdown:\n hostname: '{{ vcenter_hostname }}'\n username: '{{ vcenter_username }}'\n password: '{{ vcenter_password }}'\n esxi_hostname: '{{ esxi_hostname }}'\n state: absent\n delegate_to: localhost\n\n- name: Enter host systems into lockdown mode\n community.vmware.vmware_host_lockdown:\n hostname: '{{ vcenter_hostname }}'\n username: '{{ vcenter_username }}'\n password: '{{ vcenter_password }}'\n esxi_hostname:\n - '{{ esxi_hostname_1 }}'\n - '{{ esxi_hostname_2 }}'\n state: present\n delegate_to: localhost\n\n- name: Exit host systems from lockdown mode\n community.vmware.vmware_host_lockdown:\n hostname: '{{ vcenter_hostname }}'\n username: '{{ vcenter_username }}'\n password: '{{ vcenter_password }}'\n esxi_hostname:\n - '{{ esxi_hostname_1 }}'\n - '{{ esxi_hostname_2 }}'\n state: absent\n delegate_to: localhost\n\n- name: Enter all host system from cluster into lockdown mode\n community.vmware.vmware_host_lockdown:\n hostname: '{{ vcenter_hostname }}'\n username: '{{ 
vcenter_username }}'\n password: '{{ vcenter_password }}'\n cluster_name: '{{ cluster_name }}'\n state: present\n delegate_to: localhost\n'''\n\nRETURN = r'''\nresults:\n description: metadata about state of Host system lock down\n returned: always\n type: dict\n sample: {\n \"host_lockdown_state\": {\n \"DC0_C0\": {\n \"current_state\": \"present\",\n \"previous_state\": \"absent\",\n \"desired_state\": \"present\",\n },\n }\n }\n'''\n\ntry:\n from pyVmomi import vim\n HAS_PYVMOMI = True\nexcept ImportError:\n HAS_PYVMOMI = False\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible_collections.community.vmware.plugins.module_utils.vmware import vmware_argument_spec, PyVmomi\nfrom ansible.module_utils._text import to_native\n\n\nclass VmwareLockdownManager(PyVmomi):\n def __init__(self, module):\n super(VmwareLockdownManager, self).__init__(module)\n if not self.is_vcenter():\n self.module.fail_json(msg=\"Lockdown operations are performed from vCenter only. \"\n \"hostname %s is an ESXi server. Please specify hostname \"\n \"as vCenter server.\" % self.module.params['hostname'])\n cluster_name = self.params.get('cluster_name', None)\n esxi_host_name = self.params.get('esxi_hostname', None)\n self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)\n\n def ensure(self):\n \"\"\"\n Function to manage internal state management\n \"\"\"\n results = dict(changed=False, host_lockdown_state=dict())\n change_list = []\n desired_state = self.params.get('state')\n for host in self.hosts:\n results['host_lockdown_state'][host.name] = dict(current_state='',\n desired_state=desired_state,\n previous_state=''\n )\n changed = False\n try:\n if host.config.adminDisabled:\n results['host_lockdown_state'][host.name]['previous_state'] = 'present'\n if desired_state == 'absent':\n if not self.module.check_mode:\n host.ExitLockdownMode()\n results['host_lockdown_state'][host.name]['current_state'] = 'absent'\n changed = True\n else:\n results['host_lockdown_state'][host.name]['current_state'] = 'present'\n elif not host.config.adminDisabled:\n results['host_lockdown_state'][host.name]['previous_state'] = 'absent'\n if desired_state == 'present':\n if not self.module.check_mode:\n host.EnterLockdownMode()\n results['host_lockdown_state'][host.name]['current_state'] = 'present'\n changed = True\n else:\n results['host_lockdown_state'][host.name]['current_state'] = 'absent'\n except vim.fault.HostConfigFault as host_config_fault:\n self.module.fail_json(msg=\"Failed to manage lockdown mode for esxi\"\n \" hostname %s : %s\" % (host.name, to_native(host_config_fault.msg)))\n except vim.fault.AdminDisabled as admin_disabled:\n self.module.fail_json(msg=\"Failed to manage lockdown mode as administrator \"\n \"permission has been disabled for \"\n \"esxi hostname %s : %s\" % (host.name, to_native(admin_disabled.msg)))\n except Exception as generic_exception:\n self.module.fail_json(msg=\"Failed to manage lockdown mode due to generic exception for esxi \"\n \"hostname %s : %s\" % (host.name, to_native(generic_exception)))\n change_list.append(changed)\n\n if any(change_list):\n results['changed'] = True\n\n self.module.exit_json(**results)\n\n\ndef main():\n argument_spec = vmware_argument_spec()\n argument_spec.update(\n cluster_name=dict(type='str', required=False),\n esxi_hostname=dict(type='list', required=False, elements='str'),\n state=dict(type='str', default='present', choices=['present', 'absent'], required=False),\n )\n\n module = AnsibleModule(\n 
argument_spec=argument_spec,\n supports_check_mode=True,\n required_one_of=[\n ['cluster_name', 'esxi_hostname'],\n ]\n )\n\n if not HAS_PYVMOMI:\n module.fail_json(msg='pyvmomi required for this module')\n\n vmware_lockdown_mgr = VmwareLockdownManager(module)\n vmware_lockdown_mgr.ensure()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "plugins/modules/vmware_host_lockdown.py"}]}
3,139
211
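The community.vmware fix above is the standard guarded-import convention for optional dependencies: import inside try/except ImportError, record the outcome in a HAS_* flag, and fail with an explicit message at runtime instead of letting a later reference blow up with a NameError. A minimal sketch of the same convention outside Ansible follows; lxml is used here only as a stand-in optional dependency (an assumption of this example, not something the record prescribes), and where the real patch calls `module.fail_json(...)` this sketch raises an exception instead:

```python
try:
    # Optional third-party dependency; may be absent in minimal installs.
    from lxml import etree
    HAS_LXML = True
except ImportError:
    HAS_LXML = False


def parse_document(text):
    """Parse XML text, failing with a clear message if the dependency is missing."""
    if not HAS_LXML:
        # In an Ansible module this branch would be module.fail_json(msg=...).
        raise RuntimeError("the 'lxml' package is required for parse_document()")
    return etree.fromstring(text)


if __name__ == "__main__":
    if HAS_LXML:
        print(parse_document(b"<root><child/></root>").tag)
    else:
        print("lxml is not installed; parse_document() raises RuntimeError")
```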
gh_patches_debug_1575
rasdani/github-patches
git_diff
wagtail__wagtail-6086
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Site.hostname should be lowercase to prevent duplicates ### Issue Summary Wagtail `Site.hostname` accepts duplicate hostnames. It possible to create `foo.com:80` and `Foo.com:80` but these are technically the same thus duplicates. Site.hostname is case sensitive field. Hostnames and domain names are a case insensitive. https://tools.ietf.org/html/rfc4343 `foo.com` and `Foo.com` should be treated as the same value and raise a validation error. ### Steps to Reproduce 1. Start a new project with `wagtail start myproject` 2. Go to Settings > Sites 3. Add site "foo.com" port 80 4. Add site "Foo.com" port 80 I expect a validation error: `Site with this Hostname and Port already exists.` I added a clean method on wagtail.core.models.Site that makes the hostname lowercase on save. ``` def clean(self): self.hostname = self.hostname.lower() ``` The form raises an error now, but the error isn't displayed in the templates! 🐞 wagtail/admin/templates/wagtailadmin/generic/create.html wagtail/admin/templates/wagtailadmin/generic/edit.html These templates need `{{ form.non_field_errors }}`. <img width="750" alt="Screenshot 2020-05-29 at 01 34 54" src="https://user-images.githubusercontent.com/1969342/83204661-a7060f00-a14c-11ea-8152-8568c0acef83.png"> * I have confirmed that this issue can be reproduced as described on a fresh Wagtail project: yes --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `wagtail/models/sites.py` Content: ``` 1 from collections import namedtuple 2 3 from django.apps import apps 4 from django.conf import settings 5 from django.core.cache import cache 6 from django.core.exceptions import ValidationError 7 from django.db import models 8 from django.db.models import Case, IntegerField, Q, When 9 from django.db.models.functions import Lower 10 from django.http.request import split_domain_port 11 from django.utils.translation import gettext_lazy as _ 12 13 MATCH_HOSTNAME_PORT = 0 14 MATCH_HOSTNAME_DEFAULT = 1 15 MATCH_DEFAULT = 2 16 MATCH_HOSTNAME = 3 17 18 19 def get_site_for_hostname(hostname, port): 20 """Return the wagtailcore.Site object for the given hostname and port.""" 21 Site = apps.get_model("wagtailcore.Site") 22 23 sites = list( 24 Site.objects.annotate( 25 match=Case( 26 # annotate the results by best choice descending 27 # put exact hostname+port match first 28 When(hostname=hostname, port=port, then=MATCH_HOSTNAME_PORT), 29 # then put hostname+default (better than just hostname or just default) 30 When( 31 hostname=hostname, is_default_site=True, then=MATCH_HOSTNAME_DEFAULT 32 ), 33 # then match default with different hostname. 
there is only ever 34 # one default, so order it above (possibly multiple) hostname 35 # matches so we can use sites[0] below to access it 36 When(is_default_site=True, then=MATCH_DEFAULT), 37 # because of the filter below, if it's not default then its a hostname match 38 default=MATCH_HOSTNAME, 39 output_field=IntegerField(), 40 ) 41 ) 42 .filter(Q(hostname=hostname) | Q(is_default_site=True)) 43 .order_by("match") 44 .select_related("root_page") 45 ) 46 47 if sites: 48 # if there's a unique match or hostname (with port or default) match 49 if len(sites) == 1 or sites[0].match in ( 50 MATCH_HOSTNAME_PORT, 51 MATCH_HOSTNAME_DEFAULT, 52 ): 53 return sites[0] 54 55 # if there is a default match with a different hostname, see if 56 # there are many hostname matches. if only 1 then use that instead 57 # otherwise we use the default 58 if sites[0].match == MATCH_DEFAULT: 59 return sites[len(sites) == 2] 60 61 raise Site.DoesNotExist() 62 63 64 class SiteManager(models.Manager): 65 def get_queryset(self): 66 return super(SiteManager, self).get_queryset().order_by(Lower("hostname")) 67 68 def get_by_natural_key(self, hostname, port): 69 return self.get(hostname=hostname, port=port) 70 71 72 SiteRootPath = namedtuple("SiteRootPath", "site_id root_path root_url language_code") 73 74 SITE_ROOT_PATHS_CACHE_KEY = "wagtail_site_root_paths" 75 # Increase the cache version whenever the structure SiteRootPath tuple changes 76 SITE_ROOT_PATHS_CACHE_VERSION = 2 77 78 79 class Site(models.Model): 80 hostname = models.CharField( 81 verbose_name=_("hostname"), max_length=255, db_index=True 82 ) 83 port = models.IntegerField( 84 verbose_name=_("port"), 85 default=80, 86 help_text=_( 87 "Set this to something other than 80 if you need a specific port number to appear in URLs" 88 " (e.g. development on port 8000). Does not affect request handling (so port forwarding still works)." 89 ), 90 ) 91 site_name = models.CharField( 92 verbose_name=_("site name"), 93 max_length=255, 94 blank=True, 95 help_text=_("Human-readable name for the site."), 96 ) 97 root_page = models.ForeignKey( 98 "Page", 99 verbose_name=_("root page"), 100 related_name="sites_rooted_here", 101 on_delete=models.CASCADE, 102 ) 103 is_default_site = models.BooleanField( 104 verbose_name=_("is default site"), 105 default=False, 106 help_text=_( 107 "If true, this site will handle requests for all other hostnames that do not have a site entry of their own" 108 ), 109 ) 110 111 objects = SiteManager() 112 113 class Meta: 114 unique_together = ("hostname", "port") 115 verbose_name = _("site") 116 verbose_name_plural = _("sites") 117 118 def natural_key(self): 119 return (self.hostname, self.port) 120 121 def __str__(self): 122 default_suffix = " [{}]".format(_("default")) 123 if self.site_name: 124 return self.site_name + (default_suffix if self.is_default_site else "") 125 else: 126 return ( 127 self.hostname 128 + ("" if self.port == 80 else (":%d" % self.port)) 129 + (default_suffix if self.is_default_site else "") 130 ) 131 132 @staticmethod 133 def find_for_request(request): 134 """ 135 Find the site object responsible for responding to this HTTP 136 request object. 
Try: 137 138 * unique hostname first 139 * then hostname and port 140 * if there is no matching hostname at all, or no matching 141 hostname:port combination, fall back to the unique default site, 142 or raise an exception 143 144 NB this means that high-numbered ports on an extant hostname may 145 still be routed to a different hostname which is set as the default 146 147 The site will be cached via request._wagtail_site 148 """ 149 150 if request is None: 151 return None 152 153 if not hasattr(request, "_wagtail_site"): 154 site = Site._find_for_request(request) 155 setattr(request, "_wagtail_site", site) 156 return request._wagtail_site 157 158 @staticmethod 159 def _find_for_request(request): 160 hostname = split_domain_port(request.get_host())[0] 161 port = request.get_port() 162 site = None 163 try: 164 site = get_site_for_hostname(hostname, port) 165 except Site.DoesNotExist: 166 pass 167 # copy old SiteMiddleware behaviour 168 return site 169 170 @property 171 def root_url(self): 172 if self.port == 80: 173 return "http://%s" % self.hostname 174 elif self.port == 443: 175 return "https://%s" % self.hostname 176 else: 177 return "http://%s:%d" % (self.hostname, self.port) 178 179 def clean_fields(self, exclude=None): 180 super().clean_fields(exclude) 181 # Only one site can have the is_default_site flag set 182 try: 183 default = Site.objects.get(is_default_site=True) 184 except Site.DoesNotExist: 185 pass 186 except Site.MultipleObjectsReturned: 187 raise 188 else: 189 if self.is_default_site and self.pk != default.pk: 190 raise ValidationError( 191 { 192 "is_default_site": [ 193 _( 194 "%(hostname)s is already configured as the default site." 195 " You must unset that before you can save this site as default." 196 ) 197 % {"hostname": default.hostname} 198 ] 199 } 200 ) 201 202 @staticmethod 203 def get_site_root_paths(): 204 """ 205 Return a list of `SiteRootPath` instances, most specific path 206 first - used to translate url_paths into actual URLs with hostnames 207 208 Each root path is an instance of the `SiteRootPath` named tuple, 209 and have the following attributes: 210 211 - `site_id` - The ID of the Site record 212 - `root_path` - The internal URL path of the site's home page (for example '/home/') 213 - `root_url` - The scheme/domain name of the site (for example 'https://www.example.com/') 214 - `language_code` - The language code of the site (for example 'en') 215 """ 216 result = cache.get( 217 SITE_ROOT_PATHS_CACHE_KEY, version=SITE_ROOT_PATHS_CACHE_VERSION 218 ) 219 220 if result is None: 221 result = [] 222 223 for site in Site.objects.select_related( 224 "root_page", "root_page__locale" 225 ).order_by("-root_page__url_path", "-is_default_site", "hostname"): 226 if getattr(settings, "WAGTAIL_I18N_ENABLED", False): 227 result.extend( 228 [ 229 SiteRootPath( 230 site.id, 231 root_page.url_path, 232 site.root_url, 233 root_page.locale.language_code, 234 ) 235 for root_page in site.root_page.get_translations( 236 inclusive=True 237 ).select_related("locale") 238 ] 239 ) 240 else: 241 result.append( 242 SiteRootPath( 243 site.id, 244 site.root_page.url_path, 245 site.root_url, 246 site.root_page.locale.language_code, 247 ) 248 ) 249 250 cache.set( 251 SITE_ROOT_PATHS_CACHE_KEY, 252 result, 253 3600, 254 version=SITE_ROOT_PATHS_CACHE_VERSION, 255 ) 256 257 else: 258 # Convert the cache result to a list of SiteRootPath tuples, as some 259 # cache backends (e.g. Redis) don't support named tuples. 
260 result = [SiteRootPath(*result) for result in result] 261 262 return result 263 264 @staticmethod 265 def clear_site_root_paths_cache(): 266 cache.delete(SITE_ROOT_PATHS_CACHE_KEY, version=SITE_ROOT_PATHS_CACHE_VERSION) 267 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/wagtail/models/sites.py b/wagtail/models/sites.py --- a/wagtail/models/sites.py +++ b/wagtail/models/sites.py @@ -129,6 +129,9 @@ + (default_suffix if self.is_default_site else "") ) + def clean(self): + self.hostname = self.hostname.lower() + @staticmethod def find_for_request(request): """
{"golden_diff": "diff --git a/wagtail/models/sites.py b/wagtail/models/sites.py\n--- a/wagtail/models/sites.py\n+++ b/wagtail/models/sites.py\n@@ -129,6 +129,9 @@\n + (default_suffix if self.is_default_site else \"\")\n )\n \n+ def clean(self):\n+ self.hostname = self.hostname.lower()\n+\n @staticmethod\n def find_for_request(request):\n \"\"\"\n", "issue": "Site.hostname should be lowercase to prevent duplicates\n### Issue Summary\r\n\r\nWagtail `Site.hostname` accepts duplicate hostnames. \r\nIt possible to create `foo.com:80` and `Foo.com:80` but these are technically the same thus duplicates.\r\n\r\nSite.hostname is case sensitive field. Hostnames and domain names are a case insensitive. https://tools.ietf.org/html/rfc4343 \r\n\r\n`foo.com` and `Foo.com` should be treated as the same value and raise a validation error.\r\n\r\n### Steps to Reproduce\r\n\r\n1. Start a new project with `wagtail start myproject`\r\n2. Go to Settings > Sites\r\n3. Add site \"foo.com\" port 80\r\n4. Add site \"Foo.com\" port 80\r\n\r\nI expect a validation error: `Site with this Hostname and Port already exists.`\r\n\r\nI added a clean method on wagtail.core.models.Site that makes the hostname lowercase on save.\r\n```\r\n def clean(self):\r\n self.hostname = self.hostname.lower()\r\n```\r\n\r\nThe form raises an error now, but the error isn't displayed in the templates! \ud83d\udc1e\r\nwagtail/admin/templates/wagtailadmin/generic/create.html\r\nwagtail/admin/templates/wagtailadmin/generic/edit.html\r\n\r\nThese templates need `{{ form.non_field_errors }}`.\r\n\r\n<img width=\"750\" alt=\"Screenshot 2020-05-29 at 01 34 54\" src=\"https://user-images.githubusercontent.com/1969342/83204661-a7060f00-a14c-11ea-8152-8568c0acef83.png\">\r\n\r\n* I have confirmed that this issue can be reproduced as described on a fresh Wagtail project: yes\n", "before_files": [{"content": "from collections import namedtuple\n\nfrom django.apps import apps\nfrom django.conf import settings\nfrom django.core.cache import cache\nfrom django.core.exceptions import ValidationError\nfrom django.db import models\nfrom django.db.models import Case, IntegerField, Q, When\nfrom django.db.models.functions import Lower\nfrom django.http.request import split_domain_port\nfrom django.utils.translation import gettext_lazy as _\n\nMATCH_HOSTNAME_PORT = 0\nMATCH_HOSTNAME_DEFAULT = 1\nMATCH_DEFAULT = 2\nMATCH_HOSTNAME = 3\n\n\ndef get_site_for_hostname(hostname, port):\n \"\"\"Return the wagtailcore.Site object for the given hostname and port.\"\"\"\n Site = apps.get_model(\"wagtailcore.Site\")\n\n sites = list(\n Site.objects.annotate(\n match=Case(\n # annotate the results by best choice descending\n # put exact hostname+port match first\n When(hostname=hostname, port=port, then=MATCH_HOSTNAME_PORT),\n # then put hostname+default (better than just hostname or just default)\n When(\n hostname=hostname, is_default_site=True, then=MATCH_HOSTNAME_DEFAULT\n ),\n # then match default with different hostname. 
there is only ever\n # one default, so order it above (possibly multiple) hostname\n # matches so we can use sites[0] below to access it\n When(is_default_site=True, then=MATCH_DEFAULT),\n # because of the filter below, if it's not default then its a hostname match\n default=MATCH_HOSTNAME,\n output_field=IntegerField(),\n )\n )\n .filter(Q(hostname=hostname) | Q(is_default_site=True))\n .order_by(\"match\")\n .select_related(\"root_page\")\n )\n\n if sites:\n # if there's a unique match or hostname (with port or default) match\n if len(sites) == 1 or sites[0].match in (\n MATCH_HOSTNAME_PORT,\n MATCH_HOSTNAME_DEFAULT,\n ):\n return sites[0]\n\n # if there is a default match with a different hostname, see if\n # there are many hostname matches. if only 1 then use that instead\n # otherwise we use the default\n if sites[0].match == MATCH_DEFAULT:\n return sites[len(sites) == 2]\n\n raise Site.DoesNotExist()\n\n\nclass SiteManager(models.Manager):\n def get_queryset(self):\n return super(SiteManager, self).get_queryset().order_by(Lower(\"hostname\"))\n\n def get_by_natural_key(self, hostname, port):\n return self.get(hostname=hostname, port=port)\n\n\nSiteRootPath = namedtuple(\"SiteRootPath\", \"site_id root_path root_url language_code\")\n\nSITE_ROOT_PATHS_CACHE_KEY = \"wagtail_site_root_paths\"\n# Increase the cache version whenever the structure SiteRootPath tuple changes\nSITE_ROOT_PATHS_CACHE_VERSION = 2\n\n\nclass Site(models.Model):\n hostname = models.CharField(\n verbose_name=_(\"hostname\"), max_length=255, db_index=True\n )\n port = models.IntegerField(\n verbose_name=_(\"port\"),\n default=80,\n help_text=_(\n \"Set this to something other than 80 if you need a specific port number to appear in URLs\"\n \" (e.g. development on port 8000). Does not affect request handling (so port forwarding still works).\"\n ),\n )\n site_name = models.CharField(\n verbose_name=_(\"site name\"),\n max_length=255,\n blank=True,\n help_text=_(\"Human-readable name for the site.\"),\n )\n root_page = models.ForeignKey(\n \"Page\",\n verbose_name=_(\"root page\"),\n related_name=\"sites_rooted_here\",\n on_delete=models.CASCADE,\n )\n is_default_site = models.BooleanField(\n verbose_name=_(\"is default site\"),\n default=False,\n help_text=_(\n \"If true, this site will handle requests for all other hostnames that do not have a site entry of their own\"\n ),\n )\n\n objects = SiteManager()\n\n class Meta:\n unique_together = (\"hostname\", \"port\")\n verbose_name = _(\"site\")\n verbose_name_plural = _(\"sites\")\n\n def natural_key(self):\n return (self.hostname, self.port)\n\n def __str__(self):\n default_suffix = \" [{}]\".format(_(\"default\"))\n if self.site_name:\n return self.site_name + (default_suffix if self.is_default_site else \"\")\n else:\n return (\n self.hostname\n + (\"\" if self.port == 80 else (\":%d\" % self.port))\n + (default_suffix if self.is_default_site else \"\")\n )\n\n @staticmethod\n def find_for_request(request):\n \"\"\"\n Find the site object responsible for responding to this HTTP\n request object. 
Try:\n\n * unique hostname first\n * then hostname and port\n * if there is no matching hostname at all, or no matching\n hostname:port combination, fall back to the unique default site,\n or raise an exception\n\n NB this means that high-numbered ports on an extant hostname may\n still be routed to a different hostname which is set as the default\n\n The site will be cached via request._wagtail_site\n \"\"\"\n\n if request is None:\n return None\n\n if not hasattr(request, \"_wagtail_site\"):\n site = Site._find_for_request(request)\n setattr(request, \"_wagtail_site\", site)\n return request._wagtail_site\n\n @staticmethod\n def _find_for_request(request):\n hostname = split_domain_port(request.get_host())[0]\n port = request.get_port()\n site = None\n try:\n site = get_site_for_hostname(hostname, port)\n except Site.DoesNotExist:\n pass\n # copy old SiteMiddleware behaviour\n return site\n\n @property\n def root_url(self):\n if self.port == 80:\n return \"http://%s\" % self.hostname\n elif self.port == 443:\n return \"https://%s\" % self.hostname\n else:\n return \"http://%s:%d\" % (self.hostname, self.port)\n\n def clean_fields(self, exclude=None):\n super().clean_fields(exclude)\n # Only one site can have the is_default_site flag set\n try:\n default = Site.objects.get(is_default_site=True)\n except Site.DoesNotExist:\n pass\n except Site.MultipleObjectsReturned:\n raise\n else:\n if self.is_default_site and self.pk != default.pk:\n raise ValidationError(\n {\n \"is_default_site\": [\n _(\n \"%(hostname)s is already configured as the default site.\"\n \" You must unset that before you can save this site as default.\"\n )\n % {\"hostname\": default.hostname}\n ]\n }\n )\n\n @staticmethod\n def get_site_root_paths():\n \"\"\"\n Return a list of `SiteRootPath` instances, most specific path\n first - used to translate url_paths into actual URLs with hostnames\n\n Each root path is an instance of the `SiteRootPath` named tuple,\n and have the following attributes:\n\n - `site_id` - The ID of the Site record\n - `root_path` - The internal URL path of the site's home page (for example '/home/')\n - `root_url` - The scheme/domain name of the site (for example 'https://www.example.com/')\n - `language_code` - The language code of the site (for example 'en')\n \"\"\"\n result = cache.get(\n SITE_ROOT_PATHS_CACHE_KEY, version=SITE_ROOT_PATHS_CACHE_VERSION\n )\n\n if result is None:\n result = []\n\n for site in Site.objects.select_related(\n \"root_page\", \"root_page__locale\"\n ).order_by(\"-root_page__url_path\", \"-is_default_site\", \"hostname\"):\n if getattr(settings, \"WAGTAIL_I18N_ENABLED\", False):\n result.extend(\n [\n SiteRootPath(\n site.id,\n root_page.url_path,\n site.root_url,\n root_page.locale.language_code,\n )\n for root_page in site.root_page.get_translations(\n inclusive=True\n ).select_related(\"locale\")\n ]\n )\n else:\n result.append(\n SiteRootPath(\n site.id,\n site.root_page.url_path,\n site.root_url,\n site.root_page.locale.language_code,\n )\n )\n\n cache.set(\n SITE_ROOT_PATHS_CACHE_KEY,\n result,\n 3600,\n version=SITE_ROOT_PATHS_CACHE_VERSION,\n )\n\n else:\n # Convert the cache result to a list of SiteRootPath tuples, as some\n # cache backends (e.g. 
Redis) don't support named tuples.\n result = [SiteRootPath(*result) for result in result]\n\n return result\n\n @staticmethod\n def clear_site_root_paths_cache():\n cache.delete(SITE_ROOT_PATHS_CACHE_KEY, version=SITE_ROOT_PATHS_CACHE_VERSION)\n", "path": "wagtail/models/sites.py"}], "after_files": [{"content": "from collections import namedtuple\n\nfrom django.apps import apps\nfrom django.conf import settings\nfrom django.core.cache import cache\nfrom django.core.exceptions import ValidationError\nfrom django.db import models\nfrom django.db.models import Case, IntegerField, Q, When\nfrom django.db.models.functions import Lower\nfrom django.http.request import split_domain_port\nfrom django.utils.translation import gettext_lazy as _\n\nMATCH_HOSTNAME_PORT = 0\nMATCH_HOSTNAME_DEFAULT = 1\nMATCH_DEFAULT = 2\nMATCH_HOSTNAME = 3\n\n\ndef get_site_for_hostname(hostname, port):\n \"\"\"Return the wagtailcore.Site object for the given hostname and port.\"\"\"\n Site = apps.get_model(\"wagtailcore.Site\")\n\n sites = list(\n Site.objects.annotate(\n match=Case(\n # annotate the results by best choice descending\n # put exact hostname+port match first\n When(hostname=hostname, port=port, then=MATCH_HOSTNAME_PORT),\n # then put hostname+default (better than just hostname or just default)\n When(\n hostname=hostname, is_default_site=True, then=MATCH_HOSTNAME_DEFAULT\n ),\n # then match default with different hostname. there is only ever\n # one default, so order it above (possibly multiple) hostname\n # matches so we can use sites[0] below to access it\n When(is_default_site=True, then=MATCH_DEFAULT),\n # because of the filter below, if it's not default then its a hostname match\n default=MATCH_HOSTNAME,\n output_field=IntegerField(),\n )\n )\n .filter(Q(hostname=hostname) | Q(is_default_site=True))\n .order_by(\"match\")\n .select_related(\"root_page\")\n )\n\n if sites:\n # if there's a unique match or hostname (with port or default) match\n if len(sites) == 1 or sites[0].match in (\n MATCH_HOSTNAME_PORT,\n MATCH_HOSTNAME_DEFAULT,\n ):\n return sites[0]\n\n # if there is a default match with a different hostname, see if\n # there are many hostname matches. if only 1 then use that instead\n # otherwise we use the default\n if sites[0].match == MATCH_DEFAULT:\n return sites[len(sites) == 2]\n\n raise Site.DoesNotExist()\n\n\nclass SiteManager(models.Manager):\n def get_queryset(self):\n return super(SiteManager, self).get_queryset().order_by(Lower(\"hostname\"))\n\n def get_by_natural_key(self, hostname, port):\n return self.get(hostname=hostname, port=port)\n\n\nSiteRootPath = namedtuple(\"SiteRootPath\", \"site_id root_path root_url language_code\")\n\nSITE_ROOT_PATHS_CACHE_KEY = \"wagtail_site_root_paths\"\n# Increase the cache version whenever the structure SiteRootPath tuple changes\nSITE_ROOT_PATHS_CACHE_VERSION = 2\n\n\nclass Site(models.Model):\n hostname = models.CharField(\n verbose_name=_(\"hostname\"), max_length=255, db_index=True\n )\n port = models.IntegerField(\n verbose_name=_(\"port\"),\n default=80,\n help_text=_(\n \"Set this to something other than 80 if you need a specific port number to appear in URLs\"\n \" (e.g. development on port 8000). 
Does not affect request handling (so port forwarding still works).\"\n ),\n )\n site_name = models.CharField(\n verbose_name=_(\"site name\"),\n max_length=255,\n blank=True,\n help_text=_(\"Human-readable name for the site.\"),\n )\n root_page = models.ForeignKey(\n \"Page\",\n verbose_name=_(\"root page\"),\n related_name=\"sites_rooted_here\",\n on_delete=models.CASCADE,\n )\n is_default_site = models.BooleanField(\n verbose_name=_(\"is default site\"),\n default=False,\n help_text=_(\n \"If true, this site will handle requests for all other hostnames that do not have a site entry of their own\"\n ),\n )\n\n objects = SiteManager()\n\n class Meta:\n unique_together = (\"hostname\", \"port\")\n verbose_name = _(\"site\")\n verbose_name_plural = _(\"sites\")\n\n def natural_key(self):\n return (self.hostname, self.port)\n\n def __str__(self):\n default_suffix = \" [{}]\".format(_(\"default\"))\n if self.site_name:\n return self.site_name + (default_suffix if self.is_default_site else \"\")\n else:\n return (\n self.hostname\n + (\"\" if self.port == 80 else (\":%d\" % self.port))\n + (default_suffix if self.is_default_site else \"\")\n )\n\n def clean(self):\n self.hostname = self.hostname.lower()\n\n @staticmethod\n def find_for_request(request):\n \"\"\"\n Find the site object responsible for responding to this HTTP\n request object. Try:\n\n * unique hostname first\n * then hostname and port\n * if there is no matching hostname at all, or no matching\n hostname:port combination, fall back to the unique default site,\n or raise an exception\n\n NB this means that high-numbered ports on an extant hostname may\n still be routed to a different hostname which is set as the default\n\n The site will be cached via request._wagtail_site\n \"\"\"\n\n if request is None:\n return None\n\n if not hasattr(request, \"_wagtail_site\"):\n site = Site._find_for_request(request)\n setattr(request, \"_wagtail_site\", site)\n return request._wagtail_site\n\n @staticmethod\n def _find_for_request(request):\n hostname = split_domain_port(request.get_host())[0]\n port = request.get_port()\n site = None\n try:\n site = get_site_for_hostname(hostname, port)\n except Site.DoesNotExist:\n pass\n # copy old SiteMiddleware behaviour\n return site\n\n @property\n def root_url(self):\n if self.port == 80:\n return \"http://%s\" % self.hostname\n elif self.port == 443:\n return \"https://%s\" % self.hostname\n else:\n return \"http://%s:%d\" % (self.hostname, self.port)\n\n def clean_fields(self, exclude=None):\n super().clean_fields(exclude)\n # Only one site can have the is_default_site flag set\n try:\n default = Site.objects.get(is_default_site=True)\n except Site.DoesNotExist:\n pass\n except Site.MultipleObjectsReturned:\n raise\n else:\n if self.is_default_site and self.pk != default.pk:\n raise ValidationError(\n {\n \"is_default_site\": [\n _(\n \"%(hostname)s is already configured as the default site.\"\n \" You must unset that before you can save this site as default.\"\n )\n % {\"hostname\": default.hostname}\n ]\n }\n )\n\n @staticmethod\n def get_site_root_paths():\n \"\"\"\n Return a list of `SiteRootPath` instances, most specific path\n first - used to translate url_paths into actual URLs with hostnames\n\n Each root path is an instance of the `SiteRootPath` named tuple,\n and have the following attributes:\n\n - `site_id` - The ID of the Site record\n - `root_path` - The internal URL path of the site's home page (for example '/home/')\n - `root_url` - The scheme/domain name of the site (for example 
'https://www.example.com/')\n - `language_code` - The language code of the site (for example 'en')\n \"\"\"\n result = cache.get(\n SITE_ROOT_PATHS_CACHE_KEY, version=SITE_ROOT_PATHS_CACHE_VERSION\n )\n\n if result is None:\n result = []\n\n for site in Site.objects.select_related(\n \"root_page\", \"root_page__locale\"\n ).order_by(\"-root_page__url_path\", \"-is_default_site\", \"hostname\"):\n if getattr(settings, \"WAGTAIL_I18N_ENABLED\", False):\n result.extend(\n [\n SiteRootPath(\n site.id,\n root_page.url_path,\n site.root_url,\n root_page.locale.language_code,\n )\n for root_page in site.root_page.get_translations(\n inclusive=True\n ).select_related(\"locale\")\n ]\n )\n else:\n result.append(\n SiteRootPath(\n site.id,\n site.root_page.url_path,\n site.root_url,\n site.root_page.locale.language_code,\n )\n )\n\n cache.set(\n SITE_ROOT_PATHS_CACHE_KEY,\n result,\n 3600,\n version=SITE_ROOT_PATHS_CACHE_VERSION,\n )\n\n else:\n # Convert the cache result to a list of SiteRootPath tuples, as some\n # cache backends (e.g. Redis) don't support named tuples.\n result = [SiteRootPath(*result) for result in result]\n\n return result\n\n @staticmethod\n def clear_site_root_paths_cache():\n cache.delete(SITE_ROOT_PATHS_CACHE_KEY, version=SITE_ROOT_PATHS_CACHE_VERSION)\n", "path": "wagtail/models/sites.py"}]}
num_tokens: 3,281
num_tokens_diff: 94
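The record above patches `wagtail/models/sites.py`. Its quoted code comments note that some cache backends do not round-trip named tuples, which is why `get_site_root_paths` rebuilds `SiteRootPath` objects after a cache read. Below is a minimal, self-contained illustration of that rehydration step; the cached value shown is invented for the example, not taken from Wagtail.

```python
from collections import namedtuple

# Same shape as the SiteRootPath tuple in the record above.
SiteRootPath = namedtuple("SiteRootPath", "site_id root_path root_url language_code")

# A backend that serializes values can hand back plain lists instead of namedtuples...
cached = [[1, "/home/", "https://www.example.com", "en"]]
# ...so the caller restores the namedtuple type (and attribute access) by hand.
result = [SiteRootPath(*row) for row in cached]
assert result[0].root_url == "https://www.example.com"
```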

problem_id: gh_patches_debug_49770
source: rasdani/github-patches
task_type: git_diff
in_source_id: getsentry__sentry-17425
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Event migration 9.1.2 -> 10 <!-- Do you want to ask a question? Are you looking for support? The Sentry message board is the best place for getting support: https://forum.sentry.io --> ## Important Details How are you running Sentry? * [X] On-Premise docker [Version 9.1.2] * [ ] Saas (sentry.io) * [ ] Other [briefly describe your environment] ## Description I followed the migration guide, alongside all fixes and workaround and managed to get to the actual migration routine. Sentry tries to process all existing postgres events but fails to (for every event): ``` An error occured while trying to instert the following event: <sentry.eventstore.models.Event object at 0x7f2f08e552d0> .---- insert() takes at least 8 arguments (8 given) [...] Event migration done. Migrated 0 of 197988 events. ``` ## Steps to Reproduce 1. Have a 9.1.2 onpremise setup and have event data 2. Upgrade to 10 (dev-master), run `install.sh` etc. ### What you expected to happen Migration scripts succeeds and I have all event data in the new version. ### Possible Solution Error message suggests a syntax error? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/sentry/migrations/0024_auto_20191230_2052.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # Generated by Django 1.9.13 on 2019-12-30 20:52 3 from __future__ import unicode_literals, print_function 4 5 import os 6 import types 7 from datetime import timedelta, datetime 8 9 from django.db import migrations 10 from django.utils import timezone 11 12 from sentry import options 13 from sentry.eventstore.models import Event as NewEvent 14 15 16 def backfill_eventstream(apps, schema_editor): 17 """ 18 Inserts Postgres events into the eventstream if there are recent events in Postgres. 19 20 This is for open source users migrating from 9.x who want to keep their events. 21 If there are no recent events in Postgres, skip the backfill. 
22 """ 23 from sentry import eventstore, eventstream 24 from sentry.utils.query import RangeQuerySetWrapper 25 26 Event = apps.get_model("sentry", "Event") 27 Group = apps.get_model("sentry", "Group") 28 Project = apps.get_model("sentry", "Project") 29 30 # Kill switch to skip this migration 31 skip_backfill = os.environ.get("SENTRY_SKIP_EVENTS_BACKFILL_FOR_10", False) 32 33 # Use 90 day retention if the option has not been set or set to 0 34 DEFAULT_RETENTION = 90 35 retention_days = options.get("system.event-retention-days") or DEFAULT_RETENTION 36 37 def get_events(last_days): 38 to_date = timezone.now() 39 from_date = to_date - timedelta(days=last_days) 40 return Event.objects.filter( 41 datetime__gte=from_date, datetime__lte=to_date, group_id__isnull=False 42 ) 43 44 def _attach_related(_events): 45 project_ids = set() 46 group_ids = set() 47 for event in _events: 48 project_ids.add(event.project_id) 49 group_ids.add(event.group_id) 50 projects = {p.id: p for p in Project.objects.filter(id__in=project_ids)} 51 groups = {g.id: g for g in Group.objects.filter(id__in=group_ids)} 52 53 for event in _events: 54 event.project = projects.get(event.project_id) 55 event.group = groups.get(event.group_id) 56 eventstore.bind_nodes(_events, "data") 57 58 if skip_backfill: 59 print("Skipping backfill.\n") 60 return 61 62 events = get_events(retention_days) 63 count = events.count() 64 65 if count == 0: 66 print("Nothing to do, skipping migration.\n") 67 return 68 69 print("Events to process: {}\n".format(count)) 70 71 processed = 0 72 for e in RangeQuerySetWrapper(events, step=100, callbacks=(_attach_related,)): 73 event = NewEvent( 74 project_id=e.project_id, event_id=e.event_id, group_id=e.group_id, data=e.data.data 75 ) 76 primary_hash = event.get_primary_hash() 77 if event.project is None or event.group is None: 78 print("Skipped {} as group or project information is invalid.\n".format(event)) 79 continue 80 81 try: 82 eventstream.insert( 83 group=event.group, 84 event=event, 85 is_new=False, 86 is_regression=False, 87 is_new_group_environment=False, 88 primary_hash=primary_hash, 89 skip_consume=True, 90 ) 91 processed += 1 92 except Exception as error: 93 print( 94 "An error occured while trying to instert the following event: {}\n.----\n{}".format( 95 event, error 96 ) 97 ) 98 99 print("Event migration done. Migrated {} of {} events.\n".format(processed, count)) 100 101 102 class Migration(migrations.Migration): 103 # This flag is used to mark that a migration shouldn't be automatically run in 104 # production. We set this to True for operations that we think are risky and want 105 # someone from ops to run manually and monitor. 106 # General advice is that if in doubt, mark your migration as `is_dangerous`. 107 # Some things you should always mark as dangerous: 108 # - Adding indexes to large tables. These indexes should be created concurrently, 109 # unfortunately we can't run migrations outside of a transaction until Django 110 # 1.10. So until then these should be run manually. 111 # - Large data migrations. Typically we want these to be run manually by ops so that 112 # they can be monitored. Since data migrations will now hold a transaction open 113 # this is even more important. 114 # - Adding columns to highly active tables, even ones that are NULL. 
115 is_dangerous = True 116 117 dependencies = [ 118 ("sentry", "0023_hide_environment_none_20191126"), 119 ] 120 121 operations = [ 122 migrations.RunPython(backfill_eventstream, reverse_code=migrations.RunPython.noop), 123 ] 124 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/sentry/migrations/0024_auto_20191230_2052.py b/src/sentry/migrations/0024_auto_20191230_2052.py --- a/src/sentry/migrations/0024_auto_20191230_2052.py +++ b/src/sentry/migrations/0024_auto_20191230_2052.py @@ -86,6 +86,8 @@ is_regression=False, is_new_group_environment=False, primary_hash=primary_hash, + received_timestamp=event.data.get("received") + or float(event.datetime.strftime("%s")), skip_consume=True, ) processed += 1
{"golden_diff": "diff --git a/src/sentry/migrations/0024_auto_20191230_2052.py b/src/sentry/migrations/0024_auto_20191230_2052.py\n--- a/src/sentry/migrations/0024_auto_20191230_2052.py\n+++ b/src/sentry/migrations/0024_auto_20191230_2052.py\n@@ -86,6 +86,8 @@\n is_regression=False,\n is_new_group_environment=False,\n primary_hash=primary_hash,\n+ received_timestamp=event.data.get(\"received\")\n+ or float(event.datetime.strftime(\"%s\")),\n skip_consume=True,\n )\n processed += 1\n", "issue": "Event migration 9.1.2 -> 10\n<!--\r\n\r\nDo you want to ask a question? Are you looking for support? The Sentry message\r\nboard is the best place for getting support: https://forum.sentry.io\r\n-->\r\n\r\n## Important Details\r\n\r\nHow are you running Sentry?\r\n\r\n* [X] On-Premise docker [Version 9.1.2]\r\n* [ ] Saas (sentry.io)\r\n* [ ] Other [briefly describe your environment]\r\n\r\n## Description\r\n\r\nI followed the migration guide, alongside all fixes and workaround and managed to get to the actual migration routine. Sentry tries to process all existing postgres events but fails to (for every event):\r\n\r\n```\r\nAn error occured while trying to instert the following event: <sentry.eventstore.models.Event object at 0x7f2f08e552d0>\r\n.----\r\ninsert() takes at least 8 arguments (8 given)\r\n[...]\r\nEvent migration done. Migrated 0 of 197988 events.\r\n```\r\n\r\n## Steps to Reproduce\r\n\r\n1. Have a 9.1.2 onpremise setup and have event data\r\n2. Upgrade to 10 (dev-master), run `install.sh` etc.\r\n\r\n### What you expected to happen\r\n\r\nMigration scripts succeeds and I have all event data in the new version.\r\n\r\n### Possible Solution\r\n\r\nError message suggests a syntax error?\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.13 on 2019-12-30 20:52\nfrom __future__ import unicode_literals, print_function\n\nimport os\nimport types\nfrom datetime import timedelta, datetime\n\nfrom django.db import migrations\nfrom django.utils import timezone\n\nfrom sentry import options\nfrom sentry.eventstore.models import Event as NewEvent\n\n\ndef backfill_eventstream(apps, schema_editor):\n \"\"\"\n Inserts Postgres events into the eventstream if there are recent events in Postgres.\n\n This is for open source users migrating from 9.x who want to keep their events.\n If there are no recent events in Postgres, skip the backfill.\n \"\"\"\n from sentry import eventstore, eventstream\n from sentry.utils.query import RangeQuerySetWrapper\n\n Event = apps.get_model(\"sentry\", \"Event\")\n Group = apps.get_model(\"sentry\", \"Group\")\n Project = apps.get_model(\"sentry\", \"Project\")\n\n # Kill switch to skip this migration\n skip_backfill = os.environ.get(\"SENTRY_SKIP_EVENTS_BACKFILL_FOR_10\", False)\n\n # Use 90 day retention if the option has not been set or set to 0\n DEFAULT_RETENTION = 90\n retention_days = options.get(\"system.event-retention-days\") or DEFAULT_RETENTION\n\n def get_events(last_days):\n to_date = timezone.now()\n from_date = to_date - timedelta(days=last_days)\n return Event.objects.filter(\n datetime__gte=from_date, datetime__lte=to_date, group_id__isnull=False\n )\n\n def _attach_related(_events):\n project_ids = set()\n group_ids = set()\n for event in _events:\n project_ids.add(event.project_id)\n group_ids.add(event.group_id)\n projects = {p.id: p for p in Project.objects.filter(id__in=project_ids)}\n groups = {g.id: g for g in Group.objects.filter(id__in=group_ids)}\n\n for event in _events:\n event.project = 
projects.get(event.project_id)\n event.group = groups.get(event.group_id)\n eventstore.bind_nodes(_events, \"data\")\n\n if skip_backfill:\n print(\"Skipping backfill.\\n\")\n return\n\n events = get_events(retention_days)\n count = events.count()\n\n if count == 0:\n print(\"Nothing to do, skipping migration.\\n\")\n return\n\n print(\"Events to process: {}\\n\".format(count))\n\n processed = 0\n for e in RangeQuerySetWrapper(events, step=100, callbacks=(_attach_related,)):\n event = NewEvent(\n project_id=e.project_id, event_id=e.event_id, group_id=e.group_id, data=e.data.data\n )\n primary_hash = event.get_primary_hash()\n if event.project is None or event.group is None:\n print(\"Skipped {} as group or project information is invalid.\\n\".format(event))\n continue\n\n try:\n eventstream.insert(\n group=event.group,\n event=event,\n is_new=False,\n is_regression=False,\n is_new_group_environment=False,\n primary_hash=primary_hash,\n skip_consume=True,\n )\n processed += 1\n except Exception as error:\n print(\n \"An error occured while trying to instert the following event: {}\\n.----\\n{}\".format(\n event, error\n )\n )\n\n print(\"Event migration done. Migrated {} of {} events.\\n\".format(processed, count))\n\n\nclass Migration(migrations.Migration):\n # This flag is used to mark that a migration shouldn't be automatically run in\n # production. We set this to True for operations that we think are risky and want\n # someone from ops to run manually and monitor.\n # General advice is that if in doubt, mark your migration as `is_dangerous`.\n # Some things you should always mark as dangerous:\n # - Adding indexes to large tables. These indexes should be created concurrently,\n # unfortunately we can't run migrations outside of a transaction until Django\n # 1.10. So until then these should be run manually.\n # - Large data migrations. Typically we want these to be run manually by ops so that\n # they can be monitored. 
Since data migrations will now hold a transaction open\n # this is even more important.\n # - Adding columns to highly active tables, even ones that are NULL.\n is_dangerous = True\n\n dependencies = [\n (\"sentry\", \"0023_hide_environment_none_20191126\"),\n ]\n\n operations = [\n migrations.RunPython(backfill_eventstream, reverse_code=migrations.RunPython.noop),\n ]\n", "path": "src/sentry/migrations/0024_auto_20191230_2052.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Generated by Django 1.9.13 on 2019-12-30 20:52\nfrom __future__ import unicode_literals, print_function\n\nimport os\nimport types\nfrom datetime import timedelta, datetime\n\nfrom django.db import migrations\nfrom django.utils import timezone\n\nfrom sentry import options\nfrom sentry.eventstore.models import Event as NewEvent\n\n\ndef backfill_eventstream(apps, schema_editor):\n \"\"\"\n Inserts Postgres events into the eventstream if there are recent events in Postgres.\n\n This is for open source users migrating from 9.x who want to keep their events.\n If there are no recent events in Postgres, skip the backfill.\n \"\"\"\n from sentry import eventstore, eventstream\n from sentry.utils.query import RangeQuerySetWrapper\n\n Event = apps.get_model(\"sentry\", \"Event\")\n Group = apps.get_model(\"sentry\", \"Group\")\n Project = apps.get_model(\"sentry\", \"Project\")\n\n # Kill switch to skip this migration\n skip_backfill = os.environ.get(\"SENTRY_SKIP_EVENTS_BACKFILL_FOR_10\", False)\n\n # Use 90 day retention if the option has not been set or set to 0\n DEFAULT_RETENTION = 90\n retention_days = options.get(\"system.event-retention-days\") or DEFAULT_RETENTION\n\n def get_events(last_days):\n to_date = timezone.now()\n from_date = to_date - timedelta(days=last_days)\n return Event.objects.filter(\n datetime__gte=from_date, datetime__lte=to_date, group_id__isnull=False\n )\n\n def _attach_related(_events):\n project_ids = set()\n group_ids = set()\n for event in _events:\n project_ids.add(event.project_id)\n group_ids.add(event.group_id)\n projects = {p.id: p for p in Project.objects.filter(id__in=project_ids)}\n groups = {g.id: g for g in Group.objects.filter(id__in=group_ids)}\n\n for event in _events:\n event.project = projects.get(event.project_id)\n event.group = groups.get(event.group_id)\n eventstore.bind_nodes(_events, \"data\")\n\n if skip_backfill:\n print(\"Skipping backfill.\\n\")\n return\n\n events = get_events(retention_days)\n count = events.count()\n\n if count == 0:\n print(\"Nothing to do, skipping migration.\\n\")\n return\n\n print(\"Events to process: {}\\n\".format(count))\n\n processed = 0\n for e in RangeQuerySetWrapper(events, step=100, callbacks=(_attach_related,)):\n event = NewEvent(\n project_id=e.project_id, event_id=e.event_id, group_id=e.group_id, data=e.data.data\n )\n primary_hash = event.get_primary_hash()\n if event.project is None or event.group is None:\n print(\"Skipped {} as group or project information is invalid.\\n\".format(event))\n continue\n\n try:\n eventstream.insert(\n group=event.group,\n event=event,\n is_new=False,\n is_regression=False,\n is_new_group_environment=False,\n primary_hash=primary_hash,\n received_timestamp=event.data.get(\"received\")\n or float(event.datetime.strftime(\"%s\")),\n skip_consume=True,\n )\n processed += 1\n except Exception as error:\n print(\n \"An error occured while trying to instert the following event: {}\\n.----\\n{}\".format(\n event, error\n )\n )\n\n print(\"Event migration done. 
Migrated {} of {} events.\\n\".format(processed, count))\n\n\nclass Migration(migrations.Migration):\n # This flag is used to mark that a migration shouldn't be automatically run in\n # production. We set this to True for operations that we think are risky and want\n # someone from ops to run manually and monitor.\n # General advice is that if in doubt, mark your migration as `is_dangerous`.\n # Some things you should always mark as dangerous:\n # - Adding indexes to large tables. These indexes should be created concurrently,\n # unfortunately we can't run migrations outside of a transaction until Django\n # 1.10. So until then these should be run manually.\n # - Large data migrations. Typically we want these to be run manually by ops so that\n # they can be monitored. Since data migrations will now hold a transaction open\n # this is even more important.\n # - Adding columns to highly active tables, even ones that are NULL.\n is_dangerous = True\n\n dependencies = [\n (\"sentry\", \"0023_hide_environment_none_20191126\"),\n ]\n\n operations = [\n migrations.RunPython(backfill_eventstream, reverse_code=migrations.RunPython.noop),\n ]\n", "path": "src/sentry/migrations/0024_auto_20191230_2052.py"}]}
num_tokens: 1,894
num_tokens_diff: 181
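In this record, the `insert() takes at least 8 arguments (8 given)` failure is resolved by the golden diff passing the missing `received_timestamp` keyword to `eventstream.insert`. A minimal restatement of that fallback follows, assuming only that `event` exposes a `data` dict and a `datetime` attribute; the fake object below is ours, not Sentry's event class.

```python
from datetime import datetime
from types import SimpleNamespace

def received_timestamp(event):
    # Prefer the ingest-time "received" value; otherwise derive a Unix timestamp
    # from the stored datetime ("%s" is a glibc strftime extension, so this path
    # assumes a Linux-like host).
    return event.data.get("received") or float(event.datetime.strftime("%s"))

fake = SimpleNamespace(data={"received": 1577738860.0}, datetime=datetime(2019, 12, 30, 20, 52))
print(received_timestamp(fake))  # -> 1577738860.0
```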

problem_id: gh_patches_debug_25785
source: rasdani/github-patches
task_type: git_diff
in_source_id: canonical__snapcraft-4295
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- LXD errors from Craft Providers not properly presented ### Bug Description When there's a Craft Providers error related to running LXD, as a user we get a traceback ### To Reproduce On a fresh system: - install snapcraft - run a core22 snapcraft project You will be queried to install LXD, and that works well, but we are later presented with an error not being setup. ### Environment Fresh Ubuntu 22.04.2 LTS ### snapcraft.yaml ```shell name: foo version: irrelevant summary: reproducer description: will only trigger if LXD has not been configured to work correctly base: core22 parts: foo: plugin: nil ``` ### Relevant log output ```shell sergiusens@imladris:~/projects/snaps/darktable$ snapcraft LXD is required but not installed. Do you wish to install LXD and configure it with the defaults? [y/N]: y [sudo] password for sergiusens: lxd 5.15-002fa0f from Canonical✓ installed WARNING: There is 1 new warning. See 'snap warnings'. Traceback (most recent call last): File "/snap/snapcraft/9362/bin/snapcraft", line 8, in <module> sys.exit(run()) File "/snap/snapcraft/9362/lib/python3.8/site-packages/snapcraft/cli.py", line 255, in run _run_dispatcher(dispatcher, global_args) File "/snap/snapcraft/9362/lib/python3.8/site-packages/snapcraft/cli.py", line 228, in _run_dispatcher dispatcher.run() File "/snap/snapcraft/9362/lib/python3.8/site-packages/craft_cli/dispatcher.py", line 448, in run return self._loaded_command.run(self._parsed_command_args) File "/snap/snapcraft/9362/lib/python3.8/site-packages/snapcraft/commands/lifecycle.py", line 265, in run super().run(parsed_args) File "/snap/snapcraft/9362/lib/python3.8/site-packages/snapcraft/commands/lifecycle.py", line 138, in run parts_lifecycle.run(self.name, parsed_args) File "/snap/snapcraft/9362/lib/python3.8/site-packages/snapcraft/parts/lifecycle.py", line 216, in run _run_command( File "/snap/snapcraft/9362/lib/python3.8/site-packages/snapcraft/parts/lifecycle.py", line 262, in _run_command _run_in_provider(project, command_name, parsed_args) File "/snap/snapcraft/9362/lib/python3.8/site-packages/snapcraft/parts/lifecycle.py", line 487, in _run_in_provider providers.ensure_provider_is_available(provider) File "/snap/snapcraft/9362/lib/python3.8/site-packages/snapcraft/providers.py", line 148, in ensure_provider_is_available LXDProvider.ensure_provider_is_available() File "/snap/snapcraft/9362/lib/python3.8/site-packages/craft_providers/lxd/lxd_provider.py", line 69, in ensure_provider_is_available install() File "/snap/snapcraft/9362/lib/python3.8/site-packages/craft_providers/lxd/installer.py", line 71, in install raise errors.LXDInstallationError( craft_providers.lxd.errors.LXDInstallationError: Failed to install LXD: user must be manually added to 'lxd' group before using LXD. Visit https://linuxcontainers.org/lxd/getting-started-cli/ for instructions on installing and configuring LXD for your operating system. ``` ### Additional context _No response_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `snapcraft/cli.py` Content: ``` 1 # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*- 2 # 3 # Copyright 2022 Canonical Ltd. 4 # 5 # This program is free software: you can redistribute it and/or modify 6 # it under the terms of the GNU General Public License version 3 as 7 # published by the Free Software Foundation. 
8 # 9 # This program is distributed in the hope that it will be useful, 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 # GNU General Public License for more details. 13 # 14 # You should have received a copy of the GNU General Public License 15 # along with this program. If not, see <http://www.gnu.org/licenses/>. 16 17 """Command-line application entry point.""" 18 19 import argparse 20 import contextlib 21 import logging 22 import os 23 import sys 24 from typing import Any, Dict 25 26 import craft_cli 27 import craft_store 28 from craft_cli import ArgumentParsingError, EmitterMode, ProvideHelpException, emit 29 30 import snapcraft 31 import snapcraft_legacy 32 from snapcraft import __version__, errors, store, utils 33 from snapcraft.parts import plugins 34 from snapcraft_legacy.cli import legacy 35 36 from . import commands 37 from .legacy_cli import _LIB_NAMES, _ORIGINAL_LIB_NAME_LOG_LEVEL, run_legacy 38 39 COMMAND_GROUPS = [ 40 craft_cli.CommandGroup( 41 "Lifecycle", 42 [ 43 commands.CleanCommand, 44 commands.PullCommand, 45 commands.BuildCommand, 46 commands.StageCommand, 47 commands.PrimeCommand, 48 commands.PackCommand, 49 commands.RemoteBuildCommand, 50 commands.SnapCommand, # hidden (legacy compatibility) 51 commands.PluginsCommand, 52 commands.ListPluginsCommand, 53 commands.TryCommand, 54 ], 55 ), 56 craft_cli.CommandGroup( 57 "Extensions", 58 [ 59 commands.ListExtensionsCommand, 60 commands.ExtensionsCommand, # hidden (alias to list-extensions) 61 commands.ExpandExtensionsCommand, 62 ], 63 ), 64 craft_cli.CommandGroup( 65 "Store Account", 66 [ 67 commands.StoreLoginCommand, 68 commands.StoreExportLoginCommand, 69 commands.StoreLogoutCommand, 70 commands.StoreWhoAmICommand, 71 ], 72 ), 73 craft_cli.CommandGroup( 74 "Store Snap Names", 75 [ 76 commands.StoreRegisterCommand, 77 commands.StoreNamesCommand, 78 commands.StoreLegacyListRegisteredCommand, 79 commands.StoreLegacyListCommand, 80 commands.StoreLegacyMetricsCommand, 81 commands.StoreLegacyUploadMetadataCommand, 82 ], 83 ), 84 craft_cli.CommandGroup( 85 "Store Snap Release Management", 86 [ 87 commands.StoreReleaseCommand, 88 commands.StoreCloseCommand, 89 commands.StoreStatusCommand, 90 commands.StoreUploadCommand, 91 commands.StoreLegacyPushCommand, # hidden (legacy for upload) 92 commands.StoreLegacyPromoteCommand, 93 commands.StoreListRevisionsCommand, 94 commands.StoreRevisionsCommand, # hidden (alias to list-revisions) 95 ], 96 ), 97 craft_cli.CommandGroup( 98 "Store Snap Tracks", 99 [ 100 commands.StoreListTracksCommand, 101 commands.StoreTracksCommand, # hidden (alias to list-tracks) 102 commands.StoreLegacySetDefaultTrackCommand, 103 ], 104 ), 105 craft_cli.CommandGroup( 106 "Store Key Management", 107 [ 108 commands.StoreLegacyCreateKeyCommand, 109 commands.StoreLegacyRegisterKeyCommand, 110 commands.StoreLegacySignBuildCommand, 111 commands.StoreLegacyListKeysCommand, 112 ], 113 ), 114 craft_cli.CommandGroup( 115 "Store Validation Sets", 116 [ 117 commands.StoreEditValidationSetsCommand, 118 commands.StoreLegacyListValidationSetsCommand, 119 commands.StoreLegacyValidateCommand, 120 commands.StoreLegacyGatedCommand, 121 ], 122 ), 123 craft_cli.CommandGroup( 124 "Other", 125 [ 126 commands.VersionCommand, 127 commands.LintCommand, 128 commands.InitCommand, 129 ], 130 ), 131 ] 132 133 GLOBAL_ARGS = [ 134 craft_cli.GlobalArgument( 135 "version", "flag", "-V", "--version", "Show the application version and exit" 136 ), 137 
craft_cli.GlobalArgument("trace", "flag", "-t", "--trace", argparse.SUPPRESS), 138 ] 139 140 141 def get_verbosity() -> EmitterMode: 142 """Return the verbosity level to use. 143 144 if SNAPCRAFT_ENABLE_DEVELOPER_DEBUG is set, the 145 default verbosity will be set to EmitterMode.DEBUG. 146 147 If stdin is closed, the default verbosity will be 148 set to EmitterMode.VERBOSE. 149 """ 150 verbosity = EmitterMode.BRIEF 151 152 if not sys.stdin.isatty(): 153 verbosity = EmitterMode.VERBOSE 154 155 with contextlib.suppress(ValueError): 156 # Parse environment variable for backwards compatibility with launchpad 157 if utils.strtobool(os.getenv("SNAPCRAFT_ENABLE_DEVELOPER_DEBUG", "n").strip()): 158 verbosity = EmitterMode.DEBUG 159 160 # if defined, use environmental variable SNAPCRAFT_VERBOSITY_LEVEL 161 verbosity_env = os.getenv("SNAPCRAFT_VERBOSITY_LEVEL") 162 if verbosity_env: 163 try: 164 verbosity = EmitterMode[verbosity_env.strip().upper()] 165 except KeyError: 166 values = utils.humanize_list( 167 [e.name.lower() for e in EmitterMode], "and", sort=False 168 ) 169 raise ArgumentParsingError( 170 f"cannot parse verbosity level {verbosity_env!r} from environment " 171 f"variable SNAPCRAFT_VERBOSITY_LEVEL (valid values are {values})" 172 ) from KeyError 173 174 return verbosity 175 176 177 def get_dispatcher() -> craft_cli.Dispatcher: 178 """Return an instance of Dispatcher. 179 180 Run all the checks and setup required to ensure the Dispatcher can run. 181 """ 182 # Run the legacy implementation if inside a legacy managed environment. 183 if os.getenv("SNAPCRAFT_BUILD_ENVIRONMENT") == "managed-host": 184 snapcraft.ProjectOptions = snapcraft_legacy.ProjectOptions # type: ignore 185 legacy.legacy_run() 186 187 # set lib loggers to debug level so that all messages are sent to Emitter 188 for lib_name in _LIB_NAMES: 189 logger = logging.getLogger(lib_name) 190 _ORIGINAL_LIB_NAME_LOG_LEVEL[lib_name] = logger.level 191 logger.setLevel(logging.DEBUG) 192 193 if utils.is_managed_mode(): 194 log_filepath = utils.get_managed_environment_log_path() 195 else: 196 log_filepath = None 197 198 emit.init( 199 mode=get_verbosity(), 200 appname="snapcraft", 201 greeting=f"Starting Snapcraft {__version__}", 202 log_filepath=log_filepath, 203 ) 204 205 return craft_cli.Dispatcher( 206 "snapcraft", 207 COMMAND_GROUPS, 208 summary="Package, distribute, and update snaps for Linux and IoT", 209 extra_global_args=GLOBAL_ARGS, 210 default_command=commands.PackCommand, 211 ) 212 213 214 def _run_dispatcher( 215 dispatcher: craft_cli.Dispatcher, global_args: Dict[str, Any] 216 ) -> None: 217 if global_args.get("version"): 218 emit.message(f"snapcraft {__version__}") 219 else: 220 if global_args.get("trace"): 221 emit.message( 222 "Options -t and --trace are deprecated, use --verbosity=debug instead." 
223 ) 224 emit.set_mode(EmitterMode.DEBUG) 225 226 dispatcher.load_command(None) 227 dispatcher.run() 228 emit.ended_ok() 229 230 231 def _emit_error(error, cause=None): 232 """Emit the error in a centralized way so we can alter it consistently.""" 233 # set the cause, if any 234 if cause is not None: 235 error.__cause__ = cause 236 237 # Do not report the internal logpath if running inside instance 238 if utils.is_managed_mode(): 239 error.logpath_report = False 240 241 emit.error(error) 242 243 244 def run(): # noqa: C901 245 """Run the CLI.""" 246 dispatcher = get_dispatcher() 247 retcode = 1 248 249 try: 250 # Register our own plugins 251 global_args = dispatcher.pre_parse_args(sys.argv[1:]) 252 plugins.register() 253 254 _run_dispatcher(dispatcher, global_args) 255 retcode = 0 256 except ArgumentParsingError as err: 257 # TODO https://github.com/canonical/craft-cli/issues/78 258 with contextlib.suppress(KeyError, IndexError): 259 if ( 260 err.__context__ is not None 261 and err.__context__.args[0] # pylint: disable=no-member 262 not in dispatcher.commands 263 ): 264 run_legacy(err) 265 print(err, file=sys.stderr) # to stderr, as argparse normally does 266 emit.ended_ok() 267 retcode = 1 268 except ProvideHelpException as err: 269 print(err, file=sys.stderr) # to stderr, as argparse normally does 270 emit.ended_ok() 271 retcode = 0 272 except errors.LegacyFallback as err: 273 run_legacy(err) 274 except KeyboardInterrupt as err: 275 _emit_error(craft_cli.errors.CraftError("Interrupted."), cause=err) 276 retcode = 1 277 except craft_store.errors.NoKeyringError as err: 278 _emit_error( 279 craft_cli.errors.CraftError( 280 f"craft-store error: {err}", 281 resolution=( 282 "Ensure the keyring is working or " 283 f"{store.constants.ENVIRONMENT_STORE_CREDENTIALS} " 284 "is correctly exported into the environment" 285 ), 286 docs_url="https://snapcraft.io/docs/snapcraft-authentication", 287 ) 288 ) 289 retcode = 1 290 except craft_store.errors.CraftStoreError as err: 291 _emit_error(craft_cli.errors.CraftError(f"craft-store error: {err}")) 292 retcode = 1 293 except errors.LinterError as err: 294 emit.error(craft_cli.errors.CraftError(f"linter error: {err}")) 295 retcode = err.exit_code 296 except errors.SnapcraftError as err: 297 _emit_error(err) 298 retcode = 1 299 300 return retcode 301 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/snapcraft/cli.py b/snapcraft/cli.py --- a/snapcraft/cli.py +++ b/snapcraft/cli.py @@ -1,6 +1,6 @@ # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*- # -# Copyright 2022 Canonical Ltd. +# Copyright 2022-2023 Canonical Ltd. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3 as @@ -26,6 +26,7 @@ import craft_cli import craft_store from craft_cli import ArgumentParsingError, EmitterMode, ProvideHelpException, emit +from craft_providers import ProviderError import snapcraft import snapcraft_legacy @@ -290,6 +291,9 @@ except craft_store.errors.CraftStoreError as err: _emit_error(craft_cli.errors.CraftError(f"craft-store error: {err}")) retcode = 1 + except ProviderError as err: + _emit_error(craft_cli.errors.CraftError(f"craft-providers error: {err}")) + retcode = 1 except errors.LinterError as err: emit.error(craft_cli.errors.CraftError(f"linter error: {err}")) retcode = err.exit_code
{"golden_diff": "diff --git a/snapcraft/cli.py b/snapcraft/cli.py\n--- a/snapcraft/cli.py\n+++ b/snapcraft/cli.py\n@@ -1,6 +1,6 @@\n # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-\n #\n-# Copyright 2022 Canonical Ltd.\n+# Copyright 2022-2023 Canonical Ltd.\n #\n # This program is free software: you can redistribute it and/or modify\n # it under the terms of the GNU General Public License version 3 as\n@@ -26,6 +26,7 @@\n import craft_cli\n import craft_store\n from craft_cli import ArgumentParsingError, EmitterMode, ProvideHelpException, emit\n+from craft_providers import ProviderError\n \n import snapcraft\n import snapcraft_legacy\n@@ -290,6 +291,9 @@\n except craft_store.errors.CraftStoreError as err:\n _emit_error(craft_cli.errors.CraftError(f\"craft-store error: {err}\"))\n retcode = 1\n+ except ProviderError as err:\n+ _emit_error(craft_cli.errors.CraftError(f\"craft-providers error: {err}\"))\n+ retcode = 1\n except errors.LinterError as err:\n emit.error(craft_cli.errors.CraftError(f\"linter error: {err}\"))\n retcode = err.exit_code\n", "issue": "LXD errors from Craft Providers not properly presented\n### Bug Description\n\nWhen there's a Craft Providers error related to running LXD, as a user we get a traceback\n\n### To Reproduce\n\nOn a fresh system:\r\n\r\n- install snapcraft\r\n- run a core22 snapcraft project\r\n\r\nYou will be queried to install LXD, and that works well, but we are later presented with an error not being setup.\n\n### Environment\n\nFresh Ubuntu 22.04.2 LTS\n\n### snapcraft.yaml\n\n```shell\nname: foo\r\nversion: irrelevant\r\nsummary: reproducer\r\ndescription: will only trigger if LXD has not been configured to work correctly\r\nbase: core22\r\n\r\nparts:\r\n foo:\r\n plugin: nil\n```\n\n\n### Relevant log output\n\n```shell\nsergiusens@imladris:~/projects/snaps/darktable$ snapcraft \r\nLXD is required but not installed. Do you wish to install LXD and configure it with the defaults? [y/N]: y\r\n[sudo] password for sergiusens: \r\nlxd 5.15-002fa0f from Canonical\u2713 installed\r\nWARNING: There is 1 new warning. 
See 'snap warnings'.\r\nTraceback (most recent call last):\r\n File \"/snap/snapcraft/9362/bin/snapcraft\", line 8, in <module>\r\n sys.exit(run())\r\n File \"/snap/snapcraft/9362/lib/python3.8/site-packages/snapcraft/cli.py\", line 255, in run\r\n _run_dispatcher(dispatcher, global_args)\r\n File \"/snap/snapcraft/9362/lib/python3.8/site-packages/snapcraft/cli.py\", line 228, in _run_dispatcher\r\n dispatcher.run()\r\n File \"/snap/snapcraft/9362/lib/python3.8/site-packages/craft_cli/dispatcher.py\", line 448, in run\r\n return self._loaded_command.run(self._parsed_command_args)\r\n File \"/snap/snapcraft/9362/lib/python3.8/site-packages/snapcraft/commands/lifecycle.py\", line 265, in run\r\n super().run(parsed_args)\r\n File \"/snap/snapcraft/9362/lib/python3.8/site-packages/snapcraft/commands/lifecycle.py\", line 138, in run\r\n parts_lifecycle.run(self.name, parsed_args)\r\n File \"/snap/snapcraft/9362/lib/python3.8/site-packages/snapcraft/parts/lifecycle.py\", line 216, in run\r\n _run_command(\r\n File \"/snap/snapcraft/9362/lib/python3.8/site-packages/snapcraft/parts/lifecycle.py\", line 262, in _run_command\r\n _run_in_provider(project, command_name, parsed_args)\r\n File \"/snap/snapcraft/9362/lib/python3.8/site-packages/snapcraft/parts/lifecycle.py\", line 487, in _run_in_provider\r\n providers.ensure_provider_is_available(provider)\r\n File \"/snap/snapcraft/9362/lib/python3.8/site-packages/snapcraft/providers.py\", line 148, in ensure_provider_is_available\r\n LXDProvider.ensure_provider_is_available()\r\n File \"/snap/snapcraft/9362/lib/python3.8/site-packages/craft_providers/lxd/lxd_provider.py\", line 69, in ensure_provider_is_available\r\n install()\r\n File \"/snap/snapcraft/9362/lib/python3.8/site-packages/craft_providers/lxd/installer.py\", line 71, in install\r\n raise errors.LXDInstallationError(\r\ncraft_providers.lxd.errors.LXDInstallationError: Failed to install LXD: user must be manually added to 'lxd' group before using LXD.\r\nVisit https://linuxcontainers.org/lxd/getting-started-cli/ for instructions on installing and configuring LXD for your operating system.\n```\n\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-\n#\n# Copyright 2022 Canonical Ltd.\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License version 3 as\n# published by the Free Software Foundation.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Command-line application entry point.\"\"\"\n\nimport argparse\nimport contextlib\nimport logging\nimport os\nimport sys\nfrom typing import Any, Dict\n\nimport craft_cli\nimport craft_store\nfrom craft_cli import ArgumentParsingError, EmitterMode, ProvideHelpException, emit\n\nimport snapcraft\nimport snapcraft_legacy\nfrom snapcraft import __version__, errors, store, utils\nfrom snapcraft.parts import plugins\nfrom snapcraft_legacy.cli import legacy\n\nfrom . 
import commands\nfrom .legacy_cli import _LIB_NAMES, _ORIGINAL_LIB_NAME_LOG_LEVEL, run_legacy\n\nCOMMAND_GROUPS = [\n craft_cli.CommandGroup(\n \"Lifecycle\",\n [\n commands.CleanCommand,\n commands.PullCommand,\n commands.BuildCommand,\n commands.StageCommand,\n commands.PrimeCommand,\n commands.PackCommand,\n commands.RemoteBuildCommand,\n commands.SnapCommand, # hidden (legacy compatibility)\n commands.PluginsCommand,\n commands.ListPluginsCommand,\n commands.TryCommand,\n ],\n ),\n craft_cli.CommandGroup(\n \"Extensions\",\n [\n commands.ListExtensionsCommand,\n commands.ExtensionsCommand, # hidden (alias to list-extensions)\n commands.ExpandExtensionsCommand,\n ],\n ),\n craft_cli.CommandGroup(\n \"Store Account\",\n [\n commands.StoreLoginCommand,\n commands.StoreExportLoginCommand,\n commands.StoreLogoutCommand,\n commands.StoreWhoAmICommand,\n ],\n ),\n craft_cli.CommandGroup(\n \"Store Snap Names\",\n [\n commands.StoreRegisterCommand,\n commands.StoreNamesCommand,\n commands.StoreLegacyListRegisteredCommand,\n commands.StoreLegacyListCommand,\n commands.StoreLegacyMetricsCommand,\n commands.StoreLegacyUploadMetadataCommand,\n ],\n ),\n craft_cli.CommandGroup(\n \"Store Snap Release Management\",\n [\n commands.StoreReleaseCommand,\n commands.StoreCloseCommand,\n commands.StoreStatusCommand,\n commands.StoreUploadCommand,\n commands.StoreLegacyPushCommand, # hidden (legacy for upload)\n commands.StoreLegacyPromoteCommand,\n commands.StoreListRevisionsCommand,\n commands.StoreRevisionsCommand, # hidden (alias to list-revisions)\n ],\n ),\n craft_cli.CommandGroup(\n \"Store Snap Tracks\",\n [\n commands.StoreListTracksCommand,\n commands.StoreTracksCommand, # hidden (alias to list-tracks)\n commands.StoreLegacySetDefaultTrackCommand,\n ],\n ),\n craft_cli.CommandGroup(\n \"Store Key Management\",\n [\n commands.StoreLegacyCreateKeyCommand,\n commands.StoreLegacyRegisterKeyCommand,\n commands.StoreLegacySignBuildCommand,\n commands.StoreLegacyListKeysCommand,\n ],\n ),\n craft_cli.CommandGroup(\n \"Store Validation Sets\",\n [\n commands.StoreEditValidationSetsCommand,\n commands.StoreLegacyListValidationSetsCommand,\n commands.StoreLegacyValidateCommand,\n commands.StoreLegacyGatedCommand,\n ],\n ),\n craft_cli.CommandGroup(\n \"Other\",\n [\n commands.VersionCommand,\n commands.LintCommand,\n commands.InitCommand,\n ],\n ),\n]\n\nGLOBAL_ARGS = [\n craft_cli.GlobalArgument(\n \"version\", \"flag\", \"-V\", \"--version\", \"Show the application version and exit\"\n ),\n craft_cli.GlobalArgument(\"trace\", \"flag\", \"-t\", \"--trace\", argparse.SUPPRESS),\n]\n\n\ndef get_verbosity() -> EmitterMode:\n \"\"\"Return the verbosity level to use.\n\n if SNAPCRAFT_ENABLE_DEVELOPER_DEBUG is set, the\n default verbosity will be set to EmitterMode.DEBUG.\n\n If stdin is closed, the default verbosity will be\n set to EmitterMode.VERBOSE.\n \"\"\"\n verbosity = EmitterMode.BRIEF\n\n if not sys.stdin.isatty():\n verbosity = EmitterMode.VERBOSE\n\n with contextlib.suppress(ValueError):\n # Parse environment variable for backwards compatibility with launchpad\n if utils.strtobool(os.getenv(\"SNAPCRAFT_ENABLE_DEVELOPER_DEBUG\", \"n\").strip()):\n verbosity = EmitterMode.DEBUG\n\n # if defined, use environmental variable SNAPCRAFT_VERBOSITY_LEVEL\n verbosity_env = os.getenv(\"SNAPCRAFT_VERBOSITY_LEVEL\")\n if verbosity_env:\n try:\n verbosity = EmitterMode[verbosity_env.strip().upper()]\n except KeyError:\n values = utils.humanize_list(\n [e.name.lower() for e in EmitterMode], \"and\", sort=False\n 
)\n raise ArgumentParsingError(\n f\"cannot parse verbosity level {verbosity_env!r} from environment \"\n f\"variable SNAPCRAFT_VERBOSITY_LEVEL (valid values are {values})\"\n ) from KeyError\n\n return verbosity\n\n\ndef get_dispatcher() -> craft_cli.Dispatcher:\n \"\"\"Return an instance of Dispatcher.\n\n Run all the checks and setup required to ensure the Dispatcher can run.\n \"\"\"\n # Run the legacy implementation if inside a legacy managed environment.\n if os.getenv(\"SNAPCRAFT_BUILD_ENVIRONMENT\") == \"managed-host\":\n snapcraft.ProjectOptions = snapcraft_legacy.ProjectOptions # type: ignore\n legacy.legacy_run()\n\n # set lib loggers to debug level so that all messages are sent to Emitter\n for lib_name in _LIB_NAMES:\n logger = logging.getLogger(lib_name)\n _ORIGINAL_LIB_NAME_LOG_LEVEL[lib_name] = logger.level\n logger.setLevel(logging.DEBUG)\n\n if utils.is_managed_mode():\n log_filepath = utils.get_managed_environment_log_path()\n else:\n log_filepath = None\n\n emit.init(\n mode=get_verbosity(),\n appname=\"snapcraft\",\n greeting=f\"Starting Snapcraft {__version__}\",\n log_filepath=log_filepath,\n )\n\n return craft_cli.Dispatcher(\n \"snapcraft\",\n COMMAND_GROUPS,\n summary=\"Package, distribute, and update snaps for Linux and IoT\",\n extra_global_args=GLOBAL_ARGS,\n default_command=commands.PackCommand,\n )\n\n\ndef _run_dispatcher(\n dispatcher: craft_cli.Dispatcher, global_args: Dict[str, Any]\n) -> None:\n if global_args.get(\"version\"):\n emit.message(f\"snapcraft {__version__}\")\n else:\n if global_args.get(\"trace\"):\n emit.message(\n \"Options -t and --trace are deprecated, use --verbosity=debug instead.\"\n )\n emit.set_mode(EmitterMode.DEBUG)\n\n dispatcher.load_command(None)\n dispatcher.run()\n emit.ended_ok()\n\n\ndef _emit_error(error, cause=None):\n \"\"\"Emit the error in a centralized way so we can alter it consistently.\"\"\"\n # set the cause, if any\n if cause is not None:\n error.__cause__ = cause\n\n # Do not report the internal logpath if running inside instance\n if utils.is_managed_mode():\n error.logpath_report = False\n\n emit.error(error)\n\n\ndef run(): # noqa: C901\n \"\"\"Run the CLI.\"\"\"\n dispatcher = get_dispatcher()\n retcode = 1\n\n try:\n # Register our own plugins\n global_args = dispatcher.pre_parse_args(sys.argv[1:])\n plugins.register()\n\n _run_dispatcher(dispatcher, global_args)\n retcode = 0\n except ArgumentParsingError as err:\n # TODO https://github.com/canonical/craft-cli/issues/78\n with contextlib.suppress(KeyError, IndexError):\n if (\n err.__context__ is not None\n and err.__context__.args[0] # pylint: disable=no-member\n not in dispatcher.commands\n ):\n run_legacy(err)\n print(err, file=sys.stderr) # to stderr, as argparse normally does\n emit.ended_ok()\n retcode = 1\n except ProvideHelpException as err:\n print(err, file=sys.stderr) # to stderr, as argparse normally does\n emit.ended_ok()\n retcode = 0\n except errors.LegacyFallback as err:\n run_legacy(err)\n except KeyboardInterrupt as err:\n _emit_error(craft_cli.errors.CraftError(\"Interrupted.\"), cause=err)\n retcode = 1\n except craft_store.errors.NoKeyringError as err:\n _emit_error(\n craft_cli.errors.CraftError(\n f\"craft-store error: {err}\",\n resolution=(\n \"Ensure the keyring is working or \"\n f\"{store.constants.ENVIRONMENT_STORE_CREDENTIALS} \"\n \"is correctly exported into the environment\"\n ),\n docs_url=\"https://snapcraft.io/docs/snapcraft-authentication\",\n )\n )\n retcode = 1\n except craft_store.errors.CraftStoreError as err:\n 
_emit_error(craft_cli.errors.CraftError(f\"craft-store error: {err}\"))\n retcode = 1\n except errors.LinterError as err:\n emit.error(craft_cli.errors.CraftError(f\"linter error: {err}\"))\n retcode = err.exit_code\n except errors.SnapcraftError as err:\n _emit_error(err)\n retcode = 1\n\n return retcode\n", "path": "snapcraft/cli.py"}], "after_files": [{"content": "# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-\n#\n# Copyright 2022-2023 Canonical Ltd.\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License version 3 as\n# published by the Free Software Foundation.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n\n\"\"\"Command-line application entry point.\"\"\"\n\nimport argparse\nimport contextlib\nimport logging\nimport os\nimport sys\nfrom typing import Any, Dict\n\nimport craft_cli\nimport craft_store\nfrom craft_cli import ArgumentParsingError, EmitterMode, ProvideHelpException, emit\nfrom craft_providers import ProviderError\n\nimport snapcraft\nimport snapcraft_legacy\nfrom snapcraft import __version__, errors, store, utils\nfrom snapcraft.parts import plugins\nfrom snapcraft_legacy.cli import legacy\n\nfrom . import commands\nfrom .legacy_cli import _LIB_NAMES, _ORIGINAL_LIB_NAME_LOG_LEVEL, run_legacy\n\nCOMMAND_GROUPS = [\n craft_cli.CommandGroup(\n \"Lifecycle\",\n [\n commands.CleanCommand,\n commands.PullCommand,\n commands.BuildCommand,\n commands.StageCommand,\n commands.PrimeCommand,\n commands.PackCommand,\n commands.RemoteBuildCommand,\n commands.SnapCommand, # hidden (legacy compatibility)\n commands.PluginsCommand,\n commands.ListPluginsCommand,\n commands.TryCommand,\n ],\n ),\n craft_cli.CommandGroup(\n \"Extensions\",\n [\n commands.ListExtensionsCommand,\n commands.ExtensionsCommand, # hidden (alias to list-extensions)\n commands.ExpandExtensionsCommand,\n ],\n ),\n craft_cli.CommandGroup(\n \"Store Account\",\n [\n commands.StoreLoginCommand,\n commands.StoreExportLoginCommand,\n commands.StoreLogoutCommand,\n commands.StoreWhoAmICommand,\n ],\n ),\n craft_cli.CommandGroup(\n \"Store Snap Names\",\n [\n commands.StoreRegisterCommand,\n commands.StoreNamesCommand,\n commands.StoreLegacyListRegisteredCommand,\n commands.StoreLegacyListCommand,\n commands.StoreLegacyMetricsCommand,\n commands.StoreLegacyUploadMetadataCommand,\n ],\n ),\n craft_cli.CommandGroup(\n \"Store Snap Release Management\",\n [\n commands.StoreReleaseCommand,\n commands.StoreCloseCommand,\n commands.StoreStatusCommand,\n commands.StoreUploadCommand,\n commands.StoreLegacyPushCommand, # hidden (legacy for upload)\n commands.StoreLegacyPromoteCommand,\n commands.StoreListRevisionsCommand,\n commands.StoreRevisionsCommand, # hidden (alias to list-revisions)\n ],\n ),\n craft_cli.CommandGroup(\n \"Store Snap Tracks\",\n [\n commands.StoreListTracksCommand,\n commands.StoreTracksCommand, # hidden (alias to list-tracks)\n commands.StoreLegacySetDefaultTrackCommand,\n ],\n ),\n craft_cli.CommandGroup(\n \"Store Key Management\",\n [\n commands.StoreLegacyCreateKeyCommand,\n commands.StoreLegacyRegisterKeyCommand,\n commands.StoreLegacySignBuildCommand,\n 
commands.StoreLegacyListKeysCommand,\n ],\n ),\n craft_cli.CommandGroup(\n \"Store Validation Sets\",\n [\n commands.StoreEditValidationSetsCommand,\n commands.StoreLegacyListValidationSetsCommand,\n commands.StoreLegacyValidateCommand,\n commands.StoreLegacyGatedCommand,\n ],\n ),\n craft_cli.CommandGroup(\n \"Other\",\n [\n commands.VersionCommand,\n commands.LintCommand,\n commands.InitCommand,\n ],\n ),\n]\n\nGLOBAL_ARGS = [\n craft_cli.GlobalArgument(\n \"version\", \"flag\", \"-V\", \"--version\", \"Show the application version and exit\"\n ),\n craft_cli.GlobalArgument(\"trace\", \"flag\", \"-t\", \"--trace\", argparse.SUPPRESS),\n]\n\n\ndef get_verbosity() -> EmitterMode:\n \"\"\"Return the verbosity level to use.\n\n if SNAPCRAFT_ENABLE_DEVELOPER_DEBUG is set, the\n default verbosity will be set to EmitterMode.DEBUG.\n\n If stdin is closed, the default verbosity will be\n set to EmitterMode.VERBOSE.\n \"\"\"\n verbosity = EmitterMode.BRIEF\n\n if not sys.stdin.isatty():\n verbosity = EmitterMode.VERBOSE\n\n with contextlib.suppress(ValueError):\n # Parse environment variable for backwards compatibility with launchpad\n if utils.strtobool(os.getenv(\"SNAPCRAFT_ENABLE_DEVELOPER_DEBUG\", \"n\").strip()):\n verbosity = EmitterMode.DEBUG\n\n # if defined, use environmental variable SNAPCRAFT_VERBOSITY_LEVEL\n verbosity_env = os.getenv(\"SNAPCRAFT_VERBOSITY_LEVEL\")\n if verbosity_env:\n try:\n verbosity = EmitterMode[verbosity_env.strip().upper()]\n except KeyError:\n values = utils.humanize_list(\n [e.name.lower() for e in EmitterMode], \"and\", sort=False\n )\n raise ArgumentParsingError(\n f\"cannot parse verbosity level {verbosity_env!r} from environment \"\n f\"variable SNAPCRAFT_VERBOSITY_LEVEL (valid values are {values})\"\n ) from KeyError\n\n return verbosity\n\n\ndef get_dispatcher() -> craft_cli.Dispatcher:\n \"\"\"Return an instance of Dispatcher.\n\n Run all the checks and setup required to ensure the Dispatcher can run.\n \"\"\"\n # Run the legacy implementation if inside a legacy managed environment.\n if os.getenv(\"SNAPCRAFT_BUILD_ENVIRONMENT\") == \"managed-host\":\n snapcraft.ProjectOptions = snapcraft_legacy.ProjectOptions # type: ignore\n legacy.legacy_run()\n\n # set lib loggers to debug level so that all messages are sent to Emitter\n for lib_name in _LIB_NAMES:\n logger = logging.getLogger(lib_name)\n _ORIGINAL_LIB_NAME_LOG_LEVEL[lib_name] = logger.level\n logger.setLevel(logging.DEBUG)\n\n if utils.is_managed_mode():\n log_filepath = utils.get_managed_environment_log_path()\n else:\n log_filepath = None\n\n emit.init(\n mode=get_verbosity(),\n appname=\"snapcraft\",\n greeting=f\"Starting Snapcraft {__version__}\",\n log_filepath=log_filepath,\n )\n\n return craft_cli.Dispatcher(\n \"snapcraft\",\n COMMAND_GROUPS,\n summary=\"Package, distribute, and update snaps for Linux and IoT\",\n extra_global_args=GLOBAL_ARGS,\n default_command=commands.PackCommand,\n )\n\n\ndef _run_dispatcher(\n dispatcher: craft_cli.Dispatcher, global_args: Dict[str, Any]\n) -> None:\n if global_args.get(\"version\"):\n emit.message(f\"snapcraft {__version__}\")\n else:\n if global_args.get(\"trace\"):\n emit.message(\n \"Options -t and --trace are deprecated, use --verbosity=debug instead.\"\n )\n emit.set_mode(EmitterMode.DEBUG)\n\n dispatcher.load_command(None)\n dispatcher.run()\n emit.ended_ok()\n\n\ndef _emit_error(error, cause=None):\n \"\"\"Emit the error in a centralized way so we can alter it consistently.\"\"\"\n # set the cause, if any\n if cause is not None:\n 
error.__cause__ = cause\n\n # Do not report the internal logpath if running inside instance\n if utils.is_managed_mode():\n error.logpath_report = False\n\n emit.error(error)\n\n\ndef run(): # noqa: C901\n \"\"\"Run the CLI.\"\"\"\n dispatcher = get_dispatcher()\n retcode = 1\n\n try:\n # Register our own plugins\n global_args = dispatcher.pre_parse_args(sys.argv[1:])\n plugins.register()\n\n _run_dispatcher(dispatcher, global_args)\n retcode = 0\n except ArgumentParsingError as err:\n # TODO https://github.com/canonical/craft-cli/issues/78\n with contextlib.suppress(KeyError, IndexError):\n if (\n err.__context__ is not None\n and err.__context__.args[0] # pylint: disable=no-member\n not in dispatcher.commands\n ):\n run_legacy(err)\n print(err, file=sys.stderr) # to stderr, as argparse normally does\n emit.ended_ok()\n retcode = 1\n except ProvideHelpException as err:\n print(err, file=sys.stderr) # to stderr, as argparse normally does\n emit.ended_ok()\n retcode = 0\n except errors.LegacyFallback as err:\n run_legacy(err)\n except KeyboardInterrupt as err:\n _emit_error(craft_cli.errors.CraftError(\"Interrupted.\"), cause=err)\n retcode = 1\n except craft_store.errors.NoKeyringError as err:\n _emit_error(\n craft_cli.errors.CraftError(\n f\"craft-store error: {err}\",\n resolution=(\n \"Ensure the keyring is working or \"\n f\"{store.constants.ENVIRONMENT_STORE_CREDENTIALS} \"\n \"is correctly exported into the environment\"\n ),\n docs_url=\"https://snapcraft.io/docs/snapcraft-authentication\",\n )\n )\n retcode = 1\n except craft_store.errors.CraftStoreError as err:\n _emit_error(craft_cli.errors.CraftError(f\"craft-store error: {err}\"))\n retcode = 1\n except ProviderError as err:\n _emit_error(craft_cli.errors.CraftError(f\"craft-providers error: {err}\"))\n retcode = 1\n except errors.LinterError as err:\n emit.error(craft_cli.errors.CraftError(f\"linter error: {err}\"))\n retcode = err.exit_code\n except errors.SnapcraftError as err:\n _emit_error(err)\n retcode = 1\n\n return retcode\n", "path": "snapcraft/cli.py"}]}
3,999
303
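The snapcraft row above ends with the CLI's `get_verbosity()` helper, which maps the `SNAPCRAFT_VERBOSITY_LEVEL` environment variable onto an `EmitterMode` member and lists the valid names when the value cannot be parsed. Below is a minimal, self-contained sketch of that lookup pattern; the `Verbosity` enum and the `APP_VERBOSITY_LEVEL` variable name are stand-ins for illustration, not the real craft-cli types.

```python
import os
from enum import Enum


class Verbosity(Enum):
    # Stand-in for craft_cli.EmitterMode; only the lookup pattern matters here.
    QUIET = 0
    BRIEF = 1
    VERBOSE = 2
    DEBUG = 3
    TRACE = 4


def verbosity_from_env(var_name: str = "APP_VERBOSITY_LEVEL") -> Verbosity:
    """Read a verbosity level from the environment, defaulting to BRIEF."""
    raw = os.getenv(var_name)
    if not raw:
        return Verbosity.BRIEF
    try:
        # Name lookup mirrors EmitterMode[verbosity_env.strip().upper()].
        return Verbosity[raw.strip().upper()]
    except KeyError:
        valid = ", ".join(member.name.lower() for member in Verbosity)
        raise ValueError(
            f"cannot parse verbosity level {raw!r} from {var_name} "
            f"(valid values are {valid})"
        ) from None


if __name__ == "__main__":
    os.environ["APP_VERBOSITY_LEVEL"] = "debug"
    print(verbosity_from_env())  # Verbosity.DEBUG
```

Looking the member up by name keeps the accepted values and the error message in one place, which is the same trade-off the snapcraft helper makes.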
gh_patches_debug_38940
rasdani/github-patches
git_diff
streamlink__streamlink-205
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- picarto updated streamlink no longer works Hey guys picarto no longer works because they said they updated the player so html5 can be default soon. when you run the program it says found matching plugin picarto for url https:// https://picarto.tv/picknamehere then the it says error: no stream on this URL: https://picarto.tv/picknamehere. thanks guys for the awesome program hopefully it gets solved soon! --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/streamlink/plugins/picarto.py` Content: ``` 1 import re 2 3 from streamlink.plugin import Plugin 4 from streamlink.plugin.api import http 5 from streamlink.stream import RTMPStream 6 7 API_CHANNEL_INFO = "https://picarto.tv/process/channel" 8 RTMP_URL = "rtmp://{}:1935/play/" 9 RTMP_PLAYPATH = "golive+{}?token={}" 10 11 _url_re = re.compile(r""" 12 https?://(\w+\.)?picarto\.tv/[^&?/] 13 """, re.VERBOSE) 14 15 _channel_casing_re = re.compile(r""" 16 <script>placeStreamChannel(Flash)?\('(?P<channel>[^']+)',[^,]+,[^,]+,'(?P<visibility>[^']+)'(,[^,]+)?\);</script> 17 """, re.VERBOSE) 18 19 20 class Picarto(Plugin): 21 @classmethod 22 def can_handle_url(self, url): 23 return _url_re.match(url) 24 25 def _get_streams(self): 26 page_res = http.get(self.url) 27 match = _channel_casing_re.search(page_res.text) 28 29 if not match: 30 return {} 31 32 channel = match.group("channel") 33 visibility = match.group("visibility") 34 35 channel_server_res = http.post(API_CHANNEL_INFO, data={ 36 "loadbalancinginfo": channel 37 }) 38 39 streams = {} 40 streams["live"] = RTMPStream(self.session, { 41 "rtmp": RTMP_URL.format(channel_server_res.text), 42 "playpath": RTMP_PLAYPATH.format(channel, visibility), 43 "pageUrl": self.url, 44 "live": True 45 }) 46 return streams 47 48 __plugin__ = Picarto 49 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/streamlink/plugins/picarto.py b/src/streamlink/plugins/picarto.py --- a/src/streamlink/plugins/picarto.py +++ b/src/streamlink/plugins/picarto.py @@ -2,47 +2,69 @@ from streamlink.plugin import Plugin from streamlink.plugin.api import http +from streamlink.stream import HLSStream from streamlink.stream import RTMPStream API_CHANNEL_INFO = "https://picarto.tv/process/channel" RTMP_URL = "rtmp://{}:1935/play/" RTMP_PLAYPATH = "golive+{}?token={}" +HLS_URL = "https://{}/hls/{}/index.m3u8?token={}" _url_re = re.compile(r""" https?://(\w+\.)?picarto\.tv/[^&?/] """, re.VERBOSE) +# placeStream(channel, playerID, product, offlineImage, online, token, tech) _channel_casing_re = re.compile(r""" - <script>placeStreamChannel(Flash)?\('(?P<channel>[^']+)',[^,]+,[^,]+,'(?P<visibility>[^']+)'(,[^,]+)?\);</script> + <script>\s*placeStream\s*\((.*?)\);?\s*</script> """, re.VERBOSE) class Picarto(Plugin): @classmethod - def can_handle_url(self, url): - return _url_re.match(url) + def can_handle_url(cls, url): + return _url_re.match(url) is not None + + @staticmethod + def _get_stream_arguments(page): + match = _channel_casing_re.search(page.text) + if not match: + raise ValueError + + # transform the arguments + channel, player_id, product, offline_image, online, visibility, is_flash = \ + map(lambda a: a.strip("' \""), match.group(1).split(",")) + player_id, product, offline_image, online, is_flash = \ + map(lambda a: bool(int(a)), [player_id, product, offline_image, online, is_flash]) + + return channel, player_id, product, offline_image, online, visibility, is_flash def _get_streams(self): - page_res = http.get(self.url) - match = _channel_casing_re.search(page_res.text) + page = http.get(self.url) - if not match: - return {} + try: + channel, _, _, _, online, visibility, is_flash = self._get_stream_arguments(page) + except ValueError: + return - channel = match.group("channel") - visibility = match.group("visibility") + if not online: + self.logger.error("This stream is currently offline") + return channel_server_res = http.post(API_CHANNEL_INFO, data={ "loadbalancinginfo": channel }) - streams = {} - streams["live"] = RTMPStream(self.session, { - "rtmp": RTMP_URL.format(channel_server_res.text), - "playpath": RTMP_PLAYPATH.format(channel, visibility), - "pageUrl": self.url, - "live": True - }) - return streams + if is_flash: + return {"live": RTMPStream(self.session, { + "rtmp": RTMP_URL.format(channel_server_res.text), + "playpath": RTMP_PLAYPATH.format(channel, visibility), + "pageUrl": self.url, + "live": True + })} + else: + return HLSStream.parse_variant_playlist(self.session, + HLS_URL.format(channel_server_res.text, channel, visibility), + verify=False) __plugin__ = Picarto
{"golden_diff": "diff --git a/src/streamlink/plugins/picarto.py b/src/streamlink/plugins/picarto.py\n--- a/src/streamlink/plugins/picarto.py\n+++ b/src/streamlink/plugins/picarto.py\n@@ -2,47 +2,69 @@\n \n from streamlink.plugin import Plugin\n from streamlink.plugin.api import http\n+from streamlink.stream import HLSStream\n from streamlink.stream import RTMPStream\n \n API_CHANNEL_INFO = \"https://picarto.tv/process/channel\"\n RTMP_URL = \"rtmp://{}:1935/play/\"\n RTMP_PLAYPATH = \"golive+{}?token={}\"\n+HLS_URL = \"https://{}/hls/{}/index.m3u8?token={}\"\n \n _url_re = re.compile(r\"\"\"\n https?://(\\w+\\.)?picarto\\.tv/[^&?/]\n \"\"\", re.VERBOSE)\n \n+# placeStream(channel, playerID, product, offlineImage, online, token, tech)\n _channel_casing_re = re.compile(r\"\"\"\n- <script>placeStreamChannel(Flash)?\\('(?P<channel>[^']+)',[^,]+,[^,]+,'(?P<visibility>[^']+)'(,[^,]+)?\\);</script>\n+ <script>\\s*placeStream\\s*\\((.*?)\\);?\\s*</script>\n \"\"\", re.VERBOSE)\n \n \n class Picarto(Plugin):\n @classmethod\n- def can_handle_url(self, url):\n- return _url_re.match(url)\n+ def can_handle_url(cls, url):\n+ return _url_re.match(url) is not None\n+\n+ @staticmethod\n+ def _get_stream_arguments(page):\n+ match = _channel_casing_re.search(page.text)\n+ if not match:\n+ raise ValueError\n+\n+ # transform the arguments\n+ channel, player_id, product, offline_image, online, visibility, is_flash = \\\n+ map(lambda a: a.strip(\"' \\\"\"), match.group(1).split(\",\"))\n+ player_id, product, offline_image, online, is_flash = \\\n+ map(lambda a: bool(int(a)), [player_id, product, offline_image, online, is_flash])\n+\n+ return channel, player_id, product, offline_image, online, visibility, is_flash\n \n def _get_streams(self):\n- page_res = http.get(self.url)\n- match = _channel_casing_re.search(page_res.text)\n+ page = http.get(self.url)\n \n- if not match:\n- return {}\n+ try:\n+ channel, _, _, _, online, visibility, is_flash = self._get_stream_arguments(page)\n+ except ValueError:\n+ return\n \n- channel = match.group(\"channel\")\n- visibility = match.group(\"visibility\")\n+ if not online:\n+ self.logger.error(\"This stream is currently offline\")\n+ return\n \n channel_server_res = http.post(API_CHANNEL_INFO, data={\n \"loadbalancinginfo\": channel\n })\n \n- streams = {}\n- streams[\"live\"] = RTMPStream(self.session, {\n- \"rtmp\": RTMP_URL.format(channel_server_res.text),\n- \"playpath\": RTMP_PLAYPATH.format(channel, visibility),\n- \"pageUrl\": self.url,\n- \"live\": True\n- })\n- return streams\n+ if is_flash:\n+ return {\"live\": RTMPStream(self.session, {\n+ \"rtmp\": RTMP_URL.format(channel_server_res.text),\n+ \"playpath\": RTMP_PLAYPATH.format(channel, visibility),\n+ \"pageUrl\": self.url,\n+ \"live\": True\n+ })}\n+ else:\n+ return HLSStream.parse_variant_playlist(self.session,\n+ HLS_URL.format(channel_server_res.text, channel, visibility),\n+ verify=False)\n \n __plugin__ = Picarto\n", "issue": "picarto updated streamlink no longer works\nHey guys picarto no longer works because they said they updated the player so html5 can be default soon.\r\nwhen you run the program it says found matching plugin picarto for url https:// https://picarto.tv/picknamehere\r\nthen the it says error: no stream on this URL: https://picarto.tv/picknamehere.\r\nthanks guys for the awesome program hopefully it gets solved soon!\n", "before_files": [{"content": "import re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http\nfrom streamlink.stream import 
RTMPStream\n\nAPI_CHANNEL_INFO = \"https://picarto.tv/process/channel\"\nRTMP_URL = \"rtmp://{}:1935/play/\"\nRTMP_PLAYPATH = \"golive+{}?token={}\"\n\n_url_re = re.compile(r\"\"\"\n https?://(\\w+\\.)?picarto\\.tv/[^&?/]\n\"\"\", re.VERBOSE)\n\n_channel_casing_re = re.compile(r\"\"\"\n <script>placeStreamChannel(Flash)?\\('(?P<channel>[^']+)',[^,]+,[^,]+,'(?P<visibility>[^']+)'(,[^,]+)?\\);</script>\n\"\"\", re.VERBOSE)\n\n\nclass Picarto(Plugin):\n @classmethod\n def can_handle_url(self, url):\n return _url_re.match(url)\n\n def _get_streams(self):\n page_res = http.get(self.url)\n match = _channel_casing_re.search(page_res.text)\n\n if not match:\n return {}\n\n channel = match.group(\"channel\")\n visibility = match.group(\"visibility\")\n\n channel_server_res = http.post(API_CHANNEL_INFO, data={\n \"loadbalancinginfo\": channel\n })\n\n streams = {}\n streams[\"live\"] = RTMPStream(self.session, {\n \"rtmp\": RTMP_URL.format(channel_server_res.text),\n \"playpath\": RTMP_PLAYPATH.format(channel, visibility),\n \"pageUrl\": self.url,\n \"live\": True\n })\n return streams\n\n__plugin__ = Picarto\n", "path": "src/streamlink/plugins/picarto.py"}], "after_files": [{"content": "import re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http\nfrom streamlink.stream import HLSStream\nfrom streamlink.stream import RTMPStream\n\nAPI_CHANNEL_INFO = \"https://picarto.tv/process/channel\"\nRTMP_URL = \"rtmp://{}:1935/play/\"\nRTMP_PLAYPATH = \"golive+{}?token={}\"\nHLS_URL = \"https://{}/hls/{}/index.m3u8?token={}\"\n\n_url_re = re.compile(r\"\"\"\n https?://(\\w+\\.)?picarto\\.tv/[^&?/]\n\"\"\", re.VERBOSE)\n\n# placeStream(channel, playerID, product, offlineImage, online, token, tech)\n_channel_casing_re = re.compile(r\"\"\"\n <script>\\s*placeStream\\s*\\((.*?)\\);?\\s*</script>\n\"\"\", re.VERBOSE)\n\n\nclass Picarto(Plugin):\n @classmethod\n def can_handle_url(cls, url):\n return _url_re.match(url) is not None\n\n @staticmethod\n def _get_stream_arguments(page):\n match = _channel_casing_re.search(page.text)\n if not match:\n raise ValueError\n\n # transform the arguments\n channel, player_id, product, offline_image, online, visibility, is_flash = \\\n map(lambda a: a.strip(\"' \\\"\"), match.group(1).split(\",\"))\n player_id, product, offline_image, online, is_flash = \\\n map(lambda a: bool(int(a)), [player_id, product, offline_image, online, is_flash])\n\n return channel, player_id, product, offline_image, online, visibility, is_flash\n\n def _get_streams(self):\n page = http.get(self.url)\n\n try:\n channel, _, _, _, online, visibility, is_flash = self._get_stream_arguments(page)\n except ValueError:\n return\n\n if not online:\n self.logger.error(\"This stream is currently offline\")\n return\n\n channel_server_res = http.post(API_CHANNEL_INFO, data={\n \"loadbalancinginfo\": channel\n })\n\n if is_flash:\n return {\"live\": RTMPStream(self.session, {\n \"rtmp\": RTMP_URL.format(channel_server_res.text),\n \"playpath\": RTMP_PLAYPATH.format(channel, visibility),\n \"pageUrl\": self.url,\n \"live\": True\n })}\n else:\n return HLSStream.parse_variant_playlist(self.session,\n HLS_URL.format(channel_server_res.text, channel, visibility),\n verify=False)\n\n__plugin__ = Picarto\n", "path": "src/streamlink/plugins/picarto.py"}]}
796
826
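The golden diff in the streamlink row above replaces the old `placeStreamChannel(...)` scrape with a regex over the new `placeStream(channel, playerID, product, offlineImage, online, token, tech)` call, then branches between RTMP and HLS. The sketch below shows only that parsing step on an invented page fragment; the markup, channel name, and token value are made up for illustration and may not match real Picarto pages.

```python
import re

# Same capture as the patched plugin: the placeStream(...) argument list.
place_stream_re = re.compile(r"<script>\s*placeStream\s*\((.*?)\);?\s*</script>")

# Hypothetical page fragment -- the real Picarto markup and values may differ.
sample_page = "<script>placeStream('somechannel',1,1,0,1,'sometoken',0);</script>"


def parse_place_stream(page_text):
    match = place_stream_re.search(page_text)
    if not match:
        raise ValueError("placeStream() call not found")
    # Strip quotes/whitespace, then coerce the numeric flags to booleans,
    # mirroring _get_stream_arguments() in the patched plugin.
    channel, player_id, product, offline_image, online, token, tech = [
        arg.strip("' \"") for arg in match.group(1).split(",")
    ]
    online = bool(int(online))
    is_flash = bool(int(tech))
    return channel, online, token, is_flash


print(parse_place_stream(sample_page))
# ('somechannel', True, 'sometoken', False) -> the HLS branch would be taken
```

With the final `tech` flag equal to 0 the patched plugin would build an HLS variant playlist; a value of 1 would keep the RTMP path.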
gh_patches_debug_5938
rasdani/github-patches
git_diff
cupy__cupy-4289
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Enhancement: enable CUB backend for ReductionKernel Currently the CUB reduction is disabled for `ReductionKernel` (by the last `False` argument): https://github.com/cupy/cupy/blob/c1bf1aab6eb79a1cfcd5bb374460082d8eb2f3ca/cupy/core/_reduction.pyx#L724-L727 But we should revisit this at some point. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `cupy/core/_fusion_op.py` Content: ``` 1 import string 2 3 import numpy 4 5 from cupy.core._fusion_variable import _TraceVariable 6 from cupy.core._fusion_variable import _TraceArray 7 from cupy.core._fusion_variable import _VariableSet 8 from cupy.core import _fusion_thread_local 9 from cupy.core import _fusion_emit_code 10 from cupy.core import _kernel 11 from cupy.core import _reduction 12 13 14 _dtype_to_ctype = _fusion_emit_code._dtype_to_ctype 15 16 17 class _UfuncRoutine: 18 """A device function for single elementwise operations. 19 """ 20 21 def __init__( 22 self, name, ufunc, routine_code, in_params, out_params, 23 compute_dtypes): 24 assert isinstance(name, str) 25 assert isinstance(ufunc, _kernel.ufunc) 26 assert isinstance(routine_code, str) 27 assert isinstance(compute_dtypes, tuple) 28 assert all(isinstance(t, numpy.dtype) for t in compute_dtypes) 29 assert isinstance(in_params, list) 30 assert all(isinstance(p, _TraceVariable) for p in in_params) 31 assert isinstance(out_params, list) 32 assert all(isinstance(p, _TraceArray) for p in out_params) 33 34 self.name = name 35 self.in_params = in_params 36 self.out_params = out_params 37 self.preamble = ufunc._preamble 38 self.routine_code = routine_code 39 self.compute_dtypes = compute_dtypes 40 41 def emit_code(self): 42 """Returns a CUDA device function code. 43 44 Returns a string like: 45 ``` 46 __device__ void cupy_add_0(int &in0_, float &in1_, double &out0_) { 47 typedef double in0_type; 48 typedef double in1_type; 49 typedef double out0_type; 50 double in0 = (double) in0_; 51 double in1 = (double) in1_; 52 double out0 = (double) out0_; 53 out0 = in0 + in1; 54 out0_ = out0; 55 } 56 ``` 57 """ 58 nin = len(self.in_params) 59 dtypes = self.compute_dtypes 60 assert len(self.in_params) == len(self.compute_dtypes[:nin]) 61 in_params = [ 62 (_dtype_to_ctype[p.dtype], _dtype_to_ctype[t], 'in{}'.format(i)) 63 for i, (p, t) in enumerate(zip(self.in_params, dtypes[:nin])) 64 ] 65 out_params = [ 66 (_dtype_to_ctype[p.dtype], _dtype_to_ctype[t], 'out{}'.format(i)) 67 for i, (p, t) in enumerate(zip(self.out_params, dtypes[nin:])) 68 ] 69 params = in_params + out_params 70 71 params_code = ', '.join(['{} &{}_'.format(t, s) for t, _, s in params]) 72 typedef = ['typedef {} {}_type;'.format(t, s) for _, t, s in params] 73 read = ['{} {} = ({}) {}_;'.format(t, s, t, s) for _, t, s in params] 74 write = ['{}_ = {};'.format(s, s, s) for _, _, s in out_params] 75 76 return _fusion_emit_code._CodeBlock( 77 '__device__ void {}({})'.format(self.name, params_code), 78 typedef + read + [self.routine_code + ';'] + write) 79 80 def emit_call_code(self): 81 params = self.in_params + self.out_params 82 return '{op_name}({params});'.format( 83 op_name=self.name, 84 params=', '.join([var.lvar_name for var in params])) 85 86 87 class _ElementwiseTraceOp: 88 """Ufunc or elementwise kernel with types. 
89 """ 90 91 def __init__(self, ufunc_routines, in_params, out_params, ashape): 92 # The `in_params` and `out_params` should be already broadcasted to 93 # `ashape`, but they don't guarantee to be exactly same as 94 # `param.ashape`. 95 96 _fusion_thread_local.check_not_runtime() 97 assert isinstance(ufunc_routines, list) 98 assert all(isinstance(r, _UfuncRoutine) for r in ufunc_routines) 99 assert isinstance(ashape, tuple) 100 101 self.ops = ufunc_routines 102 self.in_params = _VariableSet(*in_params) 103 self.out_params = _VariableSet(*out_params) 104 self.ashape = ashape 105 106 @property 107 def params(self): 108 """Returns the set of all variable the loop uses. 109 """ 110 res = _VariableSet() 111 for op in self.ops: 112 res += _VariableSet(*op.in_params) 113 res += _VariableSet(*op.out_params) 114 return res 115 116 @staticmethod 117 def _emit_declaration(params, in_params): 118 """Returns a tuple of size 2. 119 120 1. CUDA code: declaring local variables. 121 2. The set of arrays which require indexer. 122 """ 123 _fusion_thread_local.check_not_runtime() 124 125 indexed_arrays = _VariableSet() 126 code = [] 127 for var in params: 128 if var in in_params: 129 if isinstance(var, _TraceArray): 130 indexed_arrays.add(var) 131 f = '${type} ${lvar} = ${var}[${indexer}.get()];' 132 else: 133 f = '${type} ${lvar} = ${var};' 134 else: 135 f = '${type} ${lvar};' 136 code.append(var.format(f)) 137 138 return code, indexed_arrays 139 140 @staticmethod 141 def _emit_after_operation(out_params): 142 """Returns a tuple of size 2. 143 1. CUDA code: writing the results of operations back to global memory. 144 2. The set of arrays which require indexer. 145 """ 146 147 _fusion_thread_local.check_not_runtime() 148 149 indexed_arrays = _VariableSet() 150 codes = [] 151 for var in out_params: 152 if isinstance(var, _TraceArray): 153 indexed_arrays.add(var) 154 f = '${var}[${indexer}.get()] = ${lvar};' 155 else: 156 f = '${var} = ${lvar};' 157 codes.append(var.format(f)) 158 159 return codes, indexed_arrays 160 161 @staticmethod 162 def _emit_set_index(indexed_params, tid): 163 """Returns a CUDA code: setting a raw index to indexers. 164 """ 165 _fusion_thread_local.check_not_runtime() 166 assert isinstance(indexed_params, _VariableSet) 167 168 return [ 169 p.format('${indexer}.set(${tid});', tid=tid) 170 for p in indexed_params 171 ] 172 173 def emit_code(self): 174 _fusion_thread_local.check_not_runtime() 175 176 declaration, s1 = self._emit_declaration(self.params, self.in_params) 177 operation = [op.emit_call_code() for op in self.ops] 178 after_operation, s2 = self._emit_after_operation(self.out_params) 179 index_name = 'i' 180 indexed_array = s1 + s2 181 indexer_name = next(iter(indexed_array)).indexer_name 182 indexer_setup = self._emit_set_index(indexed_array, index_name) 183 184 return _fusion_emit_code._CodeBlock( 185 'CUPY_FOR({}, {}.size())'.format(index_name, indexer_name), 186 indexer_setup + declaration + operation + after_operation) 187 188 def emit_preamble_codes(self): 189 return [subm.preamble for subm in self.ops if subm.preamble != ''] 190 191 def emit_submodule_codes(self): 192 return [str(subm.emit_code()) for subm in self.ops] 193 194 195 class _ReductionTraceOp: 196 def __init__(self, name, reduce_func, expr, in_param, out_param, axis): 197 """Reduction operation. 
198 """ 199 _fusion_thread_local.check_not_runtime() 200 assert isinstance(name, str) 201 assert isinstance(reduce_func, _reduction._SimpleReductionKernel) 202 assert isinstance(in_param, _TraceArray) 203 assert isinstance(out_param, _TraceArray) 204 assert isinstance(axis, tuple) 205 assert all([0 <= x < in_param.ndim for x in axis]) 206 207 self.name = name 208 self.preamble = reduce_func._preamble 209 self.in_params = _VariableSet(in_param) 210 self.out_params = _VariableSet(out_param) 211 self.block_stride_name = 'block_stride_' + name 212 self.axis = axis 213 214 if reduce_func.identity is None: 215 self.identity = '' 216 else: 217 self.identity = str(reduce_func.identity) 218 219 _, self.expr, self.postmap_cast_code, self.reduce_ctype = expr 220 if self.reduce_ctype is None: 221 out_param, = self.out_params 222 self.reduce_ctype = _dtype_to_ctype[out_param.dtype] 223 224 self.premap_op = None 225 self.postmap_op = None 226 227 @property 228 def params(self): 229 return self.in_params + self.out_params 230 231 def emit_code(self): 232 _fusion_thread_local.check_not_runtime() 233 assert len(self.in_params) == 1 234 assert len(self.out_params) == 1 235 in_param = list(self.in_params)[0] 236 out_param = list(self.out_params)[0] 237 params = ', '.join([ 238 in_param.var_name, 239 out_param.var_name, 240 in_param.indexer_name, 241 out_param.indexer_name, 242 ]) 243 return '{}({}, {});'.format( 244 self.name, params, self.block_stride_name) 245 246 def emit_preamble_codes(self): 247 preamble = self.preamble 248 return [preamble] if preamble != '' else [] 249 250 def emit_submodule_codes(self): 251 """Returns a CUDA device function code. 252 253 The emitted code assumes that ``block_stride`` and `blockDim.x` is a 254 power of 2. 255 """ 256 257 in_param, = self.in_params 258 out_param, = self.out_params 259 op_name = '{}_op'.format(self.name) 260 postmap_name = '{}_postmap'.format(self.name) 261 262 code = string.Template(''' 263 #define ${op_name}(a, b) (${reduce_expr}) 264 #define ${postmap_name}(a, out0) (${postmap_cast}) 265 266 template <typename InType, typename OutType, typename InIndexerType, typename OutIndexerType> 267 __device__ void ${name}( 268 InType in_arr, OutType out_arr, 269 InIndexerType in_ind, OutIndexerType out_ind, int block_stride) { 270 typedef ${in_type} type_in0_raw; 271 typedef ${out_type} type_out0_raw; 272 typedef ${reduce_ctype} _type_reduce; 273 extern __shared__ char _sdata_raw[]; 274 _type_reduce *sdata = reinterpret_cast<_type_reduce*>(_sdata_raw); 275 unsigned int tid = threadIdx.x; 276 int _J = tid >> __popc(block_stride - 1); 277 ptrdiff_t _j = (ptrdiff_t)_J * out_ind.size(); 278 int J_stride = blockDim.x >> __popc(block_stride - 1); 279 ptrdiff_t j_stride = (ptrdiff_t)J_stride * out_ind.size(); 280 281 for (ptrdiff_t _i = (ptrdiff_t)blockIdx.x * block_stride; _i < out_ind.size(); _i += (ptrdiff_t)gridDim.x * block_stride) { 282 _type_reduce s = _type_reduce(${identity}); 283 ptrdiff_t i = _i + (tid & (block_stride - 1)); 284 for (ptrdiff_t j = i + _j; j < in_ind.size(); j += j_stride) { 285 in_ind.set(j); 286 s = ${op_name}(s, static_cast<_type_reduce>(in_arr[in_ind.get()])); 287 } 288 sdata[tid] = s; 289 __syncthreads(); 290 for (unsigned int block = blockDim.x / 2; block >= block_stride; block >>= 1) { 291 if (tid < block) { 292 sdata[tid] = ${op_name}(sdata[tid], sdata[tid + block]); 293 } 294 __syncthreads(); 295 } 296 if (tid < block_stride) { 297 s = sdata[tid]; 298 } 299 if (tid < block_stride && i < out_ind.size()) { 300 out_ind.set(i); 301 
${postmap_name}(s, out_arr[out_ind.get()]); 302 } 303 __syncthreads(); 304 } 305 }''' # NOQA 306 ).substitute( 307 name=self.name, 308 op_name=op_name, 309 postmap_name=postmap_name, 310 in_type=_dtype_to_ctype[in_param.dtype], 311 out_type=_dtype_to_ctype[out_param.dtype], 312 reduce_ctype=self.reduce_ctype, 313 reduce_expr=self.expr, 314 identity=self.identity, 315 postmap_cast=self.postmap_cast_code) 316 317 return [code] 318 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/cupy/core/_fusion_op.py b/cupy/core/_fusion_op.py --- a/cupy/core/_fusion_op.py +++ b/cupy/core/_fusion_op.py @@ -205,7 +205,7 @@ assert all([0 <= x < in_param.ndim for x in axis]) self.name = name - self.preamble = reduce_func._preamble + self.preamble = reduce_func.preamble self.in_params = _VariableSet(in_param) self.out_params = _VariableSet(out_param) self.block_stride_name = 'block_stride_' + name
{"golden_diff": "diff --git a/cupy/core/_fusion_op.py b/cupy/core/_fusion_op.py\n--- a/cupy/core/_fusion_op.py\n+++ b/cupy/core/_fusion_op.py\n@@ -205,7 +205,7 @@\n assert all([0 <= x < in_param.ndim for x in axis])\n \n self.name = name\n- self.preamble = reduce_func._preamble\n+ self.preamble = reduce_func.preamble\n self.in_params = _VariableSet(in_param)\n self.out_params = _VariableSet(out_param)\n self.block_stride_name = 'block_stride_' + name\n", "issue": "Enhancement: enable CUB backend for ReductionKernel\nCurrently the CUB reduction is disabled for `ReductionKernel` (by the last `False` argument):\r\nhttps://github.com/cupy/cupy/blob/c1bf1aab6eb79a1cfcd5bb374460082d8eb2f3ca/cupy/core/_reduction.pyx#L724-L727\r\nBut we should revisit this at some point.\n", "before_files": [{"content": "import string\n\nimport numpy\n\nfrom cupy.core._fusion_variable import _TraceVariable\nfrom cupy.core._fusion_variable import _TraceArray\nfrom cupy.core._fusion_variable import _VariableSet\nfrom cupy.core import _fusion_thread_local\nfrom cupy.core import _fusion_emit_code\nfrom cupy.core import _kernel\nfrom cupy.core import _reduction\n\n\n_dtype_to_ctype = _fusion_emit_code._dtype_to_ctype\n\n\nclass _UfuncRoutine:\n \"\"\"A device function for single elementwise operations.\n \"\"\"\n\n def __init__(\n self, name, ufunc, routine_code, in_params, out_params,\n compute_dtypes):\n assert isinstance(name, str)\n assert isinstance(ufunc, _kernel.ufunc)\n assert isinstance(routine_code, str)\n assert isinstance(compute_dtypes, tuple)\n assert all(isinstance(t, numpy.dtype) for t in compute_dtypes)\n assert isinstance(in_params, list)\n assert all(isinstance(p, _TraceVariable) for p in in_params)\n assert isinstance(out_params, list)\n assert all(isinstance(p, _TraceArray) for p in out_params)\n\n self.name = name\n self.in_params = in_params\n self.out_params = out_params\n self.preamble = ufunc._preamble\n self.routine_code = routine_code\n self.compute_dtypes = compute_dtypes\n\n def emit_code(self):\n \"\"\"Returns a CUDA device function code.\n\n Returns a string like:\n ```\n __device__ void cupy_add_0(int &in0_, float &in1_, double &out0_) {\n typedef double in0_type;\n typedef double in1_type;\n typedef double out0_type;\n double in0 = (double) in0_;\n double in1 = (double) in1_;\n double out0 = (double) out0_;\n out0 = in0 + in1;\n out0_ = out0;\n }\n ```\n \"\"\"\n nin = len(self.in_params)\n dtypes = self.compute_dtypes\n assert len(self.in_params) == len(self.compute_dtypes[:nin])\n in_params = [\n (_dtype_to_ctype[p.dtype], _dtype_to_ctype[t], 'in{}'.format(i))\n for i, (p, t) in enumerate(zip(self.in_params, dtypes[:nin]))\n ]\n out_params = [\n (_dtype_to_ctype[p.dtype], _dtype_to_ctype[t], 'out{}'.format(i))\n for i, (p, t) in enumerate(zip(self.out_params, dtypes[nin:]))\n ]\n params = in_params + out_params\n\n params_code = ', '.join(['{} &{}_'.format(t, s) for t, _, s in params])\n typedef = ['typedef {} {}_type;'.format(t, s) for _, t, s in params]\n read = ['{} {} = ({}) {}_;'.format(t, s, t, s) for _, t, s in params]\n write = ['{}_ = {};'.format(s, s, s) for _, _, s in out_params]\n\n return _fusion_emit_code._CodeBlock(\n '__device__ void {}({})'.format(self.name, params_code),\n typedef + read + [self.routine_code + ';'] + write)\n\n def emit_call_code(self):\n params = self.in_params + self.out_params\n return '{op_name}({params});'.format(\n op_name=self.name,\n params=', '.join([var.lvar_name for var in params]))\n\n\nclass _ElementwiseTraceOp:\n \"\"\"Ufunc or 
elementwise kernel with types.\n \"\"\"\n\n def __init__(self, ufunc_routines, in_params, out_params, ashape):\n # The `in_params` and `out_params` should be already broadcasted to\n # `ashape`, but they don't guarantee to be exactly same as\n # `param.ashape`.\n\n _fusion_thread_local.check_not_runtime()\n assert isinstance(ufunc_routines, list)\n assert all(isinstance(r, _UfuncRoutine) for r in ufunc_routines)\n assert isinstance(ashape, tuple)\n\n self.ops = ufunc_routines\n self.in_params = _VariableSet(*in_params)\n self.out_params = _VariableSet(*out_params)\n self.ashape = ashape\n\n @property\n def params(self):\n \"\"\"Returns the set of all variable the loop uses.\n \"\"\"\n res = _VariableSet()\n for op in self.ops:\n res += _VariableSet(*op.in_params)\n res += _VariableSet(*op.out_params)\n return res\n\n @staticmethod\n def _emit_declaration(params, in_params):\n \"\"\"Returns a tuple of size 2.\n\n 1. CUDA code: declaring local variables.\n 2. The set of arrays which require indexer.\n \"\"\"\n _fusion_thread_local.check_not_runtime()\n\n indexed_arrays = _VariableSet()\n code = []\n for var in params:\n if var in in_params:\n if isinstance(var, _TraceArray):\n indexed_arrays.add(var)\n f = '${type} ${lvar} = ${var}[${indexer}.get()];'\n else:\n f = '${type} ${lvar} = ${var};'\n else:\n f = '${type} ${lvar};'\n code.append(var.format(f))\n\n return code, indexed_arrays\n\n @staticmethod\n def _emit_after_operation(out_params):\n \"\"\"Returns a tuple of size 2.\n 1. CUDA code: writing the results of operations back to global memory.\n 2. The set of arrays which require indexer.\n \"\"\"\n\n _fusion_thread_local.check_not_runtime()\n\n indexed_arrays = _VariableSet()\n codes = []\n for var in out_params:\n if isinstance(var, _TraceArray):\n indexed_arrays.add(var)\n f = '${var}[${indexer}.get()] = ${lvar};'\n else:\n f = '${var} = ${lvar};'\n codes.append(var.format(f))\n\n return codes, indexed_arrays\n\n @staticmethod\n def _emit_set_index(indexed_params, tid):\n \"\"\"Returns a CUDA code: setting a raw index to indexers.\n \"\"\"\n _fusion_thread_local.check_not_runtime()\n assert isinstance(indexed_params, _VariableSet)\n\n return [\n p.format('${indexer}.set(${tid});', tid=tid)\n for p in indexed_params\n ]\n\n def emit_code(self):\n _fusion_thread_local.check_not_runtime()\n\n declaration, s1 = self._emit_declaration(self.params, self.in_params)\n operation = [op.emit_call_code() for op in self.ops]\n after_operation, s2 = self._emit_after_operation(self.out_params)\n index_name = 'i'\n indexed_array = s1 + s2\n indexer_name = next(iter(indexed_array)).indexer_name\n indexer_setup = self._emit_set_index(indexed_array, index_name)\n\n return _fusion_emit_code._CodeBlock(\n 'CUPY_FOR({}, {}.size())'.format(index_name, indexer_name),\n indexer_setup + declaration + operation + after_operation)\n\n def emit_preamble_codes(self):\n return [subm.preamble for subm in self.ops if subm.preamble != '']\n\n def emit_submodule_codes(self):\n return [str(subm.emit_code()) for subm in self.ops]\n\n\nclass _ReductionTraceOp:\n def __init__(self, name, reduce_func, expr, in_param, out_param, axis):\n \"\"\"Reduction operation.\n \"\"\"\n _fusion_thread_local.check_not_runtime()\n assert isinstance(name, str)\n assert isinstance(reduce_func, _reduction._SimpleReductionKernel)\n assert isinstance(in_param, _TraceArray)\n assert isinstance(out_param, _TraceArray)\n assert isinstance(axis, tuple)\n assert all([0 <= x < in_param.ndim for x in axis])\n\n self.name = name\n self.preamble = 
reduce_func._preamble\n self.in_params = _VariableSet(in_param)\n self.out_params = _VariableSet(out_param)\n self.block_stride_name = 'block_stride_' + name\n self.axis = axis\n\n if reduce_func.identity is None:\n self.identity = ''\n else:\n self.identity = str(reduce_func.identity)\n\n _, self.expr, self.postmap_cast_code, self.reduce_ctype = expr\n if self.reduce_ctype is None:\n out_param, = self.out_params\n self.reduce_ctype = _dtype_to_ctype[out_param.dtype]\n\n self.premap_op = None\n self.postmap_op = None\n\n @property\n def params(self):\n return self.in_params + self.out_params\n\n def emit_code(self):\n _fusion_thread_local.check_not_runtime()\n assert len(self.in_params) == 1\n assert len(self.out_params) == 1\n in_param = list(self.in_params)[0]\n out_param = list(self.out_params)[0]\n params = ', '.join([\n in_param.var_name,\n out_param.var_name,\n in_param.indexer_name,\n out_param.indexer_name,\n ])\n return '{}({}, {});'.format(\n self.name, params, self.block_stride_name)\n\n def emit_preamble_codes(self):\n preamble = self.preamble\n return [preamble] if preamble != '' else []\n\n def emit_submodule_codes(self):\n \"\"\"Returns a CUDA device function code.\n\n The emitted code assumes that ``block_stride`` and `blockDim.x` is a\n power of 2.\n \"\"\"\n\n in_param, = self.in_params\n out_param, = self.out_params\n op_name = '{}_op'.format(self.name)\n postmap_name = '{}_postmap'.format(self.name)\n\n code = string.Template('''\n#define ${op_name}(a, b) (${reduce_expr})\n#define ${postmap_name}(a, out0) (${postmap_cast})\n\ntemplate <typename InType, typename OutType, typename InIndexerType, typename OutIndexerType>\n__device__ void ${name}(\n InType in_arr, OutType out_arr,\n InIndexerType in_ind, OutIndexerType out_ind, int block_stride) {\n typedef ${in_type} type_in0_raw;\n typedef ${out_type} type_out0_raw;\n typedef ${reduce_ctype} _type_reduce;\n extern __shared__ char _sdata_raw[];\n _type_reduce *sdata = reinterpret_cast<_type_reduce*>(_sdata_raw);\n unsigned int tid = threadIdx.x;\n int _J = tid >> __popc(block_stride - 1);\n ptrdiff_t _j = (ptrdiff_t)_J * out_ind.size();\n int J_stride = blockDim.x >> __popc(block_stride - 1);\n ptrdiff_t j_stride = (ptrdiff_t)J_stride * out_ind.size();\n\n for (ptrdiff_t _i = (ptrdiff_t)blockIdx.x * block_stride; _i < out_ind.size(); _i += (ptrdiff_t)gridDim.x * block_stride) {\n _type_reduce s = _type_reduce(${identity});\n ptrdiff_t i = _i + (tid & (block_stride - 1));\n for (ptrdiff_t j = i + _j; j < in_ind.size(); j += j_stride) {\n in_ind.set(j);\n s = ${op_name}(s, static_cast<_type_reduce>(in_arr[in_ind.get()]));\n }\n sdata[tid] = s;\n __syncthreads();\n for (unsigned int block = blockDim.x / 2; block >= block_stride; block >>= 1) {\n if (tid < block) {\n sdata[tid] = ${op_name}(sdata[tid], sdata[tid + block]);\n }\n __syncthreads();\n }\n if (tid < block_stride) {\n s = sdata[tid];\n }\n if (tid < block_stride && i < out_ind.size()) {\n out_ind.set(i);\n ${postmap_name}(s, out_arr[out_ind.get()]);\n }\n __syncthreads();\n }\n}''' # NOQA\n ).substitute(\n name=self.name,\n op_name=op_name,\n postmap_name=postmap_name,\n in_type=_dtype_to_ctype[in_param.dtype],\n out_type=_dtype_to_ctype[out_param.dtype],\n reduce_ctype=self.reduce_ctype,\n reduce_expr=self.expr,\n identity=self.identity,\n postmap_cast=self.postmap_cast_code)\n\n return [code]\n", "path": "cupy/core/_fusion_op.py"}], "after_files": [{"content": "import string\n\nimport numpy\n\nfrom cupy.core._fusion_variable import _TraceVariable\nfrom 
cupy.core._fusion_variable import _TraceArray\nfrom cupy.core._fusion_variable import _VariableSet\nfrom cupy.core import _fusion_thread_local\nfrom cupy.core import _fusion_emit_code\nfrom cupy.core import _kernel\nfrom cupy.core import _reduction\n\n\n_dtype_to_ctype = _fusion_emit_code._dtype_to_ctype\n\n\nclass _UfuncRoutine:\n \"\"\"A device function for single elementwise operations.\n \"\"\"\n\n def __init__(\n self, name, ufunc, routine_code, in_params, out_params,\n compute_dtypes):\n assert isinstance(name, str)\n assert isinstance(ufunc, _kernel.ufunc)\n assert isinstance(routine_code, str)\n assert isinstance(compute_dtypes, tuple)\n assert all(isinstance(t, numpy.dtype) for t in compute_dtypes)\n assert isinstance(in_params, list)\n assert all(isinstance(p, _TraceVariable) for p in in_params)\n assert isinstance(out_params, list)\n assert all(isinstance(p, _TraceArray) for p in out_params)\n\n self.name = name\n self.in_params = in_params\n self.out_params = out_params\n self.preamble = ufunc._preamble\n self.routine_code = routine_code\n self.compute_dtypes = compute_dtypes\n\n def emit_code(self):\n \"\"\"Returns a CUDA device function code.\n\n Returns a string like:\n ```\n __device__ void cupy_add_0(int &in0_, float &in1_, double &out0_) {\n typedef double in0_type;\n typedef double in1_type;\n typedef double out0_type;\n double in0 = (double) in0_;\n double in1 = (double) in1_;\n double out0 = (double) out0_;\n out0 = in0 + in1;\n out0_ = out0;\n }\n ```\n \"\"\"\n nin = len(self.in_params)\n dtypes = self.compute_dtypes\n assert len(self.in_params) == len(self.compute_dtypes[:nin])\n in_params = [\n (_dtype_to_ctype[p.dtype], _dtype_to_ctype[t], 'in{}'.format(i))\n for i, (p, t) in enumerate(zip(self.in_params, dtypes[:nin]))\n ]\n out_params = [\n (_dtype_to_ctype[p.dtype], _dtype_to_ctype[t], 'out{}'.format(i))\n for i, (p, t) in enumerate(zip(self.out_params, dtypes[nin:]))\n ]\n params = in_params + out_params\n\n params_code = ', '.join(['{} &{}_'.format(t, s) for t, _, s in params])\n typedef = ['typedef {} {}_type;'.format(t, s) for _, t, s in params]\n read = ['{} {} = ({}) {}_;'.format(t, s, t, s) for _, t, s in params]\n write = ['{}_ = {};'.format(s, s, s) for _, _, s in out_params]\n\n return _fusion_emit_code._CodeBlock(\n '__device__ void {}({})'.format(self.name, params_code),\n typedef + read + [self.routine_code + ';'] + write)\n\n def emit_call_code(self):\n params = self.in_params + self.out_params\n return '{op_name}({params});'.format(\n op_name=self.name,\n params=', '.join([var.lvar_name for var in params]))\n\n\nclass _ElementwiseTraceOp:\n \"\"\"Ufunc or elementwise kernel with types.\n \"\"\"\n\n def __init__(self, ufunc_routines, in_params, out_params, ashape):\n # The `in_params` and `out_params` should be already broadcasted to\n # `ashape`, but they don't guarantee to be exactly same as\n # `param.ashape`.\n\n _fusion_thread_local.check_not_runtime()\n assert isinstance(ufunc_routines, list)\n assert all(isinstance(r, _UfuncRoutine) for r in ufunc_routines)\n assert isinstance(ashape, tuple)\n\n self.ops = ufunc_routines\n self.in_params = _VariableSet(*in_params)\n self.out_params = _VariableSet(*out_params)\n self.ashape = ashape\n\n @property\n def params(self):\n \"\"\"Returns the set of all variable the loop uses.\n \"\"\"\n res = _VariableSet()\n for op in self.ops:\n res += _VariableSet(*op.in_params)\n res += _VariableSet(*op.out_params)\n return res\n\n @staticmethod\n def _emit_declaration(params, in_params):\n \"\"\"Returns a 
tuple of size 2.\n\n 1. CUDA code: declaring local variables.\n 2. The set of arrays which require indexer.\n \"\"\"\n _fusion_thread_local.check_not_runtime()\n\n indexed_arrays = _VariableSet()\n code = []\n for var in params:\n if var in in_params:\n if isinstance(var, _TraceArray):\n indexed_arrays.add(var)\n f = '${type} ${lvar} = ${var}[${indexer}.get()];'\n else:\n f = '${type} ${lvar} = ${var};'\n else:\n f = '${type} ${lvar};'\n code.append(var.format(f))\n\n return code, indexed_arrays\n\n @staticmethod\n def _emit_after_operation(out_params):\n \"\"\"Returns a tuple of size 2.\n 1. CUDA code: writing the results of operations back to global memory.\n 2. The set of arrays which require indexer.\n \"\"\"\n\n _fusion_thread_local.check_not_runtime()\n\n indexed_arrays = _VariableSet()\n codes = []\n for var in out_params:\n if isinstance(var, _TraceArray):\n indexed_arrays.add(var)\n f = '${var}[${indexer}.get()] = ${lvar};'\n else:\n f = '${var} = ${lvar};'\n codes.append(var.format(f))\n\n return codes, indexed_arrays\n\n @staticmethod\n def _emit_set_index(indexed_params, tid):\n \"\"\"Returns a CUDA code: setting a raw index to indexers.\n \"\"\"\n _fusion_thread_local.check_not_runtime()\n assert isinstance(indexed_params, _VariableSet)\n\n return [\n p.format('${indexer}.set(${tid});', tid=tid)\n for p in indexed_params\n ]\n\n def emit_code(self):\n _fusion_thread_local.check_not_runtime()\n\n declaration, s1 = self._emit_declaration(self.params, self.in_params)\n operation = [op.emit_call_code() for op in self.ops]\n after_operation, s2 = self._emit_after_operation(self.out_params)\n index_name = 'i'\n indexed_array = s1 + s2\n indexer_name = next(iter(indexed_array)).indexer_name\n indexer_setup = self._emit_set_index(indexed_array, index_name)\n\n return _fusion_emit_code._CodeBlock(\n 'CUPY_FOR({}, {}.size())'.format(index_name, indexer_name),\n indexer_setup + declaration + operation + after_operation)\n\n def emit_preamble_codes(self):\n return [subm.preamble for subm in self.ops if subm.preamble != '']\n\n def emit_submodule_codes(self):\n return [str(subm.emit_code()) for subm in self.ops]\n\n\nclass _ReductionTraceOp:\n def __init__(self, name, reduce_func, expr, in_param, out_param, axis):\n \"\"\"Reduction operation.\n \"\"\"\n _fusion_thread_local.check_not_runtime()\n assert isinstance(name, str)\n assert isinstance(reduce_func, _reduction._SimpleReductionKernel)\n assert isinstance(in_param, _TraceArray)\n assert isinstance(out_param, _TraceArray)\n assert isinstance(axis, tuple)\n assert all([0 <= x < in_param.ndim for x in axis])\n\n self.name = name\n self.preamble = reduce_func.preamble\n self.in_params = _VariableSet(in_param)\n self.out_params = _VariableSet(out_param)\n self.block_stride_name = 'block_stride_' + name\n self.axis = axis\n\n if reduce_func.identity is None:\n self.identity = ''\n else:\n self.identity = str(reduce_func.identity)\n\n _, self.expr, self.postmap_cast_code, self.reduce_ctype = expr\n if self.reduce_ctype is None:\n out_param, = self.out_params\n self.reduce_ctype = _dtype_to_ctype[out_param.dtype]\n\n self.premap_op = None\n self.postmap_op = None\n\n @property\n def params(self):\n return self.in_params + self.out_params\n\n def emit_code(self):\n _fusion_thread_local.check_not_runtime()\n assert len(self.in_params) == 1\n assert len(self.out_params) == 1\n in_param = list(self.in_params)[0]\n out_param = list(self.out_params)[0]\n params = ', '.join([\n in_param.var_name,\n out_param.var_name,\n in_param.indexer_name,\n 
out_param.indexer_name,\n ])\n return '{}({}, {});'.format(\n self.name, params, self.block_stride_name)\n\n def emit_preamble_codes(self):\n preamble = self.preamble\n return [preamble] if preamble != '' else []\n\n def emit_submodule_codes(self):\n \"\"\"Returns a CUDA device function code.\n\n The emitted code assumes that ``block_stride`` and `blockDim.x` is a\n power of 2.\n \"\"\"\n\n in_param, = self.in_params\n out_param, = self.out_params\n op_name = '{}_op'.format(self.name)\n postmap_name = '{}_postmap'.format(self.name)\n\n code = string.Template('''\n#define ${op_name}(a, b) (${reduce_expr})\n#define ${postmap_name}(a, out0) (${postmap_cast})\n\ntemplate <typename InType, typename OutType, typename InIndexerType, typename OutIndexerType>\n__device__ void ${name}(\n InType in_arr, OutType out_arr,\n InIndexerType in_ind, OutIndexerType out_ind, int block_stride) {\n typedef ${in_type} type_in0_raw;\n typedef ${out_type} type_out0_raw;\n typedef ${reduce_ctype} _type_reduce;\n extern __shared__ char _sdata_raw[];\n _type_reduce *sdata = reinterpret_cast<_type_reduce*>(_sdata_raw);\n unsigned int tid = threadIdx.x;\n int _J = tid >> __popc(block_stride - 1);\n ptrdiff_t _j = (ptrdiff_t)_J * out_ind.size();\n int J_stride = blockDim.x >> __popc(block_stride - 1);\n ptrdiff_t j_stride = (ptrdiff_t)J_stride * out_ind.size();\n\n for (ptrdiff_t _i = (ptrdiff_t)blockIdx.x * block_stride; _i < out_ind.size(); _i += (ptrdiff_t)gridDim.x * block_stride) {\n _type_reduce s = _type_reduce(${identity});\n ptrdiff_t i = _i + (tid & (block_stride - 1));\n for (ptrdiff_t j = i + _j; j < in_ind.size(); j += j_stride) {\n in_ind.set(j);\n s = ${op_name}(s, static_cast<_type_reduce>(in_arr[in_ind.get()]));\n }\n sdata[tid] = s;\n __syncthreads();\n for (unsigned int block = blockDim.x / 2; block >= block_stride; block >>= 1) {\n if (tid < block) {\n sdata[tid] = ${op_name}(sdata[tid], sdata[tid + block]);\n }\n __syncthreads();\n }\n if (tid < block_stride) {\n s = sdata[tid];\n }\n if (tid < block_stride && i < out_ind.size()) {\n out_ind.set(i);\n ${postmap_name}(s, out_arr[out_ind.get()]);\n }\n __syncthreads();\n }\n}''' # NOQA\n ).substitute(\n name=self.name,\n op_name=op_name,\n postmap_name=postmap_name,\n in_type=_dtype_to_ctype[in_param.dtype],\n out_type=_dtype_to_ctype[out_param.dtype],\n reduce_ctype=self.reduce_ctype,\n reduce_expr=self.expr,\n identity=self.identity,\n postmap_cast=self.postmap_cast_code)\n\n return [code]\n", "path": "cupy/core/_fusion_op.py"}]}
3,953
135
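The cupy row above is fixed by switching `_ReductionTraceOp` from the private `reduce_func._preamble` attribute to the public `reduce_func.preamble`. The row does not show `_SimpleReductionKernel` itself, so the snippet below is only a generic illustration of that pattern: exposing internal state through a read-only property so callers never reach into the underscore-prefixed field.

```python
class SimpleReductionKernel:
    """Toy stand-in for cupy.core._reduction._SimpleReductionKernel."""

    def __init__(self, identity, preamble=""):
        self.identity = identity
        self._preamble = preamble          # private storage

    @property
    def preamble(self):
        # Public read-only accessor; fusion code such as _ReductionTraceOp
        # should use this instead of reaching into _preamble.
        return self._preamble


kernel = SimpleReductionKernel(identity=0, preamble="#define MY_OP(a, b) ((a) + (b))")
print(kernel.preamble)                     # same text, via the public attribute
```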
gh_patches_debug_4348
rasdani/github-patches
git_diff
pwndbg__pwndbg-747
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Bad unsigned casting ### Description `pwndbg.memory.u` returns signed integers (with minus `-` sign). ### Steps to reproduce ```c #include <stdio.h> #include <stdint.h> int main(int argc, char const *argv[]) { uint64_t x = 0xb60ad86e8fb52ea8; printf("%p\n", &x); getc(stdin); return 0; } ``` ``` clang bad_u.c -g -o bad_u gdb ./bad_u pwndbg> x/xg 0x7fffffffab18 0x7fffffffab18: 0xb60ad86e8fb52ea8 pwndbg> python-interactive >>> pwndbg.memory.u(0x7fffffffab18) -5329209239670542680 ``` Idk why it doesn't break the pwndbg visibly. Found it running `vis_heap_chunks` on arbitrary addresses (the minus were printed in few places). ### My setup ``` GNU gdb (Ubuntu 8.1-0ubuntu3.2) 8.1.0.20180409-git python: 3.6.9 (default, Nov 7 2019, 10:44:02) pwndbg: dev branch ``` Bad unsigned casting ### Description `pwndbg.memory.u` returns signed integers (with minus `-` sign). ### Steps to reproduce ```c #include <stdio.h> #include <stdint.h> int main(int argc, char const *argv[]) { uint64_t x = 0xb60ad86e8fb52ea8; printf("%p\n", &x); getc(stdin); return 0; } ``` ``` clang bad_u.c -g -o bad_u gdb ./bad_u pwndbg> x/xg 0x7fffffffab18 0x7fffffffab18: 0xb60ad86e8fb52ea8 pwndbg> python-interactive >>> pwndbg.memory.u(0x7fffffffab18) -5329209239670542680 ``` Idk why it doesn't break the pwndbg visibly. Found it running `vis_heap_chunks` on arbitrary addresses (the minus were printed in few places). ### My setup ``` GNU gdb (Ubuntu 8.1-0ubuntu3.2) 8.1.0.20180409-git python: 3.6.9 (default, Nov 7 2019, 10:44:02) pwndbg: dev branch ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pwndbg/inthook.py` Content: ``` 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 """ 4 This hook is necessary for compatibility with Python2.7 versions of GDB 5 since they cannot directly cast to integer a gdb.Value object that is 6 not already an integer type. 
7 """ 8 from __future__ import absolute_import 9 from __future__ import division 10 from __future__ import print_function 11 from __future__ import unicode_literals 12 13 import enum 14 import os 15 16 import gdb 17 import six 18 from future.utils import with_metaclass 19 20 import pwndbg.typeinfo 21 22 if six.PY2: 23 import __builtin__ as builtins 24 else: 25 import builtins 26 27 _int = builtins.int 28 29 30 # We need this class to get isinstance(7, xint) to return True 31 class IsAnInt(type): 32 def __instancecheck__(self, other): 33 return isinstance(other, _int) 34 35 36 class xint(with_metaclass(IsAnInt, builtins.int)): 37 def __new__(cls, value, *a, **kw): 38 if isinstance(value, gdb.Value): 39 if pwndbg.typeinfo.is_pointer(value): 40 value = value.cast(pwndbg.typeinfo.size_t) 41 else: 42 value = value.cast(pwndbg.typeinfo.ssize_t) 43 44 elif isinstance(value, gdb.Symbol): 45 symbol = value 46 value = symbol.value() 47 if symbol.is_function: 48 value = value.cast(pwndbg.typeinfo.size_t) 49 50 elif not isinstance(value, (six.string_types, six.integer_types)) \ 51 or isinstance(cls, enum.EnumMeta): 52 # without check for EnumMeta math operations with enums were failing e.g.: 53 # pwndbg> py import re; flags = 1 | re.MULTILINE 54 return _int.__new__(cls, value, *a, **kw) 55 56 return _int(_int(value, *a, **kw)) 57 58 # Do not hook 'int' if we are just generating documentation 59 if os.environ.get('SPHINX', None) is None: 60 builtins.int = xint 61 globals()['int'] = xint 62 if six.PY3: 63 builtins.long = xint 64 globals()['long'] = xint 65 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pwndbg/inthook.py b/pwndbg/inthook.py --- a/pwndbg/inthook.py +++ b/pwndbg/inthook.py @@ -39,7 +39,7 @@ if pwndbg.typeinfo.is_pointer(value): value = value.cast(pwndbg.typeinfo.size_t) else: - value = value.cast(pwndbg.typeinfo.ssize_t) + return _int.__new__(cls, value, *a, **kw) elif isinstance(value, gdb.Symbol): symbol = value
{"golden_diff": "diff --git a/pwndbg/inthook.py b/pwndbg/inthook.py\n--- a/pwndbg/inthook.py\n+++ b/pwndbg/inthook.py\n@@ -39,7 +39,7 @@\n if pwndbg.typeinfo.is_pointer(value):\n value = value.cast(pwndbg.typeinfo.size_t)\n else:\n- value = value.cast(pwndbg.typeinfo.ssize_t)\n+ return _int.__new__(cls, value, *a, **kw)\n \n elif isinstance(value, gdb.Symbol):\n symbol = value\n", "issue": "Bad unsigned casting\n### Description\r\n\r\n`pwndbg.memory.u` returns signed integers (with minus `-` sign).\r\n\r\n### Steps to reproduce\r\n\r\n\r\n```c\r\n#include <stdio.h>\r\n#include <stdint.h>\r\n\r\nint main(int argc, char const *argv[])\r\n{\r\n uint64_t x = 0xb60ad86e8fb52ea8;\r\n printf(\"%p\\n\", &x);\r\n getc(stdin);\r\n return 0;\r\n}\r\n```\r\n\r\n```\r\nclang bad_u.c -g -o bad_u\r\ngdb ./bad_u\r\n\r\npwndbg> x/xg 0x7fffffffab18\r\n0x7fffffffab18:\t0xb60ad86e8fb52ea8\r\npwndbg> python-interactive \r\n>>> pwndbg.memory.u(0x7fffffffab18)\r\n-5329209239670542680\r\n```\r\n\r\nIdk why it doesn't break the pwndbg visibly. Found it running `vis_heap_chunks` on arbitrary addresses (the minus were printed in few places).\r\n\r\n### My setup\r\n\r\n```\r\nGNU gdb (Ubuntu 8.1-0ubuntu3.2) 8.1.0.20180409-git\r\npython: 3.6.9 (default, Nov 7 2019, 10:44:02)\r\npwndbg: dev branch\r\n```\nBad unsigned casting\n### Description\r\n\r\n`pwndbg.memory.u` returns signed integers (with minus `-` sign).\r\n\r\n### Steps to reproduce\r\n\r\n\r\n```c\r\n#include <stdio.h>\r\n#include <stdint.h>\r\n\r\nint main(int argc, char const *argv[])\r\n{\r\n uint64_t x = 0xb60ad86e8fb52ea8;\r\n printf(\"%p\\n\", &x);\r\n getc(stdin);\r\n return 0;\r\n}\r\n```\r\n\r\n```\r\nclang bad_u.c -g -o bad_u\r\ngdb ./bad_u\r\n\r\npwndbg> x/xg 0x7fffffffab18\r\n0x7fffffffab18:\t0xb60ad86e8fb52ea8\r\npwndbg> python-interactive \r\n>>> pwndbg.memory.u(0x7fffffffab18)\r\n-5329209239670542680\r\n```\r\n\r\nIdk why it doesn't break the pwndbg visibly. 
Found it running `vis_heap_chunks` on arbitrary addresses (the minus were printed in few places).\r\n\r\n### My setup\r\n\r\n```\r\nGNU gdb (Ubuntu 8.1-0ubuntu3.2) 8.1.0.20180409-git\r\npython: 3.6.9 (default, Nov 7 2019, 10:44:02)\r\npwndbg: dev branch\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nThis hook is necessary for compatibility with Python2.7 versions of GDB\nsince they cannot directly cast to integer a gdb.Value object that is\nnot already an integer type.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport enum\nimport os\n\nimport gdb\nimport six\nfrom future.utils import with_metaclass\n\nimport pwndbg.typeinfo\n\nif six.PY2:\n import __builtin__ as builtins\nelse:\n import builtins\n\n_int = builtins.int\n\n\n# We need this class to get isinstance(7, xint) to return True\nclass IsAnInt(type):\n def __instancecheck__(self, other):\n return isinstance(other, _int)\n\n\nclass xint(with_metaclass(IsAnInt, builtins.int)):\n def __new__(cls, value, *a, **kw):\n if isinstance(value, gdb.Value):\n if pwndbg.typeinfo.is_pointer(value):\n value = value.cast(pwndbg.typeinfo.size_t)\n else:\n value = value.cast(pwndbg.typeinfo.ssize_t)\n\n elif isinstance(value, gdb.Symbol):\n symbol = value\n value = symbol.value()\n if symbol.is_function:\n value = value.cast(pwndbg.typeinfo.size_t)\n\n elif not isinstance(value, (six.string_types, six.integer_types)) \\\n or isinstance(cls, enum.EnumMeta):\n # without check for EnumMeta math operations with enums were failing e.g.:\n # pwndbg> py import re; flags = 1 | re.MULTILINE\n return _int.__new__(cls, value, *a, **kw)\n\n return _int(_int(value, *a, **kw))\n\n# Do not hook 'int' if we are just generating documentation\nif os.environ.get('SPHINX', None) is None:\n builtins.int = xint\n globals()['int'] = xint\n if six.PY3:\n builtins.long = xint\n globals()['long'] = xint\n", "path": "pwndbg/inthook.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nThis hook is necessary for compatibility with Python2.7 versions of GDB\nsince they cannot directly cast to integer a gdb.Value object that is\nnot already an integer type.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport enum\nimport os\n\nimport gdb\nimport six\nfrom future.utils import with_metaclass\n\nimport pwndbg.typeinfo\n\nif six.PY2:\n import __builtin__ as builtins\nelse:\n import builtins\n\n_int = builtins.int\n\n\n# We need this class to get isinstance(7, xint) to return True\nclass IsAnInt(type):\n def __instancecheck__(self, other):\n return isinstance(other, _int)\n\n\nclass xint(with_metaclass(IsAnInt, builtins.int)):\n def __new__(cls, value, *a, **kw):\n if isinstance(value, gdb.Value):\n if pwndbg.typeinfo.is_pointer(value):\n value = value.cast(pwndbg.typeinfo.size_t)\n else:\n return _int.__new__(cls, value, *a, **kw)\n\n elif isinstance(value, gdb.Symbol):\n symbol = value\n value = symbol.value()\n if symbol.is_function:\n value = value.cast(pwndbg.typeinfo.size_t)\n\n elif not isinstance(value, (six.string_types, six.integer_types)) \\\n or isinstance(cls, enum.EnumMeta):\n # without check for EnumMeta math operations with enums were failing e.g.:\n # pwndbg> py import re; flags = 1 | re.MULTILINE\n return _int.__new__(cls, value, *a, **kw)\n\n 
return _int(_int(value, *a, **kw))\n\n# Do not hook 'int' if we are just generating documentation\nif os.environ.get('SPHINX', None) is None:\n builtins.int = xint\n globals()['int'] = xint\n if six.PY3:\n builtins.long = xint\n globals()['long'] = xint\n", "path": "pwndbg/inthook.py"}]}
1,488
126
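The pwndbg row above boils down to a two's-complement reinterpretation: casting the 64-bit value through a signed `ssize_t` makes `0xb60ad86e8fb52ea8` come back as `-5329209239670542680`. The plain-Python sketch below reproduces that arithmetic without gdb or pwndbg, just to make the signed/unsigned relationship explicit.

```python
MASK64 = (1 << 64) - 1

raw = 0xB60AD86E8FB52EA8        # value from the issue's reproduction

# Two's-complement reinterpretation: what casting through a signed 64-bit
# type (ssize_t) effectively does to a value with the top bit set.
signed = raw - (1 << 64) if raw & (1 << 63) else raw
print(signed)                   # -5329209239670542680

# Masking with 2**64 - 1 recovers the unsigned view of the same bit pattern.
unsigned = signed & MASK64
print(hex(unsigned))            # 0xb60ad86e8fb52ea8
assert unsigned == raw
```

The patched `xint.__new__` in the golden diff simply skips the signed cast for non-pointer `gdb.Value` objects, which is why the unsigned reading survives.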
gh_patches_debug_2300
rasdani/github-patches
git_diff
pytorch__torchdynamo-1012
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Dynamo WONT CONVERT for is_fx_tracing() Probably the same as #1009. Repro: ``` import torchdynamo from torch.fx._symbolic_trace import is_fx_tracing def my_compiler(gm, inputs): return gm.forward @torchdynamo.optimize(my_compiler) def fn(x, y): if is_fx_tracing(): return x else: return y fn(1, 2) ``` returns ``` torchdynamo.convert_frame: [ERROR] WON'T CONVERT fn /private/home/suo/scratch/test.py line 8 due to: Traceback (most recent call last): File "/raid/suo/torchdynamo/torchdynamo/variables/tensor.py", line 258, in create assert ( AssertionError: torch.* op returned non-Tensor bool call_function <function is_fx_tracing at 0x7f08b681e700> from user code: File "/private/home/suo/scratch/test.py", line 10, in fn if is_fx_tracing(): Set torchdynamo.config.verbose=True for more information ========== ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `torchdynamo/config.py` Content: ``` 1 import logging 2 import os 3 import sys 4 from os.path import abspath 5 from os.path import dirname 6 from types import ModuleType 7 8 import torch 9 10 try: 11 import torch._prims 12 import torch._refs 13 14 HAS_REFS_PRIMS = True 15 except ImportError: 16 HAS_REFS_PRIMS = False 17 18 19 class AccessLimitingConfig(ModuleType): 20 # log level (levels print what it says + all levels listed below it) 21 # DEBUG print full traces <-- lowest level + print tracing of every instruction 22 # INFO print compiled functions + graphs 23 # WARN print warnings (including graph breaks) 24 # ERROR print exceptions (and what user code was being processed when it occurred) 25 log_level = logging.WARNING 26 # Verbose will print full stack traces on warnings and errors 27 verbose = False 28 29 # verify the correctness of optimized backend 30 verify_correctness = False 31 32 # need this many ops to create an FX graph 33 minimum_call_count = 1 34 35 # turn on/off DCE pass 36 dead_code_elimination = True 37 38 # disable (for a function) when cache reaches this size 39 cache_size_limit = 64 40 41 # specializing int/float by default 42 specialize_int_float = True 43 44 # Assume these functions return constants 45 constant_functions = { 46 torch.jit.is_scripting: False, 47 torch.jit.is_tracing: False, 48 torch._C._get_tracing_state: None, 49 } 50 51 # root folder of the project 52 base_dir = dirname(dirname(abspath(__file__))) 53 54 # don't specialize on shapes and strides and put shape ops in graph 55 dynamic_shapes = os.environ.get("TORCHDYNAMO_DYNAMIC_SHAPES") == "1" 56 57 # Set this to False to assume nn.Modules() contents are immutable (similar assumption as freezing) 58 guard_nn_modules = False 59 60 # Run the FX graph as it is created to get better type information 61 dynamic_propagation = True 62 63 # Run the FX graph with FakeTensors 64 fake_tensor_propagation = True 65 66 # run FX normalization passes in optimizer 67 normalize_ir = True 68 69 # If a tensor subclass type is in this set, torchdynamo will inline the 70 # __torch_function__ logic of the subclass. 71 traceable_tensor_subclasses = set() 72 73 # Raise torchdynamo internal assertions 74 raise_on_assertion_error = False 75 76 # Propagate backend exceptions up to torchdynamo.optimize 77 raise_on_backend_error = True 78 79 # If a PyTorch module is in this allowlist, torchdynamo will be allowed 80 # to inline objects from it or its children. 
81 skipfiles_inline_module_allowlist = {torch.nn, torch.distributions} 82 if HAS_REFS_PRIMS: 83 skipfiles_inline_module_allowlist |= { 84 torch._refs, 85 torch._prims, 86 torch._decomp, 87 } 88 89 # If a string representing a PyTorch module is in this ignorelist, 90 # the `allowed_functions.is_allowed` function will not consider it 91 # when creating a list of PyTorch functions that will appear in 92 # FX IR. 93 allowed_functions_module_string_ignorelist = { 94 "torch.distributions", 95 "torch.testing", 96 "torch._refs", 97 "torch._prims", 98 "torch._decomp", 99 } 100 101 # Compiler compilation debug info 102 # 0: Nothing printed out when compilation fails 103 # 1: Dump the graph out to repro.py if compilation fails 104 # 2: Dumps the graph out to minify_repro.py with a minifier if compilation fails 105 # 3: Always dumps the last graph ran out to minify_repro.py, useful for segfaults/irrecoverable errors 106 repro_level = int(os.environ.get("COMPILER_REPRO_LEVEL", 0)) 107 108 # Not all backends support scalars. Some calls on torch.Tensor (like .item()) return a scalar type. 109 # When this flag is set to False, we introduce a graph break instead of capturing. 110 capture_scalar_outputs = False 111 112 def __setattr__(self, name, value): 113 if sys.version_info > (3, 8): 114 assert hasattr( 115 self, name 116 ), f"Trying to set {name} - this value does not exist in torchdynamo.config" 117 object.__setattr__(self, name, value) 118 119 def __delattr__(self, name): 120 if sys.version_info > (3, 8): 121 assert hasattr( 122 self, name 123 ), f"Trying to del {name} - this value does not exist in torchdynamo.config" 124 object.__delattr__(self, name) 125 126 127 sys.modules[__name__] = AccessLimitingConfig("config") 128 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/torchdynamo/config.py b/torchdynamo/config.py --- a/torchdynamo/config.py +++ b/torchdynamo/config.py @@ -46,6 +46,8 @@ torch.jit.is_scripting: False, torch.jit.is_tracing: False, torch._C._get_tracing_state: None, + torch.fx._symbolic_trace.is_fx_tracing: False, + torch.onnx.is_in_onnx_export: False, } # root folder of the project
{"golden_diff": "diff --git a/torchdynamo/config.py b/torchdynamo/config.py\n--- a/torchdynamo/config.py\n+++ b/torchdynamo/config.py\n@@ -46,6 +46,8 @@\n torch.jit.is_scripting: False,\n torch.jit.is_tracing: False,\n torch._C._get_tracing_state: None,\n+ torch.fx._symbolic_trace.is_fx_tracing: False,\n+ torch.onnx.is_in_onnx_export: False,\n }\n \n # root folder of the project\n", "issue": "Dynamo WONT CONVERT for is_fx_tracing()\nProbably the same as #1009. Repro:\r\n```\r\nimport torchdynamo\r\nfrom torch.fx._symbolic_trace import is_fx_tracing\r\n\r\ndef my_compiler(gm, inputs):\r\n return gm.forward\r\n\r\[email protected](my_compiler)\r\ndef fn(x, y):\r\n if is_fx_tracing():\r\n return x\r\n else:\r\n return y\r\n\r\nfn(1, 2)\r\n```\r\nreturns\r\n```\r\ntorchdynamo.convert_frame: [ERROR] WON'T CONVERT fn /private/home/suo/scratch/test.py line 8\r\ndue to:\r\nTraceback (most recent call last):\r\n File \"/raid/suo/torchdynamo/torchdynamo/variables/tensor.py\", line 258, in create\r\n assert (\r\nAssertionError: torch.* op returned non-Tensor bool call_function <function is_fx_tracing at 0x7f08b681e700>\r\n\r\nfrom user code:\r\n File \"/private/home/suo/scratch/test.py\", line 10, in fn\r\n if is_fx_tracing():\r\n\r\nSet torchdynamo.config.verbose=True for more information\r\n==========\r\n```\n", "before_files": [{"content": "import logging\nimport os\nimport sys\nfrom os.path import abspath\nfrom os.path import dirname\nfrom types import ModuleType\n\nimport torch\n\ntry:\n import torch._prims\n import torch._refs\n\n HAS_REFS_PRIMS = True\nexcept ImportError:\n HAS_REFS_PRIMS = False\n\n\nclass AccessLimitingConfig(ModuleType):\n # log level (levels print what it says + all levels listed below it)\n # DEBUG print full traces <-- lowest level + print tracing of every instruction\n # INFO print compiled functions + graphs\n # WARN print warnings (including graph breaks)\n # ERROR print exceptions (and what user code was being processed when it occurred)\n log_level = logging.WARNING\n # Verbose will print full stack traces on warnings and errors\n verbose = False\n\n # verify the correctness of optimized backend\n verify_correctness = False\n\n # need this many ops to create an FX graph\n minimum_call_count = 1\n\n # turn on/off DCE pass\n dead_code_elimination = True\n\n # disable (for a function) when cache reaches this size\n cache_size_limit = 64\n\n # specializing int/float by default\n specialize_int_float = True\n\n # Assume these functions return constants\n constant_functions = {\n torch.jit.is_scripting: False,\n torch.jit.is_tracing: False,\n torch._C._get_tracing_state: None,\n }\n\n # root folder of the project\n base_dir = dirname(dirname(abspath(__file__)))\n\n # don't specialize on shapes and strides and put shape ops in graph\n dynamic_shapes = os.environ.get(\"TORCHDYNAMO_DYNAMIC_SHAPES\") == \"1\"\n\n # Set this to False to assume nn.Modules() contents are immutable (similar assumption as freezing)\n guard_nn_modules = False\n\n # Run the FX graph as it is created to get better type information\n dynamic_propagation = True\n\n # Run the FX graph with FakeTensors\n fake_tensor_propagation = True\n\n # run FX normalization passes in optimizer\n normalize_ir = True\n\n # If a tensor subclass type is in this set, torchdynamo will inline the\n # __torch_function__ logic of the subclass.\n traceable_tensor_subclasses = set()\n\n # Raise torchdynamo internal assertions\n raise_on_assertion_error = False\n\n # Propagate backend exceptions up to torchdynamo.optimize\n 
raise_on_backend_error = True\n\n # If a PyTorch module is in this allowlist, torchdynamo will be allowed\n # to inline objects from it or its children.\n skipfiles_inline_module_allowlist = {torch.nn, torch.distributions}\n if HAS_REFS_PRIMS:\n skipfiles_inline_module_allowlist |= {\n torch._refs,\n torch._prims,\n torch._decomp,\n }\n\n # If a string representing a PyTorch module is in this ignorelist,\n # the `allowed_functions.is_allowed` function will not consider it\n # when creating a list of PyTorch functions that will appear in\n # FX IR.\n allowed_functions_module_string_ignorelist = {\n \"torch.distributions\",\n \"torch.testing\",\n \"torch._refs\",\n \"torch._prims\",\n \"torch._decomp\",\n }\n\n # Compiler compilation debug info\n # 0: Nothing printed out when compilation fails\n # 1: Dump the graph out to repro.py if compilation fails\n # 2: Dumps the graph out to minify_repro.py with a minifier if compilation fails\n # 3: Always dumps the last graph ran out to minify_repro.py, useful for segfaults/irrecoverable errors\n repro_level = int(os.environ.get(\"COMPILER_REPRO_LEVEL\", 0))\n\n # Not all backends support scalars. Some calls on torch.Tensor (like .item()) return a scalar type.\n # When this flag is set to False, we introduce a graph break instead of capturing.\n capture_scalar_outputs = False\n\n def __setattr__(self, name, value):\n if sys.version_info > (3, 8):\n assert hasattr(\n self, name\n ), f\"Trying to set {name} - this value does not exist in torchdynamo.config\"\n object.__setattr__(self, name, value)\n\n def __delattr__(self, name):\n if sys.version_info > (3, 8):\n assert hasattr(\n self, name\n ), f\"Trying to del {name} - this value does not exist in torchdynamo.config\"\n object.__delattr__(self, name)\n\n\nsys.modules[__name__] = AccessLimitingConfig(\"config\")\n", "path": "torchdynamo/config.py"}], "after_files": [{"content": "import logging\nimport os\nimport sys\nfrom os.path import abspath\nfrom os.path import dirname\nfrom types import ModuleType\n\nimport torch\n\ntry:\n import torch._prims\n import torch._refs\n\n HAS_REFS_PRIMS = True\nexcept ImportError:\n HAS_REFS_PRIMS = False\n\n\nclass AccessLimitingConfig(ModuleType):\n # log level (levels print what it says + all levels listed below it)\n # DEBUG print full traces <-- lowest level + print tracing of every instruction\n # INFO print compiled functions + graphs\n # WARN print warnings (including graph breaks)\n # ERROR print exceptions (and what user code was being processed when it occurred)\n log_level = logging.WARNING\n # Verbose will print full stack traces on warnings and errors\n verbose = False\n\n # verify the correctness of optimized backend\n verify_correctness = False\n\n # need this many ops to create an FX graph\n minimum_call_count = 1\n\n # turn on/off DCE pass\n dead_code_elimination = True\n\n # disable (for a function) when cache reaches this size\n cache_size_limit = 64\n\n # specializing int/float by default\n specialize_int_float = True\n\n # Assume these functions return constants\n constant_functions = {\n torch.jit.is_scripting: False,\n torch.jit.is_tracing: False,\n torch._C._get_tracing_state: None,\n torch.fx._symbolic_trace.is_fx_tracing: False,\n torch.onnx.is_in_onnx_export: False,\n }\n\n # root folder of the project\n base_dir = dirname(dirname(abspath(__file__)))\n\n # don't specialize on shapes and strides and put shape ops in graph\n dynamic_shapes = os.environ.get(\"TORCHDYNAMO_DYNAMIC_SHAPES\") == \"1\"\n\n # Set this to False to assume 
nn.Modules() contents are immutable (similar assumption as freezing)\n guard_nn_modules = False\n\n # Run the FX graph as it is created to get better type information\n dynamic_propagation = True\n\n # Run the FX graph with FakeTensors\n fake_tensor_propagation = True\n\n # run FX normalization passes in optimizer\n normalize_ir = True\n\n # If a tensor subclass type is in this set, torchdynamo will inline the\n # __torch_function__ logic of the subclass.\n traceable_tensor_subclasses = set()\n\n # Raise torchdynamo internal assertions\n raise_on_assertion_error = False\n\n # Propagate backend exceptions up to torchdynamo.optimize\n raise_on_backend_error = True\n\n # If a PyTorch module is in this allowlist, torchdynamo will be allowed\n # to inline objects from it or its children.\n skipfiles_inline_module_allowlist = {torch.nn, torch.distributions}\n if HAS_REFS_PRIMS:\n skipfiles_inline_module_allowlist |= {\n torch._refs,\n torch._prims,\n torch._decomp,\n }\n\n # If a string representing a PyTorch module is in this ignorelist,\n # the `allowed_functions.is_allowed` function will not consider it\n # when creating a list of PyTorch functions that will appear in\n # FX IR.\n allowed_functions_module_string_ignorelist = {\n \"torch.distributions\",\n \"torch.testing\",\n \"torch._refs\",\n \"torch._prims\",\n \"torch._decomp\",\n }\n\n # Compiler compilation debug info\n # 0: Nothing printed out when compilation fails\n # 1: Dump the graph out to repro.py if compilation fails\n # 2: Dumps the graph out to minify_repro.py with a minifier if compilation fails\n # 3: Always dumps the last graph ran out to minify_repro.py, useful for segfaults/irrecoverable errors\n repro_level = int(os.environ.get(\"COMPILER_REPRO_LEVEL\", 0))\n\n # Not all backends support scalars. Some calls on torch.Tensor (like .item()) return a scalar type.\n # When this flag is set to False, we introduce a graph break instead of capturing.\n capture_scalar_outputs = False\n\n def __setattr__(self, name, value):\n if sys.version_info > (3, 8):\n assert hasattr(\n self, name\n ), f\"Trying to set {name} - this value does not exist in torchdynamo.config\"\n object.__setattr__(self, name, value)\n\n def __delattr__(self, name):\n if sys.version_info > (3, 8):\n assert hasattr(\n self, name\n ), f\"Trying to del {name} - this value does not exist in torchdynamo.config\"\n object.__delattr__(self, name)\n\n\nsys.modules[__name__] = AccessLimitingConfig(\"config\")\n", "path": "torchdynamo/config.py"}]}
1825
119
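The golden diff in the record above fixes the graph break by listing `torch.fx._symbolic_trace.is_fx_tracing` (and `torch.onnx.is_in_onnx_export`) in `config.constant_functions`, so Dynamo substitutes a constant `False` for the call while tracing. A minimal sketch of the resulting behaviour, assuming a torchdynamo build that already carries this patch, based on the repro from the issue:

```python
# Sketch only: assumes torchdynamo with the constant_functions patch applied.
import torch
import torchdynamo
from torch.fx._symbolic_trace import is_fx_tracing

def my_compiler(gm, example_inputs):
    # trivial backend from the repro: run the captured graph as-is
    return gm.forward

@torchdynamo.optimize(my_compiler)
def fn(x, y):
    # With is_fx_tracing listed in config.constant_functions, this call folds
    # to False at trace time instead of raising "torch.* op returned
    # non-Tensor bool" and falling back with a WON'T CONVERT warning.
    if is_fx_tracing():
        return x
    return y

print(fn(torch.ones(2), torch.zeros(2)))  # expected: tensor([0., 0.])
```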
gh_patches_debug_11149
rasdani/github-patches
git_diff
open-mmlab__mmocr-285
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- LineStrParser: separator behaviour I've a question regarding this snippet of code: https://github.com/open-mmlab/mmocr/blob/01d8d63be945882fb2d9eaca5e1c1b39cb45f274/mmocr/datasets/utils/parser.py#L33-L36 Is there a particular reason to use these 4 lines of code instead of simply `line_str = line_str.split(self.separator)`? I'm asking this because for my own use case I have: - a TSV file with `filename` and `text` as keys for text recognition task - some blank spaces in `filename` e.g. `my cropped image.png` Hence, LineStrParser is configured as follows: ```python parser=dict( type='LineStrParser', keys=['filename', 'text'], keys_idx=[0, 1], separator='\t')) ``` but with the 4-lines code snippet, the line parsing fails. Instead, with simply `line_str = line_str.split(self.separator)` everything works well. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mmocr/datasets/utils/parser.py` Content: ``` 1 import json 2 3 from mmocr.datasets.builder import PARSERS 4 5 6 @PARSERS.register_module() 7 class LineStrParser: 8 """Parse string of one line in annotation file to dict format. 9 10 Args: 11 keys (list[str]): Keys in result dict. 12 keys_idx (list[int]): Value index in sub-string list 13 for each key above. 14 separator (str): Separator to separate string to list of sub-string. 15 """ 16 17 def __init__(self, 18 keys=['filename', 'text'], 19 keys_idx=[0, 1], 20 separator=' '): 21 assert isinstance(keys, list) 22 assert isinstance(keys_idx, list) 23 assert isinstance(separator, str) 24 assert len(keys) > 0 25 assert len(keys) == len(keys_idx) 26 self.keys = keys 27 self.keys_idx = keys_idx 28 self.separator = separator 29 30 def get_item(self, data_ret, index): 31 map_index = index % len(data_ret) 32 line_str = data_ret[map_index] 33 for split_key in self.separator: 34 if split_key != ' ': 35 line_str = line_str.replace(split_key, ' ') 36 line_str = line_str.split() 37 if len(line_str) <= max(self.keys_idx): 38 raise Exception( 39 f'key index: {max(self.keys_idx)} out of range: {line_str}') 40 41 line_info = {} 42 for i, key in enumerate(self.keys): 43 line_info[key] = line_str[self.keys_idx[i]] 44 return line_info 45 46 47 @PARSERS.register_module() 48 class LineJsonParser: 49 """Parse json-string of one line in annotation file to dict format. 50 51 Args: 52 keys (list[str]): Keys in both json-string and result dict. 53 """ 54 55 def __init__(self, keys=[], **kwargs): 56 assert isinstance(keys, list) 57 assert len(keys) > 0 58 self.keys = keys 59 60 def get_item(self, data_ret, index): 61 map_index = index % len(data_ret) 62 line_json_obj = json.loads(data_ret[map_index]) 63 line_info = {} 64 for key in self.keys: 65 if key not in line_json_obj: 66 raise Exception(f'key {key} not in line json {line_json_obj}') 67 line_info[key] = line_json_obj[key] 68 69 return line_info 70 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mmocr/datasets/utils/parser.py b/mmocr/datasets/utils/parser.py --- a/mmocr/datasets/utils/parser.py +++ b/mmocr/datasets/utils/parser.py @@ -30,10 +30,7 @@ def get_item(self, data_ret, index): map_index = index % len(data_ret) line_str = data_ret[map_index] - for split_key in self.separator: - if split_key != ' ': - line_str = line_str.replace(split_key, ' ') - line_str = line_str.split() + line_str = line_str.split(self.separator) if len(line_str) <= max(self.keys_idx): raise Exception( f'key index: {max(self.keys_idx)} out of range: {line_str}')
{"golden_diff": "diff --git a/mmocr/datasets/utils/parser.py b/mmocr/datasets/utils/parser.py\n--- a/mmocr/datasets/utils/parser.py\n+++ b/mmocr/datasets/utils/parser.py\n@@ -30,10 +30,7 @@\n def get_item(self, data_ret, index):\n map_index = index % len(data_ret)\n line_str = data_ret[map_index]\n- for split_key in self.separator:\n- if split_key != ' ':\n- line_str = line_str.replace(split_key, ' ')\n- line_str = line_str.split()\n+ line_str = line_str.split(self.separator)\n if len(line_str) <= max(self.keys_idx):\n raise Exception(\n f'key index: {max(self.keys_idx)} out of range: {line_str}')\n", "issue": "LineStrParser: separator behaviour\nI've a question regarding this snippet of code:\r\nhttps://github.com/open-mmlab/mmocr/blob/01d8d63be945882fb2d9eaca5e1c1b39cb45f274/mmocr/datasets/utils/parser.py#L33-L36\r\n\r\nIs there a particular reason to use these 4 lines of code instead of simply `line_str = line_str.split(self.separator)`?\r\n\r\nI'm asking this because for my own use case I have:\r\n- a TSV file with `filename` and `text` as keys for text recognition task\r\n- some blank spaces in `filename` e.g. `my cropped image.png`\r\n \r\nHence, LineStrParser is configured as follows:\r\n```python\r\nparser=dict(\r\n type='LineStrParser',\r\n keys=['filename', 'text'],\r\n keys_idx=[0, 1],\r\n separator='\\t'))\r\n```\r\nbut with the 4-lines code snippet, the line parsing fails. Instead, with simply `line_str = line_str.split(self.separator)` everything works well.\n", "before_files": [{"content": "import json\n\nfrom mmocr.datasets.builder import PARSERS\n\n\[email protected]_module()\nclass LineStrParser:\n \"\"\"Parse string of one line in annotation file to dict format.\n\n Args:\n keys (list[str]): Keys in result dict.\n keys_idx (list[int]): Value index in sub-string list\n for each key above.\n separator (str): Separator to separate string to list of sub-string.\n \"\"\"\n\n def __init__(self,\n keys=['filename', 'text'],\n keys_idx=[0, 1],\n separator=' '):\n assert isinstance(keys, list)\n assert isinstance(keys_idx, list)\n assert isinstance(separator, str)\n assert len(keys) > 0\n assert len(keys) == len(keys_idx)\n self.keys = keys\n self.keys_idx = keys_idx\n self.separator = separator\n\n def get_item(self, data_ret, index):\n map_index = index % len(data_ret)\n line_str = data_ret[map_index]\n for split_key in self.separator:\n if split_key != ' ':\n line_str = line_str.replace(split_key, ' ')\n line_str = line_str.split()\n if len(line_str) <= max(self.keys_idx):\n raise Exception(\n f'key index: {max(self.keys_idx)} out of range: {line_str}')\n\n line_info = {}\n for i, key in enumerate(self.keys):\n line_info[key] = line_str[self.keys_idx[i]]\n return line_info\n\n\[email protected]_module()\nclass LineJsonParser:\n \"\"\"Parse json-string of one line in annotation file to dict format.\n\n Args:\n keys (list[str]): Keys in both json-string and result dict.\n \"\"\"\n\n def __init__(self, keys=[], **kwargs):\n assert isinstance(keys, list)\n assert len(keys) > 0\n self.keys = keys\n\n def get_item(self, data_ret, index):\n map_index = index % len(data_ret)\n line_json_obj = json.loads(data_ret[map_index])\n line_info = {}\n for key in self.keys:\n if key not in line_json_obj:\n raise Exception(f'key {key} not in line json {line_json_obj}')\n line_info[key] = line_json_obj[key]\n\n return line_info\n", "path": "mmocr/datasets/utils/parser.py"}], "after_files": [{"content": "import json\n\nfrom mmocr.datasets.builder import PARSERS\n\n\[email 
protected]_module()\nclass LineStrParser:\n \"\"\"Parse string of one line in annotation file to dict format.\n\n Args:\n keys (list[str]): Keys in result dict.\n keys_idx (list[int]): Value index in sub-string list\n for each key above.\n separator (str): Separator to separate string to list of sub-string.\n \"\"\"\n\n def __init__(self,\n keys=['filename', 'text'],\n keys_idx=[0, 1],\n separator=' '):\n assert isinstance(keys, list)\n assert isinstance(keys_idx, list)\n assert isinstance(separator, str)\n assert len(keys) > 0\n assert len(keys) == len(keys_idx)\n self.keys = keys\n self.keys_idx = keys_idx\n self.separator = separator\n\n def get_item(self, data_ret, index):\n map_index = index % len(data_ret)\n line_str = data_ret[map_index]\n line_str = line_str.split(self.separator)\n if len(line_str) <= max(self.keys_idx):\n raise Exception(\n f'key index: {max(self.keys_idx)} out of range: {line_str}')\n\n line_info = {}\n for i, key in enumerate(self.keys):\n line_info[key] = line_str[self.keys_idx[i]]\n return line_info\n\n\[email protected]_module()\nclass LineJsonParser:\n \"\"\"Parse json-string of one line in annotation file to dict format.\n\n Args:\n keys (list[str]): Keys in both json-string and result dict.\n \"\"\"\n\n def __init__(self, keys=[], **kwargs):\n assert isinstance(keys, list)\n assert len(keys) > 0\n self.keys = keys\n\n def get_item(self, data_ret, index):\n map_index = index % len(data_ret)\n line_json_obj = json.loads(data_ret[map_index])\n line_info = {}\n for key in self.keys:\n if key not in line_json_obj:\n raise Exception(f'key {key} not in line json {line_json_obj}')\n line_info[key] = line_json_obj[key]\n\n return line_info\n", "path": "mmocr/datasets/utils/parser.py"}]}
1129
171
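The patch in the record above collapses the replace-then-split loop into a single `line_str.split(self.separator)`. A standalone comparison of the two behaviours on a TSV row whose filename contains spaces (the reporter's case); no mmocr install is needed, and the row content is made up for illustration:

```python
# Old behaviour replaced each separator character with a space and then split
# on whitespace, so spaces inside the filename also became split points.
row = "my cropped image.png\tsome transcription"   # hypothetical TSV line
separator = "\t"

tmp = row
for ch in separator:                 # pre-patch logic
    if ch != " ":
        tmp = tmp.replace(ch, " ")
old_result = tmp.split()             # ['my', 'cropped', 'image.png', 'some', 'transcription']

new_result = row.split(separator)    # post-patch logic
print(old_result)
print(new_result)                    # ['my cropped image.png', 'some transcription']
```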
gh_patches_debug_52858
rasdani/github-patches
git_diff
getsentry__sentry-540
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- celery 3.0 causes import error (cannot import abbrtools from celery.utils) Release of celery 3.0 causes an import error at runtime upon any request. This is the stack trace: ``` ImportError: cannot import name abbrtask Error handling request Traceback (most recent call last): File "/Users/guzru/dev/django14/lib/python2.7/site-packages/gunicorn/workers/sync.py", line 107, in handle_request for item in respiter: File "/Users/guzru/dev/django14/lib/python2.7/site-packages/raven/middleware.py", line 28, in __call__ for event in self.application(environ, start_response): File "/Users/guzru/dev/django14/lib/python2.7/site-packages/django/core/handlers/wsgi.py", line 241, in __call__ response = self.get_response(request) File "/Users/guzru/dev/django14/lib/python2.7/site-packages/django/core/handlers/base.py", line 179, in get_response response = self.handle_uncaught_exception(request, resolver, sys.exc_info()) File "/Users/guzru/dev/django14/lib/python2.7/site-packages/django/core/handlers/base.py", line 224, in handle_uncaught_exception if resolver.urlconf_module is None: File "/Users/guzru/dev/django14/lib/python2.7/site-packages/django/core/urlresolvers.py", line 323, in urlconf_module self._urlconf_module = import_module(self.urlconf_name) File "/Users/guzru/dev/django14/lib/python2.7/site-packages/django/utils/importlib.py", line 35, in import_module __import__(name) File "/Users/guzru/dev/django14/lib/python2.7/site-packages/sentry/conf/urls.py", line 19, in <module> admin.autodiscover() File "/Users/guzru/dev/django14/lib/python2.7/site-packages/django/contrib/admin/__init__.py", line 29, in autodiscover import_module('%s.admin' % app) File "/Users/guzru/dev/django14/lib/python2.7/site-packages/django/utils/importlib.py", line 35, in import_module __import__(name) File "/Users/guzru/dev/django14/lib/python2.7/site-packages/djcelery/admin.py", line 19, in <module> from celery.utils import abbrtask ImportError: cannot import name abbrtask ``` Requirements line for celery should become: celery>=2.5.3,<3.0.0 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 #!/usr/bin/env python 2 """ 3 Sentry 4 ====== 5 6 Sentry is a realtime event logging and aggregation platform. It specializes 7 in monitoring errors and extracting all the information needed to do a proper 8 post-mortem without any of the hassle of the standard user feedback loop. 9 10 Sentry is a Server 11 ------------------ 12 13 The Sentry package, at its core, is just a simple server and web UI. It will 14 handle authentication clients (such as `Raven <https://github.com/dcramer/raven>`_) 15 and all of the logic behind storage and aggregation. 16 17 That said, Sentry is not limited to Python. The primary implementation is in 18 Python, but it contains a full API for sending events from any language, in 19 any application. 20 21 :copyright: (c) 2011-2012 by the Sentry Team, see AUTHORS for more details. 22 :license: BSD, see LICENSE for more details. 
23 """ 24 25 from setuptools import setup, find_packages 26 27 # Hack to prevent stupid "TypeError: 'NoneType' object is not callable" error 28 # in multiprocessing/util.py _exit_function when running `python 29 # setup.py test` (see 30 # http://www.eby-sarna.com/pipermail/peak/2010-May/003357.html) 31 try: 32 import multiprocessing 33 except ImportError: 34 pass 35 36 tests_require = [ 37 'django-nose==1.1', 38 'eventlet==0.9.16', 39 'nose==1.1.2', 40 'nydus==0.8.2', 41 'mock==0.8.0', 42 'pyflakes', 43 'pep8', 44 'redis', 45 'unittest2', 46 ] 47 48 49 install_requires = [ 50 'cssutils>=0.9.9', 51 'BeautifulSoup>=3.2.1', 52 'django-celery>=2.5.5,<3.0', 53 'django-crispy-forms>=1.1.4', 54 'Django>=1.2,<1.5', 55 'django-indexer>=0.3.0', 56 'django-paging>=0.2.4', 57 'django-picklefield>=0.2.0', 58 'django-templatetag-sugar>=0.1.0', 59 'gunicorn>=0.13.4', 60 'logan>=0.3.1', 61 'pynliner>=0.4.0', 62 'python-dateutil>=1.5.0,<2.0.0', 63 'pytz>=2011n', 64 'raven>=2.0.0', 65 'simplejson>=2.3.0,<2.5.0', 66 'South>=0.7', 67 'httpagentparser>=1.0.5' 68 ] 69 70 dependency_links = [ 71 'https://github.com/dcramer/pyflakes/tarball/master#egg=pyflakes', 72 ] 73 74 setup( 75 name='sentry', 76 version='4.8.1', 77 author='David Cramer', 78 author_email='[email protected]', 79 url='http://github.com/dcramer/sentry', 80 description='A realtime logging and aggregation server.', 81 long_description=__doc__, 82 packages=find_packages(exclude=['tests']), 83 zip_safe=False, 84 install_requires=install_requires, 85 tests_require=tests_require, 86 extras_require={'test': tests_require}, 87 dependency_links=dependency_links, 88 test_suite='runtests.runtests', 89 license='BSD', 90 include_package_data=True, 91 entry_points={ 92 'console_scripts': [ 93 'sentry = sentry.utils.runner:main', 94 ], 95 }, 96 classifiers=[ 97 'Framework :: Django', 98 'Intended Audience :: Developers', 99 'Intended Audience :: System Administrators', 100 'Operating System :: OS Independent', 101 'Topic :: Software Development' 102 ], 103 ) 104 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -50,6 +50,7 @@ 'cssutils>=0.9.9', 'BeautifulSoup>=3.2.1', 'django-celery>=2.5.5,<3.0', + 'celery>=2.5.3,<3.0', 'django-crispy-forms>=1.1.4', 'Django>=1.2,<1.5', 'django-indexer>=0.3.0',
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -50,6 +50,7 @@\n 'cssutils>=0.9.9',\n 'BeautifulSoup>=3.2.1',\n 'django-celery>=2.5.5,<3.0',\n+ 'celery>=2.5.3,<3.0',\n 'django-crispy-forms>=1.1.4',\n 'Django>=1.2,<1.5',\n 'django-indexer>=0.3.0',\n", "issue": "celery 3.0 causes import error (cannot import abbrtools from celery.utils)\nRelease of celery 3.0 causes an import error at runtime upon any request.\n\nThis is the stack trace:\n\n```\nImportError: cannot import name abbrtask\nError handling request\nTraceback (most recent call last):\n File \"/Users/guzru/dev/django14/lib/python2.7/site-packages/gunicorn/workers/sync.py\", line 107, in handle_request\n for item in respiter:\n File \"/Users/guzru/dev/django14/lib/python2.7/site-packages/raven/middleware.py\", line 28, in __call__\n for event in self.application(environ, start_response):\n File \"/Users/guzru/dev/django14/lib/python2.7/site-packages/django/core/handlers/wsgi.py\", line 241, in __call__\n response = self.get_response(request)\n File \"/Users/guzru/dev/django14/lib/python2.7/site-packages/django/core/handlers/base.py\", line 179, in get_response\n response = self.handle_uncaught_exception(request, resolver, sys.exc_info())\n File \"/Users/guzru/dev/django14/lib/python2.7/site-packages/django/core/handlers/base.py\", line 224, in handle_uncaught_exception\n if resolver.urlconf_module is None:\n File \"/Users/guzru/dev/django14/lib/python2.7/site-packages/django/core/urlresolvers.py\", line 323, in urlconf_module\n self._urlconf_module = import_module(self.urlconf_name)\n File \"/Users/guzru/dev/django14/lib/python2.7/site-packages/django/utils/importlib.py\", line 35, in import_module\n __import__(name)\n File \"/Users/guzru/dev/django14/lib/python2.7/site-packages/sentry/conf/urls.py\", line 19, in <module>\n admin.autodiscover()\n File \"/Users/guzru/dev/django14/lib/python2.7/site-packages/django/contrib/admin/__init__.py\", line 29, in autodiscover\n import_module('%s.admin' % app)\n File \"/Users/guzru/dev/django14/lib/python2.7/site-packages/django/utils/importlib.py\", line 35, in import_module\n __import__(name)\n File \"/Users/guzru/dev/django14/lib/python2.7/site-packages/djcelery/admin.py\", line 19, in <module>\n from celery.utils import abbrtask\nImportError: cannot import name abbrtask\n```\n\nRequirements line for celery should become:\n\ncelery>=2.5.3,<3.0.0\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"\nSentry\n======\n\nSentry is a realtime event logging and aggregation platform. It specializes\nin monitoring errors and extracting all the information needed to do a proper\npost-mortem without any of the hassle of the standard user feedback loop.\n\nSentry is a Server\n------------------\n\nThe Sentry package, at its core, is just a simple server and web UI. It will\nhandle authentication clients (such as `Raven <https://github.com/dcramer/raven>`_)\nand all of the logic behind storage and aggregation.\n\nThat said, Sentry is not limited to Python. 
The primary implementation is in\nPython, but it contains a full API for sending events from any language, in\nany application.\n\n:copyright: (c) 2011-2012 by the Sentry Team, see AUTHORS for more details.\n:license: BSD, see LICENSE for more details.\n\"\"\"\n\nfrom setuptools import setup, find_packages\n\n# Hack to prevent stupid \"TypeError: 'NoneType' object is not callable\" error\n# in multiprocessing/util.py _exit_function when running `python\n# setup.py test` (see\n# http://www.eby-sarna.com/pipermail/peak/2010-May/003357.html)\ntry:\n import multiprocessing\nexcept ImportError:\n pass\n\ntests_require = [\n 'django-nose==1.1',\n 'eventlet==0.9.16',\n 'nose==1.1.2',\n 'nydus==0.8.2',\n 'mock==0.8.0',\n 'pyflakes',\n 'pep8',\n 'redis',\n 'unittest2',\n]\n\n\ninstall_requires = [\n 'cssutils>=0.9.9',\n 'BeautifulSoup>=3.2.1',\n 'django-celery>=2.5.5,<3.0',\n 'django-crispy-forms>=1.1.4',\n 'Django>=1.2,<1.5',\n 'django-indexer>=0.3.0',\n 'django-paging>=0.2.4',\n 'django-picklefield>=0.2.0',\n 'django-templatetag-sugar>=0.1.0',\n 'gunicorn>=0.13.4',\n 'logan>=0.3.1',\n 'pynliner>=0.4.0',\n 'python-dateutil>=1.5.0,<2.0.0',\n 'pytz>=2011n',\n 'raven>=2.0.0',\n 'simplejson>=2.3.0,<2.5.0',\n 'South>=0.7',\n 'httpagentparser>=1.0.5'\n]\n\ndependency_links = [\n 'https://github.com/dcramer/pyflakes/tarball/master#egg=pyflakes',\n]\n\nsetup(\n name='sentry',\n version='4.8.1',\n author='David Cramer',\n author_email='[email protected]',\n url='http://github.com/dcramer/sentry',\n description='A realtime logging and aggregation server.',\n long_description=__doc__,\n packages=find_packages(exclude=['tests']),\n zip_safe=False,\n install_requires=install_requires,\n tests_require=tests_require,\n extras_require={'test': tests_require},\n dependency_links=dependency_links,\n test_suite='runtests.runtests',\n license='BSD',\n include_package_data=True,\n entry_points={\n 'console_scripts': [\n 'sentry = sentry.utils.runner:main',\n ],\n },\n classifiers=[\n 'Framework :: Django',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Operating System :: OS Independent',\n 'Topic :: Software Development'\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\"\"\"\nSentry\n======\n\nSentry is a realtime event logging and aggregation platform. It specializes\nin monitoring errors and extracting all the information needed to do a proper\npost-mortem without any of the hassle of the standard user feedback loop.\n\nSentry is a Server\n------------------\n\nThe Sentry package, at its core, is just a simple server and web UI. It will\nhandle authentication clients (such as `Raven <https://github.com/dcramer/raven>`_)\nand all of the logic behind storage and aggregation.\n\nThat said, Sentry is not limited to Python. 
The primary implementation is in\nPython, but it contains a full API for sending events from any language, in\nany application.\n\n:copyright: (c) 2011-2012 by the Sentry Team, see AUTHORS for more details.\n:license: BSD, see LICENSE for more details.\n\"\"\"\n\nfrom setuptools import setup, find_packages\n\n# Hack to prevent stupid \"TypeError: 'NoneType' object is not callable\" error\n# in multiprocessing/util.py _exit_function when running `python\n# setup.py test` (see\n# http://www.eby-sarna.com/pipermail/peak/2010-May/003357.html)\ntry:\n import multiprocessing\nexcept ImportError:\n pass\n\ntests_require = [\n 'django-nose==1.1',\n 'eventlet==0.9.16',\n 'nose==1.1.2',\n 'nydus==0.8.2',\n 'mock==0.8.0',\n 'pyflakes',\n 'pep8',\n 'redis',\n 'unittest2',\n]\n\n\ninstall_requires = [\n 'cssutils>=0.9.9',\n 'BeautifulSoup>=3.2.1',\n 'django-celery>=2.5.5,<3.0',\n 'celery>=2.5.3,<3.0',\n 'django-crispy-forms>=1.1.4',\n 'Django>=1.2,<1.5',\n 'django-indexer>=0.3.0',\n 'django-paging>=0.2.4',\n 'django-picklefield>=0.2.0',\n 'django-templatetag-sugar>=0.1.0',\n 'gunicorn>=0.13.4',\n 'logan>=0.3.1',\n 'pynliner>=0.4.0',\n 'python-dateutil>=1.5.0,<2.0.0',\n 'pytz>=2011n',\n 'raven>=2.0.0',\n 'simplejson>=2.3.0,<2.5.0',\n 'South>=0.7',\n 'httpagentparser>=1.0.5'\n]\n\ndependency_links = [\n 'https://github.com/dcramer/pyflakes/tarball/master#egg=pyflakes',\n]\n\nsetup(\n name='sentry',\n version='4.8.1',\n author='David Cramer',\n author_email='[email protected]',\n url='http://github.com/dcramer/sentry',\n description='A realtime logging and aggregation server.',\n long_description=__doc__,\n packages=find_packages(exclude=['tests']),\n zip_safe=False,\n install_requires=install_requires,\n tests_require=tests_require,\n extras_require={'test': tests_require},\n dependency_links=dependency_links,\n test_suite='runtests.runtests',\n license='BSD',\n include_package_data=True,\n entry_points={\n 'console_scripts': [\n 'sentry = sentry.utils.runner:main',\n ],\n },\n classifiers=[\n 'Framework :: Django',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Operating System :: OS Independent',\n 'Topic :: Software Development'\n ],\n)\n", "path": "setup.py"}]}
1897
127
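The fix above pins `celery>=2.5.3,<3.0` next to `django-celery`, keeping out the 3.0 release whose removal of `celery.utils.abbrtask` breaks `djcelery.admin`. A quick standalone check of what that specifier admits, using the third-party `packaging` library (an assumption for illustration only; sentry's setup.py does not use it):

```python
# Requires `pip install packaging`; not part of the sentry codebase.
from packaging.specifiers import SpecifierSet
from packaging.version import Version

celery_pin = SpecifierSet(">=2.5.3,<3.0")
for candidate in ("2.5.3", "2.5.5", "3.0.0"):
    print(candidate, Version(candidate) in celery_pin)
# 2.5.3 True, 2.5.5 True, 3.0.0 False: celery 3.x can no longer be resolved.
```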
gh_patches_debug_23439
rasdani/github-patches
git_diff
ESMCI__cime-1490
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- python 3 format causing pylint fails In PR #1471 I objected to the usage of python3 syntax format statements instead of %s as used throughout the code. Now I've noticed that this usage was introduced to the trunk in PR #1388 Master is failing pylint tests due to #1388 - while fixing the pylint test can you please revert to %s usage throughout? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `scripts/lib/CIME/XML/compsets.py` Content: ``` 1 """ 2 Common interface to XML files which follow the compsets format, 3 """ 4 5 from CIME.XML.standard_module_setup import * 6 from CIME.XML.generic_xml import GenericXML 7 from CIME.XML.entry_id import EntryID 8 from CIME.XML.files import Files 9 10 11 logger = logging.getLogger(__name__) 12 13 class Compsets(GenericXML): 14 15 def __init__(self, infile=None, files=None): 16 if files is None: 17 files = Files() 18 schema = files.get_schema("COMPSETS_SPEC_FILE") 19 GenericXML.__init__(self, infile, schema=schema) 20 self.groups={} 21 22 def get_compset_match(self, name): 23 """ 24 science support is used in cesm to determine if this compset and grid 25 is scientifically supported. science_support is returned as an array of grids for this compset 26 """ 27 nodes = self.get_nodes("compset") 28 alias = None 29 lname = None 30 31 science_support = [] 32 33 for node in nodes: 34 alias = self.get_element_text("alias",root=node) 35 lname = self.get_element_text("lname",root=node) 36 if alias == name or lname == name: 37 science_support_nodes = self.get_nodes("science_support", root=node) 38 for node in science_support_nodes: 39 science_support.append(node.get("grid")) 40 user_mods_node = self.get_optional_node("user_mods", root=node) 41 if user_mods_node is not None: 42 user_mods = user_mods_node.text 43 else: 44 user_mods = None 45 logger.debug("Found node match with alias: {} and lname: {}".format(alias, lname)) 46 return (lname, alias, science_support, user_mods) 47 return (None, None, [False], None) 48 49 def get_compset_var_settings(self, compset, grid): 50 ''' 51 Variables can be set in config_compsets.xml in entry id settings with compset and grid attributes 52 find and return id value pairs here 53 ''' 54 nodes = self.get_nodes("entry") 55 # Get an empty entryid obj to use 56 entryidobj = EntryID() 57 result = [] 58 for node in nodes: 59 value = entryidobj.get_default_value(node, {"grid":grid, "compset":compset}) 60 if value is not None: 61 result.append((node.get("id"), value)) 62 return result 63 64 def get_value(self, name, attribute=None, resolved=False, subgroup=None): 65 expect(subgroup is None, "This class does not support subgroups") 66 if name == "help": 67 rootnode = self.get_node("help") 68 helptext = rootnode.text 69 return helptext 70 else: 71 compsets = {} 72 nodes = self.get_nodes(nodename="compset") 73 for node in nodes: 74 for child in node: 75 logger.debug ("Here child is {} with value {}".format(child.tag,child.text)) 76 if child.tag == "alias": 77 alias = child.text 78 if child.tag == "lname": 79 lname = child.text 80 compsets[alias] = lname 81 return compsets 82 83 def print_values(self, help=True): 84 help_text = self.get_value(name="help") 85 compsets_text = self.get_value("names") 86 if help: 87 logger.info(" {} ".format(help_text)) 88 89 logger.info(" --------------------------------------") 90 logger.info(" Compset Alias: 
Compset Long Name ") 91 logger.info(" --------------------------------------") 92 for v in compsets_text.iteritems(): 93 label, definition = v 94 logger.info(" {:20} : {}".format(label, definition)) 95 ``` Path: `scripts/lib/CIME/code_checker.py` Content: ``` 1 """ 2 Libraries for checking python code with pylint 3 """ 4 5 from CIME.XML.standard_module_setup import * 6 7 from CIME.utils import run_cmd, run_cmd_no_fail, expect, get_cime_root, is_python_executable 8 9 from multiprocessing.dummy import Pool as ThreadPool 10 from distutils.spawn import find_executable 11 12 logger = logging.getLogger(__name__) 13 14 ############################################################################### 15 def _run_pylint(on_file, interactive): 16 ############################################################################### 17 pylint = find_executable("pylint") 18 19 cmd_options = " --disable=I,C,R,logging-not-lazy,wildcard-import,unused-wildcard-import,fixme,broad-except,bare-except,eval-used,exec-used,global-statement" 20 cimeroot = get_cime_root() 21 22 if "scripts/Tools" in on_file: 23 cmd_options +=",relative-import" 24 25 # add init-hook option 26 cmd_options += " --init-hook='sys.path.extend((\"%s\",\"%s\",\"%s\"))'"%\ 27 (os.path.join(cimeroot,"scripts","lib"), 28 os.path.join(cimeroot,"scripts","Tools"), 29 os.path.join(cimeroot,"scripts","fortran_unit_testing","python")) 30 31 cmd = "%s %s %s" % (pylint, cmd_options, on_file) 32 logger.debug("pylint command is %s"%cmd) 33 stat, out, err = run_cmd(cmd, verbose=False, from_dir=cimeroot) 34 if stat != 0: 35 if interactive: 36 logger.info("File %s has pylint problems, please fix\n Use command: %s" % (on_file, cmd)) 37 logger.info(out + "\n" + err) 38 return (on_file, out + "\n" + err) 39 else: 40 if interactive: 41 logger.info("File %s has no pylint problems" % on_file) 42 return (on_file, "") 43 44 ############################################################################### 45 def _matches(file_path, file_ends): 46 ############################################################################### 47 for file_end in file_ends: 48 if file_path.endswith(file_end): 49 return True 50 51 return False 52 53 ############################################################################### 54 def _should_pylint_skip(filepath): 55 ############################################################################### 56 # TODO - get rid of this 57 list_of_directories_to_ignore = ("xmlconvertors", "pointclm", "point_clm", "tools", "machines", "apidocs") 58 for dir_to_skip in list_of_directories_to_ignore: 59 if dir_to_skip in filepath: 60 return True 61 62 return False 63 64 ############################################################################### 65 def get_all_checkable_files(): 66 ############################################################################### 67 cimeroot = get_cime_root() 68 all_git_files = run_cmd_no_fail("git ls-files", from_dir=cimeroot, verbose=False).splitlines() 69 70 files_to_test = [item for item in all_git_files 71 if ((item.endswith(".py") or is_python_executable(os.path.join(cimeroot, item))) and not _should_pylint_skip(item))] 72 return files_to_test 73 74 ############################################################################### 75 def check_code(files, num_procs=10, interactive=False): 76 ############################################################################### 77 """ 78 Check all python files in the given directory 79 80 Returns True if all files had no problems 81 """ 82 # Get list of files to check, we look to 
see if user-provided file argument 83 # is a valid file, if not, we search the repo for a file with similar name. 84 repo_files = run_cmd_no_fail('git ls-files --full-name %s' % get_cime_root(), verbose=False).splitlines() 85 files_to_check = [] 86 if files: 87 for filearg in files: 88 if os.path.exists(filearg): 89 files_to_check.append(os.path.abspath(filearg)) 90 else: 91 found = False 92 for repo_file in repo_files: 93 if repo_file.endswith(filearg): 94 found = True 95 files_to_check.append(repo_file) # could have multiple matches 96 97 if not found: 98 logger.warning("Could not find file matching argument '%s'" % filearg) 99 else: 100 # Check every python file 101 files_to_check = get_all_checkable_files() 102 103 expect(len(files_to_check) > 0, "No matching files found") 104 105 # No point in using more threads than files 106 if len(files_to_check) < num_procs: 107 num_procs = len(files_to_check) 108 109 pool = ThreadPool(num_procs) 110 results = pool.map(lambda x : _run_pylint(x, interactive), files_to_check) 111 pool.close() 112 pool.join() 113 return dict(results) 114 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scripts/lib/CIME/XML/compsets.py b/scripts/lib/CIME/XML/compsets.py --- a/scripts/lib/CIME/XML/compsets.py +++ b/scripts/lib/CIME/XML/compsets.py @@ -80,10 +80,10 @@ compsets[alias] = lname return compsets - def print_values(self, help=True): + def print_values(self, arg_help=True): help_text = self.get_value(name="help") compsets_text = self.get_value("names") - if help: + if arg_help: logger.info(" {} ".format(help_text)) logger.info(" --------------------------------------") diff --git a/scripts/lib/CIME/code_checker.py b/scripts/lib/CIME/code_checker.py --- a/scripts/lib/CIME/code_checker.py +++ b/scripts/lib/CIME/code_checker.py @@ -16,7 +16,7 @@ ############################################################################### pylint = find_executable("pylint") - cmd_options = " --disable=I,C,R,logging-not-lazy,wildcard-import,unused-wildcard-import,fixme,broad-except,bare-except,eval-used,exec-used,global-statement" + cmd_options = " --disable=I,C,R,logging-not-lazy,wildcard-import,unused-wildcard-import,fixme,broad-except,bare-except,eval-used,exec-used,global-statement,logging-format-interpolation" cimeroot = get_cime_root() if "scripts/Tools" in on_file:
{"golden_diff": "diff --git a/scripts/lib/CIME/XML/compsets.py b/scripts/lib/CIME/XML/compsets.py\n--- a/scripts/lib/CIME/XML/compsets.py\n+++ b/scripts/lib/CIME/XML/compsets.py\n@@ -80,10 +80,10 @@\n compsets[alias] = lname\n return compsets\n \n- def print_values(self, help=True):\n+ def print_values(self, arg_help=True):\n help_text = self.get_value(name=\"help\")\n compsets_text = self.get_value(\"names\")\n- if help:\n+ if arg_help:\n logger.info(\" {} \".format(help_text))\n \n logger.info(\" --------------------------------------\")\ndiff --git a/scripts/lib/CIME/code_checker.py b/scripts/lib/CIME/code_checker.py\n--- a/scripts/lib/CIME/code_checker.py\n+++ b/scripts/lib/CIME/code_checker.py\n@@ -16,7 +16,7 @@\n ###############################################################################\n pylint = find_executable(\"pylint\")\n \n- cmd_options = \" --disable=I,C,R,logging-not-lazy,wildcard-import,unused-wildcard-import,fixme,broad-except,bare-except,eval-used,exec-used,global-statement\"\n+ cmd_options = \" --disable=I,C,R,logging-not-lazy,wildcard-import,unused-wildcard-import,fixme,broad-except,bare-except,eval-used,exec-used,global-statement,logging-format-interpolation\"\n cimeroot = get_cime_root()\n \n if \"scripts/Tools\" in on_file:\n", "issue": "python 3 format causing pylint fails \nIn PR #1471 I objected to the usage of python3 syntax format statements instead of %s as used throughout the code. Now I've noticed that this usage was introduced to the trunk in PR #1388\r\n\r\nMaster is failing pylint tests due to #1388 - while fixing the pylint test can you please revert to %s usage throughout?\n", "before_files": [{"content": "\"\"\"\nCommon interface to XML files which follow the compsets format,\n\"\"\"\n\nfrom CIME.XML.standard_module_setup import *\nfrom CIME.XML.generic_xml import GenericXML\nfrom CIME.XML.entry_id import EntryID\nfrom CIME.XML.files import Files\n\n\nlogger = logging.getLogger(__name__)\n\nclass Compsets(GenericXML):\n\n def __init__(self, infile=None, files=None):\n if files is None:\n files = Files()\n schema = files.get_schema(\"COMPSETS_SPEC_FILE\")\n GenericXML.__init__(self, infile, schema=schema)\n self.groups={}\n\n def get_compset_match(self, name):\n \"\"\"\n science support is used in cesm to determine if this compset and grid\n is scientifically supported. 
science_support is returned as an array of grids for this compset\n \"\"\"\n nodes = self.get_nodes(\"compset\")\n alias = None\n lname = None\n\n science_support = []\n\n for node in nodes:\n alias = self.get_element_text(\"alias\",root=node)\n lname = self.get_element_text(\"lname\",root=node)\n if alias == name or lname == name:\n science_support_nodes = self.get_nodes(\"science_support\", root=node)\n for node in science_support_nodes:\n science_support.append(node.get(\"grid\"))\n user_mods_node = self.get_optional_node(\"user_mods\", root=node)\n if user_mods_node is not None:\n user_mods = user_mods_node.text\n else:\n user_mods = None\n logger.debug(\"Found node match with alias: {} and lname: {}\".format(alias, lname))\n return (lname, alias, science_support, user_mods)\n return (None, None, [False], None)\n\n def get_compset_var_settings(self, compset, grid):\n '''\n Variables can be set in config_compsets.xml in entry id settings with compset and grid attributes\n find and return id value pairs here\n '''\n nodes = self.get_nodes(\"entry\")\n # Get an empty entryid obj to use\n entryidobj = EntryID()\n result = []\n for node in nodes:\n value = entryidobj.get_default_value(node, {\"grid\":grid, \"compset\":compset})\n if value is not None:\n result.append((node.get(\"id\"), value))\n return result\n\n def get_value(self, name, attribute=None, resolved=False, subgroup=None):\n expect(subgroup is None, \"This class does not support subgroups\")\n if name == \"help\":\n rootnode = self.get_node(\"help\")\n helptext = rootnode.text\n return helptext\n else:\n compsets = {}\n nodes = self.get_nodes(nodename=\"compset\")\n for node in nodes:\n for child in node:\n logger.debug (\"Here child is {} with value {}\".format(child.tag,child.text))\n if child.tag == \"alias\":\n alias = child.text\n if child.tag == \"lname\":\n lname = child.text\n compsets[alias] = lname\n return compsets\n\n def print_values(self, help=True):\n help_text = self.get_value(name=\"help\")\n compsets_text = self.get_value(\"names\")\n if help:\n logger.info(\" {} \".format(help_text))\n\n logger.info(\" --------------------------------------\")\n logger.info(\" Compset Alias: Compset Long Name \")\n logger.info(\" --------------------------------------\")\n for v in compsets_text.iteritems():\n label, definition = v\n logger.info(\" {:20} : {}\".format(label, definition))\n", "path": "scripts/lib/CIME/XML/compsets.py"}, {"content": "\"\"\"\nLibraries for checking python code with pylint\n\"\"\"\n\nfrom CIME.XML.standard_module_setup import *\n\nfrom CIME.utils import run_cmd, run_cmd_no_fail, expect, get_cime_root, is_python_executable\n\nfrom multiprocessing.dummy import Pool as ThreadPool\nfrom distutils.spawn import find_executable\n\nlogger = logging.getLogger(__name__)\n\n###############################################################################\ndef _run_pylint(on_file, interactive):\n###############################################################################\n pylint = find_executable(\"pylint\")\n\n cmd_options = \" --disable=I,C,R,logging-not-lazy,wildcard-import,unused-wildcard-import,fixme,broad-except,bare-except,eval-used,exec-used,global-statement\"\n cimeroot = get_cime_root()\n\n if \"scripts/Tools\" in on_file:\n cmd_options +=\",relative-import\"\n\n # add init-hook option\n cmd_options += \" --init-hook='sys.path.extend((\\\"%s\\\",\\\"%s\\\",\\\"%s\\\"))'\"%\\\n (os.path.join(cimeroot,\"scripts\",\"lib\"),\n os.path.join(cimeroot,\"scripts\",\"Tools\"),\n 
os.path.join(cimeroot,\"scripts\",\"fortran_unit_testing\",\"python\"))\n\n cmd = \"%s %s %s\" % (pylint, cmd_options, on_file)\n logger.debug(\"pylint command is %s\"%cmd)\n stat, out, err = run_cmd(cmd, verbose=False, from_dir=cimeroot)\n if stat != 0:\n if interactive:\n logger.info(\"File %s has pylint problems, please fix\\n Use command: %s\" % (on_file, cmd))\n logger.info(out + \"\\n\" + err)\n return (on_file, out + \"\\n\" + err)\n else:\n if interactive:\n logger.info(\"File %s has no pylint problems\" % on_file)\n return (on_file, \"\")\n\n###############################################################################\ndef _matches(file_path, file_ends):\n###############################################################################\n for file_end in file_ends:\n if file_path.endswith(file_end):\n return True\n\n return False\n\n###############################################################################\ndef _should_pylint_skip(filepath):\n###############################################################################\n # TODO - get rid of this\n list_of_directories_to_ignore = (\"xmlconvertors\", \"pointclm\", \"point_clm\", \"tools\", \"machines\", \"apidocs\")\n for dir_to_skip in list_of_directories_to_ignore:\n if dir_to_skip in filepath:\n return True\n\n return False\n\n###############################################################################\ndef get_all_checkable_files():\n###############################################################################\n cimeroot = get_cime_root()\n all_git_files = run_cmd_no_fail(\"git ls-files\", from_dir=cimeroot, verbose=False).splitlines()\n\n files_to_test = [item for item in all_git_files\n if ((item.endswith(\".py\") or is_python_executable(os.path.join(cimeroot, item))) and not _should_pylint_skip(item))]\n return files_to_test\n\n###############################################################################\ndef check_code(files, num_procs=10, interactive=False):\n###############################################################################\n \"\"\"\n Check all python files in the given directory\n\n Returns True if all files had no problems\n \"\"\"\n # Get list of files to check, we look to see if user-provided file argument\n # is a valid file, if not, we search the repo for a file with similar name.\n repo_files = run_cmd_no_fail('git ls-files --full-name %s' % get_cime_root(), verbose=False).splitlines()\n files_to_check = []\n if files:\n for filearg in files:\n if os.path.exists(filearg):\n files_to_check.append(os.path.abspath(filearg))\n else:\n found = False\n for repo_file in repo_files:\n if repo_file.endswith(filearg):\n found = True\n files_to_check.append(repo_file) # could have multiple matches\n\n if not found:\n logger.warning(\"Could not find file matching argument '%s'\" % filearg)\n else:\n # Check every python file\n files_to_check = get_all_checkable_files()\n\n expect(len(files_to_check) > 0, \"No matching files found\")\n\n # No point in using more threads than files\n if len(files_to_check) < num_procs:\n num_procs = len(files_to_check)\n\n pool = ThreadPool(num_procs)\n results = pool.map(lambda x : _run_pylint(x, interactive), files_to_check)\n pool.close()\n pool.join()\n return dict(results)\n", "path": "scripts/lib/CIME/code_checker.py"}], "after_files": [{"content": "\"\"\"\nCommon interface to XML files which follow the compsets format,\n\"\"\"\n\nfrom CIME.XML.standard_module_setup import *\nfrom CIME.XML.generic_xml import GenericXML\nfrom CIME.XML.entry_id import EntryID\nfrom 
CIME.XML.files import Files\n\n\nlogger = logging.getLogger(__name__)\n\nclass Compsets(GenericXML):\n\n def __init__(self, infile=None, files=None):\n if files is None:\n files = Files()\n schema = files.get_schema(\"COMPSETS_SPEC_FILE\")\n GenericXML.__init__(self, infile, schema=schema)\n self.groups={}\n\n def get_compset_match(self, name):\n \"\"\"\n science support is used in cesm to determine if this compset and grid\n is scientifically supported. science_support is returned as an array of grids for this compset\n \"\"\"\n nodes = self.get_nodes(\"compset\")\n alias = None\n lname = None\n\n science_support = []\n\n for node in nodes:\n alias = self.get_element_text(\"alias\",root=node)\n lname = self.get_element_text(\"lname\",root=node)\n if alias == name or lname == name:\n science_support_nodes = self.get_nodes(\"science_support\", root=node)\n for node in science_support_nodes:\n science_support.append(node.get(\"grid\"))\n user_mods_node = self.get_optional_node(\"user_mods\", root=node)\n if user_mods_node is not None:\n user_mods = user_mods_node.text\n else:\n user_mods = None\n logger.debug(\"Found node match with alias: {} and lname: {}\".format(alias, lname))\n return (lname, alias, science_support, user_mods)\n return (None, None, [False], None)\n\n def get_compset_var_settings(self, compset, grid):\n '''\n Variables can be set in config_compsets.xml in entry id settings with compset and grid attributes\n find and return id value pairs here\n '''\n nodes = self.get_nodes(\"entry\")\n # Get an empty entryid obj to use\n entryidobj = EntryID()\n result = []\n for node in nodes:\n value = entryidobj.get_default_value(node, {\"grid\":grid, \"compset\":compset})\n if value is not None:\n result.append((node.get(\"id\"), value))\n return result\n\n def get_value(self, name, attribute=None, resolved=False, subgroup=None):\n expect(subgroup is None, \"This class does not support subgroups\")\n if name == \"help\":\n rootnode = self.get_node(\"help\")\n helptext = rootnode.text\n return helptext\n else:\n compsets = {}\n nodes = self.get_nodes(nodename=\"compset\")\n for node in nodes:\n for child in node:\n logger.debug (\"Here child is {} with value {}\".format(child.tag,child.text))\n if child.tag == \"alias\":\n alias = child.text\n if child.tag == \"lname\":\n lname = child.text\n compsets[alias] = lname\n return compsets\n\n def print_values(self, arg_help=True):\n help_text = self.get_value(name=\"help\")\n compsets_text = self.get_value(\"names\")\n if arg_help:\n logger.info(\" {} \".format(help_text))\n\n logger.info(\" --------------------------------------\")\n logger.info(\" Compset Alias: Compset Long Name \")\n logger.info(\" --------------------------------------\")\n for v in compsets_text.iteritems():\n label, definition = v\n logger.info(\" {:20} : {}\".format(label, definition))\n", "path": "scripts/lib/CIME/XML/compsets.py"}, {"content": "\"\"\"\nLibraries for checking python code with pylint\n\"\"\"\n\nfrom CIME.XML.standard_module_setup import *\n\nfrom CIME.utils import run_cmd, run_cmd_no_fail, expect, get_cime_root, is_python_executable\n\nfrom multiprocessing.dummy import Pool as ThreadPool\nfrom distutils.spawn import find_executable\n\nlogger = logging.getLogger(__name__)\n\n###############################################################################\ndef _run_pylint(on_file, interactive):\n###############################################################################\n pylint = find_executable(\"pylint\")\n\n cmd_options = \" 
--disable=I,C,R,logging-not-lazy,wildcard-import,unused-wildcard-import,fixme,broad-except,bare-except,eval-used,exec-used,global-statement,logging-format-interpolation\"\n cimeroot = get_cime_root()\n\n if \"scripts/Tools\" in on_file:\n cmd_options +=\",relative-import\"\n\n # add init-hook option\n cmd_options += \" --init-hook='sys.path.extend((\\\"%s\\\",\\\"%s\\\",\\\"%s\\\"))'\"%\\\n (os.path.join(cimeroot,\"scripts\",\"lib\"),\n os.path.join(cimeroot,\"scripts\",\"Tools\"),\n os.path.join(cimeroot,\"scripts\",\"fortran_unit_testing\",\"python\"))\n\n cmd = \"%s %s %s\" % (pylint, cmd_options, on_file)\n logger.debug(\"pylint command is %s\"%cmd)\n stat, out, err = run_cmd(cmd, verbose=False, from_dir=cimeroot)\n if stat != 0:\n if interactive:\n logger.info(\"File %s has pylint problems, please fix\\n Use command: %s\" % (on_file, cmd))\n logger.info(out + \"\\n\" + err)\n return (on_file, out + \"\\n\" + err)\n else:\n if interactive:\n logger.info(\"File %s has no pylint problems\" % on_file)\n return (on_file, \"\")\n\n###############################################################################\ndef _matches(file_path, file_ends):\n###############################################################################\n for file_end in file_ends:\n if file_path.endswith(file_end):\n return True\n\n return False\n\n###############################################################################\ndef _should_pylint_skip(filepath):\n###############################################################################\n # TODO - get rid of this\n list_of_directories_to_ignore = (\"xmlconvertors\", \"pointclm\", \"point_clm\", \"tools\", \"machines\", \"apidocs\")\n for dir_to_skip in list_of_directories_to_ignore:\n if dir_to_skip in filepath:\n return True\n\n return False\n\n###############################################################################\ndef get_all_checkable_files():\n###############################################################################\n cimeroot = get_cime_root()\n all_git_files = run_cmd_no_fail(\"git ls-files\", from_dir=cimeroot, verbose=False).splitlines()\n\n files_to_test = [item for item in all_git_files\n if ((item.endswith(\".py\") or is_python_executable(os.path.join(cimeroot, item))) and not _should_pylint_skip(item))]\n return files_to_test\n\n###############################################################################\ndef check_code(files, num_procs=10, interactive=False):\n###############################################################################\n \"\"\"\n Check all python files in the given directory\n\n Returns True if all files had no problems\n \"\"\"\n # Get list of files to check, we look to see if user-provided file argument\n # is a valid file, if not, we search the repo for a file with similar name.\n repo_files = run_cmd_no_fail('git ls-files --full-name %s' % get_cime_root(), verbose=False).splitlines()\n files_to_check = []\n if files:\n for filearg in files:\n if os.path.exists(filearg):\n files_to_check.append(os.path.abspath(filearg))\n else:\n found = False\n for repo_file in repo_files:\n if repo_file.endswith(filearg):\n found = True\n files_to_check.append(repo_file) # could have multiple matches\n\n if not found:\n logger.warning(\"Could not find file matching argument '%s'\" % filearg)\n else:\n # Check every python file\n files_to_check = get_all_checkable_files()\n\n expect(len(files_to_check) > 0, \"No matching files found\")\n\n # No point in using more threads than files\n if len(files_to_check) < num_procs:\n 
num_procs = len(files_to_check)\n\n pool = ThreadPool(num_procs)\n results = pool.map(lambda x : _run_pylint(x, interactive), files_to_check)\n pool.close()\n pool.join()\n return dict(results)\n", "path": "scripts/lib/CIME/code_checker.py"}]}
2,514
339
gh_patches_debug_37689
rasdani/github-patches
git_diff
encode__httpx-215
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- How to configure TLS beyond client certificate and CA root certs? Typically in requests you can use a __HTTPAdapter__ to hijack and set the SSL context and define what ciphers to use, how does one go about doing the same in __httpx__? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `httpx/config.py` Content: ``` 1 import ssl 2 import typing 3 from pathlib import Path 4 5 import certifi 6 7 from .__version__ import __version__ 8 9 CertTypes = typing.Union[str, typing.Tuple[str, str], typing.Tuple[str, str, str]] 10 VerifyTypes = typing.Union[str, bool] 11 TimeoutTypes = typing.Union[float, typing.Tuple[float, float, float], "TimeoutConfig"] 12 13 14 USER_AGENT = f"python-httpx/{__version__}" 15 16 DEFAULT_CIPHERS = ":".join( 17 [ 18 "ECDHE+AESGCM", 19 "ECDHE+CHACHA20", 20 "DHE+AESGCM", 21 "DHE+CHACHA20", 22 "ECDH+AESGCM", 23 "DH+AESGCM", 24 "ECDH+AES", 25 "DH+AES", 26 "RSA+AESGCM", 27 "RSA+AES", 28 "!aNULL", 29 "!eNULL", 30 "!MD5", 31 "!DSS", 32 ] 33 ) 34 35 36 class SSLConfig: 37 """ 38 SSL Configuration. 39 """ 40 41 def __init__(self, *, cert: CertTypes = None, verify: VerifyTypes = True): 42 self.cert = cert 43 self.verify = verify 44 45 self.ssl_context: typing.Optional[ssl.SSLContext] = None 46 47 def __eq__(self, other: typing.Any) -> bool: 48 return ( 49 isinstance(other, self.__class__) 50 and self.cert == other.cert 51 and self.verify == other.verify 52 ) 53 54 def __repr__(self) -> str: 55 class_name = self.__class__.__name__ 56 return f"{class_name}(cert={self.cert}, verify={self.verify})" 57 58 def with_overrides( 59 self, cert: CertTypes = None, verify: VerifyTypes = None 60 ) -> "SSLConfig": 61 cert = self.cert if cert is None else cert 62 verify = self.verify if verify is None else verify 63 if (cert == self.cert) and (verify == self.verify): 64 return self 65 return SSLConfig(cert=cert, verify=verify) 66 67 def load_ssl_context(self) -> ssl.SSLContext: 68 if self.ssl_context is None: 69 self.ssl_context = ( 70 self.load_ssl_context_verify() 71 if self.verify 72 else self.load_ssl_context_no_verify() 73 ) 74 75 assert self.ssl_context is not None 76 return self.ssl_context 77 78 def load_ssl_context_no_verify(self) -> ssl.SSLContext: 79 """ 80 Return an SSL context for unverified connections. 81 """ 82 context = self._create_default_ssl_context() 83 context.verify_mode = ssl.CERT_NONE 84 context.check_hostname = False 85 return context 86 87 def load_ssl_context_verify(self) -> ssl.SSLContext: 88 """ 89 Return an SSL context for verified connections. 90 """ 91 if isinstance(self.verify, bool): 92 ca_bundle_path = DEFAULT_CA_BUNDLE_PATH 93 elif Path(self.verify).exists(): 94 ca_bundle_path = Path(self.verify) 95 else: 96 raise IOError( 97 "Could not find a suitable TLS CA certificate bundle, " 98 "invalid path: {}".format(self.verify) 99 ) 100 101 context = self._create_default_ssl_context() 102 context.verify_mode = ssl.CERT_REQUIRED 103 context.check_hostname = True 104 105 # Signal to server support for PHA in TLS 1.3. Raises an 106 # AttributeError if only read-only access is implemented. 107 try: 108 context.post_handshake_auth = True # type: ignore 109 except AttributeError: # pragma: nocover 110 pass 111 112 # Disable using 'commonName' for SSLContext.check_hostname 113 # when the 'subjectAltName' extension isn't available. 
114 try: 115 context.hostname_checks_common_name = False # type: ignore 116 except AttributeError: # pragma: nocover 117 pass 118 119 if ca_bundle_path.is_file(): 120 context.load_verify_locations(cafile=str(ca_bundle_path)) 121 elif ca_bundle_path.is_dir(): 122 context.load_verify_locations(capath=str(ca_bundle_path)) 123 124 if self.cert is not None: 125 if isinstance(self.cert, str): 126 context.load_cert_chain(certfile=self.cert) 127 elif isinstance(self.cert, tuple) and len(self.cert) == 2: 128 context.load_cert_chain(certfile=self.cert[0], keyfile=self.cert[1]) 129 elif isinstance(self.cert, tuple) and len(self.cert) == 3: 130 context.load_cert_chain( 131 certfile=self.cert[0], 132 keyfile=self.cert[1], 133 password=self.cert[2], # type: ignore 134 ) 135 136 return context 137 138 def _create_default_ssl_context(self) -> ssl.SSLContext: 139 """ 140 Creates the default SSLContext object that's used for both verified 141 and unverified connections. 142 """ 143 context = ssl.SSLContext(ssl.PROTOCOL_TLS) 144 context.options |= ssl.OP_NO_SSLv2 145 context.options |= ssl.OP_NO_SSLv3 146 context.options |= ssl.OP_NO_TLSv1 147 context.options |= ssl.OP_NO_TLSv1_1 148 context.options |= ssl.OP_NO_COMPRESSION 149 context.set_ciphers(DEFAULT_CIPHERS) 150 151 if ssl.HAS_ALPN: 152 context.set_alpn_protocols(["h2", "http/1.1"]) 153 if ssl.HAS_NPN: 154 context.set_npn_protocols(["h2", "http/1.1"]) 155 156 return context 157 158 159 class TimeoutConfig: 160 """ 161 Timeout values. 162 """ 163 164 def __init__( 165 self, 166 timeout: TimeoutTypes = None, 167 *, 168 connect_timeout: float = None, 169 read_timeout: float = None, 170 write_timeout: float = None, 171 ): 172 if timeout is None: 173 self.connect_timeout = connect_timeout 174 self.read_timeout = read_timeout 175 self.write_timeout = write_timeout 176 else: 177 # Specified as a single timeout value 178 assert connect_timeout is None 179 assert read_timeout is None 180 assert write_timeout is None 181 if isinstance(timeout, TimeoutConfig): 182 self.connect_timeout = timeout.connect_timeout 183 self.read_timeout = timeout.read_timeout 184 self.write_timeout = timeout.write_timeout 185 elif isinstance(timeout, tuple): 186 self.connect_timeout = timeout[0] 187 self.read_timeout = timeout[1] 188 self.write_timeout = timeout[2] 189 else: 190 self.connect_timeout = timeout 191 self.read_timeout = timeout 192 self.write_timeout = timeout 193 194 def __eq__(self, other: typing.Any) -> bool: 195 return ( 196 isinstance(other, self.__class__) 197 and self.connect_timeout == other.connect_timeout 198 and self.read_timeout == other.read_timeout 199 and self.write_timeout == other.write_timeout 200 ) 201 202 def __repr__(self) -> str: 203 class_name = self.__class__.__name__ 204 if len({self.connect_timeout, self.read_timeout, self.write_timeout}) == 1: 205 return f"{class_name}(timeout={self.connect_timeout})" 206 return ( 207 f"{class_name}(connect_timeout={self.connect_timeout}, " 208 f"read_timeout={self.read_timeout}, write_timeout={self.write_timeout})" 209 ) 210 211 212 class PoolLimits: 213 """ 214 Limits on the number of connections in a connection pool. 
215 """ 216 217 def __init__( 218 self, 219 *, 220 soft_limit: int = None, 221 hard_limit: int = None, 222 pool_timeout: float = None, 223 ): 224 self.soft_limit = soft_limit 225 self.hard_limit = hard_limit 226 self.pool_timeout = pool_timeout 227 228 def __eq__(self, other: typing.Any) -> bool: 229 return ( 230 isinstance(other, self.__class__) 231 and self.soft_limit == other.soft_limit 232 and self.hard_limit == other.hard_limit 233 and self.pool_timeout == other.pool_timeout 234 ) 235 236 def __repr__(self) -> str: 237 class_name = self.__class__.__name__ 238 return ( 239 f"{class_name}(soft_limit={self.soft_limit}, " 240 f"hard_limit={self.hard_limit}, pool_timeout={self.pool_timeout})" 241 ) 242 243 244 DEFAULT_SSL_CONFIG = SSLConfig(cert=None, verify=True) 245 DEFAULT_TIMEOUT_CONFIG = TimeoutConfig(timeout=5.0) 246 DEFAULT_POOL_LIMITS = PoolLimits(soft_limit=10, hard_limit=100, pool_timeout=5.0) 247 DEFAULT_CA_BUNDLE_PATH = Path(certifi.where()) 248 DEFAULT_MAX_REDIRECTS = 20 249 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/httpx/config.py b/httpx/config.py --- a/httpx/config.py +++ b/httpx/config.py @@ -7,7 +7,7 @@ from .__version__ import __version__ CertTypes = typing.Union[str, typing.Tuple[str, str], typing.Tuple[str, str, str]] -VerifyTypes = typing.Union[str, bool] +VerifyTypes = typing.Union[str, bool, ssl.SSLContext] TimeoutTypes = typing.Union[float, typing.Tuple[float, float, float], "TimeoutConfig"] @@ -40,9 +40,17 @@ def __init__(self, *, cert: CertTypes = None, verify: VerifyTypes = True): self.cert = cert - self.verify = verify - self.ssl_context: typing.Optional[ssl.SSLContext] = None + # Allow passing in our own SSLContext object that's pre-configured. + # If you do this we assume that you want verify=True as well. + ssl_context = None + if isinstance(verify, ssl.SSLContext): + ssl_context = verify + verify = True + self._load_client_certs(ssl_context) + + self.ssl_context: typing.Optional[ssl.SSLContext] = ssl_context + self.verify: typing.Union[str, bool] = verify def __eq__(self, other: typing.Any) -> bool: return ( @@ -121,17 +129,7 @@ elif ca_bundle_path.is_dir(): context.load_verify_locations(capath=str(ca_bundle_path)) - if self.cert is not None: - if isinstance(self.cert, str): - context.load_cert_chain(certfile=self.cert) - elif isinstance(self.cert, tuple) and len(self.cert) == 2: - context.load_cert_chain(certfile=self.cert[0], keyfile=self.cert[1]) - elif isinstance(self.cert, tuple) and len(self.cert) == 3: - context.load_cert_chain( - certfile=self.cert[0], - keyfile=self.cert[1], - password=self.cert[2], # type: ignore - ) + self._load_client_certs(context) return context @@ -155,6 +153,22 @@ return context + def _load_client_certs(self, ssl_context: ssl.SSLContext) -> None: + """ + Loads client certificates into our SSLContext object + """ + if self.cert is not None: + if isinstance(self.cert, str): + ssl_context.load_cert_chain(certfile=self.cert) + elif isinstance(self.cert, tuple) and len(self.cert) == 2: + ssl_context.load_cert_chain(certfile=self.cert[0], keyfile=self.cert[1]) + elif isinstance(self.cert, tuple) and len(self.cert) == 3: + ssl_context.load_cert_chain( + certfile=self.cert[0], + keyfile=self.cert[1], + password=self.cert[2], # type: ignore + ) + class TimeoutConfig: """
{"golden_diff": "diff --git a/httpx/config.py b/httpx/config.py\n--- a/httpx/config.py\n+++ b/httpx/config.py\n@@ -7,7 +7,7 @@\n from .__version__ import __version__\n \n CertTypes = typing.Union[str, typing.Tuple[str, str], typing.Tuple[str, str, str]]\n-VerifyTypes = typing.Union[str, bool]\n+VerifyTypes = typing.Union[str, bool, ssl.SSLContext]\n TimeoutTypes = typing.Union[float, typing.Tuple[float, float, float], \"TimeoutConfig\"]\n \n \n@@ -40,9 +40,17 @@\n \n def __init__(self, *, cert: CertTypes = None, verify: VerifyTypes = True):\n self.cert = cert\n- self.verify = verify\n \n- self.ssl_context: typing.Optional[ssl.SSLContext] = None\n+ # Allow passing in our own SSLContext object that's pre-configured.\n+ # If you do this we assume that you want verify=True as well.\n+ ssl_context = None\n+ if isinstance(verify, ssl.SSLContext):\n+ ssl_context = verify\n+ verify = True\n+ self._load_client_certs(ssl_context)\n+\n+ self.ssl_context: typing.Optional[ssl.SSLContext] = ssl_context\n+ self.verify: typing.Union[str, bool] = verify\n \n def __eq__(self, other: typing.Any) -> bool:\n return (\n@@ -121,17 +129,7 @@\n elif ca_bundle_path.is_dir():\n context.load_verify_locations(capath=str(ca_bundle_path))\n \n- if self.cert is not None:\n- if isinstance(self.cert, str):\n- context.load_cert_chain(certfile=self.cert)\n- elif isinstance(self.cert, tuple) and len(self.cert) == 2:\n- context.load_cert_chain(certfile=self.cert[0], keyfile=self.cert[1])\n- elif isinstance(self.cert, tuple) and len(self.cert) == 3:\n- context.load_cert_chain(\n- certfile=self.cert[0],\n- keyfile=self.cert[1],\n- password=self.cert[2], # type: ignore\n- )\n+ self._load_client_certs(context)\n \n return context\n \n@@ -155,6 +153,22 @@\n \n return context\n \n+ def _load_client_certs(self, ssl_context: ssl.SSLContext) -> None:\n+ \"\"\"\n+ Loads client certificates into our SSLContext object\n+ \"\"\"\n+ if self.cert is not None:\n+ if isinstance(self.cert, str):\n+ ssl_context.load_cert_chain(certfile=self.cert)\n+ elif isinstance(self.cert, tuple) and len(self.cert) == 2:\n+ ssl_context.load_cert_chain(certfile=self.cert[0], keyfile=self.cert[1])\n+ elif isinstance(self.cert, tuple) and len(self.cert) == 3:\n+ ssl_context.load_cert_chain(\n+ certfile=self.cert[0],\n+ keyfile=self.cert[1],\n+ password=self.cert[2], # type: ignore\n+ )\n+\n \n class TimeoutConfig:\n \"\"\"\n", "issue": "How to configure TLS beyond client certificate and CA root certs?\nTypically in requests you can use a __HTTPAdapter__ to hijack and set the SSL context and define what ciphers to use, how does one go about doing the same in __httpx__?\n", "before_files": [{"content": "import ssl\nimport typing\nfrom pathlib import Path\n\nimport certifi\n\nfrom .__version__ import __version__\n\nCertTypes = typing.Union[str, typing.Tuple[str, str], typing.Tuple[str, str, str]]\nVerifyTypes = typing.Union[str, bool]\nTimeoutTypes = typing.Union[float, typing.Tuple[float, float, float], \"TimeoutConfig\"]\n\n\nUSER_AGENT = f\"python-httpx/{__version__}\"\n\nDEFAULT_CIPHERS = \":\".join(\n [\n \"ECDHE+AESGCM\",\n \"ECDHE+CHACHA20\",\n \"DHE+AESGCM\",\n \"DHE+CHACHA20\",\n \"ECDH+AESGCM\",\n \"DH+AESGCM\",\n \"ECDH+AES\",\n \"DH+AES\",\n \"RSA+AESGCM\",\n \"RSA+AES\",\n \"!aNULL\",\n \"!eNULL\",\n \"!MD5\",\n \"!DSS\",\n ]\n)\n\n\nclass SSLConfig:\n \"\"\"\n SSL Configuration.\n \"\"\"\n\n def __init__(self, *, cert: CertTypes = None, verify: VerifyTypes = True):\n self.cert = cert\n self.verify = verify\n\n self.ssl_context: 
typing.Optional[ssl.SSLContext] = None\n\n def __eq__(self, other: typing.Any) -> bool:\n return (\n isinstance(other, self.__class__)\n and self.cert == other.cert\n and self.verify == other.verify\n )\n\n def __repr__(self) -> str:\n class_name = self.__class__.__name__\n return f\"{class_name}(cert={self.cert}, verify={self.verify})\"\n\n def with_overrides(\n self, cert: CertTypes = None, verify: VerifyTypes = None\n ) -> \"SSLConfig\":\n cert = self.cert if cert is None else cert\n verify = self.verify if verify is None else verify\n if (cert == self.cert) and (verify == self.verify):\n return self\n return SSLConfig(cert=cert, verify=verify)\n\n def load_ssl_context(self) -> ssl.SSLContext:\n if self.ssl_context is None:\n self.ssl_context = (\n self.load_ssl_context_verify()\n if self.verify\n else self.load_ssl_context_no_verify()\n )\n\n assert self.ssl_context is not None\n return self.ssl_context\n\n def load_ssl_context_no_verify(self) -> ssl.SSLContext:\n \"\"\"\n Return an SSL context for unverified connections.\n \"\"\"\n context = self._create_default_ssl_context()\n context.verify_mode = ssl.CERT_NONE\n context.check_hostname = False\n return context\n\n def load_ssl_context_verify(self) -> ssl.SSLContext:\n \"\"\"\n Return an SSL context for verified connections.\n \"\"\"\n if isinstance(self.verify, bool):\n ca_bundle_path = DEFAULT_CA_BUNDLE_PATH\n elif Path(self.verify).exists():\n ca_bundle_path = Path(self.verify)\n else:\n raise IOError(\n \"Could not find a suitable TLS CA certificate bundle, \"\n \"invalid path: {}\".format(self.verify)\n )\n\n context = self._create_default_ssl_context()\n context.verify_mode = ssl.CERT_REQUIRED\n context.check_hostname = True\n\n # Signal to server support for PHA in TLS 1.3. Raises an\n # AttributeError if only read-only access is implemented.\n try:\n context.post_handshake_auth = True # type: ignore\n except AttributeError: # pragma: nocover\n pass\n\n # Disable using 'commonName' for SSLContext.check_hostname\n # when the 'subjectAltName' extension isn't available.\n try:\n context.hostname_checks_common_name = False # type: ignore\n except AttributeError: # pragma: nocover\n pass\n\n if ca_bundle_path.is_file():\n context.load_verify_locations(cafile=str(ca_bundle_path))\n elif ca_bundle_path.is_dir():\n context.load_verify_locations(capath=str(ca_bundle_path))\n\n if self.cert is not None:\n if isinstance(self.cert, str):\n context.load_cert_chain(certfile=self.cert)\n elif isinstance(self.cert, tuple) and len(self.cert) == 2:\n context.load_cert_chain(certfile=self.cert[0], keyfile=self.cert[1])\n elif isinstance(self.cert, tuple) and len(self.cert) == 3:\n context.load_cert_chain(\n certfile=self.cert[0],\n keyfile=self.cert[1],\n password=self.cert[2], # type: ignore\n )\n\n return context\n\n def _create_default_ssl_context(self) -> ssl.SSLContext:\n \"\"\"\n Creates the default SSLContext object that's used for both verified\n and unverified connections.\n \"\"\"\n context = ssl.SSLContext(ssl.PROTOCOL_TLS)\n context.options |= ssl.OP_NO_SSLv2\n context.options |= ssl.OP_NO_SSLv3\n context.options |= ssl.OP_NO_TLSv1\n context.options |= ssl.OP_NO_TLSv1_1\n context.options |= ssl.OP_NO_COMPRESSION\n context.set_ciphers(DEFAULT_CIPHERS)\n\n if ssl.HAS_ALPN:\n context.set_alpn_protocols([\"h2\", \"http/1.1\"])\n if ssl.HAS_NPN:\n context.set_npn_protocols([\"h2\", \"http/1.1\"])\n\n return context\n\n\nclass TimeoutConfig:\n \"\"\"\n Timeout values.\n \"\"\"\n\n def __init__(\n self,\n timeout: TimeoutTypes = None,\n 
*,\n connect_timeout: float = None,\n read_timeout: float = None,\n write_timeout: float = None,\n ):\n if timeout is None:\n self.connect_timeout = connect_timeout\n self.read_timeout = read_timeout\n self.write_timeout = write_timeout\n else:\n # Specified as a single timeout value\n assert connect_timeout is None\n assert read_timeout is None\n assert write_timeout is None\n if isinstance(timeout, TimeoutConfig):\n self.connect_timeout = timeout.connect_timeout\n self.read_timeout = timeout.read_timeout\n self.write_timeout = timeout.write_timeout\n elif isinstance(timeout, tuple):\n self.connect_timeout = timeout[0]\n self.read_timeout = timeout[1]\n self.write_timeout = timeout[2]\n else:\n self.connect_timeout = timeout\n self.read_timeout = timeout\n self.write_timeout = timeout\n\n def __eq__(self, other: typing.Any) -> bool:\n return (\n isinstance(other, self.__class__)\n and self.connect_timeout == other.connect_timeout\n and self.read_timeout == other.read_timeout\n and self.write_timeout == other.write_timeout\n )\n\n def __repr__(self) -> str:\n class_name = self.__class__.__name__\n if len({self.connect_timeout, self.read_timeout, self.write_timeout}) == 1:\n return f\"{class_name}(timeout={self.connect_timeout})\"\n return (\n f\"{class_name}(connect_timeout={self.connect_timeout}, \"\n f\"read_timeout={self.read_timeout}, write_timeout={self.write_timeout})\"\n )\n\n\nclass PoolLimits:\n \"\"\"\n Limits on the number of connections in a connection pool.\n \"\"\"\n\n def __init__(\n self,\n *,\n soft_limit: int = None,\n hard_limit: int = None,\n pool_timeout: float = None,\n ):\n self.soft_limit = soft_limit\n self.hard_limit = hard_limit\n self.pool_timeout = pool_timeout\n\n def __eq__(self, other: typing.Any) -> bool:\n return (\n isinstance(other, self.__class__)\n and self.soft_limit == other.soft_limit\n and self.hard_limit == other.hard_limit\n and self.pool_timeout == other.pool_timeout\n )\n\n def __repr__(self) -> str:\n class_name = self.__class__.__name__\n return (\n f\"{class_name}(soft_limit={self.soft_limit}, \"\n f\"hard_limit={self.hard_limit}, pool_timeout={self.pool_timeout})\"\n )\n\n\nDEFAULT_SSL_CONFIG = SSLConfig(cert=None, verify=True)\nDEFAULT_TIMEOUT_CONFIG = TimeoutConfig(timeout=5.0)\nDEFAULT_POOL_LIMITS = PoolLimits(soft_limit=10, hard_limit=100, pool_timeout=5.0)\nDEFAULT_CA_BUNDLE_PATH = Path(certifi.where())\nDEFAULT_MAX_REDIRECTS = 20\n", "path": "httpx/config.py"}], "after_files": [{"content": "import ssl\nimport typing\nfrom pathlib import Path\n\nimport certifi\n\nfrom .__version__ import __version__\n\nCertTypes = typing.Union[str, typing.Tuple[str, str], typing.Tuple[str, str, str]]\nVerifyTypes = typing.Union[str, bool, ssl.SSLContext]\nTimeoutTypes = typing.Union[float, typing.Tuple[float, float, float], \"TimeoutConfig\"]\n\n\nUSER_AGENT = f\"python-httpx/{__version__}\"\n\nDEFAULT_CIPHERS = \":\".join(\n [\n \"ECDHE+AESGCM\",\n \"ECDHE+CHACHA20\",\n \"DHE+AESGCM\",\n \"DHE+CHACHA20\",\n \"ECDH+AESGCM\",\n \"DH+AESGCM\",\n \"ECDH+AES\",\n \"DH+AES\",\n \"RSA+AESGCM\",\n \"RSA+AES\",\n \"!aNULL\",\n \"!eNULL\",\n \"!MD5\",\n \"!DSS\",\n ]\n)\n\n\nclass SSLConfig:\n \"\"\"\n SSL Configuration.\n \"\"\"\n\n def __init__(self, *, cert: CertTypes = None, verify: VerifyTypes = True):\n self.cert = cert\n\n # Allow passing in our own SSLContext object that's pre-configured.\n # If you do this we assume that you want verify=True as well.\n ssl_context = None\n if isinstance(verify, ssl.SSLContext):\n ssl_context = verify\n verify = True\n 
self._load_client_certs(ssl_context)\n\n self.ssl_context: typing.Optional[ssl.SSLContext] = ssl_context\n self.verify: typing.Union[str, bool] = verify\n\n def __eq__(self, other: typing.Any) -> bool:\n return (\n isinstance(other, self.__class__)\n and self.cert == other.cert\n and self.verify == other.verify\n )\n\n def __repr__(self) -> str:\n class_name = self.__class__.__name__\n return f\"{class_name}(cert={self.cert}, verify={self.verify})\"\n\n def with_overrides(\n self, cert: CertTypes = None, verify: VerifyTypes = None\n ) -> \"SSLConfig\":\n cert = self.cert if cert is None else cert\n verify = self.verify if verify is None else verify\n if (cert == self.cert) and (verify == self.verify):\n return self\n return SSLConfig(cert=cert, verify=verify)\n\n def load_ssl_context(self) -> ssl.SSLContext:\n if self.ssl_context is None:\n self.ssl_context = (\n self.load_ssl_context_verify()\n if self.verify\n else self.load_ssl_context_no_verify()\n )\n\n assert self.ssl_context is not None\n return self.ssl_context\n\n def load_ssl_context_no_verify(self) -> ssl.SSLContext:\n \"\"\"\n Return an SSL context for unverified connections.\n \"\"\"\n context = self._create_default_ssl_context()\n context.verify_mode = ssl.CERT_NONE\n context.check_hostname = False\n return context\n\n def load_ssl_context_verify(self) -> ssl.SSLContext:\n \"\"\"\n Return an SSL context for verified connections.\n \"\"\"\n if isinstance(self.verify, bool):\n ca_bundle_path = DEFAULT_CA_BUNDLE_PATH\n elif Path(self.verify).exists():\n ca_bundle_path = Path(self.verify)\n else:\n raise IOError(\n \"Could not find a suitable TLS CA certificate bundle, \"\n \"invalid path: {}\".format(self.verify)\n )\n\n context = self._create_default_ssl_context()\n context.verify_mode = ssl.CERT_REQUIRED\n context.check_hostname = True\n\n # Signal to server support for PHA in TLS 1.3. 
Raises an\n # AttributeError if only read-only access is implemented.\n try:\n context.post_handshake_auth = True # type: ignore\n except AttributeError: # pragma: nocover\n pass\n\n # Disable using 'commonName' for SSLContext.check_hostname\n # when the 'subjectAltName' extension isn't available.\n try:\n context.hostname_checks_common_name = False # type: ignore\n except AttributeError: # pragma: nocover\n pass\n\n if ca_bundle_path.is_file():\n context.load_verify_locations(cafile=str(ca_bundle_path))\n elif ca_bundle_path.is_dir():\n context.load_verify_locations(capath=str(ca_bundle_path))\n\n self._load_client_certs(context)\n\n return context\n\n def _create_default_ssl_context(self) -> ssl.SSLContext:\n \"\"\"\n Creates the default SSLContext object that's used for both verified\n and unverified connections.\n \"\"\"\n context = ssl.SSLContext(ssl.PROTOCOL_TLS)\n context.options |= ssl.OP_NO_SSLv2\n context.options |= ssl.OP_NO_SSLv3\n context.options |= ssl.OP_NO_TLSv1\n context.options |= ssl.OP_NO_TLSv1_1\n context.options |= ssl.OP_NO_COMPRESSION\n context.set_ciphers(DEFAULT_CIPHERS)\n\n if ssl.HAS_ALPN:\n context.set_alpn_protocols([\"h2\", \"http/1.1\"])\n if ssl.HAS_NPN:\n context.set_npn_protocols([\"h2\", \"http/1.1\"])\n\n return context\n\n def _load_client_certs(self, ssl_context: ssl.SSLContext) -> None:\n \"\"\"\n Loads client certificates into our SSLContext object\n \"\"\"\n if self.cert is not None:\n if isinstance(self.cert, str):\n ssl_context.load_cert_chain(certfile=self.cert)\n elif isinstance(self.cert, tuple) and len(self.cert) == 2:\n ssl_context.load_cert_chain(certfile=self.cert[0], keyfile=self.cert[1])\n elif isinstance(self.cert, tuple) and len(self.cert) == 3:\n ssl_context.load_cert_chain(\n certfile=self.cert[0],\n keyfile=self.cert[1],\n password=self.cert[2], # type: ignore\n )\n\n\nclass TimeoutConfig:\n \"\"\"\n Timeout values.\n \"\"\"\n\n def __init__(\n self,\n timeout: TimeoutTypes = None,\n *,\n connect_timeout: float = None,\n read_timeout: float = None,\n write_timeout: float = None,\n ):\n if timeout is None:\n self.connect_timeout = connect_timeout\n self.read_timeout = read_timeout\n self.write_timeout = write_timeout\n else:\n # Specified as a single timeout value\n assert connect_timeout is None\n assert read_timeout is None\n assert write_timeout is None\n if isinstance(timeout, TimeoutConfig):\n self.connect_timeout = timeout.connect_timeout\n self.read_timeout = timeout.read_timeout\n self.write_timeout = timeout.write_timeout\n elif isinstance(timeout, tuple):\n self.connect_timeout = timeout[0]\n self.read_timeout = timeout[1]\n self.write_timeout = timeout[2]\n else:\n self.connect_timeout = timeout\n self.read_timeout = timeout\n self.write_timeout = timeout\n\n def __eq__(self, other: typing.Any) -> bool:\n return (\n isinstance(other, self.__class__)\n and self.connect_timeout == other.connect_timeout\n and self.read_timeout == other.read_timeout\n and self.write_timeout == other.write_timeout\n )\n\n def __repr__(self) -> str:\n class_name = self.__class__.__name__\n if len({self.connect_timeout, self.read_timeout, self.write_timeout}) == 1:\n return f\"{class_name}(timeout={self.connect_timeout})\"\n return (\n f\"{class_name}(connect_timeout={self.connect_timeout}, \"\n f\"read_timeout={self.read_timeout}, write_timeout={self.write_timeout})\"\n )\n\n\nclass PoolLimits:\n \"\"\"\n Limits on the number of connections in a connection pool.\n \"\"\"\n\n def __init__(\n self,\n *,\n soft_limit: int = None,\n hard_limit: int 
= None,\n pool_timeout: float = None,\n ):\n self.soft_limit = soft_limit\n self.hard_limit = hard_limit\n self.pool_timeout = pool_timeout\n\n def __eq__(self, other: typing.Any) -> bool:\n return (\n isinstance(other, self.__class__)\n and self.soft_limit == other.soft_limit\n and self.hard_limit == other.hard_limit\n and self.pool_timeout == other.pool_timeout\n )\n\n def __repr__(self) -> str:\n class_name = self.__class__.__name__\n return (\n f\"{class_name}(soft_limit={self.soft_limit}, \"\n f\"hard_limit={self.hard_limit}, pool_timeout={self.pool_timeout})\"\n )\n\n\nDEFAULT_SSL_CONFIG = SSLConfig(cert=None, verify=True)\nDEFAULT_TIMEOUT_CONFIG = TimeoutConfig(timeout=5.0)\nDEFAULT_POOL_LIMITS = PoolLimits(soft_limit=10, hard_limit=100, pool_timeout=5.0)\nDEFAULT_CA_BUNDLE_PATH = Path(certifi.where())\nDEFAULT_MAX_REDIRECTS = 20\n", "path": "httpx/config.py"}]}
2,760
691
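The httpx entry above (encode__httpx-215) resolves the issue by letting `verify` accept a pre-built `ssl.SSLContext`: the context is reused as-is, `verify` is treated as `True`, and any client certificates are loaded into it. The sketch below is a minimal illustration of that behaviour at the level the golden diff actually touches (`SSLConfig` in `httpx/config.py`); the cipher string and CA bundle path are placeholders, not values from the entry, and whether a given top-level client class forwards `verify` unchanged is assumed rather than shown here.

```python
import ssl

from httpx.config import SSLConfig  # module patched in the golden diff above

# Pre-configure TLS entirely outside httpx: ciphers, protocol options, CAs.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.set_ciphers("ECDHE+AESGCM:ECDHE+CHACHA20")        # illustrative cipher list
ctx.load_verify_locations(cafile="/path/to/ca.pem")   # hypothetical bundle path

# Per the patched __init__, an SSLContext passed as `verify` implies verify=True
# and is returned untouched by load_ssl_context().
ssl_config = SSLConfig(verify=ctx)
assert ssl_config.load_ssl_context() is ctx
```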
gh_patches_debug_50810
rasdani/github-patches
git_diff
googleapis__google-cloud-python-6134
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- PubSub protobuf dependency requirements incorrect I think the minimum version for the `protobuf` package dependency is not correct. google-cloud-python has the version requirements as protobuf>=3.0.0, but it fails to import when using version up to and including protobuf==3.3.0. I'm not sure what the exact correct version is, but the last version of google-cloud-pubsub to work with protobuf==3.3.0 is google-cloud-pubsub==0.35.4. I believe after this commit (https://github.com/GoogleCloudPlatform/google-cloud-python/commit/371333a51165e99d4d02876b1ef133618485b6fc#diff-29280288794caf553b0b008084a0e854), a protobuf version >3.3.0 is required: Python version ``` $ python --version Python 2.7.15rc1 ``` Package versions: ``` $ pip list | grep -E '(cloud|protobuf)' google-cloud-core 0.28.1 google-cloud-datastore 1.7.0 google-cloud-pubsub 0.38.0 google-cloud-storage 1.12.0 protobuf 3.3.0 ``` Getting a stack track just importing pubsub (in ipython here) ``` In [1]: from google.cloud import pubsub --------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-1-8fba37b708ad> in <module>() ----> 1 from google.cloud import pubsub /home/aaronpeterson/.local/share/virtualenvs/turbinia-docs-oOHvuNoj/local/lib/python2.7/site-packages/google/cloud/pubsub.py in <module>() 17 from __future__ import absolute_import 18 ---> 19 from google.cloud.pubsub_v1 import PublisherClient 20 from google.cloud.pubsub_v1 import SubscriberClient 21 from google.cloud.pubsub_v1 import types /home/aaronpeterson/.local/share/virtualenvs/turbinia-docs-oOHvuNoj/local/lib/python2.7/site-packages/google/cloud/pubsub_v1/__init__.py in <module>() 15 from __future__ import absolute_import 16 ---> 17 from google.cloud.pubsub_v1 import types 18 from google.cloud.pubsub_v1 import publisher 19 from google.cloud.pubsub_v1 import subscriber /home/aaronpeterson/.local/share/virtualenvs/turbinia-docs-oOHvuNoj/local/lib/python2.7/site-packages/google/cloud/pubsub_v1/types.py in <module>() 28 29 from google.api_core.protobuf_helpers import get_messages ---> 30 from google.cloud.pubsub_v1.proto import pubsub_pb2 31 32 /home/aaronpeterson/.local/share/virtualenvs/turbinia-docs-oOHvuNoj/local/lib/python2.7/site-packages/google/cloud/pubsub_v1/proto/pubsub_pb2.py in <module>() 45 message_type=None, enum_type=None, containing_type=None, 46 is_extension=False, extension_scope=None, ---> 47 options=None, file=DESCRIPTOR), 48 ], 49 extensions=[ TypeError: __new__() got an unexpected keyword argument 'file' ``` Snipped the pubsub section from pipdeptree output showing the protobuf requirement is >=3.0.0: ``` - google-cloud-pubsub [required: Any, installed: 0.38.0] - enum34 [required: Any, installed: 1.1.6] - google-api-core [required: >=1.1.0,<2.0.0dev, installed: 1.4.0] - futures [required: >=3.2.0, installed: 3.2.0] - google-auth [required: >=0.4.0,<2.0.0dev, installed: 1.5.1] - cachetools [required: >=2.0.0, installed: 2.1.0] - pyasn1-modules [required: >=0.2.1, installed: 0.2.2] - pyasn1 [required: >=0.4.1,<0.5.0, installed: 0.4.4] - rsa [required: >=3.1.4, installed: 4.0] - pyasn1 [required: >=0.1.3, installed: 0.4.4] - six [required: >=1.9.0, installed: 1.11.0] - googleapis-common-protos [required: >=1.5.3,<2.0dev, installed: 1.5.3] - protobuf [required: >=3.0.0, installed: 3.3.0] - setuptools [required: Any, installed: 40.4.3] - six [required: >=1.9, installed: 
1.11.0] - protobuf [required: >=3.0.0, installed: 3.3.0] - setuptools [required: Any, installed: 40.4.3] - six [required: >=1.9, installed: 1.11.0] ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pubsub/setup.py` Content: ``` 1 # Copyright 2018 Google LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import io 16 import os 17 18 import setuptools 19 20 21 # Package metadata. 22 23 name = 'google-cloud-pubsub' 24 description = 'Google Cloud Pub/Sub API client library' 25 version = '0.38.0' 26 # Should be one of: 27 # 'Development Status :: 3 - Alpha' 28 # 'Development Status :: 4 - Beta' 29 # 'Development Status :: 5 - Production/Stable' 30 release_status = 'Development Status :: 4 - Beta' 31 dependencies = [ 32 'google-api-core[grpc] >= 1.1.0, < 2.0.0dev', 33 'grpc-google-iam-v1 >= 0.11.1, < 0.12dev', 34 'enum34; python_version < "3.4"', 35 ] 36 extras = { 37 } 38 39 40 # Setup boilerplate below this line. 41 42 package_root = os.path.abspath(os.path.dirname(__file__)) 43 44 readme_filename = os.path.join(package_root, 'README.rst') 45 with io.open(readme_filename, encoding='utf-8') as readme_file: 46 readme = readme_file.read() 47 48 # Only include packages under the 'google' namespace. Do not include tests, 49 # benchmarks, etc. 50 packages = [ 51 package for package in setuptools.find_packages() 52 if package.startswith('google')] 53 54 # Determine which namespaces are needed. 55 namespaces = ['google'] 56 if 'google.cloud' in packages: 57 namespaces.append('google.cloud') 58 59 60 setuptools.setup( 61 name=name, 62 version=version, 63 description=description, 64 long_description=readme, 65 author='Google LLC', 66 author_email='[email protected]', 67 license='Apache 2.0', 68 url='https://github.com/GoogleCloudPlatform/google-cloud-python', 69 classifiers=[ 70 release_status, 71 'Intended Audience :: Developers', 72 'License :: OSI Approved :: Apache Software License', 73 'Programming Language :: Python', 74 'Programming Language :: Python :: 2', 75 'Programming Language :: Python :: 2.7', 76 'Programming Language :: Python :: 3', 77 'Programming Language :: Python :: 3.4', 78 'Programming Language :: Python :: 3.5', 79 'Programming Language :: Python :: 3.6', 80 'Operating System :: OS Independent', 81 'Topic :: Internet', 82 ], 83 platforms='Posix; MacOS X; Windows', 84 packages=packages, 85 namespace_packages=namespaces, 86 install_requires=dependencies, 87 extras_require=extras, 88 include_package_data=True, 89 zip_safe=False, 90 ) 91 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pubsub/setup.py b/pubsub/setup.py --- a/pubsub/setup.py +++ b/pubsub/setup.py @@ -29,7 +29,7 @@ # 'Development Status :: 5 - Production/Stable' release_status = 'Development Status :: 4 - Beta' dependencies = [ - 'google-api-core[grpc] >= 1.1.0, < 2.0.0dev', + 'google-api-core[grpc] >= 1.4.1, < 2.0.0dev', 'grpc-google-iam-v1 >= 0.11.1, < 0.12dev', 'enum34; python_version < "3.4"', ]
{"golden_diff": "diff --git a/pubsub/setup.py b/pubsub/setup.py\n--- a/pubsub/setup.py\n+++ b/pubsub/setup.py\n@@ -29,7 +29,7 @@\n # 'Development Status :: 5 - Production/Stable'\n release_status = 'Development Status :: 4 - Beta'\n dependencies = [\n- 'google-api-core[grpc] >= 1.1.0, < 2.0.0dev',\n+ 'google-api-core[grpc] >= 1.4.1, < 2.0.0dev',\n 'grpc-google-iam-v1 >= 0.11.1, < 0.12dev',\n 'enum34; python_version < \"3.4\"',\n ]\n", "issue": "PubSub protobuf dependency requirements incorrect\nI think the minimum version for the `protobuf` package dependency is not correct. google-cloud-python has the version requirements as protobuf>=3.0.0, but it fails to import when using version up to and including protobuf==3.3.0. I'm not sure what the exact correct version is, but the last version of google-cloud-pubsub to work with protobuf==3.3.0 is google-cloud-pubsub==0.35.4. I believe after this commit (https://github.com/GoogleCloudPlatform/google-cloud-python/commit/371333a51165e99d4d02876b1ef133618485b6fc#diff-29280288794caf553b0b008084a0e854), a protobuf version >3.3.0 is required:\r\n\r\nPython version\r\n```\r\n$ python --version\r\nPython 2.7.15rc1\r\n```\r\n\r\nPackage versions:\r\n```\r\n$ pip list | grep -E '(cloud|protobuf)'\r\ngoogle-cloud-core 0.28.1 \r\ngoogle-cloud-datastore 1.7.0 \r\ngoogle-cloud-pubsub 0.38.0 \r\ngoogle-cloud-storage 1.12.0 \r\nprotobuf 3.3.0 \r\n```\r\n\r\nGetting a stack track just importing pubsub (in ipython here)\r\n```\r\nIn [1]: from google.cloud import pubsub\r\n---------------------------------------------------------------------------\r\nTypeError Traceback (most recent call last)\r\n<ipython-input-1-8fba37b708ad> in <module>()\r\n----> 1 from google.cloud import pubsub\r\n\r\n/home/aaronpeterson/.local/share/virtualenvs/turbinia-docs-oOHvuNoj/local/lib/python2.7/site-packages/google/cloud/pubsub.py in <module>()\r\n 17 from __future__ import absolute_import\r\n 18 \r\n---> 19 from google.cloud.pubsub_v1 import PublisherClient\r\n 20 from google.cloud.pubsub_v1 import SubscriberClient\r\n 21 from google.cloud.pubsub_v1 import types\r\n\r\n/home/aaronpeterson/.local/share/virtualenvs/turbinia-docs-oOHvuNoj/local/lib/python2.7/site-packages/google/cloud/pubsub_v1/__init__.py in <module>()\r\n 15 from __future__ import absolute_import\r\n 16 \r\n---> 17 from google.cloud.pubsub_v1 import types\r\n 18 from google.cloud.pubsub_v1 import publisher\r\n 19 from google.cloud.pubsub_v1 import subscriber\r\n\r\n/home/aaronpeterson/.local/share/virtualenvs/turbinia-docs-oOHvuNoj/local/lib/python2.7/site-packages/google/cloud/pubsub_v1/types.py in <module>()\r\n 28 \r\n 29 from google.api_core.protobuf_helpers import get_messages\r\n---> 30 from google.cloud.pubsub_v1.proto import pubsub_pb2\r\n 31 \r\n 32 \r\n\r\n/home/aaronpeterson/.local/share/virtualenvs/turbinia-docs-oOHvuNoj/local/lib/python2.7/site-packages/google/cloud/pubsub_v1/proto/pubsub_pb2.py in <module>()\r\n 45 message_type=None, enum_type=None, containing_type=None,\r\n 46 is_extension=False, extension_scope=None,\r\n---> 47 options=None, file=DESCRIPTOR),\r\n 48 ],\r\n 49 extensions=[\r\n\r\nTypeError: __new__() got an unexpected keyword argument 'file'\r\n```\r\n\r\nSnipped the pubsub section from pipdeptree output showing the protobuf requirement is >=3.0.0:\r\n```\r\n - google-cloud-pubsub [required: Any, installed: 0.38.0]\r\n - enum34 [required: Any, installed: 1.1.6]\r\n - google-api-core [required: >=1.1.0,<2.0.0dev, installed: 1.4.0]\r\n - futures [required: >=3.2.0, installed: 3.2.0]\r\n 
- google-auth [required: >=0.4.0,<2.0.0dev, installed: 1.5.1]\r\n - cachetools [required: >=2.0.0, installed: 2.1.0]\r\n - pyasn1-modules [required: >=0.2.1, installed: 0.2.2]\r\n - pyasn1 [required: >=0.4.1,<0.5.0, installed: 0.4.4]\r\n - rsa [required: >=3.1.4, installed: 4.0]\r\n - pyasn1 [required: >=0.1.3, installed: 0.4.4]\r\n - six [required: >=1.9.0, installed: 1.11.0]\r\n - googleapis-common-protos [required: >=1.5.3,<2.0dev, installed: 1.5.3]\r\n - protobuf [required: >=3.0.0, installed: 3.3.0]\r\n - setuptools [required: Any, installed: 40.4.3]\r\n - six [required: >=1.9, installed: 1.11.0]\r\n - protobuf [required: >=3.0.0, installed: 3.3.0]\r\n - setuptools [required: Any, installed: 40.4.3]\r\n - six [required: >=1.9, installed: 1.11.0]\r\n```\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = 'google-cloud-pubsub'\ndescription = 'Google Cloud Pub/Sub API client library'\nversion = '0.38.0'\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = 'Development Status :: 4 - Beta'\ndependencies = [\n 'google-api-core[grpc] >= 1.1.0, < 2.0.0dev',\n 'grpc-google-iam-v1 >= 0.11.1, < 0.12dev',\n 'enum34; python_version < \"3.4\"',\n]\nextras = {\n}\n\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, 'README.rst')\nwith io.open(readme_filename, encoding='utf-8') as readme_file:\n readme = readme_file.read()\n\n# Only include packages under the 'google' namespace. 
Do not include tests,\n# benchmarks, etc.\npackages = [\n package for package in setuptools.find_packages()\n if package.startswith('google')]\n\n# Determine which namespaces are needed.\nnamespaces = ['google']\nif 'google.cloud' in packages:\n namespaces.append('google.cloud')\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author='Google LLC',\n author_email='[email protected]',\n license='Apache 2.0',\n url='https://github.com/GoogleCloudPlatform/google-cloud-python',\n classifiers=[\n release_status,\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Operating System :: OS Independent',\n 'Topic :: Internet',\n ],\n platforms='Posix; MacOS X; Windows',\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "pubsub/setup.py"}], "after_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = 'google-cloud-pubsub'\ndescription = 'Google Cloud Pub/Sub API client library'\nversion = '0.38.0'\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = 'Development Status :: 4 - Beta'\ndependencies = [\n 'google-api-core[grpc] >= 1.4.1, < 2.0.0dev',\n 'grpc-google-iam-v1 >= 0.11.1, < 0.12dev',\n 'enum34; python_version < \"3.4\"',\n]\nextras = {\n}\n\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, 'README.rst')\nwith io.open(readme_filename, encoding='utf-8') as readme_file:\n readme = readme_file.read()\n\n# Only include packages under the 'google' namespace. 
Do not include tests,\n# benchmarks, etc.\npackages = [\n package for package in setuptools.find_packages()\n if package.startswith('google')]\n\n# Determine which namespaces are needed.\nnamespaces = ['google']\nif 'google.cloud' in packages:\n namespaces.append('google.cloud')\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author='Google LLC',\n author_email='[email protected]',\n license='Apache 2.0',\n url='https://github.com/GoogleCloudPlatform/google-cloud-python',\n classifiers=[\n release_status,\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Operating System :: OS Independent',\n 'Topic :: Internet',\n ],\n platforms='Posix; MacOS X; Windows',\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "pubsub/setup.py"}]}
2,350
159
gh_patches_debug_7673
rasdani/github-patches
git_diff
translate__pootle-4132
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Running update_tmserver with --dry-run but no existing LocalTM will fail If we try to build the LocalTM but don't specify `--rebuild` our initial import will fail because the revision counter does not exist. ``` pytb $ pootle update_tmserver -v 3 --dry-run 2015-09-30 10:55:33,485 INFO Loading custom settings from '/root/.pootle/pootle.conf'... 2015-09-30 13:55:33,704 INFO Using Python PO System check identified some issues: WARNINGS: ?: (pootle.W017) There are user accounts with duplicate emails. This will not be allowed in Pootle 2.8. HINT: Try using 'pootle find_duplicate_emails', and then update user emails with 'pootle update_user_email username email'. You might also want to consider using pootle merge_user or purge_user commands Traceback (most recent call last): File "/var/www/pootle/env/bin/pootle", line 11, in <module> sys.exit(main()) File "/var/www/pootle/env/local/lib/python2.7/site-packages/pootle/runner.py", line 309, in main django_settings_module='pootle.settings') File "/var/www/pootle/env/local/lib/python2.7/site-packages/pootle/runner.py", line 289, in run_app management.execute_from_command_line(command) File "/var/www/pootle/env/local/lib/python2.7/site-packages/django/core/management/__init__.py", line 385, in execute_from_command_line utility.execute() File "/var/www/pootle/env/local/lib/python2.7/site-packages/django/core/management/__init__.py", line 377, in execute self.fetch_command(subcommand).run_from_argv(self.argv) File "/var/www/pootle/env/local/lib/python2.7/site-packages/django/core/management/base.py", line 288, in run_from_argv self.execute(*args, **options.__dict__) File "/var/www/pootle/env/local/lib/python2.7/site-packages/django/core/management/base.py", line 338, in execute output = self.handle(*args, **options) File "/var/www/pootle/env/local/lib/python2.7/site-packages/pootle/apps/pootle_app/management/commands/update_tmserver.py", line 152, in handle 'field': 'revision' File "/var/www/pootle/env/local/lib/python2.7/site-packages/elasticsearch/client/utils.py", line 69, in _wrapped return func(*args, params=params, **kwargs) File "/var/www/pootle/env/local/lib/python2.7/site-packages/elasticsearch/client/__init__.py", line 506, in search params=params, body=body) File "/var/www/pootle/env/local/lib/python2.7/site-packages/elasticsearch/transport.py", line 307, in perform_request status, headers, data = connection.perform_request(method, url, params, body, ignore=ignore, timeout=timeout) File "/var/www/pootle/env/local/lib/python2.7/site-packages/elasticsearch/connection/http_urllib3.py", line 89, in perform_request self._raise_error(response.status, raw_data) File "/var/www/pootle/env/local/lib/python2.7/site-packages/elasticsearch/connection/base.py", line 105, in _raise_error raise HTTP_EXCEPTIONS.get(status_code, TransportError)(status_code, error_message, additional_info) elasticsearch.exceptions.RequestError: TransportError(400, u'SearchPhaseExecutionException[Failed to execute phase [query], all shards failed; shardFailures {[aQHk0CPtT1K_ZZ2YJG8rjQ][translations][0]: SearchParseException[[translations][0]: query[ConstantScore(*:*)],from[-1],size[-1]: Parse Failure [Failed to parse source [{"query": {"match_all": {}}, "facets": {"stat1": {"statistical": {"field": "revision"}}}}]]]; nested: FacetPhaseExecutionException[Facet [stat1]: No mapping found for field [revision]]; }{[aQHk0CPtT1K_ZZ2YJG8rjQ][translations][1]: 
SearchParseException[[translations][1]: query[ConstantScore(*:*)],from[-1],size[-1]: Parse Failure [Failed to parse source [{"query": {"match_all": {}}, "facets": {"stat1": {"statistical": {"field": "revision"}}}}]]]; nested: FacetPhaseExecutionException[Facet [stat1]: No mapping found for field [revision]]; }{[aQHk0CPtT1K_ZZ2YJG8rjQ][translations][2]: SearchParseException[[translations][2]: query[ConstantScore(*:*)],from[-1],size[-1]: Parse Failure [Failed to parse source [{"query": {"match_all": {}}, "facets": {"stat1": {"statistical": {"field": "revision"}}}}]]]; nested: FacetPhaseExecutionException[Facet [stat1]: No mapping found for field [revision]]; }{[aQHk0CPtT1K_ZZ2YJG8rjQ][translations][3]: SearchParseException[[translations][3]: query[ConstantScore(*:*)],from[-1],size[-1]: Parse Failure [Failed to parse source [{"query": {"match_all": {}}, "facets": {"stat1": {"statistical": {"field": "revision"}}}}]]]; nested: FacetPhaseExecutionException[Facet [stat1]: No mapping found for field [revision]]; }{[aQHk0CPtT1K_ZZ2YJG8rjQ][translations][4]: SearchParseException[[translations][4]: query[ConstantScore(*:*)],from[-1],size[-1]: Parse Failure [Failed to parse source [{"query": {"match_all": {}}, "facets": {"stat1": {"statistical": {"field": "revision"}}}}]]]; nested: FacetPhaseExecutionException[Facet [stat1]: No mapping found for field [revision]]; }]') ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pootle/apps/pootle_app/management/commands/update_tmserver.py` Content: ``` 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 # 4 # Copyright (C) Pootle contributors. 5 # 6 # This file is a part of the Pootle project. It is distributed under the GPL3 7 # or later license. See the LICENSE file for a copy of the license and the 8 # AUTHORS file for copyright and authorship information. 9 10 from hashlib import md5 11 from optparse import make_option 12 import os 13 import sys 14 15 # This must be run before importing Django. 
16 os.environ['DJANGO_SETTINGS_MODULE'] = 'pootle.settings' 17 18 from elasticsearch import helpers, Elasticsearch 19 20 from django.conf import settings 21 from django.core.management.base import BaseCommand, CommandError 22 23 from pootle_store.models import Unit 24 25 26 BULK_CHUNK_SIZE = 5000 27 28 29 class Command(BaseCommand): 30 help = "Load Local Translation Memory" 31 option_list = BaseCommand.option_list + ( 32 make_option('--overwrite', 33 action="store_true", 34 dest='overwrite', 35 default=False, 36 help='Process all items, not just the new ones (useful to ' 37 'overwrite properties while keeping the index in a ' 38 'working condition)'), 39 make_option('--rebuild', 40 action="store_true", 41 dest='rebuild', 42 default=False, 43 help='Drop the entire index on start and update ' 44 'everything from scratch'), 45 make_option('--dry-run', 46 action="store_true", 47 dest='dry_run', 48 default=False, 49 help='Report only the number of translations to index ' 50 'and quit'), 51 ) 52 53 def _parse_translations(self, **options): 54 55 units_qs = Unit.simple_objects \ 56 .exclude(target_f__isnull=True) \ 57 .exclude(target_f__exact='') \ 58 .filter(revision__gt=self.last_indexed_revision) \ 59 .select_related( 60 'submitted_by', 61 'store', 62 'store__translation_project__project', 63 'store__translation_project__language' 64 ).values( 65 'id', 66 'revision', 67 'source_f', 68 'target_f', 69 'submitted_by__username', 70 'submitted_by__full_name', 71 'submitted_by__email', 72 'store__translation_project__project__fullname', 73 'store__pootle_path', 74 'store__translation_project__language__code' 75 ).order_by() 76 77 total = units_qs.count() 78 79 if total == 0: 80 self.stdout.write("No translations to index") 81 sys.exit() 82 83 self.stdout.write("%s translations to index" % total) 84 85 if options['dry_run']: 86 sys.exit() 87 88 self.stdout.write("") 89 90 for i, unit in enumerate(units_qs.iterator(), start=1): 91 fullname = (unit['submitted_by__full_name'] or 92 unit['submitted_by__username']) 93 project = unit['store__translation_project__project__fullname'] 94 95 email_md5 = None 96 if unit['submitted_by__email']: 97 email_md5 = md5(unit['submitted_by__email']).hexdigest() 98 99 if (i % 1000 == 0) or (i == total): 100 percent = "%.1f" % (i * 100.0 / total) 101 self.stdout.write("%s (%s%%)" % (i, percent), ending='\r') 102 self.stdout.flush() 103 104 yield { 105 "_index": self.INDEX_NAME, 106 "_type": unit['store__translation_project__language__code'], 107 "_id": unit['id'], 108 'revision': int(unit['revision']), 109 'project': project, 110 'path': unit['store__pootle_path'], 111 'username': unit['submitted_by__username'], 112 'fullname': fullname, 113 'email_md5': email_md5, 114 'source': unit['source_f'], 115 'target': unit['target_f'], 116 } 117 118 if i != total: 119 self.stdout.write("Expected %d, loaded %d." 
% (total, i)) 120 121 122 def handle(self, *args, **options): 123 if not getattr(settings, 'POOTLE_TM_SERVER', False): 124 raise CommandError("POOTLE_TM_SERVER is missing from your settings.") 125 126 self.INDEX_NAME = settings.POOTLE_TM_SERVER['default']['INDEX_NAME'] 127 es = Elasticsearch([{ 128 'host': settings.POOTLE_TM_SERVER['default']['HOST'], 129 'port': settings.POOTLE_TM_SERVER['default']['PORT'] 130 }], 131 retry_on_timeout=True 132 ) 133 134 self.last_indexed_revision = -1 135 136 if options['rebuild'] and not options['dry_run']: 137 if es.indices.exists(self.INDEX_NAME): 138 es.indices.delete(index=self.INDEX_NAME) 139 140 if (not options['rebuild'] and 141 not options['overwrite'] and 142 es.indices.exists(self.INDEX_NAME)): 143 result = es.search( 144 index=self.INDEX_NAME, 145 body={ 146 'query': { 147 'match_all': {} 148 }, 149 'facets': { 150 'stat1': { 151 'statistical': { 152 'field': 'revision' 153 } 154 } 155 } 156 } 157 ) 158 self.last_indexed_revision = result['facets']['stat1']['max'] 159 160 self.stdout.write("Last indexed revision = %s" % self.last_indexed_revision) 161 162 success, _ = helpers.bulk(es, self._parse_translations(**options)) 163 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pootle/apps/pootle_app/management/commands/update_tmserver.py b/pootle/apps/pootle_app/management/commands/update_tmserver.py --- a/pootle/apps/pootle_app/management/commands/update_tmserver.py +++ b/pootle/apps/pootle_app/management/commands/update_tmserver.py @@ -137,6 +137,9 @@ if es.indices.exists(self.INDEX_NAME): es.indices.delete(index=self.INDEX_NAME) + if not options['dry_run'] and not es.indices.exists(self.INDEX_NAME): + es.indices.create(index=self.INDEX_NAME) + if (not options['rebuild'] and not options['overwrite'] and es.indices.exists(self.INDEX_NAME)):
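A minimal standalone sketch of the guard this golden diff introduces, assuming an elasticsearch-py client of the same vintage as the quoted command; the host, port, and index name below are placeholders rather than Pootle's real settings:

```python
from elasticsearch import Elasticsearch

INDEX_NAME = "translations"  # placeholder; the command reads this from POOTLE_TM_SERVER

es = Elasticsearch([{"host": "localhost", "port": 9200}], retry_on_timeout=True)

dry_run = False
rebuild = False

if rebuild and not dry_run and es.indices.exists(INDEX_NAME):
    es.indices.delete(index=INDEX_NAME)

# The added guard: make sure the index exists before anything queries or
# bulk-loads it, so a first run without --rebuild has something to work with.
if not dry_run and not es.indices.exists(INDEX_NAME):
    es.indices.create(index=INDEX_NAME)
```

With that guard in place, the later revision lookup and bulk load are no longer issued against an index that was never created.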
{"golden_diff": "diff --git a/pootle/apps/pootle_app/management/commands/update_tmserver.py b/pootle/apps/pootle_app/management/commands/update_tmserver.py\n--- a/pootle/apps/pootle_app/management/commands/update_tmserver.py\n+++ b/pootle/apps/pootle_app/management/commands/update_tmserver.py\n@@ -137,6 +137,9 @@\n if es.indices.exists(self.INDEX_NAME):\n es.indices.delete(index=self.INDEX_NAME)\n \n+ if not options['dry_run'] and not es.indices.exists(self.INDEX_NAME):\n+ es.indices.create(index=self.INDEX_NAME)\n+\n if (not options['rebuild'] and\n not options['overwrite'] and\n es.indices.exists(self.INDEX_NAME)):\n", "issue": "Running update_tmserver with --dry-run but no existing LocalTM will fail\nIf we try to build the LocalTM but don't specify `--rebuild` our initial import will fail because the revision counter does not exist.\n\n``` pytb\n$ pootle update_tmserver -v 3 --dry-run \n2015-09-30 10:55:33,485 INFO Loading custom settings from '/root/.pootle/pootle.conf'...\n2015-09-30 13:55:33,704 INFO Using Python PO\nSystem check identified some issues:\n\nWARNINGS:\n?: (pootle.W017) There are user accounts with duplicate emails. This will not be allowed in Pootle 2.8.\n HINT: Try using 'pootle find_duplicate_emails', and then update user emails with 'pootle update_user_email username email'. You might also want to consider using pootle merge_user or purge_user commands\nTraceback (most recent call last):\n File \"/var/www/pootle/env/bin/pootle\", line 11, in <module>\n sys.exit(main())\n File \"/var/www/pootle/env/local/lib/python2.7/site-packages/pootle/runner.py\", line 309, in main\n django_settings_module='pootle.settings')\n File \"/var/www/pootle/env/local/lib/python2.7/site-packages/pootle/runner.py\", line 289, in run_app\n management.execute_from_command_line(command)\n File \"/var/www/pootle/env/local/lib/python2.7/site-packages/django/core/management/__init__.py\", line 385, in execute_from_command_line\n utility.execute()\n File \"/var/www/pootle/env/local/lib/python2.7/site-packages/django/core/management/__init__.py\", line 377, in execute\n self.fetch_command(subcommand).run_from_argv(self.argv)\n File \"/var/www/pootle/env/local/lib/python2.7/site-packages/django/core/management/base.py\", line 288, in run_from_argv\n self.execute(*args, **options.__dict__)\n File \"/var/www/pootle/env/local/lib/python2.7/site-packages/django/core/management/base.py\", line 338, in execute\n output = self.handle(*args, **options)\n File \"/var/www/pootle/env/local/lib/python2.7/site-packages/pootle/apps/pootle_app/management/commands/update_tmserver.py\", line 152, in handle\n 'field': 'revision'\n File \"/var/www/pootle/env/local/lib/python2.7/site-packages/elasticsearch/client/utils.py\", line 69, in _wrapped\n return func(*args, params=params, **kwargs)\n File \"/var/www/pootle/env/local/lib/python2.7/site-packages/elasticsearch/client/__init__.py\", line 506, in search\n params=params, body=body)\n File \"/var/www/pootle/env/local/lib/python2.7/site-packages/elasticsearch/transport.py\", line 307, in perform_request\n status, headers, data = connection.perform_request(method, url, params, body, ignore=ignore, timeout=timeout)\n File \"/var/www/pootle/env/local/lib/python2.7/site-packages/elasticsearch/connection/http_urllib3.py\", line 89, in perform_request\n self._raise_error(response.status, raw_data)\n File \"/var/www/pootle/env/local/lib/python2.7/site-packages/elasticsearch/connection/base.py\", line 105, in _raise_error\n raise HTTP_EXCEPTIONS.get(status_code, 
TransportError)(status_code, error_message, additional_info)\nelasticsearch.exceptions.RequestError: TransportError(400, u'SearchPhaseExecutionException[Failed to execute phase [query], all shards failed; shardFailures {[aQHk0CPtT1K_ZZ2YJG8rjQ][translations][0]: SearchParseException[[translations][0]: query[ConstantScore(*:*)],from[-1],size[-1]: Parse Failure [Failed to parse source [{\"query\": {\"match_all\": {}}, \"facets\": {\"stat1\": {\"statistical\": {\"field\": \"revision\"}}}}]]]; nested: FacetPhaseExecutionException[Facet [stat1]: No mapping found for field [revision]]; }{[aQHk0CPtT1K_ZZ2YJG8rjQ][translations][1]: SearchParseException[[translations][1]: query[ConstantScore(*:*)],from[-1],size[-1]: Parse Failure [Failed to parse source [{\"query\": {\"match_all\": {}}, \"facets\": {\"stat1\": {\"statistical\": {\"field\": \"revision\"}}}}]]]; nested: FacetPhaseExecutionException[Facet [stat1]: No mapping found for field [revision]]; }{[aQHk0CPtT1K_ZZ2YJG8rjQ][translations][2]: SearchParseException[[translations][2]: query[ConstantScore(*:*)],from[-1],size[-1]: Parse Failure [Failed to parse source [{\"query\": {\"match_all\": {}}, \"facets\": {\"stat1\": {\"statistical\": {\"field\": \"revision\"}}}}]]]; nested: FacetPhaseExecutionException[Facet [stat1]: No mapping found for field [revision]]; }{[aQHk0CPtT1K_ZZ2YJG8rjQ][translations][3]: SearchParseException[[translations][3]: query[ConstantScore(*:*)],from[-1],size[-1]: Parse Failure [Failed to parse source [{\"query\": {\"match_all\": {}}, \"facets\": {\"stat1\": {\"statistical\": {\"field\": \"revision\"}}}}]]]; nested: FacetPhaseExecutionException[Facet [stat1]: No mapping found for field [revision]]; }{[aQHk0CPtT1K_ZZ2YJG8rjQ][translations][4]: SearchParseException[[translations][4]: query[ConstantScore(*:*)],from[-1],size[-1]: Parse Failure [Failed to parse source [{\"query\": {\"match_all\": {}}, \"facets\": {\"stat1\": {\"statistical\": {\"field\": \"revision\"}}}}]]]; nested: FacetPhaseExecutionException[Facet [stat1]: No mapping found for field [revision]]; }]')\n```\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. 
See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom hashlib import md5\nfrom optparse import make_option\nimport os\nimport sys\n\n# This must be run before importing Django.\nos.environ['DJANGO_SETTINGS_MODULE'] = 'pootle.settings'\n\nfrom elasticsearch import helpers, Elasticsearch\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand, CommandError\n\nfrom pootle_store.models import Unit\n\n\nBULK_CHUNK_SIZE = 5000\n\n\nclass Command(BaseCommand):\n help = \"Load Local Translation Memory\"\n option_list = BaseCommand.option_list + (\n make_option('--overwrite',\n action=\"store_true\",\n dest='overwrite',\n default=False,\n help='Process all items, not just the new ones (useful to '\n 'overwrite properties while keeping the index in a '\n 'working condition)'),\n make_option('--rebuild',\n action=\"store_true\",\n dest='rebuild',\n default=False,\n help='Drop the entire index on start and update '\n 'everything from scratch'),\n make_option('--dry-run',\n action=\"store_true\",\n dest='dry_run',\n default=False,\n help='Report only the number of translations to index '\n 'and quit'),\n )\n\n def _parse_translations(self, **options):\n\n units_qs = Unit.simple_objects \\\n .exclude(target_f__isnull=True) \\\n .exclude(target_f__exact='') \\\n .filter(revision__gt=self.last_indexed_revision) \\\n .select_related(\n 'submitted_by',\n 'store',\n 'store__translation_project__project',\n 'store__translation_project__language'\n ).values(\n 'id',\n 'revision',\n 'source_f',\n 'target_f',\n 'submitted_by__username',\n 'submitted_by__full_name',\n 'submitted_by__email',\n 'store__translation_project__project__fullname',\n 'store__pootle_path',\n 'store__translation_project__language__code'\n ).order_by()\n\n total = units_qs.count()\n\n if total == 0:\n self.stdout.write(\"No translations to index\")\n sys.exit()\n\n self.stdout.write(\"%s translations to index\" % total)\n\n if options['dry_run']:\n sys.exit()\n\n self.stdout.write(\"\")\n\n for i, unit in enumerate(units_qs.iterator(), start=1):\n fullname = (unit['submitted_by__full_name'] or\n unit['submitted_by__username'])\n project = unit['store__translation_project__project__fullname']\n\n email_md5 = None\n if unit['submitted_by__email']:\n email_md5 = md5(unit['submitted_by__email']).hexdigest()\n\n if (i % 1000 == 0) or (i == total):\n percent = \"%.1f\" % (i * 100.0 / total)\n self.stdout.write(\"%s (%s%%)\" % (i, percent), ending='\\r')\n self.stdout.flush()\n\n yield {\n \"_index\": self.INDEX_NAME,\n \"_type\": unit['store__translation_project__language__code'],\n \"_id\": unit['id'],\n 'revision': int(unit['revision']),\n 'project': project,\n 'path': unit['store__pootle_path'],\n 'username': unit['submitted_by__username'],\n 'fullname': fullname,\n 'email_md5': email_md5,\n 'source': unit['source_f'],\n 'target': unit['target_f'],\n }\n\n if i != total:\n self.stdout.write(\"Expected %d, loaded %d.\" % (total, i))\n\n\n def handle(self, *args, **options):\n if not getattr(settings, 'POOTLE_TM_SERVER', False):\n raise CommandError(\"POOTLE_TM_SERVER is missing from your settings.\")\n\n self.INDEX_NAME = settings.POOTLE_TM_SERVER['default']['INDEX_NAME']\n es = Elasticsearch([{\n 'host': settings.POOTLE_TM_SERVER['default']['HOST'],\n 'port': settings.POOTLE_TM_SERVER['default']['PORT']\n }],\n retry_on_timeout=True\n )\n\n self.last_indexed_revision = -1\n\n if options['rebuild'] and not options['dry_run']:\n if 
es.indices.exists(self.INDEX_NAME):\n es.indices.delete(index=self.INDEX_NAME)\n\n if (not options['rebuild'] and\n not options['overwrite'] and\n es.indices.exists(self.INDEX_NAME)):\n result = es.search(\n index=self.INDEX_NAME,\n body={\n 'query': {\n 'match_all': {}\n },\n 'facets': {\n 'stat1': {\n 'statistical': {\n 'field': 'revision'\n }\n }\n }\n }\n )\n self.last_indexed_revision = result['facets']['stat1']['max']\n\n self.stdout.write(\"Last indexed revision = %s\" % self.last_indexed_revision)\n\n success, _ = helpers.bulk(es, self._parse_translations(**options))\n", "path": "pootle/apps/pootle_app/management/commands/update_tmserver.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom hashlib import md5\nfrom optparse import make_option\nimport os\nimport sys\n\n# This must be run before importing Django.\nos.environ['DJANGO_SETTINGS_MODULE'] = 'pootle.settings'\n\nfrom elasticsearch import helpers, Elasticsearch\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand, CommandError\n\nfrom pootle_store.models import Unit\n\n\nBULK_CHUNK_SIZE = 5000\n\n\nclass Command(BaseCommand):\n help = \"Load Local Translation Memory\"\n option_list = BaseCommand.option_list + (\n make_option('--overwrite',\n action=\"store_true\",\n dest='overwrite',\n default=False,\n help='Process all items, not just the new ones (useful to '\n 'overwrite properties while keeping the index in a '\n 'working condition)'),\n make_option('--rebuild',\n action=\"store_true\",\n dest='rebuild',\n default=False,\n help='Drop the entire index on start and update '\n 'everything from scratch'),\n make_option('--dry-run',\n action=\"store_true\",\n dest='dry_run',\n default=False,\n help='Report only the number of translations to index '\n 'and quit'),\n )\n\n def _parse_translations(self, **options):\n\n units_qs = Unit.simple_objects \\\n .exclude(target_f__isnull=True) \\\n .exclude(target_f__exact='') \\\n .filter(revision__gt=self.last_indexed_revision) \\\n .select_related(\n 'submitted_by',\n 'store',\n 'store__translation_project__project',\n 'store__translation_project__language'\n ).values(\n 'id',\n 'revision',\n 'source_f',\n 'target_f',\n 'submitted_by__username',\n 'submitted_by__full_name',\n 'submitted_by__email',\n 'store__translation_project__project__fullname',\n 'store__pootle_path',\n 'store__translation_project__language__code'\n ).order_by()\n\n total = units_qs.count()\n\n if total == 0:\n self.stdout.write(\"No translations to index\")\n sys.exit()\n\n self.stdout.write(\"%s translations to index\" % total)\n\n if options['dry_run']:\n sys.exit()\n\n self.stdout.write(\"\")\n\n for i, unit in enumerate(units_qs.iterator(), start=1):\n fullname = (unit['submitted_by__full_name'] or\n unit['submitted_by__username'])\n project = unit['store__translation_project__project__fullname']\n\n email_md5 = None\n if unit['submitted_by__email']:\n email_md5 = md5(unit['submitted_by__email']).hexdigest()\n\n if (i % 1000 == 0) or (i == total):\n percent = \"%.1f\" % (i * 100.0 / total)\n self.stdout.write(\"%s (%s%%)\" % (i, percent), ending='\\r')\n self.stdout.flush()\n\n yield {\n \"_index\": self.INDEX_NAME,\n \"_type\": unit['store__translation_project__language__code'],\n 
\"_id\": unit['id'],\n 'revision': int(unit['revision']),\n 'project': project,\n 'path': unit['store__pootle_path'],\n 'username': unit['submitted_by__username'],\n 'fullname': fullname,\n 'email_md5': email_md5,\n 'source': unit['source_f'],\n 'target': unit['target_f'],\n }\n\n if i != total:\n self.stdout.write(\"Expected %d, loaded %d.\" % (total, i))\n\n\n def handle(self, *args, **options):\n if not getattr(settings, 'POOTLE_TM_SERVER', False):\n raise CommandError(\"POOTLE_TM_SERVER is missing from your settings.\")\n\n self.INDEX_NAME = settings.POOTLE_TM_SERVER['default']['INDEX_NAME']\n es = Elasticsearch([{\n 'host': settings.POOTLE_TM_SERVER['default']['HOST'],\n 'port': settings.POOTLE_TM_SERVER['default']['PORT']\n }],\n retry_on_timeout=True\n )\n\n self.last_indexed_revision = -1\n\n if options['rebuild'] and not options['dry_run']:\n if es.indices.exists(self.INDEX_NAME):\n es.indices.delete(index=self.INDEX_NAME)\n\n if not options['dry_run'] and not es.indices.exists(self.INDEX_NAME):\n es.indices.create(index=self.INDEX_NAME)\n\n if (not options['rebuild'] and\n not options['overwrite'] and\n es.indices.exists(self.INDEX_NAME)):\n result = es.search(\n index=self.INDEX_NAME,\n body={\n 'query': {\n 'match_all': {}\n },\n 'facets': {\n 'stat1': {\n 'statistical': {\n 'field': 'revision'\n }\n }\n }\n }\n )\n self.last_indexed_revision = result['facets']['stat1']['max']\n\n self.stdout.write(\"Last indexed revision = %s\" % self.last_indexed_revision)\n\n success, _ = helpers.bulk(es, self._parse_translations(**options))\n", "path": "pootle/apps/pootle_app/management/commands/update_tmserver.py"}]}
3,214
170
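For context on the data flow this command feeds into Elasticsearch, here is a hedged sketch of the action shape consumed by `helpers.bulk`; the index name, type, id, and document fields are invented stand-ins for what `_parse_translations` yields:

```python
from elasticsearch import Elasticsearch, helpers

es = Elasticsearch([{"host": "localhost", "port": 9200}])

def actions():
    # Each action names its target index/type/id and carries the document
    # fields; helpers.bulk streams these to the _bulk endpoint in chunks.
    yield {
        "_index": "translations",
        "_type": "de",   # the command uses the language code as the type
        "_id": 42,
        "revision": 7,
        "source": "Hello",
        "target": "Hallo",
    }

success, errors = helpers.bulk(es, actions())
print("indexed %d documents" % success)
```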
gh_patches_debug_61640
rasdani/github-patches
git_diff
pallets__click-773
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Dynamic bash autocompletion should escape blanks Thanks for #755, wonderful feature! I found an issue when my completion callback returns entries that have (legitimate) blanks in them. In this case, entries get split into separate arguments, whereas I would have expected that blanks are escaped with a backslash (as bash does by default). --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `click/_bashcomplete.py` Content: ``` 1 import collections 2 import copy 3 import os 4 import re 5 6 from .utils import echo 7 from .parser import split_arg_string 8 from .core import MultiCommand, Option, Argument 9 from .types import Choice 10 11 WORDBREAK = '=' 12 13 COMPLETION_SCRIPT = ''' 14 %(complete_func)s() { 15 COMPREPLY=( $( env COMP_WORDS="${COMP_WORDS[*]}" \\ 16 COMP_CWORD=$COMP_CWORD \\ 17 %(autocomplete_var)s=complete $1 ) ) 18 return 0 19 } 20 21 complete -F %(complete_func)s -o default %(script_names)s 22 ''' 23 24 _invalid_ident_char_re = re.compile(r'[^a-zA-Z0-9_]') 25 26 27 def get_completion_script(prog_name, complete_var): 28 cf_name = _invalid_ident_char_re.sub('', prog_name.replace('-', '_')) 29 return (COMPLETION_SCRIPT % { 30 'complete_func': '_%s_completion' % cf_name, 31 'script_names': prog_name, 32 'autocomplete_var': complete_var, 33 }).strip() + ';' 34 35 36 def resolve_ctx(cli, prog_name, args): 37 """ 38 Parse into a hierarchy of contexts. Contexts are connected through the parent variable. 39 :param cli: command definition 40 :param prog_name: the program that is running 41 :param args: full list of args 42 :return: the final context/command parsed 43 """ 44 ctx = cli.make_context(prog_name, args, resilient_parsing=True) 45 while ctx.protected_args + ctx.args and isinstance(ctx.command, MultiCommand): 46 a = ctx.protected_args + ctx.args 47 cmd = ctx.command.get_command(ctx, a[0]) 48 if cmd is None: 49 return None 50 ctx = cmd.make_context(a[0], a[1:], parent=ctx, resilient_parsing=True) 51 return ctx 52 53 54 def start_of_option(param_str): 55 """ 56 :param param_str: param_str to check 57 :return: whether or not this is the start of an option declaration (i.e. starts "-" or "--") 58 """ 59 return param_str and param_str[:1] == '-' 60 61 62 def is_incomplete_option(all_args, cmd_param): 63 """ 64 :param all_args: the full original list of args supplied 65 :param cmd_param: the current command paramter 66 :return: whether or not the last option declaration (i.e. starts "-" or "--") is incomplete and 67 corresponds to this cmd_param. In other words whether this cmd_param option can still accept 68 values 69 """ 70 if cmd_param.is_flag: 71 return False 72 last_option = None 73 for index, arg_str in enumerate(reversed([arg for arg in all_args if arg != WORDBREAK])): 74 if index + 1 > cmd_param.nargs: 75 break 76 if start_of_option(arg_str): 77 last_option = arg_str 78 79 return True if last_option and last_option in cmd_param.opts else False 80 81 82 def is_incomplete_argument(current_params, cmd_param): 83 """ 84 :param current_params: the current params and values for this argument as already entered 85 :param cmd_param: the current command parameter 86 :return: whether or not the last argument is incomplete and corresponds to this cmd_param. 
In 87 other words whether or not the this cmd_param argument can still accept values 88 """ 89 current_param_values = current_params[cmd_param.name] 90 if current_param_values is None: 91 return True 92 if cmd_param.nargs == -1: 93 return True 94 if isinstance(current_param_values, collections.Iterable) \ 95 and cmd_param.nargs > 1 and len(current_param_values) < cmd_param.nargs: 96 return True 97 return False 98 99 def get_user_autocompletions(ctx, args, incomplete, cmd_param): 100 """ 101 :param ctx: context associated with the parsed command 102 :param args: full list of args 103 :param incomplete: the incomplete text to autocomplete 104 :param cmd_param: command definition 105 :return: all the possible user-specified completions for the param 106 """ 107 if isinstance(cmd_param.type, Choice): 108 return cmd_param.type.choices 109 elif cmd_param.autocompletion is not None: 110 return cmd_param.autocompletion(ctx=ctx, 111 args=args, 112 incomplete=incomplete) 113 else: 114 return [] 115 116 def get_choices(cli, prog_name, args, incomplete): 117 """ 118 :param cli: command definition 119 :param prog_name: the program that is running 120 :param args: full list of args 121 :param incomplete: the incomplete text to autocomplete 122 :return: all the possible completions for the incomplete 123 """ 124 all_args = copy.deepcopy(args) 125 126 ctx = resolve_ctx(cli, prog_name, args) 127 if ctx is None: 128 return 129 130 # In newer versions of bash long opts with '='s are partitioned, but it's easier to parse 131 # without the '=' 132 if start_of_option(incomplete) and WORDBREAK in incomplete: 133 partition_incomplete = incomplete.partition(WORDBREAK) 134 all_args.append(partition_incomplete[0]) 135 incomplete = partition_incomplete[2] 136 elif incomplete == WORDBREAK: 137 incomplete = '' 138 139 choices = [] 140 found_param = False 141 if start_of_option(incomplete): 142 # completions for options 143 for param in ctx.command.params: 144 if isinstance(param, Option): 145 choices.extend([param_opt for param_opt in param.opts + param.secondary_opts 146 if param_opt not in all_args or param.multiple]) 147 found_param = True 148 if not found_param: 149 # completion for option values by choices 150 for cmd_param in ctx.command.params: 151 if isinstance(cmd_param, Option) and is_incomplete_option(all_args, cmd_param): 152 choices.extend(get_user_autocompletions(ctx, all_args, incomplete, cmd_param)) 153 found_param = True 154 break 155 if not found_param: 156 # completion for argument values by choices 157 for cmd_param in ctx.command.params: 158 if isinstance(cmd_param, Argument) and is_incomplete_argument(ctx.params, cmd_param): 159 choices.extend(get_user_autocompletions(ctx, all_args, incomplete, cmd_param)) 160 found_param = True 161 break 162 163 if not found_param and isinstance(ctx.command, MultiCommand): 164 # completion for any subcommands 165 choices.extend(ctx.command.list_commands(ctx)) 166 167 for item in choices: 168 if item.startswith(incomplete): 169 yield item 170 171 172 def do_complete(cli, prog_name): 173 cwords = split_arg_string(os.environ['COMP_WORDS']) 174 cword = int(os.environ['COMP_CWORD']) 175 args = cwords[1:cword] 176 try: 177 incomplete = cwords[cword] 178 except IndexError: 179 incomplete = '' 180 181 for item in get_choices(cli, prog_name, args, incomplete): 182 echo(item) 183 184 return True 185 186 187 def bashcomplete(cli, prog_name, complete_var, complete_instr): 188 if complete_instr == 'source': 189 echo(get_completion_script(prog_name, complete_var)) 190 return 
True 191 elif complete_instr == 'complete': 192 return do_complete(cli, prog_name) 193 return False 194 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/click/_bashcomplete.py b/click/_bashcomplete.py --- a/click/_bashcomplete.py +++ b/click/_bashcomplete.py @@ -12,6 +12,7 @@ COMPLETION_SCRIPT = ''' %(complete_func)s() { + local IFS=$'\n' COMPREPLY=( $( env COMP_WORDS="${COMP_WORDS[*]}" \\ COMP_CWORD=$COMP_CWORD \\ %(autocomplete_var)s=complete $1 ) )
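To make the failure mode concrete, here is a hedged sketch of the kind of callback the report describes, using the autocompletion hook introduced in #755; the command name and file titles are invented. `do_complete()` echoes one suggestion per line, and the `local IFS=$'\n'` added above keeps bash from word-splitting a suggestion such as "Meeting Notes 2017.md" into three separate COMPREPLY entries:

```python
import click

def suggest_titles(ctx, args, incomplete):
    # Suggestions that legitimately contain blanks, as in the report.
    titles = ["My Report.txt", "Meeting Notes 2017.md", "todo list.rst"]
    return [t for t in titles if t.startswith(incomplete)]

@click.command()
@click.argument("title", autocompletion=suggest_titles)
def open_doc(title):
    click.echo("opening %s" % title)

if __name__ == "__main__":
    open_doc()
```

Without the IFS override, default bash splitting would leave "Meeting", "Notes", and "2017.md" as three unrelated completion candidates.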
{"golden_diff": "diff --git a/click/_bashcomplete.py b/click/_bashcomplete.py\n--- a/click/_bashcomplete.py\n+++ b/click/_bashcomplete.py\n@@ -12,6 +12,7 @@\n \n COMPLETION_SCRIPT = '''\n %(complete_func)s() {\n+ local IFS=$'\\n'\n COMPREPLY=( $( env COMP_WORDS=\"${COMP_WORDS[*]}\" \\\\\n COMP_CWORD=$COMP_CWORD \\\\\n %(autocomplete_var)s=complete $1 ) )\n", "issue": "Dynamic bash autocompletion should escape blanks\nThanks for #755, wonderful feature! I found an issue when my completion callback returns entries that have (legitimate) blanks in them. In this case, entries get split into separate arguments, whereas I would have expected that blanks are escaped with a backslash (as bash does by default).\n", "before_files": [{"content": "import collections\nimport copy\nimport os\nimport re\n\nfrom .utils import echo\nfrom .parser import split_arg_string\nfrom .core import MultiCommand, Option, Argument\nfrom .types import Choice\n\nWORDBREAK = '='\n\nCOMPLETION_SCRIPT = '''\n%(complete_func)s() {\n COMPREPLY=( $( env COMP_WORDS=\"${COMP_WORDS[*]}\" \\\\\n COMP_CWORD=$COMP_CWORD \\\\\n %(autocomplete_var)s=complete $1 ) )\n return 0\n}\n\ncomplete -F %(complete_func)s -o default %(script_names)s\n'''\n\n_invalid_ident_char_re = re.compile(r'[^a-zA-Z0-9_]')\n\n\ndef get_completion_script(prog_name, complete_var):\n cf_name = _invalid_ident_char_re.sub('', prog_name.replace('-', '_'))\n return (COMPLETION_SCRIPT % {\n 'complete_func': '_%s_completion' % cf_name,\n 'script_names': prog_name,\n 'autocomplete_var': complete_var,\n }).strip() + ';'\n\n\ndef resolve_ctx(cli, prog_name, args):\n \"\"\"\n Parse into a hierarchy of contexts. Contexts are connected through the parent variable.\n :param cli: command definition\n :param prog_name: the program that is running\n :param args: full list of args\n :return: the final context/command parsed\n \"\"\"\n ctx = cli.make_context(prog_name, args, resilient_parsing=True)\n while ctx.protected_args + ctx.args and isinstance(ctx.command, MultiCommand):\n a = ctx.protected_args + ctx.args\n cmd = ctx.command.get_command(ctx, a[0])\n if cmd is None:\n return None\n ctx = cmd.make_context(a[0], a[1:], parent=ctx, resilient_parsing=True)\n return ctx\n\n\ndef start_of_option(param_str):\n \"\"\"\n :param param_str: param_str to check\n :return: whether or not this is the start of an option declaration (i.e. starts \"-\" or \"--\")\n \"\"\"\n return param_str and param_str[:1] == '-'\n\n\ndef is_incomplete_option(all_args, cmd_param):\n \"\"\"\n :param all_args: the full original list of args supplied\n :param cmd_param: the current command paramter\n :return: whether or not the last option declaration (i.e. starts \"-\" or \"--\") is incomplete and\n corresponds to this cmd_param. In other words whether this cmd_param option can still accept\n values\n \"\"\"\n if cmd_param.is_flag:\n return False\n last_option = None\n for index, arg_str in enumerate(reversed([arg for arg in all_args if arg != WORDBREAK])):\n if index + 1 > cmd_param.nargs:\n break\n if start_of_option(arg_str):\n last_option = arg_str\n\n return True if last_option and last_option in cmd_param.opts else False\n\n\ndef is_incomplete_argument(current_params, cmd_param):\n \"\"\"\n :param current_params: the current params and values for this argument as already entered\n :param cmd_param: the current command parameter\n :return: whether or not the last argument is incomplete and corresponds to this cmd_param. 
In\n other words whether or not the this cmd_param argument can still accept values\n \"\"\"\n current_param_values = current_params[cmd_param.name]\n if current_param_values is None:\n return True\n if cmd_param.nargs == -1:\n return True\n if isinstance(current_param_values, collections.Iterable) \\\n and cmd_param.nargs > 1 and len(current_param_values) < cmd_param.nargs:\n return True\n return False\n\ndef get_user_autocompletions(ctx, args, incomplete, cmd_param):\n \"\"\"\n :param ctx: context associated with the parsed command\n :param args: full list of args\n :param incomplete: the incomplete text to autocomplete\n :param cmd_param: command definition\n :return: all the possible user-specified completions for the param\n \"\"\"\n if isinstance(cmd_param.type, Choice):\n return cmd_param.type.choices\n elif cmd_param.autocompletion is not None:\n return cmd_param.autocompletion(ctx=ctx,\n args=args,\n incomplete=incomplete)\n else:\n return []\n\ndef get_choices(cli, prog_name, args, incomplete):\n \"\"\"\n :param cli: command definition\n :param prog_name: the program that is running\n :param args: full list of args\n :param incomplete: the incomplete text to autocomplete\n :return: all the possible completions for the incomplete\n \"\"\"\n all_args = copy.deepcopy(args)\n\n ctx = resolve_ctx(cli, prog_name, args)\n if ctx is None:\n return\n\n # In newer versions of bash long opts with '='s are partitioned, but it's easier to parse\n # without the '='\n if start_of_option(incomplete) and WORDBREAK in incomplete:\n partition_incomplete = incomplete.partition(WORDBREAK)\n all_args.append(partition_incomplete[0])\n incomplete = partition_incomplete[2]\n elif incomplete == WORDBREAK:\n incomplete = ''\n\n choices = []\n found_param = False\n if start_of_option(incomplete):\n # completions for options\n for param in ctx.command.params:\n if isinstance(param, Option):\n choices.extend([param_opt for param_opt in param.opts + param.secondary_opts\n if param_opt not in all_args or param.multiple])\n found_param = True\n if not found_param:\n # completion for option values by choices\n for cmd_param in ctx.command.params:\n if isinstance(cmd_param, Option) and is_incomplete_option(all_args, cmd_param):\n choices.extend(get_user_autocompletions(ctx, all_args, incomplete, cmd_param))\n found_param = True\n break\n if not found_param:\n # completion for argument values by choices\n for cmd_param in ctx.command.params:\n if isinstance(cmd_param, Argument) and is_incomplete_argument(ctx.params, cmd_param):\n choices.extend(get_user_autocompletions(ctx, all_args, incomplete, cmd_param))\n found_param = True\n break\n\n if not found_param and isinstance(ctx.command, MultiCommand):\n # completion for any subcommands\n choices.extend(ctx.command.list_commands(ctx))\n\n for item in choices:\n if item.startswith(incomplete):\n yield item\n\n\ndef do_complete(cli, prog_name):\n cwords = split_arg_string(os.environ['COMP_WORDS'])\n cword = int(os.environ['COMP_CWORD'])\n args = cwords[1:cword]\n try:\n incomplete = cwords[cword]\n except IndexError:\n incomplete = ''\n\n for item in get_choices(cli, prog_name, args, incomplete):\n echo(item)\n\n return True\n\n\ndef bashcomplete(cli, prog_name, complete_var, complete_instr):\n if complete_instr == 'source':\n echo(get_completion_script(prog_name, complete_var))\n return True\n elif complete_instr == 'complete':\n return do_complete(cli, prog_name)\n return False\n", "path": "click/_bashcomplete.py"}], "after_files": [{"content": "import 
collections\nimport copy\nimport os\nimport re\n\nfrom .utils import echo\nfrom .parser import split_arg_string\nfrom .core import MultiCommand, Option, Argument\nfrom .types import Choice\n\nWORDBREAK = '='\n\nCOMPLETION_SCRIPT = '''\n%(complete_func)s() {\n local IFS=$'\\n'\n COMPREPLY=( $( env COMP_WORDS=\"${COMP_WORDS[*]}\" \\\\\n COMP_CWORD=$COMP_CWORD \\\\\n %(autocomplete_var)s=complete $1 ) )\n return 0\n}\n\ncomplete -F %(complete_func)s -o default %(script_names)s\n'''\n\n_invalid_ident_char_re = re.compile(r'[^a-zA-Z0-9_]')\n\n\ndef get_completion_script(prog_name, complete_var):\n cf_name = _invalid_ident_char_re.sub('', prog_name.replace('-', '_'))\n return (COMPLETION_SCRIPT % {\n 'complete_func': '_%s_completion' % cf_name,\n 'script_names': prog_name,\n 'autocomplete_var': complete_var,\n }).strip() + ';'\n\n\ndef resolve_ctx(cli, prog_name, args):\n \"\"\"\n Parse into a hierarchy of contexts. Contexts are connected through the parent variable.\n :param cli: command definition\n :param prog_name: the program that is running\n :param args: full list of args\n :return: the final context/command parsed\n \"\"\"\n ctx = cli.make_context(prog_name, args, resilient_parsing=True)\n while ctx.protected_args + ctx.args and isinstance(ctx.command, MultiCommand):\n a = ctx.protected_args + ctx.args\n cmd = ctx.command.get_command(ctx, a[0])\n if cmd is None:\n return None\n ctx = cmd.make_context(a[0], a[1:], parent=ctx, resilient_parsing=True)\n return ctx\n\n\ndef start_of_option(param_str):\n \"\"\"\n :param param_str: param_str to check\n :return: whether or not this is the start of an option declaration (i.e. starts \"-\" or \"--\")\n \"\"\"\n return param_str and param_str[:1] == '-'\n\n\ndef is_incomplete_option(all_args, cmd_param):\n \"\"\"\n :param all_args: the full original list of args supplied\n :param cmd_param: the current command paramter\n :return: whether or not the last option declaration (i.e. starts \"-\" or \"--\") is incomplete and\n corresponds to this cmd_param. In other words whether this cmd_param option can still accept\n values\n \"\"\"\n if cmd_param.is_flag:\n return False\n last_option = None\n for index, arg_str in enumerate(reversed([arg for arg in all_args if arg != WORDBREAK])):\n if index + 1 > cmd_param.nargs:\n break\n if start_of_option(arg_str):\n last_option = arg_str\n\n return True if last_option and last_option in cmd_param.opts else False\n\n\ndef is_incomplete_argument(current_params, cmd_param):\n \"\"\"\n :param current_params: the current params and values for this argument as already entered\n :param cmd_param: the current command parameter\n :return: whether or not the last argument is incomplete and corresponds to this cmd_param. 
In\n other words whether or not the this cmd_param argument can still accept values\n \"\"\"\n current_param_values = current_params[cmd_param.name]\n if current_param_values is None:\n return True\n if cmd_param.nargs == -1:\n return True\n if isinstance(current_param_values, collections.Iterable) \\\n and cmd_param.nargs > 1 and len(current_param_values) < cmd_param.nargs:\n return True\n return False\n\ndef get_user_autocompletions(ctx, args, incomplete, cmd_param):\n \"\"\"\n :param ctx: context associated with the parsed command\n :param args: full list of args\n :param incomplete: the incomplete text to autocomplete\n :param cmd_param: command definition\n :return: all the possible user-specified completions for the param\n \"\"\"\n if isinstance(cmd_param.type, Choice):\n return cmd_param.type.choices\n elif cmd_param.autocompletion is not None:\n return cmd_param.autocompletion(ctx=ctx,\n args=args,\n incomplete=incomplete)\n else:\n return []\n\ndef get_choices(cli, prog_name, args, incomplete):\n \"\"\"\n :param cli: command definition\n :param prog_name: the program that is running\n :param args: full list of args\n :param incomplete: the incomplete text to autocomplete\n :return: all the possible completions for the incomplete\n \"\"\"\n all_args = copy.deepcopy(args)\n\n ctx = resolve_ctx(cli, prog_name, args)\n if ctx is None:\n return\n\n # In newer versions of bash long opts with '='s are partitioned, but it's easier to parse\n # without the '='\n if start_of_option(incomplete) and WORDBREAK in incomplete:\n partition_incomplete = incomplete.partition(WORDBREAK)\n all_args.append(partition_incomplete[0])\n incomplete = partition_incomplete[2]\n elif incomplete == WORDBREAK:\n incomplete = ''\n\n choices = []\n found_param = False\n if start_of_option(incomplete):\n # completions for options\n for param in ctx.command.params:\n if isinstance(param, Option):\n choices.extend([param_opt for param_opt in param.opts + param.secondary_opts\n if param_opt not in all_args or param.multiple])\n found_param = True\n if not found_param:\n # completion for option values by choices\n for cmd_param in ctx.command.params:\n if isinstance(cmd_param, Option) and is_incomplete_option(all_args, cmd_param):\n choices.extend(get_user_autocompletions(ctx, all_args, incomplete, cmd_param))\n found_param = True\n break\n if not found_param:\n # completion for argument values by choices\n for cmd_param in ctx.command.params:\n if isinstance(cmd_param, Argument) and is_incomplete_argument(ctx.params, cmd_param):\n choices.extend(get_user_autocompletions(ctx, all_args, incomplete, cmd_param))\n found_param = True\n break\n\n if not found_param and isinstance(ctx.command, MultiCommand):\n # completion for any subcommands\n choices.extend(ctx.command.list_commands(ctx))\n\n for item in choices:\n if item.startswith(incomplete):\n yield item\n\n\ndef do_complete(cli, prog_name):\n cwords = split_arg_string(os.environ['COMP_WORDS'])\n cword = int(os.environ['COMP_CWORD'])\n args = cwords[1:cword]\n try:\n incomplete = cwords[cword]\n except IndexError:\n incomplete = ''\n\n for item in get_choices(cli, prog_name, args, incomplete):\n echo(item)\n\n return True\n\n\ndef bashcomplete(cli, prog_name, complete_var, complete_instr):\n if complete_instr == 'source':\n echo(get_completion_script(prog_name, complete_var))\n return True\n elif complete_instr == 'complete':\n return do_complete(cli, prog_name)\n return False\n", "path": "click/_bashcomplete.py"}]}
2,330
105
gh_patches_debug_20325
rasdani/github-patches
git_diff
dask__distributed-3253
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- cannot set WRITEABLE flag to True of this array My colleague is seeing the following error on distirbuted 2.8.0. It looks related to #3180 and this set of lines https://github.com/dask/distributed/blob/35551998d7350cd5ae6a5c24970d8437fd8d521d/distributed/protocol/numpy.py#L110-L114 ``` distributed.worker - INFO - Start worker at: tcp://10.12.205.19:34307 distributed.worker - INFO - Listening to: tcp://10.12.205.19:34307 distributed.worker - INFO - dashboard at: 10.12.205.19:34094 distributed.worker - INFO - Waiting to connect to: tcp://128.117.181.211:37309 distributed.worker - INFO - ------------------------------------------------- distributed.worker - INFO - Threads: 1 distributed.worker - INFO - Memory: 25.00 GB distributed.worker - INFO - Local Directory: /glade/scratch/deppenme/dask-tmp/worker-k05sxxku distributed.worker - INFO - ------------------------------------------------- distributed.worker - INFO - Registered to: tcp://128.117.181.211:37309 distributed.worker - INFO - ------------------------------------------------- distributed.worker - ERROR - cannot set WRITEABLE flag to True of this array Traceback (most recent call last): File "/glade/work/deppenme/miniconda3/envs/analysis/lib/python3.7/site-packages/distributed/worker.py", line 894, in handle_scheduler comm, every_cycle=[self.ensure_communicating, self.ensure_computing] File "/glade/work/deppenme/miniconda3/envs/analysis/lib/python3.7/site-packages/distributed/core.py", line 447, in handle_stream msgs = await comm.read() File "/glade/work/deppenme/miniconda3/envs/analysis/lib/python3.7/site-packages/distributed/comm/tcp.py", line 208, in read frames, deserialize=self.deserialize, deserializers=deserializers File "/glade/work/deppenme/miniconda3/envs/analysis/lib/python3.7/site-packages/distributed/comm/utils.py", line 63, in from_frames res = await offload(_from_frames) File "/glade/work/deppenme/miniconda3/envs/analysis/lib/python3.7/site-packages/distributed/utils.py", line 1371, in offload return await loop.run_in_executor(_offload_executor, fn, *args, **kwargs) File "/glade/work/deppenme/miniconda3/envs/analysis/lib/python3.7/concurrent/futures/thread.py", line 57, in run result = self.fn(*self.args, **self.kwargs) File "/glade/work/deppenme/miniconda3/envs/analysis/lib/python3.7/site-packages/distributed/comm/utils.py", line 51, in _from_frames frames, deserialize=deserialize, deserializers=deserializers File "/glade/work/deppenme/miniconda3/envs/analysis/lib/python3.7/site-packages/distributed/protocol/core.py", line 124, in loads value = _deserialize(head, fs, deserializers=deserializers) File "/glade/work/deppenme/miniconda3/envs/analysis/lib/python3.7/site-packages/distributed/protocol/serialize.py", line 268, in deserialize return loads(header, frames) File "/glade/work/deppenme/miniconda3/envs/analysis/lib/python3.7/site-packages/distributed/protocol/serialize.py", line 54, in dask_loads return loads(header, frames) File "/glade/work/deppenme/miniconda3/envs/analysis/lib/python3.7/site-packages/distributed/protocol/numpy.py", line 113, in deserialize_numpy_ndarray x.setflags(write=writeable) ValueError: cannot set WRITEABLE flag to True of this array ``` This is coming out of a complicated analysis pipeline with xarray, zarr and dask so we don't have a minimal example yet. We could work to find one if you have some pointers on what to look for. 
--- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `distributed/protocol/numpy.py` Content: ``` 1 import math 2 import numpy as np 3 4 from .utils import frame_split_size, merge_frames 5 from .serialize import dask_serialize, dask_deserialize 6 from . import pickle 7 8 from ..utils import log_errors 9 10 11 def itemsize(dt): 12 """ Itemsize of dtype 13 14 Try to return the itemsize of the base element, return 8 as a fallback 15 """ 16 result = dt.base.itemsize 17 if result > 255: 18 result = 8 19 return result 20 21 22 @dask_serialize.register(np.ndarray) 23 def serialize_numpy_ndarray(x): 24 if x.dtype.hasobject: 25 header = {"pickle": True} 26 frames = [pickle.dumps(x)] 27 return header, frames 28 29 # We cannot blindly pickle the dtype as some may fail pickling, 30 # so we have a mixture of strategies. 31 if x.dtype.kind == "V": 32 # Preserving all the information works best when pickling 33 try: 34 # Only use stdlib pickle as cloudpickle is slow when failing 35 # (microseconds instead of nanoseconds) 36 dt = (1, pickle.pickle.dumps(x.dtype)) 37 pickle.loads(dt[1]) # does it unpickle fine? 38 except Exception: 39 # dtype fails pickling => fall back on the descr if reasonable. 40 if x.dtype.type is not np.void or x.dtype.alignment != 1: 41 raise 42 else: 43 dt = (0, x.dtype.descr) 44 else: 45 dt = (0, x.dtype.str) 46 47 # Only serialize non-broadcasted data for arrays with zero strided axes 48 if 0 in x.strides: 49 broadcast_to = (x.shape, x.flags.writeable) 50 x = x[tuple(slice(None) if s != 0 else slice(1) for s in x.strides)] 51 else: 52 broadcast_to = None 53 54 if not x.shape: 55 # 0d array 56 strides = x.strides 57 data = x.ravel() 58 elif x.flags.c_contiguous or x.flags.f_contiguous: 59 # Avoid a copy and respect order when unserializing 60 strides = x.strides 61 data = x.ravel(order="K") 62 else: 63 x = np.ascontiguousarray(x) 64 strides = x.strides 65 data = x.ravel() 66 67 if data.dtype.fields or data.dtype.itemsize > 8: 68 data = data.view("u%d" % math.gcd(x.dtype.itemsize, 8)) 69 70 try: 71 data = data.data 72 except ValueError: 73 # "ValueError: cannot include dtype 'M' in a buffer" 74 data = data.view("u%d" % math.gcd(x.dtype.itemsize, 8)).data 75 76 header = {"dtype": dt, "shape": x.shape, "strides": strides} 77 78 if broadcast_to is not None: 79 header["broadcast_to"] = broadcast_to 80 81 if x.nbytes > 1e5: 82 frames = frame_split_size([data]) 83 else: 84 frames = [data] 85 86 header["lengths"] = [x.nbytes] 87 88 return header, frames 89 90 91 @dask_deserialize.register(np.ndarray) 92 def deserialize_numpy_ndarray(header, frames): 93 with log_errors(): 94 if len(frames) > 1: 95 frames = merge_frames(header, frames) 96 97 if header.get("pickle"): 98 return pickle.loads(frames[0]) 99 100 is_custom, dt = header["dtype"] 101 if is_custom: 102 dt = pickle.loads(dt) 103 else: 104 dt = np.dtype(dt) 105 106 x = np.ndarray( 107 header["shape"], dtype=dt, buffer=frames[0], strides=header["strides"] 108 ) 109 110 if header.get("broadcast_to"): 111 shape, writeable = header["broadcast_to"] 112 x = np.broadcast_to(x, shape) 113 x.setflags(write=writeable) 114 115 return x 116 117 118 @dask_serialize.register(np.ma.core.MaskedConstant) 119 def serialize_numpy_ma_masked(x): 120 return {}, [] 121 122 123 @dask_deserialize.register(np.ma.core.MaskedConstant) 124 def deserialize_numpy_ma_masked(header, frames): 125 return np.ma.masked 126 127 128 @dask_serialize.register(np.ma.core.MaskedArray) 
129 def serialize_numpy_maskedarray(x): 130 data_header, frames = serialize_numpy_ndarray(x.data) 131 header = {"data-header": data_header, "nframes": len(frames)} 132 133 # Serialize mask if present 134 if x.mask is not np.ma.nomask: 135 mask_header, mask_frames = serialize_numpy_ndarray(x.mask) 136 header["mask-header"] = mask_header 137 frames += mask_frames 138 139 # Only a few dtypes have python equivalents msgpack can serialize 140 if isinstance(x.fill_value, (np.integer, np.floating, np.bool_)): 141 serialized_fill_value = (False, x.fill_value.item()) 142 else: 143 serialized_fill_value = (True, pickle.dumps(x.fill_value)) 144 header["fill-value"] = serialized_fill_value 145 146 return header, frames 147 148 149 @dask_deserialize.register(np.ma.core.MaskedArray) 150 def deserialize_numpy_maskedarray(header, frames): 151 data_header = header["data-header"] 152 data_frames = frames[: header["nframes"]] 153 data = deserialize_numpy_ndarray(data_header, data_frames) 154 155 if "mask-header" in header: 156 mask_header = header["mask-header"] 157 mask_frames = frames[header["nframes"] :] 158 mask = deserialize_numpy_ndarray(mask_header, mask_frames) 159 else: 160 mask = np.ma.nomask 161 162 pickled_fv, fill_value = header["fill-value"] 163 if pickled_fv: 164 fill_value = pickle.loads(fill_value) 165 166 return np.ma.masked_array(data, mask=mask, fill_value=fill_value) 167 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/distributed/protocol/numpy.py b/distributed/protocol/numpy.py --- a/distributed/protocol/numpy.py +++ b/distributed/protocol/numpy.py @@ -46,7 +46,7 @@ # Only serialize non-broadcasted data for arrays with zero strided axes if 0 in x.strides: - broadcast_to = (x.shape, x.flags.writeable) + broadcast_to = x.shape x = x[tuple(slice(None) if s != 0 else slice(1) for s in x.strides)] else: broadcast_to = None @@ -103,14 +103,12 @@ else: dt = np.dtype(dt) - x = np.ndarray( - header["shape"], dtype=dt, buffer=frames[0], strides=header["strides"] - ) - if header.get("broadcast_to"): - shape, writeable = header["broadcast_to"] - x = np.broadcast_to(x, shape) - x.setflags(write=writeable) + shape = header["broadcast_to"] + else: + shape = header["shape"] + + x = np.ndarray(shape, dtype=dt, buffer=frames[0], strides=header["strides"]) return x
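A small standalone illustration of what this patch changes, using plain NumPy rather than distributed's real header plumbing; the shapes here are arbitrary. Rebuilding the view from the buffer with the stored zero strides reproduces the broadcast array without ever calling `setflags`, which is the call that started raising on newer NumPy:

```python
import numpy as np

base = np.arange(3, dtype="i8")
x = np.broadcast_to(base, (4, 3))        # read-only view, strides == (0, 8)

# Roughly what the serializer keeps: one real row plus shape/stride metadata.
data = np.ascontiguousarray(x[:1, :])
shape, strides, dtype = x.shape, x.strides, x.dtype

# Roughly what the patched deserializer does: rebuild straight from the buffer,
# reusing the original zero strides instead of broadcasting again.
restored = np.ndarray(shape, dtype=dtype, buffer=data.data, strides=strides)
assert (restored == x).all()

# The old path, np.broadcast_to(...) followed by setflags(write=True), is what
# raised "ValueError: cannot set WRITEABLE flag to True of this array".
```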
{"golden_diff": "diff --git a/distributed/protocol/numpy.py b/distributed/protocol/numpy.py\n--- a/distributed/protocol/numpy.py\n+++ b/distributed/protocol/numpy.py\n@@ -46,7 +46,7 @@\n \n # Only serialize non-broadcasted data for arrays with zero strided axes\n if 0 in x.strides:\n- broadcast_to = (x.shape, x.flags.writeable)\n+ broadcast_to = x.shape\n x = x[tuple(slice(None) if s != 0 else slice(1) for s in x.strides)]\n else:\n broadcast_to = None\n@@ -103,14 +103,12 @@\n else:\n dt = np.dtype(dt)\n \n- x = np.ndarray(\n- header[\"shape\"], dtype=dt, buffer=frames[0], strides=header[\"strides\"]\n- )\n-\n if header.get(\"broadcast_to\"):\n- shape, writeable = header[\"broadcast_to\"]\n- x = np.broadcast_to(x, shape)\n- x.setflags(write=writeable)\n+ shape = header[\"broadcast_to\"]\n+ else:\n+ shape = header[\"shape\"]\n+\n+ x = np.ndarray(shape, dtype=dt, buffer=frames[0], strides=header[\"strides\"])\n \n return x\n", "issue": "cannot set WRITEABLE flag to True of this array\nMy colleague is seeing the following error on distirbuted 2.8.0. It looks related to #3180 and this set of lines\r\n\r\nhttps://github.com/dask/distributed/blob/35551998d7350cd5ae6a5c24970d8437fd8d521d/distributed/protocol/numpy.py#L110-L114\r\n\r\n\r\n```\r\ndistributed.worker - INFO - Start worker at: tcp://10.12.205.19:34307\r\n\r\ndistributed.worker - INFO - Listening to: tcp://10.12.205.19:34307\r\n\r\ndistributed.worker - INFO - dashboard at: 10.12.205.19:34094\r\n\r\ndistributed.worker - INFO - Waiting to connect to: tcp://128.117.181.211:37309\r\n\r\ndistributed.worker - INFO - -------------------------------------------------\r\n\r\ndistributed.worker - INFO - Threads: 1\r\n\r\ndistributed.worker - INFO - Memory: 25.00 GB\r\n\r\ndistributed.worker - INFO - Local Directory: /glade/scratch/deppenme/dask-tmp/worker-k05sxxku\r\n\r\ndistributed.worker - INFO - -------------------------------------------------\r\n\r\ndistributed.worker - INFO - Registered to: tcp://128.117.181.211:37309\r\n\r\ndistributed.worker - INFO - -------------------------------------------------\r\n\r\ndistributed.worker - ERROR - cannot set WRITEABLE flag to True of this array \r\n\r\nTraceback (most recent call last): File \"/glade/work/deppenme/miniconda3/envs/analysis/lib/python3.7/site-packages/distributed/worker.py\", line 894, in handle_scheduler comm, every_cycle=[self.ensure_communicating, self.ensure_computing] File \"/glade/work/deppenme/miniconda3/envs/analysis/lib/python3.7/site-packages/distributed/core.py\", line 447, in handle_stream msgs = await comm.read() File \"/glade/work/deppenme/miniconda3/envs/analysis/lib/python3.7/site-packages/distributed/comm/tcp.py\", line 208, in read frames, deserialize=self.deserialize, deserializers=deserializers \r\nFile \"/glade/work/deppenme/miniconda3/envs/analysis/lib/python3.7/site-packages/distributed/comm/utils.py\", line 63, in from_frames res = await offload(_from_frames) File \"/glade/work/deppenme/miniconda3/envs/analysis/lib/python3.7/site-packages/distributed/utils.py\", line 1371, in offload return await loop.run_in_executor(_offload_executor, fn, *args, **kwargs) File \"/glade/work/deppenme/miniconda3/envs/analysis/lib/python3.7/concurrent/futures/thread.py\", line 57, in run result = self.fn(*self.args, **self.kwargs) \r\nFile \"/glade/work/deppenme/miniconda3/envs/analysis/lib/python3.7/site-packages/distributed/comm/utils.py\", line 51, in _from_frames frames, deserialize=deserialize, deserializers=deserializers File 
\"/glade/work/deppenme/miniconda3/envs/analysis/lib/python3.7/site-packages/distributed/protocol/core.py\", line 124, in loads value = _deserialize(head, fs, deserializers=deserializers) \r\nFile \"/glade/work/deppenme/miniconda3/envs/analysis/lib/python3.7/site-packages/distributed/protocol/serialize.py\", line 268, in deserialize return loads(header, frames) \r\nFile \"/glade/work/deppenme/miniconda3/envs/analysis/lib/python3.7/site-packages/distributed/protocol/serialize.py\", line 54, in dask_loads return loads(header, frames) \r\nFile \"/glade/work/deppenme/miniconda3/envs/analysis/lib/python3.7/site-packages/distributed/protocol/numpy.py\",\r\n line 113, in deserialize_numpy_ndarray x.setflags(write=writeable) ValueError: cannot set WRITEABLE flag to True of this array\r\n```\r\n\r\nThis is coming out of a complicated analysis pipeline with xarray, zarr and dask so we don't have a minimal example yet. We could work to find one if you have some pointers on what to look for.\n", "before_files": [{"content": "import math\nimport numpy as np\n\nfrom .utils import frame_split_size, merge_frames\nfrom .serialize import dask_serialize, dask_deserialize\nfrom . import pickle\n\nfrom ..utils import log_errors\n\n\ndef itemsize(dt):\n \"\"\" Itemsize of dtype\n\n Try to return the itemsize of the base element, return 8 as a fallback\n \"\"\"\n result = dt.base.itemsize\n if result > 255:\n result = 8\n return result\n\n\n@dask_serialize.register(np.ndarray)\ndef serialize_numpy_ndarray(x):\n if x.dtype.hasobject:\n header = {\"pickle\": True}\n frames = [pickle.dumps(x)]\n return header, frames\n\n # We cannot blindly pickle the dtype as some may fail pickling,\n # so we have a mixture of strategies.\n if x.dtype.kind == \"V\":\n # Preserving all the information works best when pickling\n try:\n # Only use stdlib pickle as cloudpickle is slow when failing\n # (microseconds instead of nanoseconds)\n dt = (1, pickle.pickle.dumps(x.dtype))\n pickle.loads(dt[1]) # does it unpickle fine?\n except Exception:\n # dtype fails pickling => fall back on the descr if reasonable.\n if x.dtype.type is not np.void or x.dtype.alignment != 1:\n raise\n else:\n dt = (0, x.dtype.descr)\n else:\n dt = (0, x.dtype.str)\n\n # Only serialize non-broadcasted data for arrays with zero strided axes\n if 0 in x.strides:\n broadcast_to = (x.shape, x.flags.writeable)\n x = x[tuple(slice(None) if s != 0 else slice(1) for s in x.strides)]\n else:\n broadcast_to = None\n\n if not x.shape:\n # 0d array\n strides = x.strides\n data = x.ravel()\n elif x.flags.c_contiguous or x.flags.f_contiguous:\n # Avoid a copy and respect order when unserializing\n strides = x.strides\n data = x.ravel(order=\"K\")\n else:\n x = np.ascontiguousarray(x)\n strides = x.strides\n data = x.ravel()\n\n if data.dtype.fields or data.dtype.itemsize > 8:\n data = data.view(\"u%d\" % math.gcd(x.dtype.itemsize, 8))\n\n try:\n data = data.data\n except ValueError:\n # \"ValueError: cannot include dtype 'M' in a buffer\"\n data = data.view(\"u%d\" % math.gcd(x.dtype.itemsize, 8)).data\n\n header = {\"dtype\": dt, \"shape\": x.shape, \"strides\": strides}\n\n if broadcast_to is not None:\n header[\"broadcast_to\"] = broadcast_to\n\n if x.nbytes > 1e5:\n frames = frame_split_size([data])\n else:\n frames = [data]\n\n header[\"lengths\"] = [x.nbytes]\n\n return header, frames\n\n\n@dask_deserialize.register(np.ndarray)\ndef deserialize_numpy_ndarray(header, frames):\n with log_errors():\n if len(frames) > 1:\n frames = merge_frames(header, frames)\n\n if 
header.get(\"pickle\"):\n return pickle.loads(frames[0])\n\n is_custom, dt = header[\"dtype\"]\n if is_custom:\n dt = pickle.loads(dt)\n else:\n dt = np.dtype(dt)\n\n x = np.ndarray(\n header[\"shape\"], dtype=dt, buffer=frames[0], strides=header[\"strides\"]\n )\n\n if header.get(\"broadcast_to\"):\n shape, writeable = header[\"broadcast_to\"]\n x = np.broadcast_to(x, shape)\n x.setflags(write=writeable)\n\n return x\n\n\n@dask_serialize.register(np.ma.core.MaskedConstant)\ndef serialize_numpy_ma_masked(x):\n return {}, []\n\n\n@dask_deserialize.register(np.ma.core.MaskedConstant)\ndef deserialize_numpy_ma_masked(header, frames):\n return np.ma.masked\n\n\n@dask_serialize.register(np.ma.core.MaskedArray)\ndef serialize_numpy_maskedarray(x):\n data_header, frames = serialize_numpy_ndarray(x.data)\n header = {\"data-header\": data_header, \"nframes\": len(frames)}\n\n # Serialize mask if present\n if x.mask is not np.ma.nomask:\n mask_header, mask_frames = serialize_numpy_ndarray(x.mask)\n header[\"mask-header\"] = mask_header\n frames += mask_frames\n\n # Only a few dtypes have python equivalents msgpack can serialize\n if isinstance(x.fill_value, (np.integer, np.floating, np.bool_)):\n serialized_fill_value = (False, x.fill_value.item())\n else:\n serialized_fill_value = (True, pickle.dumps(x.fill_value))\n header[\"fill-value\"] = serialized_fill_value\n\n return header, frames\n\n\n@dask_deserialize.register(np.ma.core.MaskedArray)\ndef deserialize_numpy_maskedarray(header, frames):\n data_header = header[\"data-header\"]\n data_frames = frames[: header[\"nframes\"]]\n data = deserialize_numpy_ndarray(data_header, data_frames)\n\n if \"mask-header\" in header:\n mask_header = header[\"mask-header\"]\n mask_frames = frames[header[\"nframes\"] :]\n mask = deserialize_numpy_ndarray(mask_header, mask_frames)\n else:\n mask = np.ma.nomask\n\n pickled_fv, fill_value = header[\"fill-value\"]\n if pickled_fv:\n fill_value = pickle.loads(fill_value)\n\n return np.ma.masked_array(data, mask=mask, fill_value=fill_value)\n", "path": "distributed/protocol/numpy.py"}], "after_files": [{"content": "import math\nimport numpy as np\n\nfrom .utils import frame_split_size, merge_frames\nfrom .serialize import dask_serialize, dask_deserialize\nfrom . 
import pickle\n\nfrom ..utils import log_errors\n\n\ndef itemsize(dt):\n \"\"\" Itemsize of dtype\n\n Try to return the itemsize of the base element, return 8 as a fallback\n \"\"\"\n result = dt.base.itemsize\n if result > 255:\n result = 8\n return result\n\n\n@dask_serialize.register(np.ndarray)\ndef serialize_numpy_ndarray(x):\n if x.dtype.hasobject:\n header = {\"pickle\": True}\n frames = [pickle.dumps(x)]\n return header, frames\n\n # We cannot blindly pickle the dtype as some may fail pickling,\n # so we have a mixture of strategies.\n if x.dtype.kind == \"V\":\n # Preserving all the information works best when pickling\n try:\n # Only use stdlib pickle as cloudpickle is slow when failing\n # (microseconds instead of nanoseconds)\n dt = (1, pickle.pickle.dumps(x.dtype))\n pickle.loads(dt[1]) # does it unpickle fine?\n except Exception:\n # dtype fails pickling => fall back on the descr if reasonable.\n if x.dtype.type is not np.void or x.dtype.alignment != 1:\n raise\n else:\n dt = (0, x.dtype.descr)\n else:\n dt = (0, x.dtype.str)\n\n # Only serialize non-broadcasted data for arrays with zero strided axes\n if 0 in x.strides:\n broadcast_to = x.shape\n x = x[tuple(slice(None) if s != 0 else slice(1) for s in x.strides)]\n else:\n broadcast_to = None\n\n if not x.shape:\n # 0d array\n strides = x.strides\n data = x.ravel()\n elif x.flags.c_contiguous or x.flags.f_contiguous:\n # Avoid a copy and respect order when unserializing\n strides = x.strides\n data = x.ravel(order=\"K\")\n else:\n x = np.ascontiguousarray(x)\n strides = x.strides\n data = x.ravel()\n\n if data.dtype.fields or data.dtype.itemsize > 8:\n data = data.view(\"u%d\" % math.gcd(x.dtype.itemsize, 8))\n\n try:\n data = data.data\n except ValueError:\n # \"ValueError: cannot include dtype 'M' in a buffer\"\n data = data.view(\"u%d\" % math.gcd(x.dtype.itemsize, 8)).data\n\n header = {\"dtype\": dt, \"shape\": x.shape, \"strides\": strides}\n\n if broadcast_to is not None:\n header[\"broadcast_to\"] = broadcast_to\n\n if x.nbytes > 1e5:\n frames = frame_split_size([data])\n else:\n frames = [data]\n\n header[\"lengths\"] = [x.nbytes]\n\n return header, frames\n\n\n@dask_deserialize.register(np.ndarray)\ndef deserialize_numpy_ndarray(header, frames):\n with log_errors():\n if len(frames) > 1:\n frames = merge_frames(header, frames)\n\n if header.get(\"pickle\"):\n return pickle.loads(frames[0])\n\n is_custom, dt = header[\"dtype\"]\n if is_custom:\n dt = pickle.loads(dt)\n else:\n dt = np.dtype(dt)\n\n if header.get(\"broadcast_to\"):\n shape = header[\"broadcast_to\"]\n else:\n shape = header[\"shape\"]\n\n x = np.ndarray(shape, dtype=dt, buffer=frames[0], strides=header[\"strides\"])\n\n return x\n\n\n@dask_serialize.register(np.ma.core.MaskedConstant)\ndef serialize_numpy_ma_masked(x):\n return {}, []\n\n\n@dask_deserialize.register(np.ma.core.MaskedConstant)\ndef deserialize_numpy_ma_masked(header, frames):\n return np.ma.masked\n\n\n@dask_serialize.register(np.ma.core.MaskedArray)\ndef serialize_numpy_maskedarray(x):\n data_header, frames = serialize_numpy_ndarray(x.data)\n header = {\"data-header\": data_header, \"nframes\": len(frames)}\n\n # Serialize mask if present\n if x.mask is not np.ma.nomask:\n mask_header, mask_frames = serialize_numpy_ndarray(x.mask)\n header[\"mask-header\"] = mask_header\n frames += mask_frames\n\n # Only a few dtypes have python equivalents msgpack can serialize\n if isinstance(x.fill_value, (np.integer, np.floating, np.bool_)):\n serialized_fill_value = (False, 
x.fill_value.item())\n else:\n serialized_fill_value = (True, pickle.dumps(x.fill_value))\n header[\"fill-value\"] = serialized_fill_value\n\n return header, frames\n\n\n@dask_deserialize.register(np.ma.core.MaskedArray)\ndef deserialize_numpy_maskedarray(header, frames):\n data_header = header[\"data-header\"]\n data_frames = frames[: header[\"nframes\"]]\n data = deserialize_numpy_ndarray(data_header, data_frames)\n\n if \"mask-header\" in header:\n mask_header = header[\"mask-header\"]\n mask_frames = frames[header[\"nframes\"] :]\n mask = deserialize_numpy_ndarray(mask_header, mask_frames)\n else:\n mask = np.ma.nomask\n\n pickled_fv, fill_value = header[\"fill-value\"]\n if pickled_fv:\n fill_value = pickle.loads(fill_value)\n\n return np.ma.masked_array(data, mask=mask, fill_value=fill_value)\n", "path": "distributed/protocol/numpy.py"}]}
2,892
290
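The record above patches `distributed/protocol/numpy.py` so that broadcast ndarrays are rebuilt directly from the frame buffer instead of calling `np.broadcast_to(...)` followed by `setflags(write=...)`, which recent NumPy releases reject with exactly the error shown in the issue traceback. The sketch below is only an illustration of that failure mode and of the zero-stride reconstruction idea; it uses `np.lib.stride_tricks.as_strided` for brevity, whereas the patch itself rebuilds the array through the `np.ndarray` constructor with the stored shape and strides.

```python
import numpy as np

base = np.arange(3.0)
view = np.broadcast_to(base, (4, 3))      # read-only view, row stride is 0
try:
    view.setflags(write=True)             # what the old deserializer attempted
except ValueError as exc:
    print(exc)                            # "cannot set WRITEABLE flag to True of this array"

# Rebuilding the broadcast as a plain strided view over the original buffer
# sidesteps the WRITEABLE flag entirely (the patch does this via np.ndarray(...)).
rebuilt = np.lib.stride_tricks.as_strided(base, shape=(4, 3),
                                          strides=(0, base.strides[0]))
print(np.array_equal(rebuilt, view))      # True
```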
gh_patches_debug_22167
rasdani/github-patches
git_diff
cupy__cupy-3159
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- compatibility issue of `erfinv` and `erfcinv` For `erfinv`, the valid domain is [-1, 1], and at the boundary -1 and +1 the values are -Inf and +Inf, respectively. But outside the boundary, the values are NaN in SciPy: see [here](https://github.com/scipy/scipy/blob/59347ae8b86bcc92c339efe213128f64ab6df98c/scipy/special/cephes/ndtri.c#L146-L149) (the `ndtri` function is the underlying workhorse). Reproducer: ```python >>> from cupyx.scipy.special import erfinv >>> import cupy as cp >>> >>> a = (cp.arange(6) + 1).reshape(2,3) >>> a array([[1, 2, 3], [4, 5, 6]]) >>> erfinv(a) array([[inf, inf, inf], [inf, inf, inf]]) >>> >>> import scipy.special as scp >>> scp.erfinv(cp.asnumpy(a)) array([[inf, nan, nan], [nan, nan, nan]]) ``` Reproducer 2: ```bash $ pytest -v tests/cupyx_tests/scipy_tests/special_tests/test_erf.py ========================================================================= test session starts ========================================================================= platform linux -- Python 3.7.6, pytest-5.3.5, py-1.8.1, pluggy-0.12.0 -- /home/leofang/miniconda3/envs/cupy_dev/bin/python cachedir: .pytest_cache rootdir: /home/leofang/cupy, inifile: setup.cfg collected 10 items tests/cupyx_tests/scipy_tests/special_tests/test_erf.py::TestSpecial::test_erf PASSED [ 10%] tests/cupyx_tests/scipy_tests/special_tests/test_erf.py::TestSpecial::test_erfc PASSED [ 20%] tests/cupyx_tests/scipy_tests/special_tests/test_erf.py::TestSpecial::test_erfcinv FAILED [ 30%] tests/cupyx_tests/scipy_tests/special_tests/test_erf.py::TestSpecial::test_erfcx PASSED [ 40%] tests/cupyx_tests/scipy_tests/special_tests/test_erf.py::TestSpecial::test_erfinv FAILED [ 50%] tests/cupyx_tests/scipy_tests/special_tests/test_erf.py::TestFusionSpecial::test_erf PASSED [ 60%] tests/cupyx_tests/scipy_tests/special_tests/test_erf.py::TestFusionSpecial::test_erfc PASSED [ 70%] tests/cupyx_tests/scipy_tests/special_tests/test_erf.py::TestFusionSpecial::test_erfcinv FAILED [ 80%] tests/cupyx_tests/scipy_tests/special_tests/test_erf.py::TestFusionSpecial::test_erfcx PASSED [ 90%] tests/cupyx_tests/scipy_tests/special_tests/test_erf.py::TestFusionSpecial::test_erfinv FAILED [100%] =============================================================== 4 failed, 6 passed, 1 warning in 0.74s ================================================================ ``` I am a bit surprised to learn this, as the CI doesn't seem to complain at all, so it is likely the behavior is changed in recent SciPy? (I'm using v1.4.1 btw.) The fix should be simple: just add another `else if` branch handling the out of boundary behavior to the ufunc here : https://github.com/cupy/cupy/blob/84343ce8a87d34928abef65d8930ba590189f43f/cupyx/scipy/special/erf.py#L37-L43 I have not dug into `erfcinv` but presumably the source of error is similar. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `cupyx/scipy/special/erf.py` Content: ``` 1 from cupy import core 2 3 4 erf = core.create_ufunc( 5 'cupyx_scipy_erf', ('f->f', 'd->d'), 6 'out0 = erf(in0)', 7 doc='''Error function. 8 9 .. seealso:: :meth:`scipy.special.erf` 10 11 ''') 12 13 14 erfc = core.create_ufunc( 15 'cupyx_scipy_erfc', ('f->f', 'd->d'), 16 'out0 = erfc(in0)', 17 doc='''Complementary error function. 18 19 .. 
seealso:: :meth:`scipy.special.erfc` 20 21 ''') 22 23 24 erfcx = core.create_ufunc( 25 'cupyx_scipy_erfcx', ('f->f', 'd->d'), 26 'out0 = erfcx(in0)', 27 doc='''Scaled complementary error function. 28 29 .. seealso:: :meth:`scipy.special.erfcx` 30 31 ''') 32 33 34 erfinv = core.create_ufunc( 35 'cupyx_scipy_erfinv', ('f->f', 'd->d'), 36 ''' 37 if (in0 < -1) { 38 out0 = -1.0 / 0.0; 39 } else if (in0 > 1) { 40 out0 = 1.0 / 0.0; 41 } else { 42 out0 = erfinv(in0); 43 } 44 ''', 45 doc='''Inverse function of error function. 46 47 .. seealso:: :meth:`scipy.special.erfinv` 48 49 ''') 50 51 52 erfcinv = core.create_ufunc( 53 'cupyx_scipy_erfcinv', ('f->f', 'd->d'), 54 ''' 55 if (in0 < 0) { 56 out0 = 1.0 / 0.0; 57 } else if (in0 > 2) { 58 out0 = -1.0 / 0.0; 59 } else { 60 out0 = erfcinv(in0); 61 } 62 ''', 63 doc='''Inverse function of complementary error function. 64 65 .. seealso:: :meth:`scipy.special.erfcinv` 66 67 ''') 68 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/cupyx/scipy/special/erf.py b/cupyx/scipy/special/erf.py --- a/cupyx/scipy/special/erf.py +++ b/cupyx/scipy/special/erf.py @@ -33,35 +33,27 @@ erfinv = core.create_ufunc( 'cupyx_scipy_erfinv', ('f->f', 'd->d'), - ''' - if (in0 < -1) { - out0 = -1.0 / 0.0; - } else if (in0 > 1) { - out0 = 1.0 / 0.0; - } else { - out0 = erfinv(in0); - } - ''', + 'out0 = erfinv(in0);', doc='''Inverse function of error function. .. seealso:: :meth:`scipy.special.erfinv` + .. note:: + The behavior close to (and outside) the domain follows that of + SciPy v1.4.0+. + ''') erfcinv = core.create_ufunc( 'cupyx_scipy_erfcinv', ('f->f', 'd->d'), - ''' - if (in0 < 0) { - out0 = 1.0 / 0.0; - } else if (in0 > 2) { - out0 = -1.0 / 0.0; - } else { - out0 = erfcinv(in0); - } - ''', + 'out0 = erfcinv(in0);', doc='''Inverse function of complementary error function. .. seealso:: :meth:`scipy.special.erfcinv` + .. note:: + The behavior close to (and outside) the domain follows that of + SciPy v1.4.0+. + ''')
{"golden_diff": "diff --git a/cupyx/scipy/special/erf.py b/cupyx/scipy/special/erf.py\n--- a/cupyx/scipy/special/erf.py\n+++ b/cupyx/scipy/special/erf.py\n@@ -33,35 +33,27 @@\n \n erfinv = core.create_ufunc(\n 'cupyx_scipy_erfinv', ('f->f', 'd->d'),\n- '''\n- if (in0 < -1) {\n- out0 = -1.0 / 0.0;\n- } else if (in0 > 1) {\n- out0 = 1.0 / 0.0;\n- } else {\n- out0 = erfinv(in0);\n- }\n- ''',\n+ 'out0 = erfinv(in0);',\n doc='''Inverse function of error function.\n \n .. seealso:: :meth:`scipy.special.erfinv`\n \n+ .. note::\n+ The behavior close to (and outside) the domain follows that of\n+ SciPy v1.4.0+.\n+\n ''')\n \n \n erfcinv = core.create_ufunc(\n 'cupyx_scipy_erfcinv', ('f->f', 'd->d'),\n- '''\n- if (in0 < 0) {\n- out0 = 1.0 / 0.0;\n- } else if (in0 > 2) {\n- out0 = -1.0 / 0.0;\n- } else {\n- out0 = erfcinv(in0);\n- }\n- ''',\n+ 'out0 = erfcinv(in0);',\n doc='''Inverse function of complementary error function.\n \n .. seealso:: :meth:`scipy.special.erfcinv`\n \n+ .. note::\n+ The behavior close to (and outside) the domain follows that of\n+ SciPy v1.4.0+.\n+\n ''')\n", "issue": "compatibility issue of `erfinv` and `erfcinv` \nFor `erfinv`, the valid domain is [-1, 1], and at the boundary -1 and +1 the values are -Inf and +Inf, respectively. But outside the boundary, the values are NaN in SciPy: see [here](https://github.com/scipy/scipy/blob/59347ae8b86bcc92c339efe213128f64ab6df98c/scipy/special/cephes/ndtri.c#L146-L149) (the `ndtri` function is the underlying workhorse).\r\n\r\nReproducer:\r\n```python\r\n>>> from cupyx.scipy.special import erfinv\r\n>>> import cupy as cp\r\n>>> \r\n>>> a = (cp.arange(6) + 1).reshape(2,3)\r\n>>> a\r\narray([[1, 2, 3],\r\n [4, 5, 6]])\r\n>>> erfinv(a)\r\narray([[inf, inf, inf],\r\n [inf, inf, inf]])\r\n>>>\r\n>>> import scipy.special as scp\r\n>>> scp.erfinv(cp.asnumpy(a))\r\narray([[inf, nan, nan],\r\n [nan, nan, nan]])\r\n```\r\n\r\nReproducer 2:\r\n```bash\r\n$ pytest -v tests/cupyx_tests/scipy_tests/special_tests/test_erf.py\r\n========================================================================= test session starts =========================================================================\r\nplatform linux -- Python 3.7.6, pytest-5.3.5, py-1.8.1, pluggy-0.12.0 -- /home/leofang/miniconda3/envs/cupy_dev/bin/python\r\ncachedir: .pytest_cache\r\nrootdir: /home/leofang/cupy, inifile: setup.cfg\r\ncollected 10 items \r\n\r\ntests/cupyx_tests/scipy_tests/special_tests/test_erf.py::TestSpecial::test_erf PASSED [ 10%]\r\ntests/cupyx_tests/scipy_tests/special_tests/test_erf.py::TestSpecial::test_erfc PASSED [ 20%]\r\ntests/cupyx_tests/scipy_tests/special_tests/test_erf.py::TestSpecial::test_erfcinv FAILED [ 30%]\r\ntests/cupyx_tests/scipy_tests/special_tests/test_erf.py::TestSpecial::test_erfcx PASSED [ 40%]\r\ntests/cupyx_tests/scipy_tests/special_tests/test_erf.py::TestSpecial::test_erfinv FAILED [ 50%]\r\ntests/cupyx_tests/scipy_tests/special_tests/test_erf.py::TestFusionSpecial::test_erf PASSED [ 60%]\r\ntests/cupyx_tests/scipy_tests/special_tests/test_erf.py::TestFusionSpecial::test_erfc PASSED [ 70%]\r\ntests/cupyx_tests/scipy_tests/special_tests/test_erf.py::TestFusionSpecial::test_erfcinv FAILED [ 80%]\r\ntests/cupyx_tests/scipy_tests/special_tests/test_erf.py::TestFusionSpecial::test_erfcx PASSED [ 90%]\r\ntests/cupyx_tests/scipy_tests/special_tests/test_erf.py::TestFusionSpecial::test_erfinv FAILED [100%]\r\n\r\n=============================================================== 4 failed, 6 passed, 1 warning in 0.74s 
================================================================\r\n```\r\n\r\nI am a bit surprised to learn this, as the CI doesn't seem to complain at all, so it is likely the behavior is changed in recent SciPy? (I'm using v1.4.1 btw.) \r\n\r\nThe fix should be simple: just add another `else if` branch handling the out of boundary behavior to the ufunc here : https://github.com/cupy/cupy/blob/84343ce8a87d34928abef65d8930ba590189f43f/cupyx/scipy/special/erf.py#L37-L43\r\n\r\nI have not dug into `erfcinv` but presumably the source of error is similar. \n", "before_files": [{"content": "from cupy import core\n\n\nerf = core.create_ufunc(\n 'cupyx_scipy_erf', ('f->f', 'd->d'),\n 'out0 = erf(in0)',\n doc='''Error function.\n\n .. seealso:: :meth:`scipy.special.erf`\n\n ''')\n\n\nerfc = core.create_ufunc(\n 'cupyx_scipy_erfc', ('f->f', 'd->d'),\n 'out0 = erfc(in0)',\n doc='''Complementary error function.\n\n .. seealso:: :meth:`scipy.special.erfc`\n\n ''')\n\n\nerfcx = core.create_ufunc(\n 'cupyx_scipy_erfcx', ('f->f', 'd->d'),\n 'out0 = erfcx(in0)',\n doc='''Scaled complementary error function.\n\n .. seealso:: :meth:`scipy.special.erfcx`\n\n ''')\n\n\nerfinv = core.create_ufunc(\n 'cupyx_scipy_erfinv', ('f->f', 'd->d'),\n '''\n if (in0 < -1) {\n out0 = -1.0 / 0.0;\n } else if (in0 > 1) {\n out0 = 1.0 / 0.0;\n } else {\n out0 = erfinv(in0);\n }\n ''',\n doc='''Inverse function of error function.\n\n .. seealso:: :meth:`scipy.special.erfinv`\n\n ''')\n\n\nerfcinv = core.create_ufunc(\n 'cupyx_scipy_erfcinv', ('f->f', 'd->d'),\n '''\n if (in0 < 0) {\n out0 = 1.0 / 0.0;\n } else if (in0 > 2) {\n out0 = -1.0 / 0.0;\n } else {\n out0 = erfcinv(in0);\n }\n ''',\n doc='''Inverse function of complementary error function.\n\n .. seealso:: :meth:`scipy.special.erfcinv`\n\n ''')\n", "path": "cupyx/scipy/special/erf.py"}], "after_files": [{"content": "from cupy import core\n\n\nerf = core.create_ufunc(\n 'cupyx_scipy_erf', ('f->f', 'd->d'),\n 'out0 = erf(in0)',\n doc='''Error function.\n\n .. seealso:: :meth:`scipy.special.erf`\n\n ''')\n\n\nerfc = core.create_ufunc(\n 'cupyx_scipy_erfc', ('f->f', 'd->d'),\n 'out0 = erfc(in0)',\n doc='''Complementary error function.\n\n .. seealso:: :meth:`scipy.special.erfc`\n\n ''')\n\n\nerfcx = core.create_ufunc(\n 'cupyx_scipy_erfcx', ('f->f', 'd->d'),\n 'out0 = erfcx(in0)',\n doc='''Scaled complementary error function.\n\n .. seealso:: :meth:`scipy.special.erfcx`\n\n ''')\n\n\nerfinv = core.create_ufunc(\n 'cupyx_scipy_erfinv', ('f->f', 'd->d'),\n 'out0 = erfinv(in0);',\n doc='''Inverse function of error function.\n\n .. seealso:: :meth:`scipy.special.erfinv`\n\n .. note::\n The behavior close to (and outside) the domain follows that of\n SciPy v1.4.0+.\n\n ''')\n\n\nerfcinv = core.create_ufunc(\n 'cupyx_scipy_erfcinv', ('f->f', 'd->d'),\n 'out0 = erfcinv(in0);',\n doc='''Inverse function of complementary error function.\n\n .. seealso:: :meth:`scipy.special.erfcinv`\n\n .. note::\n The behavior close to (and outside) the domain follows that of\n SciPy v1.4.0+.\n\n ''')\n", "path": "cupyx/scipy/special/erf.py"}]}
1,777
447
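For reference, the SciPy behaviour the CuPy patch above aligns with: the inverse error functions hit ±inf exactly at the domain boundaries and return NaN outside them, rather than saturating at infinity. A minimal check, assuming SciPy >= 1.4 is installed:

```python
import numpy as np
import scipy.special as sc

x = np.array([-1.5, -1.0, 0.0, 1.0, 1.5])
print(sc.erfinv(x))      # [ nan -inf   0.  inf  nan]  -- domain is [-1, 1]

y = np.array([-0.5, 0.0, 1.0, 2.0, 2.5])
print(sc.erfcinv(y))     # [ nan  inf   0. -inf  nan]  -- domain is [0, 2]
```

The golden diff simply drops the hand-written clamping branches and defers to the device-side `erfinv`/`erfcinv` functions, whose out-of-domain behaviour the added doc note says matches SciPy v1.4.0+.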
gh_patches_debug_17996
rasdani/github-patches
git_diff
cookiecutter__cookiecutter-768
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Wrong hook executed when a tilde-suffixed file of the same name exists - Cookiecutter version: 1.4.0 - Template project url: https://github.com/thorgate/django-project-template - Python version: 3.4 - Operating System: Ubuntu 15.10 wily ### Description: When using gedit or some other text editor that pollutes the directory with backup files ending with a tilde, cookiecutter mistakes that for the "real" hook it should run. This resulted in cookiecutter running a ridiculously outdated version of my pre-gen hook. The obvious solution is to just remove `pre_gen_project.py~`, which works, but I believe ideally cookiecutter shouldn't be running it in the first place. ### What I've run: ``` gedit django-template/hooks/pre_gen_project.py cookiecutter django-template ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `cookiecutter/hooks.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 """ 4 cookiecutter.hooks 5 ------------------ 6 7 Functions for discovering and executing various cookiecutter hooks. 8 """ 9 10 import io 11 import logging 12 import os 13 import subprocess 14 import sys 15 import tempfile 16 17 from jinja2 import Template 18 19 from cookiecutter import utils 20 from .exceptions import FailedHookException 21 22 23 _HOOKS = [ 24 'pre_gen_project', 25 'post_gen_project', 26 # TODO: other hooks should be listed here 27 ] 28 EXIT_SUCCESS = 0 29 30 31 def find_hooks(): 32 """ 33 Must be called with the project template as the current working directory. 34 Returns a dict of all hook scripts provided. 35 Dict's key will be the hook/script's name, without extension, while 36 values will be the absolute path to the script. 37 Missing scripts will not be included in the returned dict. 38 """ 39 hooks_dir = 'hooks' 40 r = {} 41 logging.debug('hooks_dir is {0}'.format(hooks_dir)) 42 if not os.path.isdir(hooks_dir): 43 logging.debug('No hooks/ dir in template_dir') 44 return r 45 for f in os.listdir(hooks_dir): 46 basename = os.path.splitext(os.path.basename(f))[0] 47 if basename in _HOOKS: 48 r[basename] = os.path.abspath(os.path.join(hooks_dir, f)) 49 return r 50 51 52 def run_script(script_path, cwd='.'): 53 """ 54 Executes a script from a working directory. 55 56 :param script_path: Absolute path to the script to run. 57 :param cwd: The directory to run the script from. 58 """ 59 run_thru_shell = sys.platform.startswith('win') 60 if script_path.endswith('.py'): 61 script_command = [sys.executable, script_path] 62 else: 63 script_command = [script_path] 64 65 utils.make_executable(script_path) 66 67 proc = subprocess.Popen( 68 script_command, 69 shell=run_thru_shell, 70 cwd=cwd 71 ) 72 exit_status = proc.wait() 73 if exit_status != EXIT_SUCCESS: 74 raise FailedHookException( 75 "Hook script failed (exit status: %d)" % exit_status) 76 77 78 def run_script_with_context(script_path, cwd, context): 79 """ 80 Executes a script after rendering with it Jinja. 81 82 :param script_path: Absolute path to the script to run. 83 :param cwd: The directory to run the script from. 84 :param context: Cookiecutter project template context. 
85 """ 86 _, extension = os.path.splitext(script_path) 87 88 contents = io.open(script_path, 'r', encoding='utf-8').read() 89 90 with tempfile.NamedTemporaryFile( 91 delete=False, 92 mode='wb', 93 suffix=extension 94 ) as temp: 95 output = Template(contents).render(**context) 96 temp.write(output.encode('utf-8')) 97 98 run_script(temp.name, cwd) 99 100 101 def run_hook(hook_name, project_dir, context): 102 """ 103 Try to find and execute a hook from the specified project directory. 104 105 :param hook_name: The hook to execute. 106 :param project_dir: The directory to execute the script from. 107 :param context: Cookiecutter project context. 108 """ 109 script = find_hooks().get(hook_name) 110 if script is None: 111 logging.debug('No hooks found') 112 return 113 run_script_with_context(script, project_dir, context) 114 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/cookiecutter/hooks.py b/cookiecutter/hooks.py --- a/cookiecutter/hooks.py +++ b/cookiecutter/hooks.py @@ -37,16 +37,20 @@ Missing scripts will not be included in the returned dict. """ hooks_dir = 'hooks' - r = {} + hooks = {} logging.debug('hooks_dir is {0}'.format(hooks_dir)) + if not os.path.isdir(hooks_dir): logging.debug('No hooks/ dir in template_dir') - return r + return hooks + for f in os.listdir(hooks_dir): - basename = os.path.splitext(os.path.basename(f))[0] - if basename in _HOOKS: - r[basename] = os.path.abspath(os.path.join(hooks_dir, f)) - return r + filename = os.path.basename(f) + basename = os.path.splitext(filename)[0] + + if basename in _HOOKS and not filename.endswith('~'): + hooks[basename] = os.path.abspath(os.path.join(hooks_dir, f)) + return hooks def run_script(script_path, cwd='.'):
{"golden_diff": "diff --git a/cookiecutter/hooks.py b/cookiecutter/hooks.py\n--- a/cookiecutter/hooks.py\n+++ b/cookiecutter/hooks.py\n@@ -37,16 +37,20 @@\n Missing scripts will not be included in the returned dict.\n \"\"\"\n hooks_dir = 'hooks'\n- r = {}\n+ hooks = {}\n logging.debug('hooks_dir is {0}'.format(hooks_dir))\n+\n if not os.path.isdir(hooks_dir):\n logging.debug('No hooks/ dir in template_dir')\n- return r\n+ return hooks\n+\n for f in os.listdir(hooks_dir):\n- basename = os.path.splitext(os.path.basename(f))[0]\n- if basename in _HOOKS:\n- r[basename] = os.path.abspath(os.path.join(hooks_dir, f))\n- return r\n+ filename = os.path.basename(f)\n+ basename = os.path.splitext(filename)[0]\n+\n+ if basename in _HOOKS and not filename.endswith('~'):\n+ hooks[basename] = os.path.abspath(os.path.join(hooks_dir, f))\n+ return hooks\n \n \n def run_script(script_path, cwd='.'):\n", "issue": "Wrong hook executed when a tilde-suffixed file of the same name exists\n- Cookiecutter version: 1.4.0\n- Template project url: https://github.com/thorgate/django-project-template\n- Python version: 3.4\n- Operating System: Ubuntu 15.10 wily\n### Description:\n\nWhen using gedit or some other text editor that pollutes the directory with backup files ending with a tilde, cookiecutter mistakes that for the \"real\" hook it should run. This resulted in cookiecutter running a ridiculously outdated version of my pre-gen hook.\n\nThe obvious solution is to just remove `pre_gen_project.py~`, which works, but I believe ideally cookiecutter shouldn't be running it in the first place.\n### What I've run:\n\n```\ngedit django-template/hooks/pre_gen_project.py\ncookiecutter django-template\n```\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.hooks\n------------------\n\nFunctions for discovering and executing various cookiecutter hooks.\n\"\"\"\n\nimport io\nimport logging\nimport os\nimport subprocess\nimport sys\nimport tempfile\n\nfrom jinja2 import Template\n\nfrom cookiecutter import utils\nfrom .exceptions import FailedHookException\n\n\n_HOOKS = [\n 'pre_gen_project',\n 'post_gen_project',\n # TODO: other hooks should be listed here\n]\nEXIT_SUCCESS = 0\n\n\ndef find_hooks():\n \"\"\"\n Must be called with the project template as the current working directory.\n Returns a dict of all hook scripts provided.\n Dict's key will be the hook/script's name, without extension, while\n values will be the absolute path to the script.\n Missing scripts will not be included in the returned dict.\n \"\"\"\n hooks_dir = 'hooks'\n r = {}\n logging.debug('hooks_dir is {0}'.format(hooks_dir))\n if not os.path.isdir(hooks_dir):\n logging.debug('No hooks/ dir in template_dir')\n return r\n for f in os.listdir(hooks_dir):\n basename = os.path.splitext(os.path.basename(f))[0]\n if basename in _HOOKS:\n r[basename] = os.path.abspath(os.path.join(hooks_dir, f))\n return r\n\n\ndef run_script(script_path, cwd='.'):\n \"\"\"\n Executes a script from a working directory.\n\n :param script_path: Absolute path to the script to run.\n :param cwd: The directory to run the script from.\n \"\"\"\n run_thru_shell = sys.platform.startswith('win')\n if script_path.endswith('.py'):\n script_command = [sys.executable, script_path]\n else:\n script_command = [script_path]\n\n utils.make_executable(script_path)\n\n proc = subprocess.Popen(\n script_command,\n shell=run_thru_shell,\n cwd=cwd\n )\n exit_status = proc.wait()\n if exit_status != EXIT_SUCCESS:\n raise FailedHookException(\n \"Hook script failed 
(exit status: %d)\" % exit_status)\n\n\ndef run_script_with_context(script_path, cwd, context):\n \"\"\"\n Executes a script after rendering with it Jinja.\n\n :param script_path: Absolute path to the script to run.\n :param cwd: The directory to run the script from.\n :param context: Cookiecutter project template context.\n \"\"\"\n _, extension = os.path.splitext(script_path)\n\n contents = io.open(script_path, 'r', encoding='utf-8').read()\n\n with tempfile.NamedTemporaryFile(\n delete=False,\n mode='wb',\n suffix=extension\n ) as temp:\n output = Template(contents).render(**context)\n temp.write(output.encode('utf-8'))\n\n run_script(temp.name, cwd)\n\n\ndef run_hook(hook_name, project_dir, context):\n \"\"\"\n Try to find and execute a hook from the specified project directory.\n\n :param hook_name: The hook to execute.\n :param project_dir: The directory to execute the script from.\n :param context: Cookiecutter project context.\n \"\"\"\n script = find_hooks().get(hook_name)\n if script is None:\n logging.debug('No hooks found')\n return\n run_script_with_context(script, project_dir, context)\n", "path": "cookiecutter/hooks.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.hooks\n------------------\n\nFunctions for discovering and executing various cookiecutter hooks.\n\"\"\"\n\nimport io\nimport logging\nimport os\nimport subprocess\nimport sys\nimport tempfile\n\nfrom jinja2 import Template\n\nfrom cookiecutter import utils\nfrom .exceptions import FailedHookException\n\n\n_HOOKS = [\n 'pre_gen_project',\n 'post_gen_project',\n # TODO: other hooks should be listed here\n]\nEXIT_SUCCESS = 0\n\n\ndef find_hooks():\n \"\"\"\n Must be called with the project template as the current working directory.\n Returns a dict of all hook scripts provided.\n Dict's key will be the hook/script's name, without extension, while\n values will be the absolute path to the script.\n Missing scripts will not be included in the returned dict.\n \"\"\"\n hooks_dir = 'hooks'\n hooks = {}\n logging.debug('hooks_dir is {0}'.format(hooks_dir))\n\n if not os.path.isdir(hooks_dir):\n logging.debug('No hooks/ dir in template_dir')\n return hooks\n\n for f in os.listdir(hooks_dir):\n filename = os.path.basename(f)\n basename = os.path.splitext(filename)[0]\n\n if basename in _HOOKS and not filename.endswith('~'):\n hooks[basename] = os.path.abspath(os.path.join(hooks_dir, f))\n return hooks\n\n\ndef run_script(script_path, cwd='.'):\n \"\"\"\n Executes a script from a working directory.\n\n :param script_path: Absolute path to the script to run.\n :param cwd: The directory to run the script from.\n \"\"\"\n run_thru_shell = sys.platform.startswith('win')\n if script_path.endswith('.py'):\n script_command = [sys.executable, script_path]\n else:\n script_command = [script_path]\n\n utils.make_executable(script_path)\n\n proc = subprocess.Popen(\n script_command,\n shell=run_thru_shell,\n cwd=cwd\n )\n exit_status = proc.wait()\n if exit_status != EXIT_SUCCESS:\n raise FailedHookException(\n \"Hook script failed (exit status: %d)\" % exit_status)\n\n\ndef run_script_with_context(script_path, cwd, context):\n \"\"\"\n Executes a script after rendering with it Jinja.\n\n :param script_path: Absolute path to the script to run.\n :param cwd: The directory to run the script from.\n :param context: Cookiecutter project template context.\n \"\"\"\n _, extension = os.path.splitext(script_path)\n\n contents = io.open(script_path, 'r', encoding='utf-8').read()\n\n with 
tempfile.NamedTemporaryFile(\n delete=False,\n mode='wb',\n suffix=extension\n ) as temp:\n output = Template(contents).render(**context)\n temp.write(output.encode('utf-8'))\n\n run_script(temp.name, cwd)\n\n\ndef run_hook(hook_name, project_dir, context):\n \"\"\"\n Try to find and execute a hook from the specified project directory.\n\n :param hook_name: The hook to execute.\n :param project_dir: The directory to execute the script from.\n :param context: Cookiecutter project context.\n \"\"\"\n script = find_hooks().get(hook_name)\n if script is None:\n logging.debug('No hooks found')\n return\n run_script_with_context(script, project_dir, context)\n", "path": "cookiecutter/hooks.py"}]}
1,402
256
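The essence of the cookiecutter fix above is a filename filter: a gedit-style backup such as `pre_gen_project.py~` still splits to the basename `pre_gen_project`, so a basename check alone picks it up as a hook. A small standalone sketch of the patched predicate (the helper name `is_valid_hook` is only for illustration; the real change lives inside `find_hooks()`):

```python
import os

_HOOKS = ('pre_gen_project', 'post_gen_project')

def is_valid_hook(filename):
    """Accept real hook scripts, skip editor backup files ending in '~'."""
    basename = os.path.splitext(filename)[0]
    return basename in _HOOKS and not filename.endswith('~')

print(is_valid_hook('pre_gen_project.py'))    # True
print(is_valid_hook('pre_gen_project.py~'))   # False: tilde backup is ignored
print(is_valid_hook('README.md'))             # False: not a known hook name
```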
gh_patches_debug_29442
rasdani/github-patches
git_diff
python-discord__bot-466
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Reminder details in confirmation message. The reminder command currently only replies with `Your reminder has been created successfully!`. This can be improved by providing the actual reminder contents in the confirmation and the datetime / humanized timedelta the reminder will be due to ensure the intended duration was processed. For the message sent once the reminder is due, a jump url to the original command message so we can jump back into context of the conversation when the reminder was made would be good to add. (Previously had mixed up wording for this section). This would do well as a first issue if it's not gotten to in short time. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `bot/cogs/reminders.py` Content: ``` 1 import asyncio 2 import logging 3 import random 4 import textwrap 5 from datetime import datetime 6 from operator import itemgetter 7 from typing import Optional 8 9 from dateutil.relativedelta import relativedelta 10 from discord import Colour, Embed, Message 11 from discord.ext.commands import Bot, Cog, Context, group 12 13 from bot.constants import Channels, Icons, NEGATIVE_REPLIES, POSITIVE_REPLIES, STAFF_ROLES 14 from bot.converters import Duration 15 from bot.pagination import LinePaginator 16 from bot.utils.checks import without_role_check 17 from bot.utils.scheduling import Scheduler 18 from bot.utils.time import humanize_delta, wait_until 19 20 log = logging.getLogger(__name__) 21 22 WHITELISTED_CHANNELS = (Channels.bot,) 23 MAXIMUM_REMINDERS = 5 24 25 26 class Reminders(Scheduler, Cog): 27 """Provide in-channel reminder functionality.""" 28 29 def __init__(self, bot: Bot): 30 self.bot = bot 31 super().__init__() 32 33 self.bot.loop.create_task(self.reschedule_reminders()) 34 35 async def reschedule_reminders(self) -> None: 36 """Get all current reminders from the API and reschedule them.""" 37 await self.bot.wait_until_ready() 38 response = await self.bot.api_client.get( 39 'bot/reminders', 40 params={'active': 'true'} 41 ) 42 43 now = datetime.utcnow() 44 loop = asyncio.get_event_loop() 45 46 for reminder in response: 47 remind_at = datetime.fromisoformat(reminder['expiration'][:-1]) 48 49 # If the reminder is already overdue ... 50 if remind_at < now: 51 late = relativedelta(now, remind_at) 52 await self.send_reminder(reminder, late) 53 54 else: 55 self.schedule_task(loop, reminder["id"], reminder) 56 57 @staticmethod 58 async def _send_confirmation(ctx: Context, on_success: str) -> None: 59 """Send an embed confirming the reminder change was made successfully.""" 60 embed = Embed() 61 embed.colour = Colour.green() 62 embed.title = random.choice(POSITIVE_REPLIES) 63 embed.description = on_success 64 await ctx.send(embed=embed) 65 66 async def _scheduled_task(self, reminder: dict) -> None: 67 """A coroutine which sends the reminder once the time is reached, and cancels the running task.""" 68 reminder_id = reminder["id"] 69 reminder_datetime = datetime.fromisoformat(reminder['expiration'][:-1]) 70 71 # Send the reminder message once the desired duration has passed 72 await wait_until(reminder_datetime) 73 await self.send_reminder(reminder) 74 75 log.debug(f"Deleting reminder {reminder_id} (the user has been reminded).") 76 await self._delete_reminder(reminder_id) 77 78 # Now we can begone with it from our schedule list. 
79 self.cancel_task(reminder_id) 80 81 async def _delete_reminder(self, reminder_id: str) -> None: 82 """Delete a reminder from the database, given its ID, and cancel the running task.""" 83 await self.bot.api_client.delete('bot/reminders/' + str(reminder_id)) 84 85 # Now we can remove it from the schedule list 86 self.cancel_task(reminder_id) 87 88 async def _reschedule_reminder(self, reminder: dict) -> None: 89 """Reschedule a reminder object.""" 90 loop = asyncio.get_event_loop() 91 92 self.cancel_task(reminder["id"]) 93 self.schedule_task(loop, reminder["id"], reminder) 94 95 async def send_reminder(self, reminder: dict, late: relativedelta = None) -> None: 96 """Send the reminder.""" 97 channel = self.bot.get_channel(reminder["channel_id"]) 98 user = self.bot.get_user(reminder["author"]) 99 100 embed = Embed() 101 embed.colour = Colour.blurple() 102 embed.set_author( 103 icon_url=Icons.remind_blurple, 104 name="It has arrived!" 105 ) 106 107 embed.description = f"Here's your reminder: `{reminder['content']}`" 108 109 if late: 110 embed.colour = Colour.red() 111 embed.set_author( 112 icon_url=Icons.remind_red, 113 name=f"Sorry it arrived {humanize_delta(late, max_units=2)} late!" 114 ) 115 116 await channel.send( 117 content=user.mention, 118 embed=embed 119 ) 120 await self._delete_reminder(reminder["id"]) 121 122 @group(name="remind", aliases=("reminder", "reminders"), invoke_without_command=True) 123 async def remind_group(self, ctx: Context, expiration: Duration, *, content: str) -> None: 124 """Commands for managing your reminders.""" 125 await ctx.invoke(self.new_reminder, expiration=expiration, content=content) 126 127 @remind_group.command(name="new", aliases=("add", "create")) 128 async def new_reminder(self, ctx: Context, expiration: Duration, *, content: str) -> Optional[Message]: 129 """ 130 Set yourself a simple reminder. 131 132 Expiration is parsed per: http://strftime.org/ 133 """ 134 embed = Embed() 135 136 # If the user is not staff, we need to verify whether or not to make a reminder at all. 137 if without_role_check(ctx, *STAFF_ROLES): 138 139 # If they don't have permission to set a reminder in this channel 140 if ctx.channel.id not in WHITELISTED_CHANNELS: 141 embed.colour = Colour.red() 142 embed.title = random.choice(NEGATIVE_REPLIES) 143 embed.description = "Sorry, you can't do that here!" 144 145 return await ctx.send(embed=embed) 146 147 # Get their current active reminders 148 active_reminders = await self.bot.api_client.get( 149 'bot/reminders', 150 params={ 151 'author__id': str(ctx.author.id) 152 } 153 ) 154 155 # Let's limit this, so we don't get 10 000 156 # reminders from kip or something like that :P 157 if len(active_reminders) > MAXIMUM_REMINDERS: 158 embed.colour = Colour.red() 159 embed.title = random.choice(NEGATIVE_REPLIES) 160 embed.description = "You have too many active reminders!" 161 162 return await ctx.send(embed=embed) 163 164 # Now we can attempt to actually set the reminder. 165 reminder = await self.bot.api_client.post( 166 'bot/reminders', 167 json={ 168 'author': ctx.author.id, 169 'channel_id': ctx.message.channel.id, 170 'content': content, 171 'expiration': expiration.isoformat() 172 } 173 ) 174 175 # Confirm to the user that it worked. 176 await self._send_confirmation( 177 ctx, on_success="Your reminder has been created successfully!" 
178 ) 179 180 loop = asyncio.get_event_loop() 181 self.schedule_task(loop, reminder["id"], reminder) 182 183 @remind_group.command(name="list") 184 async def list_reminders(self, ctx: Context) -> Optional[Message]: 185 """View a paginated embed of all reminders for your user.""" 186 # Get all the user's reminders from the database. 187 data = await self.bot.api_client.get( 188 'bot/reminders', 189 params={'author__id': str(ctx.author.id)} 190 ) 191 192 now = datetime.utcnow() 193 194 # Make a list of tuples so it can be sorted by time. 195 reminders = sorted( 196 ( 197 (rem['content'], rem['expiration'], rem['id']) 198 for rem in data 199 ), 200 key=itemgetter(1) 201 ) 202 203 lines = [] 204 205 for content, remind_at, id_ in reminders: 206 # Parse and humanize the time, make it pretty :D 207 remind_datetime = datetime.fromisoformat(remind_at[:-1]) 208 time = humanize_delta(relativedelta(remind_datetime, now)) 209 210 text = textwrap.dedent(f""" 211 **Reminder #{id_}:** *expires in {time}* (ID: {id_}) 212 {content} 213 """).strip() 214 215 lines.append(text) 216 217 embed = Embed() 218 embed.colour = Colour.blurple() 219 embed.title = f"Reminders for {ctx.author}" 220 221 # Remind the user that they have no reminders :^) 222 if not lines: 223 embed.description = "No active reminders could be found." 224 return await ctx.send(embed=embed) 225 226 # Construct the embed and paginate it. 227 embed.colour = Colour.blurple() 228 229 await LinePaginator.paginate( 230 lines, 231 ctx, embed, 232 max_lines=3, 233 empty=True 234 ) 235 236 @remind_group.group(name="edit", aliases=("change", "modify"), invoke_without_command=True) 237 async def edit_reminder_group(self, ctx: Context) -> None: 238 """Commands for modifying your current reminders.""" 239 await ctx.invoke(self.bot.get_command("help"), "reminders", "edit") 240 241 @edit_reminder_group.command(name="duration", aliases=("time",)) 242 async def edit_reminder_duration(self, ctx: Context, id_: int, expiration: Duration) -> None: 243 """ 244 Edit one of your reminder's expiration. 245 246 Expiration is parsed per: http://strftime.org/ 247 """ 248 # Send the request to update the reminder in the database 249 reminder = await self.bot.api_client.patch( 250 'bot/reminders/' + str(id_), 251 json={'expiration': expiration.isoformat()} 252 ) 253 254 # Send a confirmation message to the channel 255 await self._send_confirmation( 256 ctx, on_success="That reminder has been edited successfully!" 257 ) 258 259 await self._reschedule_reminder(reminder) 260 261 @edit_reminder_group.command(name="content", aliases=("reason",)) 262 async def edit_reminder_content(self, ctx: Context, id_: int, *, content: str) -> None: 263 """Edit one of your reminder's content.""" 264 # Send the request to update the reminder in the database 265 reminder = await self.bot.api_client.patch( 266 'bot/reminders/' + str(id_), 267 json={'content': content} 268 ) 269 270 # Send a confirmation message to the channel 271 await self._send_confirmation( 272 ctx, on_success="That reminder has been edited successfully!" 273 ) 274 await self._reschedule_reminder(reminder) 275 276 @remind_group.command("delete", aliases=("remove",)) 277 async def delete_reminder(self, ctx: Context, id_: int) -> None: 278 """Delete one of your active reminders.""" 279 await self._delete_reminder(id_) 280 await self._send_confirmation( 281 ctx, on_success="That reminder has been deleted successfully!" 
282 ) 283 284 285 def setup(bot: Bot) -> None: 286 """Reminders cog load.""" 287 bot.add_cog(Reminders(bot)) 288 log.info("Cog loaded: Reminders") 289 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/bot/cogs/reminders.py b/bot/cogs/reminders.py --- a/bot/cogs/reminders.py +++ b/bot/cogs/reminders.py @@ -2,7 +2,7 @@ import logging import random import textwrap -from datetime import datetime +from datetime import datetime, timedelta from operator import itemgetter from typing import Optional @@ -104,7 +104,10 @@ name="It has arrived!" ) - embed.description = f"Here's your reminder: `{reminder['content']}`" + embed.description = f"Here's your reminder: `{reminder['content']}`." + + if reminder.get("jump_url"): # keep backward compatibility + embed.description += f"\n[Jump back to when you created the reminder]({reminder['jump_url']})" if late: embed.colour = Colour.red() @@ -167,14 +170,18 @@ json={ 'author': ctx.author.id, 'channel_id': ctx.message.channel.id, + 'jump_url': ctx.message.jump_url, 'content': content, 'expiration': expiration.isoformat() } ) + now = datetime.utcnow() - timedelta(seconds=1) + # Confirm to the user that it worked. await self._send_confirmation( - ctx, on_success="Your reminder has been created successfully!" + ctx, + on_success=f"Your reminder will arrive in {humanize_delta(relativedelta(expiration, now))}!" ) loop = asyncio.get_event_loop()
{"golden_diff": "diff --git a/bot/cogs/reminders.py b/bot/cogs/reminders.py\n--- a/bot/cogs/reminders.py\n+++ b/bot/cogs/reminders.py\n@@ -2,7 +2,7 @@\n import logging\n import random\n import textwrap\n-from datetime import datetime\n+from datetime import datetime, timedelta\n from operator import itemgetter\n from typing import Optional\n \n@@ -104,7 +104,10 @@\n name=\"It has arrived!\"\n )\n \n- embed.description = f\"Here's your reminder: `{reminder['content']}`\"\n+ embed.description = f\"Here's your reminder: `{reminder['content']}`.\"\n+\n+ if reminder.get(\"jump_url\"): # keep backward compatibility\n+ embed.description += f\"\\n[Jump back to when you created the reminder]({reminder['jump_url']})\"\n \n if late:\n embed.colour = Colour.red()\n@@ -167,14 +170,18 @@\n json={\n 'author': ctx.author.id,\n 'channel_id': ctx.message.channel.id,\n+ 'jump_url': ctx.message.jump_url,\n 'content': content,\n 'expiration': expiration.isoformat()\n }\n )\n \n+ now = datetime.utcnow() - timedelta(seconds=1)\n+\n # Confirm to the user that it worked.\n await self._send_confirmation(\n- ctx, on_success=\"Your reminder has been created successfully!\"\n+ ctx,\n+ on_success=f\"Your reminder will arrive in {humanize_delta(relativedelta(expiration, now))}!\"\n )\n \n loop = asyncio.get_event_loop()\n", "issue": "Reminder details in confirmation message.\nThe reminder command currently only replies with `Your reminder has been created successfully!`. \r\n\r\nThis can be improved by providing the actual reminder contents in the confirmation and the datetime / humanized timedelta the reminder will be due to ensure the intended duration was processed. \r\n\r\nFor the message sent once the reminder is due, a jump url to the original command message so we can jump back into context of the conversation when the reminder was made would be good to add. 
(Previously had mixed up wording for this section).\r\n\r\nThis would do well as a first issue if it's not gotten to in short time.\n", "before_files": [{"content": "import asyncio\nimport logging\nimport random\nimport textwrap\nfrom datetime import datetime\nfrom operator import itemgetter\nfrom typing import Optional\n\nfrom dateutil.relativedelta import relativedelta\nfrom discord import Colour, Embed, Message\nfrom discord.ext.commands import Bot, Cog, Context, group\n\nfrom bot.constants import Channels, Icons, NEGATIVE_REPLIES, POSITIVE_REPLIES, STAFF_ROLES\nfrom bot.converters import Duration\nfrom bot.pagination import LinePaginator\nfrom bot.utils.checks import without_role_check\nfrom bot.utils.scheduling import Scheduler\nfrom bot.utils.time import humanize_delta, wait_until\n\nlog = logging.getLogger(__name__)\n\nWHITELISTED_CHANNELS = (Channels.bot,)\nMAXIMUM_REMINDERS = 5\n\n\nclass Reminders(Scheduler, Cog):\n \"\"\"Provide in-channel reminder functionality.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n super().__init__()\n\n self.bot.loop.create_task(self.reschedule_reminders())\n\n async def reschedule_reminders(self) -> None:\n \"\"\"Get all current reminders from the API and reschedule them.\"\"\"\n await self.bot.wait_until_ready()\n response = await self.bot.api_client.get(\n 'bot/reminders',\n params={'active': 'true'}\n )\n\n now = datetime.utcnow()\n loop = asyncio.get_event_loop()\n\n for reminder in response:\n remind_at = datetime.fromisoformat(reminder['expiration'][:-1])\n\n # If the reminder is already overdue ...\n if remind_at < now:\n late = relativedelta(now, remind_at)\n await self.send_reminder(reminder, late)\n\n else:\n self.schedule_task(loop, reminder[\"id\"], reminder)\n\n @staticmethod\n async def _send_confirmation(ctx: Context, on_success: str) -> None:\n \"\"\"Send an embed confirming the reminder change was made successfully.\"\"\"\n embed = Embed()\n embed.colour = Colour.green()\n embed.title = random.choice(POSITIVE_REPLIES)\n embed.description = on_success\n await ctx.send(embed=embed)\n\n async def _scheduled_task(self, reminder: dict) -> None:\n \"\"\"A coroutine which sends the reminder once the time is reached, and cancels the running task.\"\"\"\n reminder_id = reminder[\"id\"]\n reminder_datetime = datetime.fromisoformat(reminder['expiration'][:-1])\n\n # Send the reminder message once the desired duration has passed\n await wait_until(reminder_datetime)\n await self.send_reminder(reminder)\n\n log.debug(f\"Deleting reminder {reminder_id} (the user has been reminded).\")\n await self._delete_reminder(reminder_id)\n\n # Now we can begone with it from our schedule list.\n self.cancel_task(reminder_id)\n\n async def _delete_reminder(self, reminder_id: str) -> None:\n \"\"\"Delete a reminder from the database, given its ID, and cancel the running task.\"\"\"\n await self.bot.api_client.delete('bot/reminders/' + str(reminder_id))\n\n # Now we can remove it from the schedule list\n self.cancel_task(reminder_id)\n\n async def _reschedule_reminder(self, reminder: dict) -> None:\n \"\"\"Reschedule a reminder object.\"\"\"\n loop = asyncio.get_event_loop()\n\n self.cancel_task(reminder[\"id\"])\n self.schedule_task(loop, reminder[\"id\"], reminder)\n\n async def send_reminder(self, reminder: dict, late: relativedelta = None) -> None:\n \"\"\"Send the reminder.\"\"\"\n channel = self.bot.get_channel(reminder[\"channel_id\"])\n user = self.bot.get_user(reminder[\"author\"])\n\n embed = Embed()\n embed.colour = Colour.blurple()\n 
embed.set_author(\n icon_url=Icons.remind_blurple,\n name=\"It has arrived!\"\n )\n\n embed.description = f\"Here's your reminder: `{reminder['content']}`\"\n\n if late:\n embed.colour = Colour.red()\n embed.set_author(\n icon_url=Icons.remind_red,\n name=f\"Sorry it arrived {humanize_delta(late, max_units=2)} late!\"\n )\n\n await channel.send(\n content=user.mention,\n embed=embed\n )\n await self._delete_reminder(reminder[\"id\"])\n\n @group(name=\"remind\", aliases=(\"reminder\", \"reminders\"), invoke_without_command=True)\n async def remind_group(self, ctx: Context, expiration: Duration, *, content: str) -> None:\n \"\"\"Commands for managing your reminders.\"\"\"\n await ctx.invoke(self.new_reminder, expiration=expiration, content=content)\n\n @remind_group.command(name=\"new\", aliases=(\"add\", \"create\"))\n async def new_reminder(self, ctx: Context, expiration: Duration, *, content: str) -> Optional[Message]:\n \"\"\"\n Set yourself a simple reminder.\n\n Expiration is parsed per: http://strftime.org/\n \"\"\"\n embed = Embed()\n\n # If the user is not staff, we need to verify whether or not to make a reminder at all.\n if without_role_check(ctx, *STAFF_ROLES):\n\n # If they don't have permission to set a reminder in this channel\n if ctx.channel.id not in WHITELISTED_CHANNELS:\n embed.colour = Colour.red()\n embed.title = random.choice(NEGATIVE_REPLIES)\n embed.description = \"Sorry, you can't do that here!\"\n\n return await ctx.send(embed=embed)\n\n # Get their current active reminders\n active_reminders = await self.bot.api_client.get(\n 'bot/reminders',\n params={\n 'author__id': str(ctx.author.id)\n }\n )\n\n # Let's limit this, so we don't get 10 000\n # reminders from kip or something like that :P\n if len(active_reminders) > MAXIMUM_REMINDERS:\n embed.colour = Colour.red()\n embed.title = random.choice(NEGATIVE_REPLIES)\n embed.description = \"You have too many active reminders!\"\n\n return await ctx.send(embed=embed)\n\n # Now we can attempt to actually set the reminder.\n reminder = await self.bot.api_client.post(\n 'bot/reminders',\n json={\n 'author': ctx.author.id,\n 'channel_id': ctx.message.channel.id,\n 'content': content,\n 'expiration': expiration.isoformat()\n }\n )\n\n # Confirm to the user that it worked.\n await self._send_confirmation(\n ctx, on_success=\"Your reminder has been created successfully!\"\n )\n\n loop = asyncio.get_event_loop()\n self.schedule_task(loop, reminder[\"id\"], reminder)\n\n @remind_group.command(name=\"list\")\n async def list_reminders(self, ctx: Context) -> Optional[Message]:\n \"\"\"View a paginated embed of all reminders for your user.\"\"\"\n # Get all the user's reminders from the database.\n data = await self.bot.api_client.get(\n 'bot/reminders',\n params={'author__id': str(ctx.author.id)}\n )\n\n now = datetime.utcnow()\n\n # Make a list of tuples so it can be sorted by time.\n reminders = sorted(\n (\n (rem['content'], rem['expiration'], rem['id'])\n for rem in data\n ),\n key=itemgetter(1)\n )\n\n lines = []\n\n for content, remind_at, id_ in reminders:\n # Parse and humanize the time, make it pretty :D\n remind_datetime = datetime.fromisoformat(remind_at[:-1])\n time = humanize_delta(relativedelta(remind_datetime, now))\n\n text = textwrap.dedent(f\"\"\"\n **Reminder #{id_}:** *expires in {time}* (ID: {id_})\n {content}\n \"\"\").strip()\n\n lines.append(text)\n\n embed = Embed()\n embed.colour = Colour.blurple()\n embed.title = f\"Reminders for {ctx.author}\"\n\n # Remind the user that they have no reminders :^)\n if 
not lines:\n embed.description = \"No active reminders could be found.\"\n return await ctx.send(embed=embed)\n\n # Construct the embed and paginate it.\n embed.colour = Colour.blurple()\n\n await LinePaginator.paginate(\n lines,\n ctx, embed,\n max_lines=3,\n empty=True\n )\n\n @remind_group.group(name=\"edit\", aliases=(\"change\", \"modify\"), invoke_without_command=True)\n async def edit_reminder_group(self, ctx: Context) -> None:\n \"\"\"Commands for modifying your current reminders.\"\"\"\n await ctx.invoke(self.bot.get_command(\"help\"), \"reminders\", \"edit\")\n\n @edit_reminder_group.command(name=\"duration\", aliases=(\"time\",))\n async def edit_reminder_duration(self, ctx: Context, id_: int, expiration: Duration) -> None:\n \"\"\"\n Edit one of your reminder's expiration.\n\n Expiration is parsed per: http://strftime.org/\n \"\"\"\n # Send the request to update the reminder in the database\n reminder = await self.bot.api_client.patch(\n 'bot/reminders/' + str(id_),\n json={'expiration': expiration.isoformat()}\n )\n\n # Send a confirmation message to the channel\n await self._send_confirmation(\n ctx, on_success=\"That reminder has been edited successfully!\"\n )\n\n await self._reschedule_reminder(reminder)\n\n @edit_reminder_group.command(name=\"content\", aliases=(\"reason\",))\n async def edit_reminder_content(self, ctx: Context, id_: int, *, content: str) -> None:\n \"\"\"Edit one of your reminder's content.\"\"\"\n # Send the request to update the reminder in the database\n reminder = await self.bot.api_client.patch(\n 'bot/reminders/' + str(id_),\n json={'content': content}\n )\n\n # Send a confirmation message to the channel\n await self._send_confirmation(\n ctx, on_success=\"That reminder has been edited successfully!\"\n )\n await self._reschedule_reminder(reminder)\n\n @remind_group.command(\"delete\", aliases=(\"remove\",))\n async def delete_reminder(self, ctx: Context, id_: int) -> None:\n \"\"\"Delete one of your active reminders.\"\"\"\n await self._delete_reminder(id_)\n await self._send_confirmation(\n ctx, on_success=\"That reminder has been deleted successfully!\"\n )\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Reminders cog load.\"\"\"\n bot.add_cog(Reminders(bot))\n log.info(\"Cog loaded: Reminders\")\n", "path": "bot/cogs/reminders.py"}], "after_files": [{"content": "import asyncio\nimport logging\nimport random\nimport textwrap\nfrom datetime import datetime, timedelta\nfrom operator import itemgetter\nfrom typing import Optional\n\nfrom dateutil.relativedelta import relativedelta\nfrom discord import Colour, Embed, Message\nfrom discord.ext.commands import Bot, Cog, Context, group\n\nfrom bot.constants import Channels, Icons, NEGATIVE_REPLIES, POSITIVE_REPLIES, STAFF_ROLES\nfrom bot.converters import Duration\nfrom bot.pagination import LinePaginator\nfrom bot.utils.checks import without_role_check\nfrom bot.utils.scheduling import Scheduler\nfrom bot.utils.time import humanize_delta, wait_until\n\nlog = logging.getLogger(__name__)\n\nWHITELISTED_CHANNELS = (Channels.bot,)\nMAXIMUM_REMINDERS = 5\n\n\nclass Reminders(Scheduler, Cog):\n \"\"\"Provide in-channel reminder functionality.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n super().__init__()\n\n self.bot.loop.create_task(self.reschedule_reminders())\n\n async def reschedule_reminders(self) -> None:\n \"\"\"Get all current reminders from the API and reschedule them.\"\"\"\n await self.bot.wait_until_ready()\n response = await self.bot.api_client.get(\n 'bot/reminders',\n 
params={'active': 'true'}\n )\n\n now = datetime.utcnow()\n loop = asyncio.get_event_loop()\n\n for reminder in response:\n remind_at = datetime.fromisoformat(reminder['expiration'][:-1])\n\n # If the reminder is already overdue ...\n if remind_at < now:\n late = relativedelta(now, remind_at)\n await self.send_reminder(reminder, late)\n\n else:\n self.schedule_task(loop, reminder[\"id\"], reminder)\n\n @staticmethod\n async def _send_confirmation(ctx: Context, on_success: str) -> None:\n \"\"\"Send an embed confirming the reminder change was made successfully.\"\"\"\n embed = Embed()\n embed.colour = Colour.green()\n embed.title = random.choice(POSITIVE_REPLIES)\n embed.description = on_success\n await ctx.send(embed=embed)\n\n async def _scheduled_task(self, reminder: dict) -> None:\n \"\"\"A coroutine which sends the reminder once the time is reached, and cancels the running task.\"\"\"\n reminder_id = reminder[\"id\"]\n reminder_datetime = datetime.fromisoformat(reminder['expiration'][:-1])\n\n # Send the reminder message once the desired duration has passed\n await wait_until(reminder_datetime)\n await self.send_reminder(reminder)\n\n log.debug(f\"Deleting reminder {reminder_id} (the user has been reminded).\")\n await self._delete_reminder(reminder_id)\n\n # Now we can begone with it from our schedule list.\n self.cancel_task(reminder_id)\n\n async def _delete_reminder(self, reminder_id: str) -> None:\n \"\"\"Delete a reminder from the database, given its ID, and cancel the running task.\"\"\"\n await self.bot.api_client.delete('bot/reminders/' + str(reminder_id))\n\n # Now we can remove it from the schedule list\n self.cancel_task(reminder_id)\n\n async def _reschedule_reminder(self, reminder: dict) -> None:\n \"\"\"Reschedule a reminder object.\"\"\"\n loop = asyncio.get_event_loop()\n\n self.cancel_task(reminder[\"id\"])\n self.schedule_task(loop, reminder[\"id\"], reminder)\n\n async def send_reminder(self, reminder: dict, late: relativedelta = None) -> None:\n \"\"\"Send the reminder.\"\"\"\n channel = self.bot.get_channel(reminder[\"channel_id\"])\n user = self.bot.get_user(reminder[\"author\"])\n\n embed = Embed()\n embed.colour = Colour.blurple()\n embed.set_author(\n icon_url=Icons.remind_blurple,\n name=\"It has arrived!\"\n )\n\n embed.description = f\"Here's your reminder: `{reminder['content']}`.\"\n\n if reminder.get(\"jump_url\"): # keep backward compatibility\n embed.description += f\"\\n[Jump back to when you created the reminder]({reminder['jump_url']})\"\n\n if late:\n embed.colour = Colour.red()\n embed.set_author(\n icon_url=Icons.remind_red,\n name=f\"Sorry it arrived {humanize_delta(late, max_units=2)} late!\"\n )\n\n await channel.send(\n content=user.mention,\n embed=embed\n )\n await self._delete_reminder(reminder[\"id\"])\n\n @group(name=\"remind\", aliases=(\"reminder\", \"reminders\"), invoke_without_command=True)\n async def remind_group(self, ctx: Context, expiration: Duration, *, content: str) -> None:\n \"\"\"Commands for managing your reminders.\"\"\"\n await ctx.invoke(self.new_reminder, expiration=expiration, content=content)\n\n @remind_group.command(name=\"new\", aliases=(\"add\", \"create\"))\n async def new_reminder(self, ctx: Context, expiration: Duration, *, content: str) -> Optional[Message]:\n \"\"\"\n Set yourself a simple reminder.\n\n Expiration is parsed per: http://strftime.org/\n \"\"\"\n embed = Embed()\n\n # If the user is not staff, we need to verify whether or not to make a reminder at all.\n if without_role_check(ctx, 
*STAFF_ROLES):\n\n # If they don't have permission to set a reminder in this channel\n if ctx.channel.id not in WHITELISTED_CHANNELS:\n embed.colour = Colour.red()\n embed.title = random.choice(NEGATIVE_REPLIES)\n embed.description = \"Sorry, you can't do that here!\"\n\n return await ctx.send(embed=embed)\n\n # Get their current active reminders\n active_reminders = await self.bot.api_client.get(\n 'bot/reminders',\n params={\n 'author__id': str(ctx.author.id)\n }\n )\n\n # Let's limit this, so we don't get 10 000\n # reminders from kip or something like that :P\n if len(active_reminders) > MAXIMUM_REMINDERS:\n embed.colour = Colour.red()\n embed.title = random.choice(NEGATIVE_REPLIES)\n embed.description = \"You have too many active reminders!\"\n\n return await ctx.send(embed=embed)\n\n # Now we can attempt to actually set the reminder.\n reminder = await self.bot.api_client.post(\n 'bot/reminders',\n json={\n 'author': ctx.author.id,\n 'channel_id': ctx.message.channel.id,\n 'jump_url': ctx.message.jump_url,\n 'content': content,\n 'expiration': expiration.isoformat()\n }\n )\n\n now = datetime.utcnow() - timedelta(seconds=1)\n\n # Confirm to the user that it worked.\n await self._send_confirmation(\n ctx,\n on_success=f\"Your reminder will arrive in {humanize_delta(relativedelta(expiration, now))}!\"\n )\n\n loop = asyncio.get_event_loop()\n self.schedule_task(loop, reminder[\"id\"], reminder)\n\n @remind_group.command(name=\"list\")\n async def list_reminders(self, ctx: Context) -> Optional[Message]:\n \"\"\"View a paginated embed of all reminders for your user.\"\"\"\n # Get all the user's reminders from the database.\n data = await self.bot.api_client.get(\n 'bot/reminders',\n params={'author__id': str(ctx.author.id)}\n )\n\n now = datetime.utcnow()\n\n # Make a list of tuples so it can be sorted by time.\n reminders = sorted(\n (\n (rem['content'], rem['expiration'], rem['id'])\n for rem in data\n ),\n key=itemgetter(1)\n )\n\n lines = []\n\n for content, remind_at, id_ in reminders:\n # Parse and humanize the time, make it pretty :D\n remind_datetime = datetime.fromisoformat(remind_at[:-1])\n time = humanize_delta(relativedelta(remind_datetime, now))\n\n text = textwrap.dedent(f\"\"\"\n **Reminder #{id_}:** *expires in {time}* (ID: {id_})\n {content}\n \"\"\").strip()\n\n lines.append(text)\n\n embed = Embed()\n embed.colour = Colour.blurple()\n embed.title = f\"Reminders for {ctx.author}\"\n\n # Remind the user that they have no reminders :^)\n if not lines:\n embed.description = \"No active reminders could be found.\"\n return await ctx.send(embed=embed)\n\n # Construct the embed and paginate it.\n embed.colour = Colour.blurple()\n\n await LinePaginator.paginate(\n lines,\n ctx, embed,\n max_lines=3,\n empty=True\n )\n\n @remind_group.group(name=\"edit\", aliases=(\"change\", \"modify\"), invoke_without_command=True)\n async def edit_reminder_group(self, ctx: Context) -> None:\n \"\"\"Commands for modifying your current reminders.\"\"\"\n await ctx.invoke(self.bot.get_command(\"help\"), \"reminders\", \"edit\")\n\n @edit_reminder_group.command(name=\"duration\", aliases=(\"time\",))\n async def edit_reminder_duration(self, ctx: Context, id_: int, expiration: Duration) -> None:\n \"\"\"\n Edit one of your reminder's expiration.\n\n Expiration is parsed per: http://strftime.org/\n \"\"\"\n # Send the request to update the reminder in the database\n reminder = await self.bot.api_client.patch(\n 'bot/reminders/' + str(id_),\n json={'expiration': expiration.isoformat()}\n )\n\n # 
Send a confirmation message to the channel\n await self._send_confirmation(\n ctx, on_success=\"That reminder has been edited successfully!\"\n )\n\n await self._reschedule_reminder(reminder)\n\n @edit_reminder_group.command(name=\"content\", aliases=(\"reason\",))\n async def edit_reminder_content(self, ctx: Context, id_: int, *, content: str) -> None:\n \"\"\"Edit one of your reminder's content.\"\"\"\n # Send the request to update the reminder in the database\n reminder = await self.bot.api_client.patch(\n 'bot/reminders/' + str(id_),\n json={'content': content}\n )\n\n # Send a confirmation message to the channel\n await self._send_confirmation(\n ctx, on_success=\"That reminder has been edited successfully!\"\n )\n await self._reschedule_reminder(reminder)\n\n @remind_group.command(\"delete\", aliases=(\"remove\",))\n async def delete_reminder(self, ctx: Context, id_: int) -> None:\n \"\"\"Delete one of your active reminders.\"\"\"\n await self._delete_reminder(id_)\n await self._send_confirmation(\n ctx, on_success=\"That reminder has been deleted successfully!\"\n )\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Reminders cog load.\"\"\"\n bot.add_cog(Reminders(bot))\n log.info(\"Cog loaded: Reminders\")\n", "path": "bot/cogs/reminders.py"}]}
3,429
354
gh_patches_debug_11047
rasdani/github-patches
git_diff
astronomer__astro-sdk-62
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Unable to load_file using parquet Version: Astro 0.2.0 Python: 3.8, 3.9 Astro is unable to run the task `load_file` with a parquet file. It raises the following exception: ``` Traceback (most recent call last): File "pyarrow/io.pxi", line 1511, in pyarrow.lib.get_native_file File "/home/tati/.virtualenvs/astro-py38/lib/python3.8/site-packages/pyarrow/util.py", line 99, in _stringify_path raise TypeError("not a path-like object") TypeError: not a path-like object During handling of the above exception, another exception occurred: Traceback (most recent call last): File "pyarrow/io.pxi", line 1517, in pyarrow.lib.get_native_file File "pyarrow/io.pxi", line 729, in pyarrow.lib.PythonFile.__cinit__ TypeError: binary file expected, got text file warnings.warn(pytest.PytestUnraisableExceptionWarning(msg)) ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/astro/sql/operators/agnostic_load_file.py` Content: ``` 1 """ 2 Copyright Astronomer, Inc. 3 4 Licensed under the Apache License, Version 2.0 (the "License"); 5 you may not use this file except in compliance with the License. 6 You may obtain a copy of the License at 7 8 http://www.apache.org/licenses/LICENSE-2.0 9 10 Unless required by applicable law or agreed to in writing, software 11 distributed under the License is distributed on an "AS IS" BASIS, 12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 See the License for the specific language governing permissions and 14 limitations under the License. 15 """ 16 17 import os 18 from typing import Union 19 from urllib.parse import urlparse 20 21 import pandas as pd 22 from airflow.hooks.base import BaseHook 23 from airflow.models import BaseOperator 24 from smart_open import open 25 26 from astro.sql.table import Table, TempTable, create_table_name 27 from astro.utils.cloud_storage_creds import gcs_client, s3fs_creds 28 from astro.utils.load_dataframe import move_dataframe_to_sql 29 from astro.utils.schema_util import get_schema 30 from astro.utils.task_id_helper import get_task_id 31 32 33 class AgnosticLoadFile(BaseOperator): 34 """Load S3/local table to postgres/snowflake database. 35 36 :param path: File path. 37 :type path: str 38 :param output_table_name: Name of table to create. 39 :type output_table_name: str 40 :param file_conn_id: Airflow connection id of input file (optional) 41 :type file_conn_id: str 42 :param output_conn_id: Database connection id. 43 :type output_conn_id: str 44 """ 45 46 def __init__( 47 self, 48 path, 49 output_table: Union[TempTable, Table], 50 file_conn_id="", 51 chunksize=None, 52 **kwargs, 53 ) -> None: 54 super().__init__(**kwargs) 55 self.output_table: Union[TempTable, Table] = output_table 56 self.path = path 57 self.chunksize = chunksize 58 self.file_conn_id = file_conn_id 59 self.kwargs = kwargs 60 self.output_table = output_table 61 62 def execute(self, context): 63 """Loads csv/parquet table from local/S3/GCS with Pandas. 64 65 Infers SQL database type based on connection then loads table to db. 66 """ 67 68 # Read file with Pandas load method based on `file_type` (S3 or local). 
69 df = self._load_dataframe(self.path) 70 71 # Retrieve conn type 72 conn = BaseHook.get_connection(self.output_table.conn_id) 73 if type(self.output_table) == TempTable: 74 self.output_table = self.output_table.to_table( 75 create_table_name(context=context), get_schema() 76 ) 77 else: 78 self.output_table.schema = self.output_table.schema or get_schema() 79 move_dataframe_to_sql( 80 output_table_name=self.output_table.table_name, 81 conn_id=self.output_table.conn_id, 82 database=self.output_table.database, 83 warehouse=self.output_table.warehouse, 84 schema=self.output_table.schema, 85 df=df, 86 conn_type=conn.conn_type, 87 user=conn.login, 88 ) 89 self.log.info(f"returning table {self.output_table}") 90 return self.output_table 91 92 @staticmethod 93 def validate_path(path): 94 """Validate a URL or local file path""" 95 try: 96 result = urlparse(path) 97 return all([result.scheme, result.netloc]) or os.path.isfile(path) 98 except: 99 return False 100 101 def _load_dataframe(self, path): 102 """Read file with Pandas. 103 104 Select method based on `file_type` (S3 or local). 105 """ 106 107 if not AgnosticLoadFile.validate_path(path): 108 raise ValueError("Invalid path: {}".format(path)) 109 110 file_type = path.split(".")[-1] 111 transport_params = { 112 "s3": s3fs_creds, 113 "gs": gcs_client, 114 "": lambda: None, 115 }[urlparse(path).scheme]() 116 deserialiser = { 117 "parquet": pd.read_parquet, 118 "csv": pd.read_csv, 119 "json": pd.read_json, 120 "ndjson": pd.read_json, 121 } 122 deserialiser_params = {"ndjson": {"lines": True}} 123 with open(path, transport_params=transport_params) as stream: 124 return deserialiser[file_type]( 125 stream, **deserialiser_params.get(file_type, {}) 126 ) 127 128 129 def load_file( 130 path, 131 output_table=None, 132 file_conn_id=None, 133 task_id=None, 134 **kwargs, 135 ): 136 """Convert AgnosticLoadFile into a function. 137 138 Returns an XComArg object. 139 140 :param path: File path. 141 :type path: str 142 :param output_table: Table to create 143 :type output_table: Table 144 :param file_conn_id: Airflow connection id of input file (optional) 145 :type file_conn_id: str 146 :param task_id: task id, optional. 147 :type task_id: str 148 """ 149 150 task_id = task_id if task_id is not None else get_task_id("load_file", path) 151 152 return AgnosticLoadFile( 153 task_id=task_id, 154 path=path, 155 output_table=output_table, 156 file_conn_id=file_conn_id, 157 **kwargs, 158 ).output 159 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/astro/sql/operators/agnostic_load_file.py b/src/astro/sql/operators/agnostic_load_file.py --- a/src/astro/sql/operators/agnostic_load_file.py +++ b/src/astro/sql/operators/agnostic_load_file.py @@ -119,8 +119,11 @@ "json": pd.read_json, "ndjson": pd.read_json, } + mode = {"parquet": "rb"} deserialiser_params = {"ndjson": {"lines": True}} - with open(path, transport_params=transport_params) as stream: + with open( + path, mode=mode.get(file_type, "r"), transport_params=transport_params + ) as stream: return deserialiser[file_type]( stream, **deserialiser_params.get(file_type, {}) )
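Note on the fix above: pandas' parquet reader (backed by pyarrow) requires a binary file object, which is why the text-mode `smart_open` stream raised `TypeError: binary file expected, got text file`. The sketch below is a minimal, stand-alone illustration of the corrected call, assuming `pandas`, `pyarrow` and `smart_open` are installed; the file path is a placeholder and no cloud `transport_params` are passed.

```python
# Minimal sketch of the corrected behaviour: open parquet sources in binary mode.
# "example.parquet" is a placeholder; an s3:// or gs:// URI would additionally need transport_params.
import pandas as pd
from smart_open import open as smart_open_file

path = "example.parquet"

# mode="rb" matches the patch's mode lookup for the "parquet" file type;
# csv/json/ndjson keep the default text mode "r".
with smart_open_file(path, mode="rb") as stream:
    df = pd.read_parquet(stream)

print(df.shape)
```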
{"golden_diff": "diff --git a/src/astro/sql/operators/agnostic_load_file.py b/src/astro/sql/operators/agnostic_load_file.py\n--- a/src/astro/sql/operators/agnostic_load_file.py\n+++ b/src/astro/sql/operators/agnostic_load_file.py\n@@ -119,8 +119,11 @@\n \"json\": pd.read_json,\n \"ndjson\": pd.read_json,\n }\n+ mode = {\"parquet\": \"rb\"}\n deserialiser_params = {\"ndjson\": {\"lines\": True}}\n- with open(path, transport_params=transport_params) as stream:\n+ with open(\n+ path, mode=mode.get(file_type, \"r\"), transport_params=transport_params\n+ ) as stream:\n return deserialiser[file_type](\n stream, **deserialiser_params.get(file_type, {})\n )\n", "issue": "Unable to load_file using parquet\nVersion: Astro 0.2.0\r\nPython: 3.8, 3.9\r\n\r\nAstro is unable to run the task `load_file` with a parquet file.\r\n\r\nIt raises the following exception:\r\n```\r\n Traceback (most recent call last):\r\n File \"pyarrow/io.pxi\", line 1511, in pyarrow.lib.get_native_file\r\n File \"/home/tati/.virtualenvs/astro-py38/lib/python3.8/site-packages/pyarrow/util.py\", line 99, in _stringify_path\r\n raise TypeError(\"not a path-like object\")\r\n TypeError: not a path-like object\r\n \r\n During handling of the above exception, another exception occurred:\r\n \r\n Traceback (most recent call last):\r\n File \"pyarrow/io.pxi\", line 1517, in pyarrow.lib.get_native_file\r\n File \"pyarrow/io.pxi\", line 729, in pyarrow.lib.PythonFile.__cinit__\r\n TypeError: binary file expected, got text file\r\n \r\n warnings.warn(pytest.PytestUnraisableExceptionWarning(msg))\r\n```\n", "before_files": [{"content": "\"\"\"\nCopyright Astronomer, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport os\nfrom typing import Union\nfrom urllib.parse import urlparse\n\nimport pandas as pd\nfrom airflow.hooks.base import BaseHook\nfrom airflow.models import BaseOperator\nfrom smart_open import open\n\nfrom astro.sql.table import Table, TempTable, create_table_name\nfrom astro.utils.cloud_storage_creds import gcs_client, s3fs_creds\nfrom astro.utils.load_dataframe import move_dataframe_to_sql\nfrom astro.utils.schema_util import get_schema\nfrom astro.utils.task_id_helper import get_task_id\n\n\nclass AgnosticLoadFile(BaseOperator):\n \"\"\"Load S3/local table to postgres/snowflake database.\n\n :param path: File path.\n :type path: str\n :param output_table_name: Name of table to create.\n :type output_table_name: str\n :param file_conn_id: Airflow connection id of input file (optional)\n :type file_conn_id: str\n :param output_conn_id: Database connection id.\n :type output_conn_id: str\n \"\"\"\n\n def __init__(\n self,\n path,\n output_table: Union[TempTable, Table],\n file_conn_id=\"\",\n chunksize=None,\n **kwargs,\n ) -> None:\n super().__init__(**kwargs)\n self.output_table: Union[TempTable, Table] = output_table\n self.path = path\n self.chunksize = chunksize\n self.file_conn_id = file_conn_id\n self.kwargs = kwargs\n self.output_table = output_table\n\n def execute(self, context):\n \"\"\"Loads csv/parquet table from 
local/S3/GCS with Pandas.\n\n Infers SQL database type based on connection then loads table to db.\n \"\"\"\n\n # Read file with Pandas load method based on `file_type` (S3 or local).\n df = self._load_dataframe(self.path)\n\n # Retrieve conn type\n conn = BaseHook.get_connection(self.output_table.conn_id)\n if type(self.output_table) == TempTable:\n self.output_table = self.output_table.to_table(\n create_table_name(context=context), get_schema()\n )\n else:\n self.output_table.schema = self.output_table.schema or get_schema()\n move_dataframe_to_sql(\n output_table_name=self.output_table.table_name,\n conn_id=self.output_table.conn_id,\n database=self.output_table.database,\n warehouse=self.output_table.warehouse,\n schema=self.output_table.schema,\n df=df,\n conn_type=conn.conn_type,\n user=conn.login,\n )\n self.log.info(f\"returning table {self.output_table}\")\n return self.output_table\n\n @staticmethod\n def validate_path(path):\n \"\"\"Validate a URL or local file path\"\"\"\n try:\n result = urlparse(path)\n return all([result.scheme, result.netloc]) or os.path.isfile(path)\n except:\n return False\n\n def _load_dataframe(self, path):\n \"\"\"Read file with Pandas.\n\n Select method based on `file_type` (S3 or local).\n \"\"\"\n\n if not AgnosticLoadFile.validate_path(path):\n raise ValueError(\"Invalid path: {}\".format(path))\n\n file_type = path.split(\".\")[-1]\n transport_params = {\n \"s3\": s3fs_creds,\n \"gs\": gcs_client,\n \"\": lambda: None,\n }[urlparse(path).scheme]()\n deserialiser = {\n \"parquet\": pd.read_parquet,\n \"csv\": pd.read_csv,\n \"json\": pd.read_json,\n \"ndjson\": pd.read_json,\n }\n deserialiser_params = {\"ndjson\": {\"lines\": True}}\n with open(path, transport_params=transport_params) as stream:\n return deserialiser[file_type](\n stream, **deserialiser_params.get(file_type, {})\n )\n\n\ndef load_file(\n path,\n output_table=None,\n file_conn_id=None,\n task_id=None,\n **kwargs,\n):\n \"\"\"Convert AgnosticLoadFile into a function.\n\n Returns an XComArg object.\n\n :param path: File path.\n :type path: str\n :param output_table: Table to create\n :type output_table: Table\n :param file_conn_id: Airflow connection id of input file (optional)\n :type file_conn_id: str\n :param task_id: task id, optional.\n :type task_id: str\n \"\"\"\n\n task_id = task_id if task_id is not None else get_task_id(\"load_file\", path)\n\n return AgnosticLoadFile(\n task_id=task_id,\n path=path,\n output_table=output_table,\n file_conn_id=file_conn_id,\n **kwargs,\n ).output\n", "path": "src/astro/sql/operators/agnostic_load_file.py"}], "after_files": [{"content": "\"\"\"\nCopyright Astronomer, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport os\nfrom typing import Union\nfrom urllib.parse import urlparse\n\nimport pandas as pd\nfrom airflow.hooks.base import BaseHook\nfrom airflow.models import BaseOperator\nfrom smart_open import open\n\nfrom astro.sql.table import Table, TempTable, create_table_name\nfrom astro.utils.cloud_storage_creds import gcs_client, 
s3fs_creds\nfrom astro.utils.load_dataframe import move_dataframe_to_sql\nfrom astro.utils.schema_util import get_schema\nfrom astro.utils.task_id_helper import get_task_id\n\n\nclass AgnosticLoadFile(BaseOperator):\n \"\"\"Load S3/local table to postgres/snowflake database.\n\n :param path: File path.\n :type path: str\n :param output_table_name: Name of table to create.\n :type output_table_name: str\n :param file_conn_id: Airflow connection id of input file (optional)\n :type file_conn_id: str\n :param output_conn_id: Database connection id.\n :type output_conn_id: str\n \"\"\"\n\n def __init__(\n self,\n path,\n output_table: Union[TempTable, Table],\n file_conn_id=\"\",\n chunksize=None,\n **kwargs,\n ) -> None:\n super().__init__(**kwargs)\n self.output_table: Union[TempTable, Table] = output_table\n self.path = path\n self.chunksize = chunksize\n self.file_conn_id = file_conn_id\n self.kwargs = kwargs\n self.output_table = output_table\n\n def execute(self, context):\n \"\"\"Loads csv/parquet table from local/S3/GCS with Pandas.\n\n Infers SQL database type based on connection then loads table to db.\n \"\"\"\n\n # Read file with Pandas load method based on `file_type` (S3 or local).\n df = self._load_dataframe(self.path)\n\n # Retrieve conn type\n conn = BaseHook.get_connection(self.output_table.conn_id)\n if type(self.output_table) == TempTable:\n self.output_table = self.output_table.to_table(\n create_table_name(context=context), get_schema()\n )\n else:\n self.output_table.schema = self.output_table.schema or get_schema()\n move_dataframe_to_sql(\n output_table_name=self.output_table.table_name,\n conn_id=self.output_table.conn_id,\n database=self.output_table.database,\n warehouse=self.output_table.warehouse,\n schema=self.output_table.schema,\n df=df,\n conn_type=conn.conn_type,\n user=conn.login,\n )\n self.log.info(f\"returning table {self.output_table}\")\n return self.output_table\n\n @staticmethod\n def validate_path(path):\n \"\"\"Validate a URL or local file path\"\"\"\n try:\n result = urlparse(path)\n return all([result.scheme, result.netloc]) or os.path.isfile(path)\n except:\n return False\n\n def _load_dataframe(self, path):\n \"\"\"Read file with Pandas.\n\n Select method based on `file_type` (S3 or local).\n \"\"\"\n\n if not AgnosticLoadFile.validate_path(path):\n raise ValueError(\"Invalid path: {}\".format(path))\n\n file_type = path.split(\".\")[-1]\n transport_params = {\n \"s3\": s3fs_creds,\n \"gs\": gcs_client,\n \"\": lambda: None,\n }[urlparse(path).scheme]()\n deserialiser = {\n \"parquet\": pd.read_parquet,\n \"csv\": pd.read_csv,\n \"json\": pd.read_json,\n \"ndjson\": pd.read_json,\n }\n mode = {\"parquet\": \"rb\"}\n deserialiser_params = {\"ndjson\": {\"lines\": True}}\n with open(\n path, mode=mode.get(file_type, \"r\"), transport_params=transport_params\n ) as stream:\n return deserialiser[file_type](\n stream, **deserialiser_params.get(file_type, {})\n )\n\n\ndef load_file(\n path,\n output_table=None,\n file_conn_id=None,\n task_id=None,\n **kwargs,\n):\n \"\"\"Convert AgnosticLoadFile into a function.\n\n Returns an XComArg object.\n\n :param path: File path.\n :type path: str\n :param output_table: Table to create\n :type output_table: Table\n :param file_conn_id: Airflow connection id of input file (optional)\n :type file_conn_id: str\n :param task_id: task id, optional.\n :type task_id: str\n \"\"\"\n\n task_id = task_id if task_id is not None else get_task_id(\"load_file\", path)\n\n return AgnosticLoadFile(\n task_id=task_id,\n 
path=path,\n output_table=output_table,\n file_conn_id=file_conn_id,\n **kwargs,\n ).output\n", "path": "src/astro/sql/operators/agnostic_load_file.py"}]}
2,012
181
gh_patches_debug_1174
rasdani/github-patches
git_diff
cupy__cupy-5225
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [info] NumPy/SciPy new version pinning recommendation See: - https://github.com/numpy/numpy/pull/18505 - scipy/scipy#12862 The most important takeaway is that NumPy/SciPy now recommend downstream distributions to pin the upper bound version if NumPy/Scipy are runtime dependencies. (The example is if the latest NumPy out there is 1.20, one should pin to `<1.23`; the notation used in the docs `<1.xx+3.0` is a bit confusing, see the clarification in https://github.com/scipy/scipy/pull/12862#discussion_r575790007.) There are other suggestions too, but I think this is potentially the most impactful one. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 #!/usr/bin/env python 2 3 import glob 4 import os 5 from setuptools import setup, find_packages 6 import sys 7 8 import cupy_setup_build 9 10 11 for submodule in ('cupy/_core/include/cupy/cub/', 12 'cupy/_core/include/cupy/jitify'): 13 if len(os.listdir(submodule)) == 0: 14 msg = ''' 15 The folder %s is a git submodule but is 16 currently empty. Please use the command 17 18 git submodule update --init 19 20 to populate the folder before building from source. 21 ''' % submodule 22 print(msg, file=sys.stderr) 23 sys.exit(1) 24 25 26 requirements = { 27 # TODO(kmaehashi): migrate to pyproject.toml (see #4727, #4619) 28 'setup': [ 29 'Cython>=0.29.22', 30 'fastrlock>=0.5', 31 ], 32 33 'install': [ 34 'numpy>=1.17', 35 'fastrlock>=0.5', 36 ], 37 'all': [ 38 'scipy>=1.4', 39 'optuna>=2.0', 40 ], 41 42 'stylecheck': [ 43 'autopep8==1.5.5', 44 'flake8==3.8.4', 45 'pbr==5.5.1', 46 'pycodestyle==2.6.0', 47 ], 48 'test': [ 49 # 4.2 <= pytest < 6.2 is slow collecting tests and times out on CI. 50 'pytest>=6.2', 51 ], 52 'jenkins': [ 53 '-r test', 54 'pytest-timeout', 55 'pytest-cov', 56 'coveralls', 57 'codecov', 58 'coverage<5', # Otherwise, Python must be built with sqlite 59 ], 60 } 61 62 63 def reduce_requirements(key): 64 # Resolve recursive requirements notation (-r) 65 reqs = requirements[key] 66 resolved_reqs = [] 67 for req in reqs: 68 if req.startswith('-r'): 69 depend_key = req[2:].lstrip() 70 reduce_requirements(depend_key) 71 resolved_reqs += requirements[depend_key] 72 else: 73 resolved_reqs.append(req) 74 requirements[key] = resolved_reqs 75 76 77 for k in requirements.keys(): 78 reduce_requirements(k) 79 80 81 extras_require = {k: v for k, v in requirements.items() if k != 'install'} 82 83 84 setup_requires = requirements['setup'] 85 install_requires = requirements['install'] 86 tests_require = requirements['test'] 87 88 # List of files that needs to be in the distribution (sdist/wheel). 89 # Notes: 90 # - Files only needed in sdist should be added to `MANIFEST.in`. 91 # - The following glob (`**`) ignores items starting with `.`. 
92 cupy_package_data = [ 93 'cupy/cuda/cupy_thrust.cu', 94 'cupy/cuda/cupy_cub.cu', 95 'cupy/cuda/cupy_cufftXt.cu', # for cuFFT callback 96 'cupy/cuda/cupy_cufftXt.h', # for cuFFT callback 97 'cupy/cuda/cupy_cufft.h', # for cuFFT callback 98 'cupy/cuda/cufft.pxd', # for cuFFT callback 99 'cupy/cuda/cufft.pyx', # for cuFFT callback 100 'cupy/random/cupy_distributions.cu', 101 'cupy/random/cupy_distributions.cuh', 102 ] + [ 103 x for x in glob.glob('cupy/_core/include/cupy/**', recursive=True) 104 if os.path.isfile(x) 105 ] 106 107 package_data = { 108 'cupy': [ 109 os.path.relpath(x, 'cupy') for x in cupy_package_data 110 ], 111 } 112 113 package_data['cupy'] += cupy_setup_build.prepare_wheel_libs() 114 115 package_name = cupy_setup_build.get_package_name() 116 long_description = cupy_setup_build.get_long_description() 117 ext_modules = cupy_setup_build.get_ext_modules() 118 build_ext = cupy_setup_build.custom_build_ext 119 120 here = os.path.abspath(os.path.dirname(__file__)) 121 # Get __version__ variable 122 with open(os.path.join(here, 'cupy', '_version.py')) as f: 123 exec(f.read()) 124 125 CLASSIFIERS = """\ 126 Development Status :: 5 - Production/Stable 127 Intended Audience :: Science/Research 128 Intended Audience :: Developers 129 License :: OSI Approved :: MIT License 130 Programming Language :: Python 131 Programming Language :: Python :: 3 132 Programming Language :: Python :: 3.6 133 Programming Language :: Python :: 3.7 134 Programming Language :: Python :: 3.8 135 Programming Language :: Python :: 3.9 136 Programming Language :: Python :: 3 :: Only 137 Programming Language :: Cython 138 Topic :: Software Development 139 Topic :: Scientific/Engineering 140 Operating System :: POSIX 141 Operating System :: Microsoft :: Windows 142 """ 143 144 145 setup( 146 name=package_name, 147 version=__version__, # NOQA 148 description='CuPy: A NumPy-compatible array library accelerated by CUDA', 149 long_description=long_description, 150 author='Seiya Tokui', 151 author_email='[email protected]', 152 url='https://cupy.dev/', 153 license='MIT License', 154 project_urls={ 155 "Bug Tracker": "https://github.com/cupy/cupy/issues", 156 "Documentation": "https://docs.cupy.dev/", 157 "Source Code": "https://github.com/cupy/cupy", 158 }, 159 classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f], 160 packages=find_packages(exclude=['install', 'tests']), 161 package_data=package_data, 162 zip_safe=False, 163 python_requires='>=3.6.0', 164 setup_requires=setup_requires, 165 install_requires=install_requires, 166 tests_require=tests_require, 167 extras_require=extras_require, 168 ext_modules=ext_modules, 169 cmdclass={'build_ext': build_ext}, 170 ) 171 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -31,11 +31,11 @@ ], 'install': [ - 'numpy>=1.17', + 'numpy>=1.17,<1.23', # see #4773 'fastrlock>=0.5', ], 'all': [ - 'scipy>=1.4', + 'scipy>=1.4,<1.9', # see #4773 'optuna>=2.0', ],
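The bounds in the patch above follow the "latest minor plus three" guidance quoted in the issue (NumPy 1.20 at the time gives `<1.23`, SciPy 1.6 gives `<1.9`). The helper below is only an editorial sketch of that arithmetic; the version strings are illustrative and the authoritative pins are the ones written into `setup.py`.

```python
# Sketch of the "latest minor + 3" upper-bound rule described in the issue.
# The input versions are examples only; they are not read from any package metadata.
def upper_bound(latest: str, headroom: int = 3) -> str:
    major, minor = (int(part) for part in latest.split(".")[:2])
    return f"<{major}.{minor + headroom}"

print(f"numpy>=1.17,{upper_bound('1.20')}")  # numpy>=1.17,<1.23
print(f"scipy>=1.4,{upper_bound('1.6')}")    # scipy>=1.4,<1.9
```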
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -31,11 +31,11 @@\n ],\n \n 'install': [\n- 'numpy>=1.17',\n+ 'numpy>=1.17,<1.23', # see #4773\n 'fastrlock>=0.5',\n ],\n 'all': [\n- 'scipy>=1.4',\n+ 'scipy>=1.4,<1.9', # see #4773\n 'optuna>=2.0',\n ],\n", "issue": "[info] NumPy/SciPy new version pinning recommendation\nSee:\r\n- https://github.com/numpy/numpy/pull/18505\r\n- scipy/scipy#12862\r\n\r\nThe most important takeaway is that NumPy/SciPy now recommend downstream distributions to pin the upper bound version if NumPy/Scipy are runtime dependencies. (The example is if the latest NumPy out there is 1.20, one should pin to `<1.23`; the notation used in the docs `<1.xx+3.0` is a bit confusing, see the clarification in https://github.com/scipy/scipy/pull/12862#discussion_r575790007.) There are other suggestions too, but I think this is potentially the most impactful one.\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport glob\nimport os\nfrom setuptools import setup, find_packages\nimport sys\n\nimport cupy_setup_build\n\n\nfor submodule in ('cupy/_core/include/cupy/cub/',\n 'cupy/_core/include/cupy/jitify'):\n if len(os.listdir(submodule)) == 0:\n msg = '''\n The folder %s is a git submodule but is\n currently empty. Please use the command\n\n git submodule update --init\n\n to populate the folder before building from source.\n ''' % submodule\n print(msg, file=sys.stderr)\n sys.exit(1)\n\n\nrequirements = {\n # TODO(kmaehashi): migrate to pyproject.toml (see #4727, #4619)\n 'setup': [\n 'Cython>=0.29.22',\n 'fastrlock>=0.5',\n ],\n\n 'install': [\n 'numpy>=1.17',\n 'fastrlock>=0.5',\n ],\n 'all': [\n 'scipy>=1.4',\n 'optuna>=2.0',\n ],\n\n 'stylecheck': [\n 'autopep8==1.5.5',\n 'flake8==3.8.4',\n 'pbr==5.5.1',\n 'pycodestyle==2.6.0',\n ],\n 'test': [\n # 4.2 <= pytest < 6.2 is slow collecting tests and times out on CI.\n 'pytest>=6.2',\n ],\n 'jenkins': [\n '-r test',\n 'pytest-timeout',\n 'pytest-cov',\n 'coveralls',\n 'codecov',\n 'coverage<5', # Otherwise, Python must be built with sqlite\n ],\n}\n\n\ndef reduce_requirements(key):\n # Resolve recursive requirements notation (-r)\n reqs = requirements[key]\n resolved_reqs = []\n for req in reqs:\n if req.startswith('-r'):\n depend_key = req[2:].lstrip()\n reduce_requirements(depend_key)\n resolved_reqs += requirements[depend_key]\n else:\n resolved_reqs.append(req)\n requirements[key] = resolved_reqs\n\n\nfor k in requirements.keys():\n reduce_requirements(k)\n\n\nextras_require = {k: v for k, v in requirements.items() if k != 'install'}\n\n\nsetup_requires = requirements['setup']\ninstall_requires = requirements['install']\ntests_require = requirements['test']\n\n# List of files that needs to be in the distribution (sdist/wheel).\n# Notes:\n# - Files only needed in sdist should be added to `MANIFEST.in`.\n# - The following glob (`**`) ignores items starting with `.`.\ncupy_package_data = [\n 'cupy/cuda/cupy_thrust.cu',\n 'cupy/cuda/cupy_cub.cu',\n 'cupy/cuda/cupy_cufftXt.cu', # for cuFFT callback\n 'cupy/cuda/cupy_cufftXt.h', # for cuFFT callback\n 'cupy/cuda/cupy_cufft.h', # for cuFFT callback\n 'cupy/cuda/cufft.pxd', # for cuFFT callback\n 'cupy/cuda/cufft.pyx', # for cuFFT callback\n 'cupy/random/cupy_distributions.cu',\n 'cupy/random/cupy_distributions.cuh',\n] + [\n x for x in glob.glob('cupy/_core/include/cupy/**', recursive=True)\n if os.path.isfile(x)\n]\n\npackage_data = {\n 'cupy': [\n os.path.relpath(x, 'cupy') for x in cupy_package_data\n ],\n}\n\npackage_data['cupy'] 
+= cupy_setup_build.prepare_wheel_libs()\n\npackage_name = cupy_setup_build.get_package_name()\nlong_description = cupy_setup_build.get_long_description()\next_modules = cupy_setup_build.get_ext_modules()\nbuild_ext = cupy_setup_build.custom_build_ext\n\nhere = os.path.abspath(os.path.dirname(__file__))\n# Get __version__ variable\nwith open(os.path.join(here, 'cupy', '_version.py')) as f:\n exec(f.read())\n\nCLASSIFIERS = \"\"\"\\\nDevelopment Status :: 5 - Production/Stable\nIntended Audience :: Science/Research\nIntended Audience :: Developers\nLicense :: OSI Approved :: MIT License\nProgramming Language :: Python\nProgramming Language :: Python :: 3\nProgramming Language :: Python :: 3.6\nProgramming Language :: Python :: 3.7\nProgramming Language :: Python :: 3.8\nProgramming Language :: Python :: 3.9\nProgramming Language :: Python :: 3 :: Only\nProgramming Language :: Cython\nTopic :: Software Development\nTopic :: Scientific/Engineering\nOperating System :: POSIX\nOperating System :: Microsoft :: Windows\n\"\"\"\n\n\nsetup(\n name=package_name,\n version=__version__, # NOQA\n description='CuPy: A NumPy-compatible array library accelerated by CUDA',\n long_description=long_description,\n author='Seiya Tokui',\n author_email='[email protected]',\n url='https://cupy.dev/',\n license='MIT License',\n project_urls={\n \"Bug Tracker\": \"https://github.com/cupy/cupy/issues\",\n \"Documentation\": \"https://docs.cupy.dev/\",\n \"Source Code\": \"https://github.com/cupy/cupy\",\n },\n classifiers=[_f for _f in CLASSIFIERS.split('\\n') if _f],\n packages=find_packages(exclude=['install', 'tests']),\n package_data=package_data,\n zip_safe=False,\n python_requires='>=3.6.0',\n setup_requires=setup_requires,\n install_requires=install_requires,\n tests_require=tests_require,\n extras_require=extras_require,\n ext_modules=ext_modules,\n cmdclass={'build_ext': build_ext},\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nimport glob\nimport os\nfrom setuptools import setup, find_packages\nimport sys\n\nimport cupy_setup_build\n\n\nfor submodule in ('cupy/_core/include/cupy/cub/',\n 'cupy/_core/include/cupy/jitify'):\n if len(os.listdir(submodule)) == 0:\n msg = '''\n The folder %s is a git submodule but is\n currently empty. 
Please use the command\n\n git submodule update --init\n\n to populate the folder before building from source.\n ''' % submodule\n print(msg, file=sys.stderr)\n sys.exit(1)\n\n\nrequirements = {\n # TODO(kmaehashi): migrate to pyproject.toml (see #4727, #4619)\n 'setup': [\n 'Cython>=0.29.22',\n 'fastrlock>=0.5',\n ],\n\n 'install': [\n 'numpy>=1.17,<1.23', # see #4773\n 'fastrlock>=0.5',\n ],\n 'all': [\n 'scipy>=1.4,<1.9', # see #4773\n 'optuna>=2.0',\n ],\n\n 'stylecheck': [\n 'autopep8==1.5.5',\n 'flake8==3.8.4',\n 'pbr==5.5.1',\n 'pycodestyle==2.6.0',\n ],\n 'test': [\n # 4.2 <= pytest < 6.2 is slow collecting tests and times out on CI.\n 'pytest>=6.2',\n ],\n 'jenkins': [\n '-r test',\n 'pytest-timeout',\n 'pytest-cov',\n 'coveralls',\n 'codecov',\n 'coverage<5', # Otherwise, Python must be built with sqlite\n ],\n}\n\n\ndef reduce_requirements(key):\n # Resolve recursive requirements notation (-r)\n reqs = requirements[key]\n resolved_reqs = []\n for req in reqs:\n if req.startswith('-r'):\n depend_key = req[2:].lstrip()\n reduce_requirements(depend_key)\n resolved_reqs += requirements[depend_key]\n else:\n resolved_reqs.append(req)\n requirements[key] = resolved_reqs\n\n\nfor k in requirements.keys():\n reduce_requirements(k)\n\n\nextras_require = {k: v for k, v in requirements.items() if k != 'install'}\n\n\nsetup_requires = requirements['setup']\ninstall_requires = requirements['install']\ntests_require = requirements['test']\n\n# List of files that needs to be in the distribution (sdist/wheel).\n# Notes:\n# - Files only needed in sdist should be added to `MANIFEST.in`.\n# - The following glob (`**`) ignores items starting with `.`.\ncupy_package_data = [\n 'cupy/cuda/cupy_thrust.cu',\n 'cupy/cuda/cupy_cub.cu',\n 'cupy/cuda/cupy_cufftXt.cu', # for cuFFT callback\n 'cupy/cuda/cupy_cufftXt.h', # for cuFFT callback\n 'cupy/cuda/cupy_cufft.h', # for cuFFT callback\n 'cupy/cuda/cufft.pxd', # for cuFFT callback\n 'cupy/cuda/cufft.pyx', # for cuFFT callback\n 'cupy/random/cupy_distributions.cu',\n 'cupy/random/cupy_distributions.cuh',\n] + [\n x for x in glob.glob('cupy/_core/include/cupy/**', recursive=True)\n if os.path.isfile(x)\n]\n\npackage_data = {\n 'cupy': [\n os.path.relpath(x, 'cupy') for x in cupy_package_data\n ],\n}\n\npackage_data['cupy'] += cupy_setup_build.prepare_wheel_libs()\n\npackage_name = cupy_setup_build.get_package_name()\nlong_description = cupy_setup_build.get_long_description()\next_modules = cupy_setup_build.get_ext_modules()\nbuild_ext = cupy_setup_build.custom_build_ext\n\nhere = os.path.abspath(os.path.dirname(__file__))\n# Get __version__ variable\nwith open(os.path.join(here, 'cupy', '_version.py')) as f:\n exec(f.read())\n\nCLASSIFIERS = \"\"\"\\\nDevelopment Status :: 5 - Production/Stable\nIntended Audience :: Science/Research\nIntended Audience :: Developers\nLicense :: OSI Approved :: MIT License\nProgramming Language :: Python\nProgramming Language :: Python :: 3\nProgramming Language :: Python :: 3.6\nProgramming Language :: Python :: 3.7\nProgramming Language :: Python :: 3.8\nProgramming Language :: Python :: 3.9\nProgramming Language :: Python :: 3 :: Only\nProgramming Language :: Cython\nTopic :: Software Development\nTopic :: Scientific/Engineering\nOperating System :: POSIX\nOperating System :: Microsoft :: Windows\n\"\"\"\n\n\nsetup(\n name=package_name,\n version=__version__, # NOQA\n description='CuPy: A NumPy-compatible array library accelerated by CUDA',\n long_description=long_description,\n author='Seiya Tokui',\n author_email='[email 
protected]',\n url='https://cupy.dev/',\n license='MIT License',\n project_urls={\n \"Bug Tracker\": \"https://github.com/cupy/cupy/issues\",\n \"Documentation\": \"https://docs.cupy.dev/\",\n \"Source Code\": \"https://github.com/cupy/cupy\",\n },\n classifiers=[_f for _f in CLASSIFIERS.split('\\n') if _f],\n packages=find_packages(exclude=['install', 'tests']),\n package_data=package_data,\n zip_safe=False,\n python_requires='>=3.6.0',\n setup_requires=setup_requires,\n install_requires=install_requires,\n tests_require=tests_require,\n extras_require=extras_require,\n ext_modules=ext_modules,\n cmdclass={'build_ext': build_ext},\n)\n", "path": "setup.py"}]}
2,119
137
gh_patches_debug_30339
rasdani/github-patches
git_diff
OCHA-DAP__hdx-ckan-2182
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Search and Dataset List Pages: Move the filter button Move the filter button on /search and /dataset so that it apears as part of the search input in main nav: https://cloud.githubusercontent.com/assets/1654485/5780030/a25eaeb6-9da7-11e4-9e5e-bdca79e549ab.png When open, the filter button remains in the same place but has a "-" added to indicate that it can be closed. SVGs will be linked below shortly. - Open filter button: https://drive.google.com/file/d/0Bx1KWNTx8Ij3SW42UEFNaTdFQXc/view?usp=sharing - Close filter button: https://drive.google.com/file/d/0Bx1KWNTx8Ij3NzdEXzFmRlJZZU0/view?usp=sharing Let me know if these SVGs cause any problem. They are filled black, but I can easily change that. Default Country Page: graphs section Blocked by #2102 and #2103 Annotated (very annotated) design is here: https://docs.google.com/drawings/d/1qOBKZ7IO7zkEMHh2g3ZiAynh5PiAbO-_-SL4rd9uO_M/edit Implement the section with 5 graphs --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ckanext-hdx_org_group/ckanext/hdx_org_group/controllers/country_controller.py` Content: ``` 1 ''' 2 Created on Jan 13, 2015 3 4 @author: alexandru-m-g 5 ''' 6 import json 7 8 import logging 9 import datetime as dt 10 11 import ckan.lib.base as base 12 import ckan.logic as logic 13 import ckan.model as model 14 import ckan.common as common 15 import ckan.controllers.group as group 16 17 render = base.render 18 abort = base.abort 19 NotFound = logic.NotFound 20 NotAuthorized = logic.NotAuthorized 21 get_action = logic.get_action 22 c = common.c 23 request = common.request 24 _ = common._ 25 26 27 log = logging.getLogger(__name__) 28 29 group_type = 'group' 30 31 indicators_4_charts = ['PVH140', 'PVN010', 'PVW010', 'PVF020', 32 'PSE160', 'PCX051', 'PVE130', 'PCX060', 33 'RW002', 'PVE110', 'PVN050', 'PVN070', 34 'PVW040'] 35 # http://localhost:8080/public/api2/values?it=PSP120&it=PSP090&l=CHN&sorting=INDICATOR_TYPE_ASC 36 37 indicators_4_top_line = ['PSP120', 'PSP090', 'PSE220', 'PSE030', 38 'CG300'] 39 # http://localhost:8080/public/api2/values?it=PSP120&l=CHN&periodType=LATEST_YEAR 40 41 42 class CountryController(group.GroupController): 43 44 def read(self, id): 45 self.get_country(id) 46 self.get_dataset_results(c.group_dict.get('name', id)) 47 48 # activity stream 49 context = {'model': model, 'session': model.Session, 50 'user': c.user or c.author, 51 'for_view': True} 52 country_uuid = c.group_dict.get('id', id) 53 self.get_activity_stream(context, country_uuid) 54 55 return render('country/country.html') 56 57 def get_country(self, id): 58 if group_type != self.group_type: 59 abort(404, _('Incorrect group type')) 60 61 context = {'model': model, 'session': model.Session, 62 'user': c.user or c.author, 63 'schema': self._db_to_form_schema(group_type=group_type), 64 'for_view': True} 65 data_dict = {'id': id} 66 67 try: 68 context['include_datasets'] = False 69 c.group_dict = self._action('group_show')(context, data_dict) 70 c.group = context['group'] 71 except NotFound: 72 abort(404, _('Group not found')) 73 except NotAuthorized: 74 abort(401, _('Unauthorized to read group %s') % id) 75 76 77 def get_dataset_results(self, country_id): 78 upper_case_id = country_id.upper() 79 top_line_results = self._get_top_line_num(upper_case_id) 80 top_line_data = top_line_results.get('results', []) 81 82 if not top_line_data: 83 
log.warn('No top line numbers found for country: {}'.format(country_id)) 84 85 sorted_top_line_data = sorted(top_line_data, 86 key=lambda x: indicators_4_top_line.index(x['indicatorTypeCode'])) 87 88 c.top_line_data_list = sorted_top_line_data 89 90 chart_results = self._get_chart_data(upper_case_id) 91 chart_data = chart_results.get('results', []) 92 if not chart_data: 93 log.warn('No chart data found for country: {}'.format(country_id)) 94 chart_data_dict = {} 95 96 # for el in chart_data: 97 # ind_type = el.get('indicatorTypeCode', None) 98 # if ind_type: 99 # d = dt.datetime.strptime(el.get('time', ''), '%Y-%m-%d') 100 # el['datetime'] = d 101 # if ind_type in chart_data_dict: 102 # chart_data_dict[ind_type].append(el) 103 # else: 104 # chart_data_dict[ind_type] = [el] 105 106 for el in chart_data: 107 ind_type = el.get('indicatorTypeCode', None) 108 if ind_type: 109 # d = dt.datetime.strptime(el.get('time', ''), '%Y-%m-%d') 110 val = { 111 'date': el.get('time'), 112 'value': el.get('value') 113 } 114 115 if ind_type in chart_data_dict: 116 chart_data_dict[ind_type]['data'].append(val); 117 else: 118 newel = { 119 'title': el.get('unitName'), 120 'code': ind_type, 121 'data': [val] 122 } 123 chart_data_dict[ind_type] = newel 124 125 126 127 # for code in chart_data_dict.keys(): 128 # chart_data_dict[code] = sorted(chart_data_dict[code], key=lambda x: x.get('datetime', None)) 129 130 for code in chart_data_dict.keys(): 131 chart_data_dict[code]['data'] = json.dumps(chart_data_dict[code]['data']) 132 133 chart_data_list = [] 134 for code in indicators_4_charts: 135 if code in chart_data_dict and len(chart_data_list) < 5: 136 chart_data_list.append(chart_data_dict[code]) 137 138 c.chart_data_list = chart_data_list 139 140 # c.chart_data_dict = chart_data_dict 141 142 def _get_chart_data(self, country_id): 143 data_dict = { 144 'sorting': 'INDICATOR_TYPE_ASC', 145 'l': country_id, 146 'it': indicators_4_charts 147 } 148 result = get_action('hdx_get_indicator_values')({}, data_dict) 149 return result 150 151 def _get_top_line_num(self, country_id): 152 data_dict = { 153 'periodType': 'LATEST_YEAR', 154 'l': country_id, 155 'it': indicators_4_top_line 156 } 157 result = get_action('hdx_get_indicator_values')({}, data_dict) 158 return result 159 160 def get_activity_stream(self, context, country_id): 161 act_data_dict = {'id': country_id, 'limit': 7} 162 c.hdx_group_activities = get_action( 163 'hdx_get_group_activity_list')(context, act_data_dict) 164 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ckanext-hdx_org_group/ckanext/hdx_org_group/controllers/country_controller.py b/ckanext-hdx_org_group/ckanext/hdx_org_group/controllers/country_controller.py --- a/ckanext-hdx_org_group/ckanext/hdx_org_group/controllers/country_controller.py +++ b/ckanext-hdx_org_group/ckanext/hdx_org_group/controllers/country_controller.py @@ -107,18 +107,35 @@ ind_type = el.get('indicatorTypeCode', None) if ind_type: # d = dt.datetime.strptime(el.get('time', ''), '%Y-%m-%d') + el_time = el.get('time') + el_value = el.get('value') val = { - 'date': el.get('time'), - 'value': el.get('value') + 'date': el_time, + 'value': el_value } if ind_type in chart_data_dict: chart_data_dict[ind_type]['data'].append(val); + + last_date = dt.datetime.strptime(chart_data_dict[ind_type]['lastDate'], '%Y-%m-%d') + curr_date = dt.datetime.strptime(el_time, '%Y-%m-%d') + + if last_date < curr_date: + chart_data_dict[ind_type]['lastDate'] = el_time + chart_data_dict[ind_type]['lastValue'] = el_value + else: newel = { - 'title': el.get('unitName'), + 'title': el.get('indicatorTypeName'), + 'sourceName': el.get('sourceName'), + 'sourceCode': el.get('sourceCode'), + 'lastDate': el_time, + 'lastValue': el_value, + 'unit': el.get('unitName'), 'code': ind_type, - 'data': [val] + 'data': [val], + 'datasetLink': '/todo/changeme', + 'datasetUpdateDate': 'Jun 21, 1985' } chart_data_dict[ind_type] = newel
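The core of the patch above is tracking, per indicator code, the most recent observation (`lastDate`/`lastValue`) while the chart series is being built. The snippet below is a self-contained sketch of that comparison logic with made-up sample rows; the field names mirror the controller code, but the data and the stand-alone loop shape are illustrative only.

```python
# Editorial sketch of "keep the most recent observation per indicator"
# as introduced by the patch; sample rows are fabricated for illustration.
import datetime as dt

rows = [
    {"indicatorTypeCode": "PVH140", "time": "2013-01-01", "value": 10},
    {"indicatorTypeCode": "PVH140", "time": "2014-01-01", "value": 12},
]

latest = {}
for el in rows:
    code = el["indicatorTypeCode"]
    curr_date = dt.datetime.strptime(el["time"], "%Y-%m-%d")
    entry = latest.get(code)
    if entry is None or dt.datetime.strptime(entry["lastDate"], "%Y-%m-%d") < curr_date:
        latest[code] = {"lastDate": el["time"], "lastValue": el["value"]}

print(latest["PVH140"])  # {'lastDate': '2014-01-01', 'lastValue': 12}
```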
{"golden_diff": "diff --git a/ckanext-hdx_org_group/ckanext/hdx_org_group/controllers/country_controller.py b/ckanext-hdx_org_group/ckanext/hdx_org_group/controllers/country_controller.py\n--- a/ckanext-hdx_org_group/ckanext/hdx_org_group/controllers/country_controller.py\n+++ b/ckanext-hdx_org_group/ckanext/hdx_org_group/controllers/country_controller.py\n@@ -107,18 +107,35 @@\n ind_type = el.get('indicatorTypeCode', None)\n if ind_type:\n # d = dt.datetime.strptime(el.get('time', ''), '%Y-%m-%d')\n+ el_time = el.get('time')\n+ el_value = el.get('value')\n val = {\n- 'date': el.get('time'),\n- 'value': el.get('value')\n+ 'date': el_time,\n+ 'value': el_value\n }\n \n if ind_type in chart_data_dict:\n chart_data_dict[ind_type]['data'].append(val);\n+\n+ last_date = dt.datetime.strptime(chart_data_dict[ind_type]['lastDate'], '%Y-%m-%d')\n+ curr_date = dt.datetime.strptime(el_time, '%Y-%m-%d')\n+\n+ if last_date < curr_date:\n+ chart_data_dict[ind_type]['lastDate'] = el_time\n+ chart_data_dict[ind_type]['lastValue'] = el_value\n+\n else:\n newel = {\n- 'title': el.get('unitName'),\n+ 'title': el.get('indicatorTypeName'),\n+ 'sourceName': el.get('sourceName'),\n+ 'sourceCode': el.get('sourceCode'),\n+ 'lastDate': el_time,\n+ 'lastValue': el_value,\n+ 'unit': el.get('unitName'),\n 'code': ind_type,\n- 'data': [val]\n+ 'data': [val],\n+ 'datasetLink': '/todo/changeme',\n+ 'datasetUpdateDate': 'Jun 21, 1985'\n }\n chart_data_dict[ind_type] = newel\n", "issue": "Search and Dataset List Pages: Move the filter button\nMove the filter button on /search and /dataset so that it apears as part of the search input in main nav:\nhttps://cloud.githubusercontent.com/assets/1654485/5780030/a25eaeb6-9da7-11e4-9e5e-bdca79e549ab.png\n\nWhen open, the filter button remains in the same place but has a \"-\" added to indicate that it can be closed. SVGs will be linked below shortly.\n- Open filter button: https://drive.google.com/file/d/0Bx1KWNTx8Ij3SW42UEFNaTdFQXc/view?usp=sharing\n- Close filter button: https://drive.google.com/file/d/0Bx1KWNTx8Ij3NzdEXzFmRlJZZU0/view?usp=sharing\n\nLet me know if these SVGs cause any problem. They are filled black, but I can easily change that. 
\n\nDefault Country Page: graphs section \nBlocked by #2102 and #2103 \nAnnotated (very annotated) design is here: \nhttps://docs.google.com/drawings/d/1qOBKZ7IO7zkEMHh2g3ZiAynh5PiAbO-_-SL4rd9uO_M/edit\nImplement the section with 5 graphs\n\n", "before_files": [{"content": "'''\nCreated on Jan 13, 2015\n\n@author: alexandru-m-g\n'''\nimport json\n\nimport logging\nimport datetime as dt\n\nimport ckan.lib.base as base\nimport ckan.logic as logic\nimport ckan.model as model\nimport ckan.common as common\nimport ckan.controllers.group as group\n\nrender = base.render\nabort = base.abort\nNotFound = logic.NotFound\nNotAuthorized = logic.NotAuthorized\nget_action = logic.get_action\nc = common.c\nrequest = common.request\n_ = common._\n\n\nlog = logging.getLogger(__name__)\n\ngroup_type = 'group'\n\nindicators_4_charts = ['PVH140', 'PVN010', 'PVW010', 'PVF020',\n 'PSE160', 'PCX051', 'PVE130', 'PCX060',\n 'RW002', 'PVE110', 'PVN050', 'PVN070',\n 'PVW040']\n# http://localhost:8080/public/api2/values?it=PSP120&it=PSP090&l=CHN&sorting=INDICATOR_TYPE_ASC\n\nindicators_4_top_line = ['PSP120', 'PSP090', 'PSE220', 'PSE030',\n 'CG300']\n# http://localhost:8080/public/api2/values?it=PSP120&l=CHN&periodType=LATEST_YEAR\n\n\nclass CountryController(group.GroupController):\n\n def read(self, id):\n self.get_country(id)\n self.get_dataset_results(c.group_dict.get('name', id))\n\n # activity stream\n context = {'model': model, 'session': model.Session,\n 'user': c.user or c.author,\n 'for_view': True}\n country_uuid = c.group_dict.get('id', id)\n self.get_activity_stream(context, country_uuid)\n\n return render('country/country.html')\n\n def get_country(self, id):\n if group_type != self.group_type:\n abort(404, _('Incorrect group type'))\n\n context = {'model': model, 'session': model.Session,\n 'user': c.user or c.author,\n 'schema': self._db_to_form_schema(group_type=group_type),\n 'for_view': True}\n data_dict = {'id': id}\n\n try:\n context['include_datasets'] = False\n c.group_dict = self._action('group_show')(context, data_dict)\n c.group = context['group']\n except NotFound:\n abort(404, _('Group not found'))\n except NotAuthorized:\n abort(401, _('Unauthorized to read group %s') % id)\n\n\n def get_dataset_results(self, country_id):\n upper_case_id = country_id.upper()\n top_line_results = self._get_top_line_num(upper_case_id)\n top_line_data = top_line_results.get('results', [])\n\n if not top_line_data:\n log.warn('No top line numbers found for country: {}'.format(country_id))\n\n sorted_top_line_data = sorted(top_line_data,\n key=lambda x: indicators_4_top_line.index(x['indicatorTypeCode']))\n\n c.top_line_data_list = sorted_top_line_data\n\n chart_results = self._get_chart_data(upper_case_id)\n chart_data = chart_results.get('results', [])\n if not chart_data:\n log.warn('No chart data found for country: {}'.format(country_id))\n chart_data_dict = {}\n\n # for el in chart_data:\n # ind_type = el.get('indicatorTypeCode', None)\n # if ind_type:\n # d = dt.datetime.strptime(el.get('time', ''), '%Y-%m-%d')\n # el['datetime'] = d\n # if ind_type in chart_data_dict:\n # chart_data_dict[ind_type].append(el)\n # else:\n # chart_data_dict[ind_type] = [el]\n\n for el in chart_data:\n ind_type = el.get('indicatorTypeCode', None)\n if ind_type:\n # d = dt.datetime.strptime(el.get('time', ''), '%Y-%m-%d')\n val = {\n 'date': el.get('time'),\n 'value': el.get('value')\n }\n\n if ind_type in chart_data_dict:\n chart_data_dict[ind_type]['data'].append(val);\n else:\n newel = {\n 'title': el.get('unitName'),\n 
'code': ind_type,\n 'data': [val]\n }\n chart_data_dict[ind_type] = newel\n\n\n\n # for code in chart_data_dict.keys():\n # chart_data_dict[code] = sorted(chart_data_dict[code], key=lambda x: x.get('datetime', None))\n\n for code in chart_data_dict.keys():\n chart_data_dict[code]['data'] = json.dumps(chart_data_dict[code]['data'])\n\n chart_data_list = []\n for code in indicators_4_charts:\n if code in chart_data_dict and len(chart_data_list) < 5:\n chart_data_list.append(chart_data_dict[code])\n\n c.chart_data_list = chart_data_list\n\n # c.chart_data_dict = chart_data_dict\n\n def _get_chart_data(self, country_id):\n data_dict = {\n 'sorting': 'INDICATOR_TYPE_ASC',\n 'l': country_id,\n 'it': indicators_4_charts\n }\n result = get_action('hdx_get_indicator_values')({}, data_dict)\n return result\n\n def _get_top_line_num(self, country_id):\n data_dict = {\n 'periodType': 'LATEST_YEAR',\n 'l': country_id,\n 'it': indicators_4_top_line\n }\n result = get_action('hdx_get_indicator_values')({}, data_dict)\n return result\n\n def get_activity_stream(self, context, country_id):\n act_data_dict = {'id': country_id, 'limit': 7}\n c.hdx_group_activities = get_action(\n 'hdx_get_group_activity_list')(context, act_data_dict)\n", "path": "ckanext-hdx_org_group/ckanext/hdx_org_group/controllers/country_controller.py"}], "after_files": [{"content": "'''\nCreated on Jan 13, 2015\n\n@author: alexandru-m-g\n'''\nimport json\n\nimport logging\nimport datetime as dt\n\nimport ckan.lib.base as base\nimport ckan.logic as logic\nimport ckan.model as model\nimport ckan.common as common\nimport ckan.controllers.group as group\n\nrender = base.render\nabort = base.abort\nNotFound = logic.NotFound\nNotAuthorized = logic.NotAuthorized\nget_action = logic.get_action\nc = common.c\nrequest = common.request\n_ = common._\n\n\nlog = logging.getLogger(__name__)\n\ngroup_type = 'group'\n\nindicators_4_charts = ['PVH140', 'PVN010', 'PVW010', 'PVF020',\n 'PSE160', 'PCX051', 'PVE130', 'PCX060',\n 'RW002', 'PVE110', 'PVN050', 'PVN070',\n 'PVW040']\n# http://localhost:8080/public/api2/values?it=PSP120&it=PSP090&l=CHN&sorting=INDICATOR_TYPE_ASC\n\nindicators_4_top_line = ['PSP120', 'PSP090', 'PSE220', 'PSE030',\n 'CG300']\n# http://localhost:8080/public/api2/values?it=PSP120&l=CHN&periodType=LATEST_YEAR\n\n\nclass CountryController(group.GroupController):\n\n def read(self, id):\n self.get_country(id)\n self.get_dataset_results(c.group_dict.get('name', id))\n\n # activity stream\n context = {'model': model, 'session': model.Session,\n 'user': c.user or c.author,\n 'for_view': True}\n country_uuid = c.group_dict.get('id', id)\n self.get_activity_stream(context, country_uuid)\n\n return render('country/country.html')\n\n def get_country(self, id):\n if group_type != self.group_type:\n abort(404, _('Incorrect group type'))\n\n context = {'model': model, 'session': model.Session,\n 'user': c.user or c.author,\n 'schema': self._db_to_form_schema(group_type=group_type),\n 'for_view': True}\n data_dict = {'id': id}\n\n try:\n context['include_datasets'] = False\n c.group_dict = self._action('group_show')(context, data_dict)\n c.group = context['group']\n except NotFound:\n abort(404, _('Group not found'))\n except NotAuthorized:\n abort(401, _('Unauthorized to read group %s') % id)\n\n\n def get_dataset_results(self, country_id):\n upper_case_id = country_id.upper()\n top_line_results = self._get_top_line_num(upper_case_id)\n top_line_data = top_line_results.get('results', [])\n\n if not top_line_data:\n log.warn('No top line numbers 
found for country: {}'.format(country_id))\n\n sorted_top_line_data = sorted(top_line_data,\n key=lambda x: indicators_4_top_line.index(x['indicatorTypeCode']))\n\n c.top_line_data_list = sorted_top_line_data\n\n chart_results = self._get_chart_data(upper_case_id)\n chart_data = chart_results.get('results', [])\n if not chart_data:\n log.warn('No chart data found for country: {}'.format(country_id))\n chart_data_dict = {}\n\n # for el in chart_data:\n # ind_type = el.get('indicatorTypeCode', None)\n # if ind_type:\n # d = dt.datetime.strptime(el.get('time', ''), '%Y-%m-%d')\n # el['datetime'] = d\n # if ind_type in chart_data_dict:\n # chart_data_dict[ind_type].append(el)\n # else:\n # chart_data_dict[ind_type] = [el]\n\n for el in chart_data:\n ind_type = el.get('indicatorTypeCode', None)\n if ind_type:\n # d = dt.datetime.strptime(el.get('time', ''), '%Y-%m-%d')\n el_time = el.get('time')\n el_value = el.get('value')\n val = {\n 'date': el_time,\n 'value': el_value\n }\n\n if ind_type in chart_data_dict:\n chart_data_dict[ind_type]['data'].append(val);\n\n last_date = dt.datetime.strptime(chart_data_dict[ind_type]['lastDate'], '%Y-%m-%d')\n curr_date = dt.datetime.strptime(el_time, '%Y-%m-%d')\n\n if last_date < curr_date:\n chart_data_dict[ind_type]['lastDate'] = el_time\n chart_data_dict[ind_type]['lastValue'] = el_value\n\n else:\n newel = {\n 'title': el.get('indicatorTypeName'),\n 'sourceName': el.get('sourceName'),\n 'sourceCode': el.get('sourceCode'),\n 'lastDate': el_time,\n 'lastValue': el_value,\n 'unit': el.get('unitName'),\n 'code': ind_type,\n 'data': [val],\n 'datasetLink': '/todo/changeme',\n 'datasetUpdateDate': 'Jun 21, 1985'\n }\n chart_data_dict[ind_type] = newel\n\n\n\n # for code in chart_data_dict.keys():\n # chart_data_dict[code] = sorted(chart_data_dict[code], key=lambda x: x.get('datetime', None))\n\n for code in chart_data_dict.keys():\n chart_data_dict[code]['data'] = json.dumps(chart_data_dict[code]['data'])\n\n chart_data_list = []\n for code in indicators_4_charts:\n if code in chart_data_dict and len(chart_data_list) < 5:\n chart_data_list.append(chart_data_dict[code])\n\n c.chart_data_list = chart_data_list\n\n # c.chart_data_dict = chart_data_dict\n\n def _get_chart_data(self, country_id):\n data_dict = {\n 'sorting': 'INDICATOR_TYPE_ASC',\n 'l': country_id,\n 'it': indicators_4_charts\n }\n result = get_action('hdx_get_indicator_values')({}, data_dict)\n return result\n\n def _get_top_line_num(self, country_id):\n data_dict = {\n 'periodType': 'LATEST_YEAR',\n 'l': country_id,\n 'it': indicators_4_top_line\n }\n result = get_action('hdx_get_indicator_values')({}, data_dict)\n return result\n\n def get_activity_stream(self, context, country_id):\n act_data_dict = {'id': country_id, 'limit': 7}\n c.hdx_group_activities = get_action(\n 'hdx_get_group_activity_list')(context, act_data_dict)\n", "path": "ckanext-hdx_org_group/ckanext/hdx_org_group/controllers/country_controller.py"}]}
2330
468
gh_patches_debug_56255
rasdani/github-patches
git_diff
litestar-org__litestar-1377
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- StaticFilesConfig and virtual directories I'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem. This is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems. https://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `starlite/events/emitter.py` Content: ``` 1 from __future__ import annotations 2 3 from abc import ABC, abstractmethod 4 from asyncio import CancelledError, Queue, Task, create_task 5 from collections import defaultdict 6 from contextlib import suppress 7 from typing import TYPE_CHECKING, Any, DefaultDict, Sequence 8 9 import sniffio 10 11 from starlite.exceptions import ImproperlyConfiguredException 12 13 __all__ = ("BaseEventEmitterBackend", "SimpleEventEmitter") 14 15 16 if TYPE_CHECKING: 17 from starlite.events.listener import EventListener 18 19 20 class BaseEventEmitterBackend(ABC): 21 """Abstract class used to define event emitter backends.""" 22 23 __slots__ = ("listeners",) 24 25 listeners: DefaultDict[str, set[EventListener]] 26 27 def __init__(self, listeners: Sequence[EventListener]): 28 """Create an event emitter instance. 29 30 Args: 31 listeners: A list of listeners. 32 """ 33 self.listeners = defaultdict(set) 34 for listener in listeners: 35 for event_id in listener.event_ids: 36 self.listeners[event_id].add(listener) 37 38 @abstractmethod 39 def emit(self, event_id: str, *args: Any, **kwargs: Any) -> None: # pragma: no cover 40 """Emit an event to all attached listeners. 41 42 Args: 43 event_id: The ID of the event to emit, e.g 'my_event'. 44 *args: args to pass to the listener(s). 45 **kwargs: kwargs to pass to the listener(s) 46 47 Returns: 48 None 49 """ 50 raise NotImplementedError("not implemented") 51 52 @abstractmethod 53 async def on_startup(self) -> None: # pragma: no cover 54 """Hook called on application startup, used to establish connection or perform other async operations. 55 56 Returns: 57 None 58 """ 59 raise NotImplementedError("not implemented") 60 61 @abstractmethod 62 async def on_shutdown(self) -> None: # pragma: no cover 63 """Hook called on application shutdown, used to perform cleanup. 64 65 Returns: 66 None 67 """ 68 raise NotImplementedError("not implemented") 69 70 71 class SimpleEventEmitter(BaseEventEmitterBackend): 72 """Event emitter the works only in the current process""" 73 74 __slots__ = ("_queue", "_worker_task") 75 76 _worker_task: Task | None 77 78 def __init__(self, listeners: Sequence[EventListener]): 79 """Create an event emitter instance. 80 81 Args: 82 listeners: A list of listeners. 83 """ 84 super().__init__(listeners=listeners) 85 self._queue: Queue | None = None 86 self._worker_task = None 87 88 async def _worker(self) -> None: 89 """Worker that runs in a separate task and continuously pulls events from asyncio queue. 
90 91 Returns: 92 None 93 """ 94 while self._queue: 95 fn, args, kwargs = await self._queue.get() 96 await fn(*args, *kwargs) 97 self._queue.task_done() 98 99 async def on_startup(self) -> None: 100 """Hook called on application startup, used to establish connection or perform other async operations. 101 102 Returns: 103 None 104 """ 105 if sniffio.current_async_library() != "asyncio": 106 return 107 108 self._queue = Queue() 109 self._worker_task = create_task(self._worker()) 110 111 async def on_shutdown(self) -> None: 112 """Hook called on application shutdown, used to perform cleanup. 113 114 Returns: 115 None 116 """ 117 118 if self._queue: 119 await self._queue.join() 120 121 if self._worker_task: 122 self._worker_task.cancel() 123 with suppress(CancelledError): 124 await self._worker_task 125 126 self._worker_task = None 127 self._queue = None 128 129 def emit(self, event_id: str, *args: Any, **kwargs: Any) -> None: 130 """Emit an event to all attached listeners. 131 132 Args: 133 event_id: The ID of the event to emit, e.g 'my_event'. 134 *args: args to pass to the listener(s). 135 **kwargs: kwargs to pass to the listener(s) 136 137 Returns: 138 None 139 """ 140 if not (self._worker_task and self._queue): 141 if sniffio.current_async_library() != "asyncio": 142 raise ImproperlyConfiguredException("{type(self).__name__} only supports 'asyncio' based event loops") 143 144 raise ImproperlyConfiguredException("Worker not running") 145 146 if listeners := self.listeners.get(event_id): 147 for listener in listeners: 148 self._queue.put_nowait((listener.fn, args, kwargs)) 149 return 150 raise ImproperlyConfiguredException(f"no event listeners are registered for event ID: {event_id}") 151 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/starlite/events/emitter.py b/starlite/events/emitter.py --- a/starlite/events/emitter.py +++ b/starlite/events/emitter.py @@ -93,7 +93,7 @@ """ while self._queue: fn, args, kwargs = await self._queue.get() - await fn(*args, *kwargs) + await fn(*args, **kwargs) self._queue.task_done() async def on_startup(self) -> None:
{"golden_diff": "diff --git a/starlite/events/emitter.py b/starlite/events/emitter.py\n--- a/starlite/events/emitter.py\n+++ b/starlite/events/emitter.py\n@@ -93,7 +93,7 @@\n \"\"\"\n while self._queue:\n fn, args, kwargs = await self._queue.get()\n- await fn(*args, *kwargs)\n+ await fn(*args, **kwargs)\n self._queue.task_done()\n \n async def on_startup(self) -> None:\n", "issue": "StaticFilesConfig and virtual directories\nI'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem. \r\n\r\nThis is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.\r\n\r\nhttps://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom abc import ABC, abstractmethod\nfrom asyncio import CancelledError, Queue, Task, create_task\nfrom collections import defaultdict\nfrom contextlib import suppress\nfrom typing import TYPE_CHECKING, Any, DefaultDict, Sequence\n\nimport sniffio\n\nfrom starlite.exceptions import ImproperlyConfiguredException\n\n__all__ = (\"BaseEventEmitterBackend\", \"SimpleEventEmitter\")\n\n\nif TYPE_CHECKING:\n from starlite.events.listener import EventListener\n\n\nclass BaseEventEmitterBackend(ABC):\n \"\"\"Abstract class used to define event emitter backends.\"\"\"\n\n __slots__ = (\"listeners\",)\n\n listeners: DefaultDict[str, set[EventListener]]\n\n def __init__(self, listeners: Sequence[EventListener]):\n \"\"\"Create an event emitter instance.\n\n Args:\n listeners: A list of listeners.\n \"\"\"\n self.listeners = defaultdict(set)\n for listener in listeners:\n for event_id in listener.event_ids:\n self.listeners[event_id].add(listener)\n\n @abstractmethod\n def emit(self, event_id: str, *args: Any, **kwargs: Any) -> None: # pragma: no cover\n \"\"\"Emit an event to all attached listeners.\n\n Args:\n event_id: The ID of the event to emit, e.g 'my_event'.\n *args: args to pass to the listener(s).\n **kwargs: kwargs to pass to the listener(s)\n\n Returns:\n None\n \"\"\"\n raise NotImplementedError(\"not implemented\")\n\n @abstractmethod\n async def on_startup(self) -> None: # pragma: no cover\n \"\"\"Hook called on application startup, used to establish connection or perform other async operations.\n\n Returns:\n None\n \"\"\"\n raise NotImplementedError(\"not implemented\")\n\n @abstractmethod\n async def on_shutdown(self) -> None: # pragma: no cover\n \"\"\"Hook called on application shutdown, used to perform cleanup.\n\n Returns:\n None\n \"\"\"\n raise NotImplementedError(\"not implemented\")\n\n\nclass SimpleEventEmitter(BaseEventEmitterBackend):\n \"\"\"Event emitter the works only in the current process\"\"\"\n\n __slots__ = (\"_queue\", \"_worker_task\")\n\n _worker_task: Task | None\n\n def __init__(self, listeners: Sequence[EventListener]):\n \"\"\"Create an event emitter instance.\n\n Args:\n listeners: A list of listeners.\n \"\"\"\n super().__init__(listeners=listeners)\n self._queue: Queue | None = None\n self._worker_task = None\n\n async def _worker(self) -> None:\n \"\"\"Worker that runs in a separate task and continuously pulls events from asyncio queue.\n\n Returns:\n None\n 
\"\"\"\n while self._queue:\n fn, args, kwargs = await self._queue.get()\n await fn(*args, *kwargs)\n self._queue.task_done()\n\n async def on_startup(self) -> None:\n \"\"\"Hook called on application startup, used to establish connection or perform other async operations.\n\n Returns:\n None\n \"\"\"\n if sniffio.current_async_library() != \"asyncio\":\n return\n\n self._queue = Queue()\n self._worker_task = create_task(self._worker())\n\n async def on_shutdown(self) -> None:\n \"\"\"Hook called on application shutdown, used to perform cleanup.\n\n Returns:\n None\n \"\"\"\n\n if self._queue:\n await self._queue.join()\n\n if self._worker_task:\n self._worker_task.cancel()\n with suppress(CancelledError):\n await self._worker_task\n\n self._worker_task = None\n self._queue = None\n\n def emit(self, event_id: str, *args: Any, **kwargs: Any) -> None:\n \"\"\"Emit an event to all attached listeners.\n\n Args:\n event_id: The ID of the event to emit, e.g 'my_event'.\n *args: args to pass to the listener(s).\n **kwargs: kwargs to pass to the listener(s)\n\n Returns:\n None\n \"\"\"\n if not (self._worker_task and self._queue):\n if sniffio.current_async_library() != \"asyncio\":\n raise ImproperlyConfiguredException(\"{type(self).__name__} only supports 'asyncio' based event loops\")\n\n raise ImproperlyConfiguredException(\"Worker not running\")\n\n if listeners := self.listeners.get(event_id):\n for listener in listeners:\n self._queue.put_nowait((listener.fn, args, kwargs))\n return\n raise ImproperlyConfiguredException(f\"no event listeners are registered for event ID: {event_id}\")\n", "path": "starlite/events/emitter.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom abc import ABC, abstractmethod\nfrom asyncio import CancelledError, Queue, Task, create_task\nfrom collections import defaultdict\nfrom contextlib import suppress\nfrom typing import TYPE_CHECKING, Any, DefaultDict, Sequence\n\nimport sniffio\n\nfrom starlite.exceptions import ImproperlyConfiguredException\n\n__all__ = (\"BaseEventEmitterBackend\", \"SimpleEventEmitter\")\n\n\nif TYPE_CHECKING:\n from starlite.events.listener import EventListener\n\n\nclass BaseEventEmitterBackend(ABC):\n \"\"\"Abstract class used to define event emitter backends.\"\"\"\n\n __slots__ = (\"listeners\",)\n\n listeners: DefaultDict[str, set[EventListener]]\n\n def __init__(self, listeners: Sequence[EventListener]):\n \"\"\"Create an event emitter instance.\n\n Args:\n listeners: A list of listeners.\n \"\"\"\n self.listeners = defaultdict(set)\n for listener in listeners:\n for event_id in listener.event_ids:\n self.listeners[event_id].add(listener)\n\n @abstractmethod\n def emit(self, event_id: str, *args: Any, **kwargs: Any) -> None: # pragma: no cover\n \"\"\"Emit an event to all attached listeners.\n\n Args:\n event_id: The ID of the event to emit, e.g 'my_event'.\n *args: args to pass to the listener(s).\n **kwargs: kwargs to pass to the listener(s)\n\n Returns:\n None\n \"\"\"\n raise NotImplementedError(\"not implemented\")\n\n @abstractmethod\n async def on_startup(self) -> None: # pragma: no cover\n \"\"\"Hook called on application startup, used to establish connection or perform other async operations.\n\n Returns:\n None\n \"\"\"\n raise NotImplementedError(\"not implemented\")\n\n @abstractmethod\n async def on_shutdown(self) -> None: # pragma: no cover\n \"\"\"Hook called on application shutdown, used to perform cleanup.\n\n Returns:\n None\n \"\"\"\n raise NotImplementedError(\"not 
implemented\")\n\n\nclass SimpleEventEmitter(BaseEventEmitterBackend):\n \"\"\"Event emitter the works only in the current process\"\"\"\n\n __slots__ = (\"_queue\", \"_worker_task\")\n\n _worker_task: Task | None\n\n def __init__(self, listeners: Sequence[EventListener]):\n \"\"\"Create an event emitter instance.\n\n Args:\n listeners: A list of listeners.\n \"\"\"\n super().__init__(listeners=listeners)\n self._queue: Queue | None = None\n self._worker_task = None\n\n async def _worker(self) -> None:\n \"\"\"Worker that runs in a separate task and continuously pulls events from asyncio queue.\n\n Returns:\n None\n \"\"\"\n while self._queue:\n fn, args, kwargs = await self._queue.get()\n await fn(*args, **kwargs)\n self._queue.task_done()\n\n async def on_startup(self) -> None:\n \"\"\"Hook called on application startup, used to establish connection or perform other async operations.\n\n Returns:\n None\n \"\"\"\n if sniffio.current_async_library() != \"asyncio\":\n return\n\n self._queue = Queue()\n self._worker_task = create_task(self._worker())\n\n async def on_shutdown(self) -> None:\n \"\"\"Hook called on application shutdown, used to perform cleanup.\n\n Returns:\n None\n \"\"\"\n\n if self._queue:\n await self._queue.join()\n\n if self._worker_task:\n self._worker_task.cancel()\n with suppress(CancelledError):\n await self._worker_task\n\n self._worker_task = None\n self._queue = None\n\n def emit(self, event_id: str, *args: Any, **kwargs: Any) -> None:\n \"\"\"Emit an event to all attached listeners.\n\n Args:\n event_id: The ID of the event to emit, e.g 'my_event'.\n *args: args to pass to the listener(s).\n **kwargs: kwargs to pass to the listener(s)\n\n Returns:\n None\n \"\"\"\n if not (self._worker_task and self._queue):\n if sniffio.current_async_library() != \"asyncio\":\n raise ImproperlyConfiguredException(\"{type(self).__name__} only supports 'asyncio' based event loops\")\n\n raise ImproperlyConfiguredException(\"Worker not running\")\n\n if listeners := self.listeners.get(event_id):\n for listener in listeners:\n self._queue.put_nowait((listener.fn, args, kwargs))\n return\n raise ImproperlyConfiguredException(f\"no event listeners are registered for event ID: {event_id}\")\n", "path": "starlite/events/emitter.py"}]}
1767
107
gh_patches_debug_19682
rasdani/github-patches
git_diff
dotkom__onlineweb4-325
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- All strings should be unicode I'm quite sure there are some strings still missing the u. Should have a look at this as it can cause trouble. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `apps/authentication/models.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 import datetime 4 5 from django.conf import settings 6 from django.contrib.auth.models import AbstractUser 7 from django.db import models 8 from django.utils.translation import ugettext as _ 9 10 11 # If this list is changed, remember to check that the year property on 12 # OnlineUser is still correct! 13 FIELD_OF_STUDY_CHOICES = [ 14 (0, _(u'Gjest')), 15 (1, _(u'Bachelor i Informatikk (BIT)')), 16 # master degrees take up the interval [10,30> 17 (10, _(u'Software (SW)')), 18 (11, _(u'Informasjonsforvaltning (DIF)')), 19 (12, _(u'Komplekse Datasystemer (KDS)')), 20 (13, _(u'Spillteknologi (SPT)')), 21 (14, _(u'Intelligente Systemer (IRS)')), 22 (15, _(u'Helseinformatikk (MSMEDTEK)')), 23 (30, _(u'Annen mastergrad')), 24 (80, _(u'PhD')), 25 (90, _(u'International')), 26 (100, _(u'Annet Onlinemedlem')), 27 ] 28 29 class OnlineUser(AbstractUser): 30 31 IMAGE_FOLDER = "images/profiles" 32 IMAGE_EXTENSIONS = ['.jpg', '.jpeg', '.gif', '.png'] 33 34 # Online related fields 35 field_of_study = models.SmallIntegerField(_(u"studieretning"), choices=FIELD_OF_STUDY_CHOICES, default=0) 36 started_date = models.DateField(_(u"startet studie"), default=datetime.datetime.now()) 37 compiled = models.BooleanField(_(u"kompilert"), default=False) 38 39 # Email 40 infomail = models.BooleanField(_(u"vil ha infomail"), default=True) 41 42 # Address 43 phone_number = models.CharField(_(u"telefonnummer"), max_length=20, blank=True, null=True) 44 address = models.CharField(_(u"adresse"), max_length=30, blank=True, null=True) 45 zip_code = models.CharField(_(u"postnummer"), max_length=4, blank=True, null=True) 46 47 # Other 48 allergies = models.TextField(_(u"allergier"), blank=True, null=True) 49 mark_rules = models.BooleanField(_(u"godtatt prikkeregler"), default=False) 50 rfid = models.CharField(_(u"RFID"), max_length=50, blank=True, null=True) 51 nickname = models.CharField(_(u"nickname"), max_length=50, blank=True, null=True) 52 website = models.CharField(_(u"hjemmeside"), max_length=50, blank=True, null=True) 53 54 image = models.ImageField(_(u"bilde"), max_length=200, upload_to=IMAGE_FOLDER, blank=True, null=True, 55 default=settings.DEFAULT_PROFILE_PICTURE_URL) 56 57 # NTNU credentials 58 ntnu_username = models.CharField(_(u"NTNU-brukernavn"), max_length=10, blank=True, null=True) 59 60 # TODO profile pictures 61 # TODO checkbox for forwarding of @online.ntnu.no mail 62 63 @property 64 def is_member(self): 65 """ 66 Returns true if the User object is associated with Online. 67 """ 68 if AllowedUsername.objects.filter(username=self.ntnu_username).filter(expiration_date__gte=datetime.datetime.now()).count() > 0: 69 return True 70 return False 71 72 def get_full_name(self): 73 """ 74 Returns the first_name plus the last_name, with a space in between. 
75 """ 76 full_name = u'%s %s' % (self.first_name, self.last_name) 77 return full_name.strip() 78 79 def get_email(self): 80 return self.get_emails().filter(primary = True)[0] 81 82 def get_emails(self): 83 return Email.objects.all().filter(user = self) 84 85 @property 86 def year(self): 87 today = datetime.datetime.now().date() 88 started = self.started_date 89 90 # We say that a year is 360 days incase we are a bit slower to 91 # add users one year. 92 year = ((today - started).days / 360) + 1 93 94 if self.field_of_study == 0 or self.field_of_study == 100: # others 95 return 0 96 # dont return a bachelor student as 4th or 5th grade 97 elif self.field_of_study == 1: # bachelor 98 if year > 3: 99 return 3 100 return year 101 elif 9 < self.field_of_study < 30: # 10-29 is considered master 102 if year >= 2: 103 return 5 104 return 4 105 elif self.field_of_study == 80: # phd 106 return year + 5 107 elif self.field_of_study == 90: # international 108 if year == 1: 109 return 1 110 return 4 111 112 def __unicode__(self): 113 return self.username 114 115 class Meta: 116 verbose_name = _(u"brukerprofil") 117 verbose_name_plural = _(u"brukerprofiler") 118 119 120 class Email(models.Model): 121 user = models.ForeignKey(OnlineUser, related_name="email_user") 122 email = models.EmailField(_(u"epostadresse"), unique=True) 123 primary = models.BooleanField(_(u"aktiv"), default=False) 124 verified = models.BooleanField(_(u"verifisert"), default=False) 125 126 def __unicode__(self): 127 return self.email 128 129 class Meta: 130 verbose_name = _(u"epostadresse") 131 verbose_name_plural = _(u"epostadresser") 132 133 134 class RegisterToken(models.Model): 135 user = models.ForeignKey(OnlineUser, related_name="register_user") 136 email = models.EmailField(_("epost"), max_length=254) 137 token = models.CharField(_("token"), max_length=32) 138 created = models.DateTimeField(_("opprettet dato"), editable=False, auto_now_add=True, default=datetime.datetime.now()) 139 140 @property 141 def is_valid(self): 142 valid_period = datetime.timedelta(days=1) 143 now = datetime.datetime.now() 144 return now < self.created + valid_period 145 146 147 class AllowedUsername(models.Model): 148 """ 149 Holds usernames that are considered valid members of Online and the time they expire. 150 """ 151 username = models.CharField(_(u"brukernavn"), max_length=10) 152 registered = models.DateField(_(u"registrert")) 153 note = models.CharField(_(u"notat"), max_length=100) 154 description = models.TextField(_(u"beskrivelse"), blank=True, null=True) 155 expiration_date = models.DateField(_(u"utløpsdato")) 156 157 @property 158 def is_active(self): 159 return datetime.datetime.now() < self.expiration_date 160 161 def __unicode__(self): 162 return self.username 163 164 class Meta: 165 verbose_name = _("tillatt brukernavn") 166 verbose_name_plural = _("tillatte brukernavn") 167 ordering = ("username",) 168 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/apps/authentication/models.py b/apps/authentication/models.py --- a/apps/authentication/models.py +++ b/apps/authentication/models.py @@ -133,9 +133,9 @@ class RegisterToken(models.Model): user = models.ForeignKey(OnlineUser, related_name="register_user") - email = models.EmailField(_("epost"), max_length=254) - token = models.CharField(_("token"), max_length=32) - created = models.DateTimeField(_("opprettet dato"), editable=False, auto_now_add=True, default=datetime.datetime.now()) + email = models.EmailField(_(u"epost"), max_length=254) + token = models.CharField(_(u"token"), max_length=32) + created = models.DateTimeField(_(u"opprettet dato"), editable=False, auto_now_add=True, default=datetime.datetime.now()) @property def is_valid(self): @@ -162,6 +162,6 @@ return self.username class Meta: - verbose_name = _("tillatt brukernavn") - verbose_name_plural = _("tillatte brukernavn") - ordering = ("username",) + verbose_name = _(u"tillatt brukernavn") + verbose_name_plural = _(u"tillatte brukernavn") + ordering = (u"username",)
{"golden_diff": "diff --git a/apps/authentication/models.py b/apps/authentication/models.py\n--- a/apps/authentication/models.py\n+++ b/apps/authentication/models.py\n@@ -133,9 +133,9 @@\n \n class RegisterToken(models.Model):\n user = models.ForeignKey(OnlineUser, related_name=\"register_user\")\n- email = models.EmailField(_(\"epost\"), max_length=254)\n- token = models.CharField(_(\"token\"), max_length=32)\n- created = models.DateTimeField(_(\"opprettet dato\"), editable=False, auto_now_add=True, default=datetime.datetime.now())\n+ email = models.EmailField(_(u\"epost\"), max_length=254)\n+ token = models.CharField(_(u\"token\"), max_length=32)\n+ created = models.DateTimeField(_(u\"opprettet dato\"), editable=False, auto_now_add=True, default=datetime.datetime.now())\n \n @property\n def is_valid(self):\n@@ -162,6 +162,6 @@\n return self.username\n \n class Meta:\n- verbose_name = _(\"tillatt brukernavn\")\n- verbose_name_plural = _(\"tillatte brukernavn\")\n- ordering = (\"username\",)\n+ verbose_name = _(u\"tillatt brukernavn\")\n+ verbose_name_plural = _(u\"tillatte brukernavn\")\n+ ordering = (u\"username\",)\n", "issue": "All strings should be unicode\nI'm quite sure there are some strings still missing the u. Should have a look at this as it can cause trouble. \n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport datetime\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import AbstractUser\nfrom django.db import models\nfrom django.utils.translation import ugettext as _\n\n\n# If this list is changed, remember to check that the year property on\n# OnlineUser is still correct!\nFIELD_OF_STUDY_CHOICES = [\n (0, _(u'Gjest')),\n (1, _(u'Bachelor i Informatikk (BIT)')),\n # master degrees take up the interval [10,30>\n (10, _(u'Software (SW)')),\n (11, _(u'Informasjonsforvaltning (DIF)')),\n (12, _(u'Komplekse Datasystemer (KDS)')),\n (13, _(u'Spillteknologi (SPT)')),\n (14, _(u'Intelligente Systemer (IRS)')),\n (15, _(u'Helseinformatikk (MSMEDTEK)')),\n (30, _(u'Annen mastergrad')),\n (80, _(u'PhD')),\n (90, _(u'International')),\n (100, _(u'Annet Onlinemedlem')),\n]\n\nclass OnlineUser(AbstractUser):\n\n IMAGE_FOLDER = \"images/profiles\"\n IMAGE_EXTENSIONS = ['.jpg', '.jpeg', '.gif', '.png']\n \n # Online related fields\n field_of_study = models.SmallIntegerField(_(u\"studieretning\"), choices=FIELD_OF_STUDY_CHOICES, default=0)\n started_date = models.DateField(_(u\"startet studie\"), default=datetime.datetime.now())\n compiled = models.BooleanField(_(u\"kompilert\"), default=False)\n\n # Email\n infomail = models.BooleanField(_(u\"vil ha infomail\"), default=True)\n\n # Address\n phone_number = models.CharField(_(u\"telefonnummer\"), max_length=20, blank=True, null=True)\n address = models.CharField(_(u\"adresse\"), max_length=30, blank=True, null=True)\n zip_code = models.CharField(_(u\"postnummer\"), max_length=4, blank=True, null=True)\n\n # Other\n allergies = models.TextField(_(u\"allergier\"), blank=True, null=True)\n mark_rules = models.BooleanField(_(u\"godtatt prikkeregler\"), default=False)\n rfid = models.CharField(_(u\"RFID\"), max_length=50, blank=True, null=True)\n nickname = models.CharField(_(u\"nickname\"), max_length=50, blank=True, null=True)\n website = models.CharField(_(u\"hjemmeside\"), max_length=50, blank=True, null=True)\n\n image = models.ImageField(_(u\"bilde\"), max_length=200, upload_to=IMAGE_FOLDER, blank=True, null=True,\n default=settings.DEFAULT_PROFILE_PICTURE_URL)\n\n # NTNU credentials\n ntnu_username = 
models.CharField(_(u\"NTNU-brukernavn\"), max_length=10, blank=True, null=True)\n\n # TODO profile pictures\n # TODO checkbox for forwarding of @online.ntnu.no mail\n \n @property\n def is_member(self):\n \"\"\"\n Returns true if the User object is associated with Online.\n \"\"\"\n if AllowedUsername.objects.filter(username=self.ntnu_username).filter(expiration_date__gte=datetime.datetime.now()).count() > 0:\n return True\n return False\n\n def get_full_name(self):\n \"\"\"\n Returns the first_name plus the last_name, with a space in between.\n \"\"\"\n full_name = u'%s %s' % (self.first_name, self.last_name)\n return full_name.strip()\n\n def get_email(self):\n return self.get_emails().filter(primary = True)[0]\n\n def get_emails(self):\n return Email.objects.all().filter(user = self)\n\n @property\n def year(self):\n today = datetime.datetime.now().date()\n started = self.started_date\n\n # We say that a year is 360 days incase we are a bit slower to\n # add users one year.\n year = ((today - started).days / 360) + 1\n\n if self.field_of_study == 0 or self.field_of_study == 100: # others\n return 0\n # dont return a bachelor student as 4th or 5th grade\n elif self.field_of_study == 1: # bachelor\n if year > 3:\n return 3\n return year\n elif 9 < self.field_of_study < 30: # 10-29 is considered master\n if year >= 2:\n return 5\n return 4\n elif self.field_of_study == 80: # phd\n return year + 5\n elif self.field_of_study == 90: # international\n if year == 1:\n return 1\n return 4\n\n def __unicode__(self):\n return self.username\n\n class Meta:\n verbose_name = _(u\"brukerprofil\")\n verbose_name_plural = _(u\"brukerprofiler\")\n\n\nclass Email(models.Model):\n user = models.ForeignKey(OnlineUser, related_name=\"email_user\")\n email = models.EmailField(_(u\"epostadresse\"), unique=True)\n primary = models.BooleanField(_(u\"aktiv\"), default=False)\n verified = models.BooleanField(_(u\"verifisert\"), default=False)\n\n def __unicode__(self):\n return self.email\n\n class Meta:\n verbose_name = _(u\"epostadresse\")\n verbose_name_plural = _(u\"epostadresser\")\n\n\nclass RegisterToken(models.Model):\n user = models.ForeignKey(OnlineUser, related_name=\"register_user\")\n email = models.EmailField(_(\"epost\"), max_length=254)\n token = models.CharField(_(\"token\"), max_length=32)\n created = models.DateTimeField(_(\"opprettet dato\"), editable=False, auto_now_add=True, default=datetime.datetime.now())\n\n @property\n def is_valid(self):\n valid_period = datetime.timedelta(days=1)\n now = datetime.datetime.now()\n return now < self.created + valid_period \n\n\nclass AllowedUsername(models.Model):\n \"\"\"\n Holds usernames that are considered valid members of Online and the time they expire.\n \"\"\"\n username = models.CharField(_(u\"brukernavn\"), max_length=10)\n registered = models.DateField(_(u\"registrert\"))\n note = models.CharField(_(u\"notat\"), max_length=100)\n description = models.TextField(_(u\"beskrivelse\"), blank=True, null=True)\n expiration_date = models.DateField(_(u\"utl\u00f8psdato\"))\n\n @property\n def is_active(self):\n return datetime.datetime.now() < self.expiration_date\n\n def __unicode__(self):\n return self.username\n\n class Meta:\n verbose_name = _(\"tillatt brukernavn\")\n verbose_name_plural = _(\"tillatte brukernavn\")\n ordering = (\"username\",)\n", "path": "apps/authentication/models.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport datetime\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import 
AbstractUser\nfrom django.db import models\nfrom django.utils.translation import ugettext as _\n\n\n# If this list is changed, remember to check that the year property on\n# OnlineUser is still correct!\nFIELD_OF_STUDY_CHOICES = [\n (0, _(u'Gjest')),\n (1, _(u'Bachelor i Informatikk (BIT)')),\n # master degrees take up the interval [10,30>\n (10, _(u'Software (SW)')),\n (11, _(u'Informasjonsforvaltning (DIF)')),\n (12, _(u'Komplekse Datasystemer (KDS)')),\n (13, _(u'Spillteknologi (SPT)')),\n (14, _(u'Intelligente Systemer (IRS)')),\n (15, _(u'Helseinformatikk (MSMEDTEK)')),\n (30, _(u'Annen mastergrad')),\n (80, _(u'PhD')),\n (90, _(u'International')),\n (100, _(u'Annet Onlinemedlem')),\n]\n\nclass OnlineUser(AbstractUser):\n\n IMAGE_FOLDER = \"images/profiles\"\n IMAGE_EXTENSIONS = ['.jpg', '.jpeg', '.gif', '.png']\n \n # Online related fields\n field_of_study = models.SmallIntegerField(_(u\"studieretning\"), choices=FIELD_OF_STUDY_CHOICES, default=0)\n started_date = models.DateField(_(u\"startet studie\"), default=datetime.datetime.now())\n compiled = models.BooleanField(_(u\"kompilert\"), default=False)\n\n # Email\n infomail = models.BooleanField(_(u\"vil ha infomail\"), default=True)\n\n # Address\n phone_number = models.CharField(_(u\"telefonnummer\"), max_length=20, blank=True, null=True)\n address = models.CharField(_(u\"adresse\"), max_length=30, blank=True, null=True)\n zip_code = models.CharField(_(u\"postnummer\"), max_length=4, blank=True, null=True)\n\n # Other\n allergies = models.TextField(_(u\"allergier\"), blank=True, null=True)\n mark_rules = models.BooleanField(_(u\"godtatt prikkeregler\"), default=False)\n rfid = models.CharField(_(u\"RFID\"), max_length=50, blank=True, null=True)\n nickname = models.CharField(_(u\"nickname\"), max_length=50, blank=True, null=True)\n website = models.CharField(_(u\"hjemmeside\"), max_length=50, blank=True, null=True)\n\n image = models.ImageField(_(u\"bilde\"), max_length=200, upload_to=IMAGE_FOLDER, blank=True, null=True,\n default=settings.DEFAULT_PROFILE_PICTURE_URL)\n\n # NTNU credentials\n ntnu_username = models.CharField(_(u\"NTNU-brukernavn\"), max_length=10, blank=True, null=True)\n\n # TODO profile pictures\n # TODO checkbox for forwarding of @online.ntnu.no mail\n \n @property\n def is_member(self):\n \"\"\"\n Returns true if the User object is associated with Online.\n \"\"\"\n if AllowedUsername.objects.filter(username=self.ntnu_username).filter(expiration_date__gte=datetime.datetime.now()).count() > 0:\n return True\n return False\n\n def get_full_name(self):\n \"\"\"\n Returns the first_name plus the last_name, with a space in between.\n \"\"\"\n full_name = u'%s %s' % (self.first_name, self.last_name)\n return full_name.strip()\n\n def get_email(self):\n return self.get_emails().filter(primary = True)[0]\n\n def get_emails(self):\n return Email.objects.all().filter(user = self)\n\n @property\n def year(self):\n today = datetime.datetime.now().date()\n started = self.started_date\n\n # We say that a year is 360 days incase we are a bit slower to\n # add users one year.\n year = ((today - started).days / 360) + 1\n\n if self.field_of_study == 0 or self.field_of_study == 100: # others\n return 0\n # dont return a bachelor student as 4th or 5th grade\n elif self.field_of_study == 1: # bachelor\n if year > 3:\n return 3\n return year\n elif 9 < self.field_of_study < 30: # 10-29 is considered master\n if year >= 2:\n return 5\n return 4\n elif self.field_of_study == 80: # phd\n return year + 5\n elif self.field_of_study == 
90: # international\n if year == 1:\n return 1\n return 4\n\n def __unicode__(self):\n return self.username\n\n class Meta:\n verbose_name = _(u\"brukerprofil\")\n verbose_name_plural = _(u\"brukerprofiler\")\n\n\nclass Email(models.Model):\n user = models.ForeignKey(OnlineUser, related_name=\"email_user\")\n email = models.EmailField(_(u\"epostadresse\"), unique=True)\n primary = models.BooleanField(_(u\"aktiv\"), default=False)\n verified = models.BooleanField(_(u\"verifisert\"), default=False)\n\n def __unicode__(self):\n return self.email\n\n class Meta:\n verbose_name = _(u\"epostadresse\")\n verbose_name_plural = _(u\"epostadresser\")\n\n\nclass RegisterToken(models.Model):\n user = models.ForeignKey(OnlineUser, related_name=\"register_user\")\n email = models.EmailField(_(u\"epost\"), max_length=254)\n token = models.CharField(_(u\"token\"), max_length=32)\n created = models.DateTimeField(_(u\"opprettet dato\"), editable=False, auto_now_add=True, default=datetime.datetime.now())\n\n @property\n def is_valid(self):\n valid_period = datetime.timedelta(days=1)\n now = datetime.datetime.now()\n return now < self.created + valid_period \n\n\nclass AllowedUsername(models.Model):\n \"\"\"\n Holds usernames that are considered valid members of Online and the time they expire.\n \"\"\"\n username = models.CharField(_(u\"brukernavn\"), max_length=10)\n registered = models.DateField(_(u\"registrert\"))\n note = models.CharField(_(u\"notat\"), max_length=100)\n description = models.TextField(_(u\"beskrivelse\"), blank=True, null=True)\n expiration_date = models.DateField(_(u\"utl\u00f8psdato\"))\n\n @property\n def is_active(self):\n return datetime.datetime.now() < self.expiration_date\n\n def __unicode__(self):\n return self.username\n\n class Meta:\n verbose_name = _(u\"tillatt brukernavn\")\n verbose_name_plural = _(u\"tillatte brukernavn\")\n ordering = (u\"username\",)\n", "path": "apps/authentication/models.py"}]}
2254
301
gh_patches_debug_5646
rasdani/github-patches
git_diff
napari__napari-5726
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- 3D camera rotation is broken ## 🐛 Bug It seems we introduced a bug in #5701, according to `git bisect`. The 3D camera rotation is "absolute" instead of relative (i.e: the point you press on the canvas determines the starting point of that rotation). ## To Reproduce ```py import napari import numpy as np v = napari.Viewer(ndisplay=3) pl = v.add_points(np.random.rand(10, 3) * 100) ``` Then move the camera a few times. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `napari/_vispy/camera.py` Content: ``` 1 from typing import Type 2 3 import numpy as np 4 from vispy.scene import ArcballCamera, BaseCamera, PanZoomCamera 5 6 from napari._vispy.utils.quaternion import quaternion2euler 7 8 9 class VispyCamera: 10 """Vipsy camera for both 2D and 3D rendering. 11 12 Parameters 13 ---------- 14 view : vispy.scene.widgets.viewbox.ViewBox 15 Viewbox for current scene. 16 camera : napari.components.Camera 17 napari camera model. 18 dims : napari.components.Dims 19 napari dims model. 20 """ 21 22 def __init__(self, view, camera, dims) -> None: 23 self._view = view 24 self._camera = camera 25 self._dims = dims 26 27 # Create 2D camera 28 self._2D_camera = MouseToggledPanZoomCamera(aspect=1) 29 # flip y-axis to have correct alignment 30 self._2D_camera.flip = (0, 1, 0) 31 self._2D_camera.viewbox_key_event = viewbox_key_event 32 33 # Create 3D camera 34 self._3D_camera = MouseToggledArcballCamera(fov=0) 35 self._3D_camera.viewbox_key_event = viewbox_key_event 36 37 # Set 2D camera by default 38 self._view.camera = self._2D_camera 39 40 self._dims.events.ndisplay.connect( 41 self._on_ndisplay_change, position='first' 42 ) 43 44 self._camera.events.center.connect(self._on_center_change) 45 self._camera.events.zoom.connect(self._on_zoom_change) 46 self._camera.events.angles.connect(self._on_angles_change) 47 self._camera.events.perspective.connect(self._on_perspective_change) 48 self._camera.events.mouse_pan.connect(self._on_mouse_toggles_change) 49 self._camera.events.mouse_zoom.connect(self._on_mouse_toggles_change) 50 51 self._on_ndisplay_change() 52 53 @property 54 def angles(self): 55 """3-tuple: Euler angles of camera in 3D viewing, in degrees. 56 Note that angles might be different than the ones that might have generated the quaternion. 
57 """ 58 59 if self._view.camera == self._3D_camera: 60 # Do conversion from quaternion representation to euler angles 61 angles = quaternion2euler( 62 self._view.camera._quaternion, degrees=True 63 ) 64 else: 65 angles = (0, 0, 90) 66 return angles 67 68 @angles.setter 69 def angles(self, angles): 70 if self.angles == tuple(angles): 71 return 72 73 # Only update angles if current camera is 3D camera 74 if self._view.camera == self._3D_camera: 75 # Create and set quaternion 76 quat = self._view.camera._quaternion.create_from_euler_angles( 77 *angles, 78 degrees=True, 79 ) 80 self._view.camera._quaternion = quat 81 self._view.camera.view_changed() 82 83 @property 84 def center(self): 85 """tuple: Center point of camera view for 2D or 3D viewing.""" 86 if self._view.camera == self._3D_camera: 87 center = tuple(self._view.camera.center) 88 else: 89 # in 2D, we arbitrarily choose 0.0 as the center in z 90 center = (*self._view.camera.center[:2], 0.0) 91 # switch from VisPy xyz ordering to NumPy prc ordering 92 return center[::-1] 93 94 @center.setter 95 def center(self, center): 96 if self.center == tuple(center): 97 return 98 self._view.camera.center = center[::-1] 99 self._view.camera.view_changed() 100 101 @property 102 def zoom(self): 103 """float: Scale from canvas pixels to world pixels.""" 104 canvas_size = np.array(self._view.canvas.size) 105 if self._view.camera == self._3D_camera: 106 # For fov = 0.0 normalize scale factor by canvas size to get scale factor. 107 # Note that the scaling is stored in the `_projection` property of the 108 # camera which is updated in vispy here 109 # https://github.com/vispy/vispy/blob/v0.6.5/vispy/scene/cameras/perspective.py#L301-L313 110 scale = self._view.camera.scale_factor 111 else: 112 scale = np.array( 113 [self._view.camera.rect.width, self._view.camera.rect.height] 114 ) 115 scale[np.isclose(scale, 0)] = 1 # fix for #2875 116 zoom = np.min(canvas_size / scale) 117 return zoom 118 119 @zoom.setter 120 def zoom(self, zoom): 121 if self.zoom == zoom: 122 return 123 scale = np.array(self._view.canvas.size) / zoom 124 if self._view.camera == self._3D_camera: 125 self._view.camera.scale_factor = np.min(scale) 126 else: 127 # Set view rectangle, as left, right, width, height 128 corner = np.subtract(self._view.camera.center[:2], scale / 2) 129 self._view.camera.rect = tuple(corner) + tuple(scale) 130 131 @property 132 def perspective(self): 133 """Field of view of camera (only visible in 3D mode).""" 134 return self._3D_camera.fov 135 136 @perspective.setter 137 def perspective(self, perspective): 138 if self.perspective == perspective: 139 return 140 self._3D_camera.fov = perspective 141 self._view.camera.view_changed() 142 143 @property 144 def mouse_zoom(self) -> bool: 145 return self._view.camera.mouse_zoom 146 147 @mouse_zoom.setter 148 def mouse_zoom(self, mouse_zoom: bool): 149 self._view.camera.mouse_zoom = mouse_zoom 150 151 @property 152 def mouse_pan(self) -> bool: 153 return self._view.camera.mouse_pan 154 155 @mouse_pan.setter 156 def mouse_pan(self, mouse_pan: bool): 157 self._view.camera.mouse_pan = mouse_pan 158 159 def _on_ndisplay_change(self): 160 if self._dims.ndisplay == 3: 161 self._view.camera = self._3D_camera 162 else: 163 self._view.camera = self._2D_camera 164 165 self._on_mouse_toggles_change() 166 self._on_center_change() 167 self._on_zoom_change() 168 self._on_angles_change() 169 170 def _on_mouse_toggles_change(self): 171 self.mouse_pan = self._camera.mouse_pan 172 self.mouse_zoom = self._camera.mouse_zoom 173 174 def 
_on_center_change(self): 175 self.center = self._camera.center[-self._dims.ndisplay :] 176 177 def _on_zoom_change(self): 178 self.zoom = self._camera.zoom 179 180 def _on_perspective_change(self): 181 self.perspective = self._camera.perspective 182 183 def _on_angles_change(self): 184 self.angles = self._camera.angles 185 186 def on_draw(self, _event): 187 """Called whenever the canvas is drawn. 188 189 Update camera model angles, center, and zoom. 190 """ 191 with self._camera.events.angles.blocker(self._on_angles_change): 192 self._camera.angles = self.angles 193 with self._camera.events.center.blocker(self._on_center_change): 194 self._camera.center = self.center 195 with self._camera.events.zoom.blocker(self._on_zoom_change): 196 self._camera.zoom = self.zoom 197 with self._camera.events.perspective.blocker( 198 self._on_perspective_change 199 ): 200 self._camera.perspective = self.perspective 201 202 203 def viewbox_key_event(event): 204 """ViewBox key event handler. 205 206 Parameters 207 ---------- 208 event : vispy.util.event.Event 209 The vispy event that triggered this method. 210 """ 211 return 212 213 214 def add_mouse_pan_zoom_toggles( 215 vispy_camera_cls: Type[BaseCamera], 216 ) -> Type[BaseCamera]: 217 """Add separate mouse pan and mouse zoom toggles to VisPy. 218 219 By default, VisPy uses an ``interactive`` toggle that turns *both* 220 panning and zooming on and off. This decorator adds separate toggles, 221 ``mouse_pan`` and ``mouse_zoom``, to enable controlling them 222 separately. 223 224 Parameters 225 ---------- 226 vispy_camera_cls : Type[vispy.scene.cameras.BaseCamera] 227 A VisPy camera class to decorate. 228 229 Returns 230 ------- 231 A decorated VisPy camera class. 232 """ 233 234 class _vispy_camera_cls(vispy_camera_cls): 235 def __init__(self, **kwargs): 236 super().__init__(**kwargs) 237 self.mouse_pan = True 238 self.mouse_zoom = True 239 240 def viewbox_mouse_event(self, event): 241 if ( 242 self.mouse_zoom 243 and event.type == 'mouse_wheel' 244 or self.mouse_pan 245 and event.type in ('mouse_move', 'mouse_press') 246 ): 247 super().viewbox_mouse_event(event) 248 else: 249 event.handled = False 250 251 return _vispy_camera_cls 252 253 254 MouseToggledPanZoomCamera = add_mouse_pan_zoom_toggles(PanZoomCamera) 255 MouseToggledArcballCamera = add_mouse_pan_zoom_toggles(ArcballCamera) 256 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/napari/_vispy/camera.py b/napari/_vispy/camera.py --- a/napari/_vispy/camera.py +++ b/napari/_vispy/camera.py @@ -242,7 +242,8 @@ self.mouse_zoom and event.type == 'mouse_wheel' or self.mouse_pan - and event.type in ('mouse_move', 'mouse_press') + and event.type + in ('mouse_move', 'mouse_press', 'mouse_release') ): super().viewbox_mouse_event(event) else:
{"golden_diff": "diff --git a/napari/_vispy/camera.py b/napari/_vispy/camera.py\n--- a/napari/_vispy/camera.py\n+++ b/napari/_vispy/camera.py\n@@ -242,7 +242,8 @@\n self.mouse_zoom\n and event.type == 'mouse_wheel'\n or self.mouse_pan\n- and event.type in ('mouse_move', 'mouse_press')\n+ and event.type\n+ in ('mouse_move', 'mouse_press', 'mouse_release')\n ):\n super().viewbox_mouse_event(event)\n else:\n", "issue": "3D camera rotation is broken\n## \ud83d\udc1b Bug\r\n\r\nIt seems we introduced a bug in #5701, according to `git bisect`. The 3D camera rotation is \"absolute\" instead of relative (i.e: the point you press on the canvas determines the starting point of that rotation).\r\n\r\n## To Reproduce\r\n\r\n```py\r\nimport napari\r\nimport numpy as np\r\nv = napari.Viewer(ndisplay=3)\r\npl = v.add_points(np.random.rand(10, 3) * 100)\r\n```\r\n\r\nThen move the camera a few times.\n", "before_files": [{"content": "from typing import Type\n\nimport numpy as np\nfrom vispy.scene import ArcballCamera, BaseCamera, PanZoomCamera\n\nfrom napari._vispy.utils.quaternion import quaternion2euler\n\n\nclass VispyCamera:\n \"\"\"Vipsy camera for both 2D and 3D rendering.\n\n Parameters\n ----------\n view : vispy.scene.widgets.viewbox.ViewBox\n Viewbox for current scene.\n camera : napari.components.Camera\n napari camera model.\n dims : napari.components.Dims\n napari dims model.\n \"\"\"\n\n def __init__(self, view, camera, dims) -> None:\n self._view = view\n self._camera = camera\n self._dims = dims\n\n # Create 2D camera\n self._2D_camera = MouseToggledPanZoomCamera(aspect=1)\n # flip y-axis to have correct alignment\n self._2D_camera.flip = (0, 1, 0)\n self._2D_camera.viewbox_key_event = viewbox_key_event\n\n # Create 3D camera\n self._3D_camera = MouseToggledArcballCamera(fov=0)\n self._3D_camera.viewbox_key_event = viewbox_key_event\n\n # Set 2D camera by default\n self._view.camera = self._2D_camera\n\n self._dims.events.ndisplay.connect(\n self._on_ndisplay_change, position='first'\n )\n\n self._camera.events.center.connect(self._on_center_change)\n self._camera.events.zoom.connect(self._on_zoom_change)\n self._camera.events.angles.connect(self._on_angles_change)\n self._camera.events.perspective.connect(self._on_perspective_change)\n self._camera.events.mouse_pan.connect(self._on_mouse_toggles_change)\n self._camera.events.mouse_zoom.connect(self._on_mouse_toggles_change)\n\n self._on_ndisplay_change()\n\n @property\n def angles(self):\n \"\"\"3-tuple: Euler angles of camera in 3D viewing, in degrees.\n Note that angles might be different than the ones that might have generated the quaternion.\n \"\"\"\n\n if self._view.camera == self._3D_camera:\n # Do conversion from quaternion representation to euler angles\n angles = quaternion2euler(\n self._view.camera._quaternion, degrees=True\n )\n else:\n angles = (0, 0, 90)\n return angles\n\n @angles.setter\n def angles(self, angles):\n if self.angles == tuple(angles):\n return\n\n # Only update angles if current camera is 3D camera\n if self._view.camera == self._3D_camera:\n # Create and set quaternion\n quat = self._view.camera._quaternion.create_from_euler_angles(\n *angles,\n degrees=True,\n )\n self._view.camera._quaternion = quat\n self._view.camera.view_changed()\n\n @property\n def center(self):\n \"\"\"tuple: Center point of camera view for 2D or 3D viewing.\"\"\"\n if self._view.camera == self._3D_camera:\n center = tuple(self._view.camera.center)\n else:\n # in 2D, we arbitrarily choose 0.0 as the center in z\n center = 
(*self._view.camera.center[:2], 0.0)\n # switch from VisPy xyz ordering to NumPy prc ordering\n return center[::-1]\n\n @center.setter\n def center(self, center):\n if self.center == tuple(center):\n return\n self._view.camera.center = center[::-1]\n self._view.camera.view_changed()\n\n @property\n def zoom(self):\n \"\"\"float: Scale from canvas pixels to world pixels.\"\"\"\n canvas_size = np.array(self._view.canvas.size)\n if self._view.camera == self._3D_camera:\n # For fov = 0.0 normalize scale factor by canvas size to get scale factor.\n # Note that the scaling is stored in the `_projection` property of the\n # camera which is updated in vispy here\n # https://github.com/vispy/vispy/blob/v0.6.5/vispy/scene/cameras/perspective.py#L301-L313\n scale = self._view.camera.scale_factor\n else:\n scale = np.array(\n [self._view.camera.rect.width, self._view.camera.rect.height]\n )\n scale[np.isclose(scale, 0)] = 1 # fix for #2875\n zoom = np.min(canvas_size / scale)\n return zoom\n\n @zoom.setter\n def zoom(self, zoom):\n if self.zoom == zoom:\n return\n scale = np.array(self._view.canvas.size) / zoom\n if self._view.camera == self._3D_camera:\n self._view.camera.scale_factor = np.min(scale)\n else:\n # Set view rectangle, as left, right, width, height\n corner = np.subtract(self._view.camera.center[:2], scale / 2)\n self._view.camera.rect = tuple(corner) + tuple(scale)\n\n @property\n def perspective(self):\n \"\"\"Field of view of camera (only visible in 3D mode).\"\"\"\n return self._3D_camera.fov\n\n @perspective.setter\n def perspective(self, perspective):\n if self.perspective == perspective:\n return\n self._3D_camera.fov = perspective\n self._view.camera.view_changed()\n\n @property\n def mouse_zoom(self) -> bool:\n return self._view.camera.mouse_zoom\n\n @mouse_zoom.setter\n def mouse_zoom(self, mouse_zoom: bool):\n self._view.camera.mouse_zoom = mouse_zoom\n\n @property\n def mouse_pan(self) -> bool:\n return self._view.camera.mouse_pan\n\n @mouse_pan.setter\n def mouse_pan(self, mouse_pan: bool):\n self._view.camera.mouse_pan = mouse_pan\n\n def _on_ndisplay_change(self):\n if self._dims.ndisplay == 3:\n self._view.camera = self._3D_camera\n else:\n self._view.camera = self._2D_camera\n\n self._on_mouse_toggles_change()\n self._on_center_change()\n self._on_zoom_change()\n self._on_angles_change()\n\n def _on_mouse_toggles_change(self):\n self.mouse_pan = self._camera.mouse_pan\n self.mouse_zoom = self._camera.mouse_zoom\n\n def _on_center_change(self):\n self.center = self._camera.center[-self._dims.ndisplay :]\n\n def _on_zoom_change(self):\n self.zoom = self._camera.zoom\n\n def _on_perspective_change(self):\n self.perspective = self._camera.perspective\n\n def _on_angles_change(self):\n self.angles = self._camera.angles\n\n def on_draw(self, _event):\n \"\"\"Called whenever the canvas is drawn.\n\n Update camera model angles, center, and zoom.\n \"\"\"\n with self._camera.events.angles.blocker(self._on_angles_change):\n self._camera.angles = self.angles\n with self._camera.events.center.blocker(self._on_center_change):\n self._camera.center = self.center\n with self._camera.events.zoom.blocker(self._on_zoom_change):\n self._camera.zoom = self.zoom\n with self._camera.events.perspective.blocker(\n self._on_perspective_change\n ):\n self._camera.perspective = self.perspective\n\n\ndef viewbox_key_event(event):\n \"\"\"ViewBox key event handler.\n\n Parameters\n ----------\n event : vispy.util.event.Event\n The vispy event that triggered this method.\n \"\"\"\n return\n\n\ndef 
add_mouse_pan_zoom_toggles(\n vispy_camera_cls: Type[BaseCamera],\n) -> Type[BaseCamera]:\n \"\"\"Add separate mouse pan and mouse zoom toggles to VisPy.\n\n By default, VisPy uses an ``interactive`` toggle that turns *both*\n panning and zooming on and off. This decorator adds separate toggles,\n ``mouse_pan`` and ``mouse_zoom``, to enable controlling them\n separately.\n\n Parameters\n ----------\n vispy_camera_cls : Type[vispy.scene.cameras.BaseCamera]\n A VisPy camera class to decorate.\n\n Returns\n -------\n A decorated VisPy camera class.\n \"\"\"\n\n class _vispy_camera_cls(vispy_camera_cls):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.mouse_pan = True\n self.mouse_zoom = True\n\n def viewbox_mouse_event(self, event):\n if (\n self.mouse_zoom\n and event.type == 'mouse_wheel'\n or self.mouse_pan\n and event.type in ('mouse_move', 'mouse_press')\n ):\n super().viewbox_mouse_event(event)\n else:\n event.handled = False\n\n return _vispy_camera_cls\n\n\nMouseToggledPanZoomCamera = add_mouse_pan_zoom_toggles(PanZoomCamera)\nMouseToggledArcballCamera = add_mouse_pan_zoom_toggles(ArcballCamera)\n", "path": "napari/_vispy/camera.py"}], "after_files": [{"content": "from typing import Type\n\nimport numpy as np\nfrom vispy.scene import ArcballCamera, BaseCamera, PanZoomCamera\n\nfrom napari._vispy.utils.quaternion import quaternion2euler\n\n\nclass VispyCamera:\n \"\"\"Vipsy camera for both 2D and 3D rendering.\n\n Parameters\n ----------\n view : vispy.scene.widgets.viewbox.ViewBox\n Viewbox for current scene.\n camera : napari.components.Camera\n napari camera model.\n dims : napari.components.Dims\n napari dims model.\n \"\"\"\n\n def __init__(self, view, camera, dims) -> None:\n self._view = view\n self._camera = camera\n self._dims = dims\n\n # Create 2D camera\n self._2D_camera = MouseToggledPanZoomCamera(aspect=1)\n # flip y-axis to have correct alignment\n self._2D_camera.flip = (0, 1, 0)\n self._2D_camera.viewbox_key_event = viewbox_key_event\n\n # Create 3D camera\n self._3D_camera = MouseToggledArcballCamera(fov=0)\n self._3D_camera.viewbox_key_event = viewbox_key_event\n\n # Set 2D camera by default\n self._view.camera = self._2D_camera\n\n self._dims.events.ndisplay.connect(\n self._on_ndisplay_change, position='first'\n )\n\n self._camera.events.center.connect(self._on_center_change)\n self._camera.events.zoom.connect(self._on_zoom_change)\n self._camera.events.angles.connect(self._on_angles_change)\n self._camera.events.perspective.connect(self._on_perspective_change)\n self._camera.events.mouse_pan.connect(self._on_mouse_toggles_change)\n self._camera.events.mouse_zoom.connect(self._on_mouse_toggles_change)\n\n self._on_ndisplay_change()\n\n @property\n def angles(self):\n \"\"\"3-tuple: Euler angles of camera in 3D viewing, in degrees.\n Note that angles might be different than the ones that might have generated the quaternion.\n \"\"\"\n\n if self._view.camera == self._3D_camera:\n # Do conversion from quaternion representation to euler angles\n angles = quaternion2euler(\n self._view.camera._quaternion, degrees=True\n )\n else:\n angles = (0, 0, 90)\n return angles\n\n @angles.setter\n def angles(self, angles):\n if self.angles == tuple(angles):\n return\n\n # Only update angles if current camera is 3D camera\n if self._view.camera == self._3D_camera:\n # Create and set quaternion\n quat = self._view.camera._quaternion.create_from_euler_angles(\n *angles,\n degrees=True,\n )\n self._view.camera._quaternion = quat\n 
self._view.camera.view_changed()\n\n @property\n def center(self):\n \"\"\"tuple: Center point of camera view for 2D or 3D viewing.\"\"\"\n if self._view.camera == self._3D_camera:\n center = tuple(self._view.camera.center)\n else:\n # in 2D, we arbitrarily choose 0.0 as the center in z\n center = (*self._view.camera.center[:2], 0.0)\n # switch from VisPy xyz ordering to NumPy prc ordering\n return center[::-1]\n\n @center.setter\n def center(self, center):\n if self.center == tuple(center):\n return\n self._view.camera.center = center[::-1]\n self._view.camera.view_changed()\n\n @property\n def zoom(self):\n \"\"\"float: Scale from canvas pixels to world pixels.\"\"\"\n canvas_size = np.array(self._view.canvas.size)\n if self._view.camera == self._3D_camera:\n # For fov = 0.0 normalize scale factor by canvas size to get scale factor.\n # Note that the scaling is stored in the `_projection` property of the\n # camera which is updated in vispy here\n # https://github.com/vispy/vispy/blob/v0.6.5/vispy/scene/cameras/perspective.py#L301-L313\n scale = self._view.camera.scale_factor\n else:\n scale = np.array(\n [self._view.camera.rect.width, self._view.camera.rect.height]\n )\n scale[np.isclose(scale, 0)] = 1 # fix for #2875\n zoom = np.min(canvas_size / scale)\n return zoom\n\n @zoom.setter\n def zoom(self, zoom):\n if self.zoom == zoom:\n return\n scale = np.array(self._view.canvas.size) / zoom\n if self._view.camera == self._3D_camera:\n self._view.camera.scale_factor = np.min(scale)\n else:\n # Set view rectangle, as left, right, width, height\n corner = np.subtract(self._view.camera.center[:2], scale / 2)\n self._view.camera.rect = tuple(corner) + tuple(scale)\n\n @property\n def perspective(self):\n \"\"\"Field of view of camera (only visible in 3D mode).\"\"\"\n return self._3D_camera.fov\n\n @perspective.setter\n def perspective(self, perspective):\n if self.perspective == perspective:\n return\n self._3D_camera.fov = perspective\n self._view.camera.view_changed()\n\n @property\n def mouse_zoom(self) -> bool:\n return self._view.camera.mouse_zoom\n\n @mouse_zoom.setter\n def mouse_zoom(self, mouse_zoom: bool):\n self._view.camera.mouse_zoom = mouse_zoom\n\n @property\n def mouse_pan(self) -> bool:\n return self._view.camera.mouse_pan\n\n @mouse_pan.setter\n def mouse_pan(self, mouse_pan: bool):\n self._view.camera.mouse_pan = mouse_pan\n\n def _on_ndisplay_change(self):\n if self._dims.ndisplay == 3:\n self._view.camera = self._3D_camera\n else:\n self._view.camera = self._2D_camera\n\n self._on_mouse_toggles_change()\n self._on_center_change()\n self._on_zoom_change()\n self._on_angles_change()\n\n def _on_mouse_toggles_change(self):\n self.mouse_pan = self._camera.mouse_pan\n self.mouse_zoom = self._camera.mouse_zoom\n\n def _on_center_change(self):\n self.center = self._camera.center[-self._dims.ndisplay :]\n\n def _on_zoom_change(self):\n self.zoom = self._camera.zoom\n\n def _on_perspective_change(self):\n self.perspective = self._camera.perspective\n\n def _on_angles_change(self):\n self.angles = self._camera.angles\n\n def on_draw(self, _event):\n \"\"\"Called whenever the canvas is drawn.\n\n Update camera model angles, center, and zoom.\n \"\"\"\n with self._camera.events.angles.blocker(self._on_angles_change):\n self._camera.angles = self.angles\n with self._camera.events.center.blocker(self._on_center_change):\n self._camera.center = self.center\n with self._camera.events.zoom.blocker(self._on_zoom_change):\n self._camera.zoom = self.zoom\n with 
self._camera.events.perspective.blocker(\n self._on_perspective_change\n ):\n self._camera.perspective = self.perspective\n\n\ndef viewbox_key_event(event):\n \"\"\"ViewBox key event handler.\n\n Parameters\n ----------\n event : vispy.util.event.Event\n The vispy event that triggered this method.\n \"\"\"\n return\n\n\ndef add_mouse_pan_zoom_toggles(\n vispy_camera_cls: Type[BaseCamera],\n) -> Type[BaseCamera]:\n \"\"\"Add separate mouse pan and mouse zoom toggles to VisPy.\n\n By default, VisPy uses an ``interactive`` toggle that turns *both*\n panning and zooming on and off. This decorator adds separate toggles,\n ``mouse_pan`` and ``mouse_zoom``, to enable controlling them\n separately.\n\n Parameters\n ----------\n vispy_camera_cls : Type[vispy.scene.cameras.BaseCamera]\n A VisPy camera class to decorate.\n\n Returns\n -------\n A decorated VisPy camera class.\n \"\"\"\n\n class _vispy_camera_cls(vispy_camera_cls):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.mouse_pan = True\n self.mouse_zoom = True\n\n def viewbox_mouse_event(self, event):\n if (\n self.mouse_zoom\n and event.type == 'mouse_wheel'\n or self.mouse_pan\n and event.type\n in ('mouse_move', 'mouse_press', 'mouse_release')\n ):\n super().viewbox_mouse_event(event)\n else:\n event.handled = False\n\n return _vispy_camera_cls\n\n\nMouseToggledPanZoomCamera = add_mouse_pan_zoom_toggles(PanZoomCamera)\nMouseToggledArcballCamera = add_mouse_pan_zoom_toggles(ArcballCamera)\n", "path": "napari/_vispy/camera.py"}]}
3034
131
gh_patches_debug_26955
rasdani/github-patches
git_diff
alltheplaces__alltheplaces-3411
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- BMO Harris Bank https://branchlocator.bmoharris.com/ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `locations/spiders/bmo_harris.py` Content: ``` 1 import html 2 import json 3 import scrapy 4 5 from locations.items import GeojsonPointItem 6 from locations.hours import OpeningHours 7 8 9 class BMOHarrisSpider(scrapy.Spider): 10 name = "bmo-harris" 11 item_attributes = { 'brand': "BMO Harris Bank" } 12 allowed_domains = ["branches.bmoharris.com"] 13 download_delay = 0.5 14 start_urls = ( 15 'https://branches.bmoharris.com/', 16 ) 17 18 def parse_store(self, response): 19 properties = { 20 'addr_full': response.xpath('//meta[@property="business:contact_data:street_address"]/@content').extract_first(), 21 'phone': response.xpath('//meta[@property="business:contact_data:phone_number"]/@content').extract_first(), 22 'city': response.xpath('//meta[@property="business:contact_data:locality"]/@content').extract_first(), 23 'state': response.xpath('//meta[@property="business:contact_data:region"]/@content').extract_first(), 24 'postcode': response.xpath('//meta[@property="business:contact_data:postal_code"]/@content').extract_first(), 25 'country': response.xpath('//meta[@property="business:contact_data:country_name"]/@content').extract_first(), 26 'ref': response.url, 27 'website': response.url, 28 'lat': response.xpath('//meta[@property="place:location:latitude"]/@content').extract_first(), 29 'lon': response.xpath('//meta[@property="place:location:longitude"]/@content').extract_first(), 30 } 31 32 yield GeojsonPointItem(**properties) 33 34 def parse(self, response): 35 # Step into hierarchy of place 36 for url in response.xpath("//div[@class='itemlist']/p/a/@href").extract(): 37 yield scrapy.Request(response.urljoin(url)) 38 39 # Look for links to stores 40 for url in response.xpath("//div[@class='itemlist']/li/span[@itemprop='streetAddress']/a/@href").extract(): 41 yield scrapy.Request(response.urljoin(url), callback=self.parse_store) 42 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/locations/spiders/bmo_harris.py b/locations/spiders/bmo_harris.py --- a/locations/spiders/bmo_harris.py +++ b/locations/spiders/bmo_harris.py @@ -7,13 +7,14 @@ class BMOHarrisSpider(scrapy.Spider): - name = "bmo-harris" - item_attributes = { 'brand': "BMO Harris Bank" } + name = "bmo_harris" + item_attributes = {'brand': "BMO Harris Bank", 'brand_wikidata': "Q4835981"} allowed_domains = ["branches.bmoharris.com"] download_delay = 0.5 start_urls = ( 'https://branches.bmoharris.com/', ) + user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36' def parse_store(self, response): properties = { @@ -33,9 +34,9 @@ def parse(self, response): # Step into hierarchy of place - for url in response.xpath("//div[@class='itemlist']/p/a/@href").extract(): + for url in response.xpath("//ul[@class='itemlist']/li/a/@href").extract(): yield scrapy.Request(response.urljoin(url)) # Look for links to stores - for url in response.xpath("//div[@class='itemlist']/li/span[@itemprop='streetAddress']/a/@href").extract(): + for url in response.xpath("//ul[@class='itemlist']/li/div/span[@itemprop='streetAddress']/a/@href").extract(): yield scrapy.Request(response.urljoin(url), callback=self.parse_store)
{"golden_diff": "diff --git a/locations/spiders/bmo_harris.py b/locations/spiders/bmo_harris.py\n--- a/locations/spiders/bmo_harris.py\n+++ b/locations/spiders/bmo_harris.py\n@@ -7,13 +7,14 @@\n \n \n class BMOHarrisSpider(scrapy.Spider):\n- name = \"bmo-harris\"\n- item_attributes = { 'brand': \"BMO Harris Bank\" }\n+ name = \"bmo_harris\"\n+ item_attributes = {'brand': \"BMO Harris Bank\", 'brand_wikidata': \"Q4835981\"}\n allowed_domains = [\"branches.bmoharris.com\"]\n download_delay = 0.5\n start_urls = (\n 'https://branches.bmoharris.com/',\n )\n+ user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36'\n \n def parse_store(self, response):\n properties = {\n@@ -33,9 +34,9 @@\n \n def parse(self, response):\n # Step into hierarchy of place\n- for url in response.xpath(\"//div[@class='itemlist']/p/a/@href\").extract():\n+ for url in response.xpath(\"//ul[@class='itemlist']/li/a/@href\").extract():\n yield scrapy.Request(response.urljoin(url))\n \n # Look for links to stores\n- for url in response.xpath(\"//div[@class='itemlist']/li/span[@itemprop='streetAddress']/a/@href\").extract():\n+ for url in response.xpath(\"//ul[@class='itemlist']/li/div/span[@itemprop='streetAddress']/a/@href\").extract():\n yield scrapy.Request(response.urljoin(url), callback=self.parse_store)\n", "issue": "BMO Harris Bank\nhttps://branchlocator.bmoharris.com/\n", "before_files": [{"content": "import html\nimport json\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nclass BMOHarrisSpider(scrapy.Spider):\n name = \"bmo-harris\"\n item_attributes = { 'brand': \"BMO Harris Bank\" }\n allowed_domains = [\"branches.bmoharris.com\"]\n download_delay = 0.5\n start_urls = (\n 'https://branches.bmoharris.com/',\n )\n\n def parse_store(self, response):\n properties = {\n 'addr_full': response.xpath('//meta[@property=\"business:contact_data:street_address\"]/@content').extract_first(),\n 'phone': response.xpath('//meta[@property=\"business:contact_data:phone_number\"]/@content').extract_first(),\n 'city': response.xpath('//meta[@property=\"business:contact_data:locality\"]/@content').extract_first(),\n 'state': response.xpath('//meta[@property=\"business:contact_data:region\"]/@content').extract_first(),\n 'postcode': response.xpath('//meta[@property=\"business:contact_data:postal_code\"]/@content').extract_first(),\n 'country': response.xpath('//meta[@property=\"business:contact_data:country_name\"]/@content').extract_first(),\n 'ref': response.url,\n 'website': response.url,\n 'lat': response.xpath('//meta[@property=\"place:location:latitude\"]/@content').extract_first(),\n 'lon': response.xpath('//meta[@property=\"place:location:longitude\"]/@content').extract_first(),\n }\n\n yield GeojsonPointItem(**properties)\n\n def parse(self, response):\n # Step into hierarchy of place\n for url in response.xpath(\"//div[@class='itemlist']/p/a/@href\").extract():\n yield scrapy.Request(response.urljoin(url))\n\n # Look for links to stores\n for url in response.xpath(\"//div[@class='itemlist']/li/span[@itemprop='streetAddress']/a/@href\").extract():\n yield scrapy.Request(response.urljoin(url), callback=self.parse_store)\n", "path": "locations/spiders/bmo_harris.py"}], "after_files": [{"content": "import html\nimport json\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nclass BMOHarrisSpider(scrapy.Spider):\n name = 
\"bmo_harris\"\n item_attributes = {'brand': \"BMO Harris Bank\", 'brand_wikidata': \"Q4835981\"}\n allowed_domains = [\"branches.bmoharris.com\"]\n download_delay = 0.5\n start_urls = (\n 'https://branches.bmoharris.com/',\n )\n user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36'\n\n def parse_store(self, response):\n properties = {\n 'addr_full': response.xpath('//meta[@property=\"business:contact_data:street_address\"]/@content').extract_first(),\n 'phone': response.xpath('//meta[@property=\"business:contact_data:phone_number\"]/@content').extract_first(),\n 'city': response.xpath('//meta[@property=\"business:contact_data:locality\"]/@content').extract_first(),\n 'state': response.xpath('//meta[@property=\"business:contact_data:region\"]/@content').extract_first(),\n 'postcode': response.xpath('//meta[@property=\"business:contact_data:postal_code\"]/@content').extract_first(),\n 'country': response.xpath('//meta[@property=\"business:contact_data:country_name\"]/@content').extract_first(),\n 'ref': response.url,\n 'website': response.url,\n 'lat': response.xpath('//meta[@property=\"place:location:latitude\"]/@content').extract_first(),\n 'lon': response.xpath('//meta[@property=\"place:location:longitude\"]/@content').extract_first(),\n }\n\n yield GeojsonPointItem(**properties)\n\n def parse(self, response):\n # Step into hierarchy of place\n for url in response.xpath(\"//ul[@class='itemlist']/li/a/@href\").extract():\n yield scrapy.Request(response.urljoin(url))\n\n # Look for links to stores\n for url in response.xpath(\"//ul[@class='itemlist']/li/div/span[@itemprop='streetAddress']/a/@href\").extract():\n yield scrapy.Request(response.urljoin(url), callback=self.parse_store)\n", "path": "locations/spiders/bmo_harris.py"}]}
787
421
gh_patches_debug_5298
rasdani/github-patches
git_diff
pyca__cryptography-2845
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- _ModuleWithDeprecations doesn't handle patching properly. `_ModuleWithDeprecations` catches `__getattr__` and `__setattr__` to patch through to the underlying module, but does not intercept `__delattr__`. That means that if you're using something like `mock.patch`, the mock successfully lands in place, but cannot be removed: the mock was applied to the underlying module, but the delete comes from the proxy. Should be easily fixed. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/cryptography/utils.py` Content: ``` 1 # This file is dual licensed under the terms of the Apache License, Version 2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository 3 # for complete details. 4 5 from __future__ import absolute_import, division, print_function 6 7 import abc 8 import binascii 9 import inspect 10 import struct 11 import sys 12 import warnings 13 14 15 # the functions deprecated in 1.0 are on an arbitrarily extended deprecation 16 # cycle and should not be removed until we agree on when that cycle ends. 17 DeprecatedIn10 = DeprecationWarning 18 DeprecatedIn12 = DeprecationWarning 19 20 21 def read_only_property(name): 22 return property(lambda self: getattr(self, name)) 23 24 25 def register_interface(iface): 26 def register_decorator(klass): 27 verify_interface(iface, klass) 28 iface.register(klass) 29 return klass 30 return register_decorator 31 32 33 if hasattr(int, "from_bytes"): 34 int_from_bytes = int.from_bytes 35 else: 36 def int_from_bytes(data, byteorder, signed=False): 37 assert byteorder == 'big' 38 assert not signed 39 40 if len(data) % 4 != 0: 41 data = (b'\x00' * (4 - (len(data) % 4))) + data 42 43 result = 0 44 45 while len(data) > 0: 46 digit, = struct.unpack('>I', data[:4]) 47 result = (result << 32) + digit 48 # TODO: this is quadratic in the length of data 49 data = data[4:] 50 51 return result 52 53 54 def int_to_bytes(integer, length=None): 55 hex_string = '%x' % integer 56 if length is None: 57 n = len(hex_string) 58 else: 59 n = length * 2 60 return binascii.unhexlify(hex_string.zfill(n + (n & 1))) 61 62 63 class InterfaceNotImplemented(Exception): 64 pass 65 66 67 if hasattr(inspect, "signature"): 68 signature = inspect.signature 69 else: 70 signature = inspect.getargspec 71 72 73 def verify_interface(iface, klass): 74 for method in iface.__abstractmethods__: 75 if not hasattr(klass, method): 76 raise InterfaceNotImplemented( 77 "{0} is missing a {1!r} method".format(klass, method) 78 ) 79 if isinstance(getattr(iface, method), abc.abstractproperty): 80 # Can't properly verify these yet. 81 continue 82 sig = signature(getattr(iface, method)) 83 actual = signature(getattr(klass, method)) 84 if sig != actual: 85 raise InterfaceNotImplemented( 86 "{0}.{1}'s signature differs from the expected. Expected: " 87 "{2!r}. 
Received: {3!r}".format( 88 klass, method, sig, actual 89 ) 90 ) 91 92 93 if sys.version_info >= (2, 7): 94 def bit_length(x): 95 return x.bit_length() 96 else: 97 def bit_length(x): 98 return len(bin(x)) - (2 + (x <= 0)) 99 100 101 class _DeprecatedValue(object): 102 def __init__(self, value, message, warning_class): 103 self.value = value 104 self.message = message 105 self.warning_class = warning_class 106 107 108 class _ModuleWithDeprecations(object): 109 def __init__(self, module): 110 self.__dict__["_module"] = module 111 112 def __getattr__(self, attr): 113 obj = getattr(self._module, attr) 114 if isinstance(obj, _DeprecatedValue): 115 warnings.warn(obj.message, obj.warning_class, stacklevel=2) 116 obj = obj.value 117 return obj 118 119 def __setattr__(self, attr, value): 120 setattr(self._module, attr, value) 121 122 def __dir__(self): 123 return ["_module"] + dir(self._module) 124 125 126 def deprecated(value, module_name, message, warning_class): 127 module = sys.modules[module_name] 128 if not isinstance(module, _ModuleWithDeprecations): 129 sys.modules[module_name] = module = _ModuleWithDeprecations(module) 130 return _DeprecatedValue(value, message, warning_class) 131 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/cryptography/utils.py b/src/cryptography/utils.py --- a/src/cryptography/utils.py +++ b/src/cryptography/utils.py @@ -119,6 +119,13 @@ def __setattr__(self, attr, value): setattr(self._module, attr, value) + def __delattr__(self, attr): + obj = getattr(self._module, attr) + if isinstance(obj, _DeprecatedValue): + warnings.warn(obj.message, obj.warning_class, stacklevel=2) + + delattr(self._module, attr) + def __dir__(self): return ["_module"] + dir(self._module)
{"golden_diff": "diff --git a/src/cryptography/utils.py b/src/cryptography/utils.py\n--- a/src/cryptography/utils.py\n+++ b/src/cryptography/utils.py\n@@ -119,6 +119,13 @@\n def __setattr__(self, attr, value):\n setattr(self._module, attr, value)\n \n+ def __delattr__(self, attr):\n+ obj = getattr(self._module, attr)\n+ if isinstance(obj, _DeprecatedValue):\n+ warnings.warn(obj.message, obj.warning_class, stacklevel=2)\n+\n+ delattr(self._module, attr)\n+\n def __dir__(self):\n return [\"_module\"] + dir(self._module)\n", "issue": "_ModuleWithDeprecations doesn't handle patching properly.\n`_ModuleWithDeprecations` catches `__getattr__` and `__setattr__` to patch through to the underlying module, but does not intercept `__delattr__`. That means that if you're using something like `mock.patch`, the mock successfully lands in place, but cannot be removed: the mock was applied to the underlying module, but the delete comes from the proxy.\n\nShould be easily fixed.\n\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport abc\nimport binascii\nimport inspect\nimport struct\nimport sys\nimport warnings\n\n\n# the functions deprecated in 1.0 are on an arbitrarily extended deprecation\n# cycle and should not be removed until we agree on when that cycle ends.\nDeprecatedIn10 = DeprecationWarning\nDeprecatedIn12 = DeprecationWarning\n\n\ndef read_only_property(name):\n return property(lambda self: getattr(self, name))\n\n\ndef register_interface(iface):\n def register_decorator(klass):\n verify_interface(iface, klass)\n iface.register(klass)\n return klass\n return register_decorator\n\n\nif hasattr(int, \"from_bytes\"):\n int_from_bytes = int.from_bytes\nelse:\n def int_from_bytes(data, byteorder, signed=False):\n assert byteorder == 'big'\n assert not signed\n\n if len(data) % 4 != 0:\n data = (b'\\x00' * (4 - (len(data) % 4))) + data\n\n result = 0\n\n while len(data) > 0:\n digit, = struct.unpack('>I', data[:4])\n result = (result << 32) + digit\n # TODO: this is quadratic in the length of data\n data = data[4:]\n\n return result\n\n\ndef int_to_bytes(integer, length=None):\n hex_string = '%x' % integer\n if length is None:\n n = len(hex_string)\n else:\n n = length * 2\n return binascii.unhexlify(hex_string.zfill(n + (n & 1)))\n\n\nclass InterfaceNotImplemented(Exception):\n pass\n\n\nif hasattr(inspect, \"signature\"):\n signature = inspect.signature\nelse:\n signature = inspect.getargspec\n\n\ndef verify_interface(iface, klass):\n for method in iface.__abstractmethods__:\n if not hasattr(klass, method):\n raise InterfaceNotImplemented(\n \"{0} is missing a {1!r} method\".format(klass, method)\n )\n if isinstance(getattr(iface, method), abc.abstractproperty):\n # Can't properly verify these yet.\n continue\n sig = signature(getattr(iface, method))\n actual = signature(getattr(klass, method))\n if sig != actual:\n raise InterfaceNotImplemented(\n \"{0}.{1}'s signature differs from the expected. Expected: \"\n \"{2!r}. 
Received: {3!r}\".format(\n klass, method, sig, actual\n )\n )\n\n\nif sys.version_info >= (2, 7):\n def bit_length(x):\n return x.bit_length()\nelse:\n def bit_length(x):\n return len(bin(x)) - (2 + (x <= 0))\n\n\nclass _DeprecatedValue(object):\n def __init__(self, value, message, warning_class):\n self.value = value\n self.message = message\n self.warning_class = warning_class\n\n\nclass _ModuleWithDeprecations(object):\n def __init__(self, module):\n self.__dict__[\"_module\"] = module\n\n def __getattr__(self, attr):\n obj = getattr(self._module, attr)\n if isinstance(obj, _DeprecatedValue):\n warnings.warn(obj.message, obj.warning_class, stacklevel=2)\n obj = obj.value\n return obj\n\n def __setattr__(self, attr, value):\n setattr(self._module, attr, value)\n\n def __dir__(self):\n return [\"_module\"] + dir(self._module)\n\n\ndef deprecated(value, module_name, message, warning_class):\n module = sys.modules[module_name]\n if not isinstance(module, _ModuleWithDeprecations):\n sys.modules[module_name] = module = _ModuleWithDeprecations(module)\n return _DeprecatedValue(value, message, warning_class)\n", "path": "src/cryptography/utils.py"}], "after_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport abc\nimport binascii\nimport inspect\nimport struct\nimport sys\nimport warnings\n\n\n# the functions deprecated in 1.0 are on an arbitrarily extended deprecation\n# cycle and should not be removed until we agree on when that cycle ends.\nDeprecatedIn10 = DeprecationWarning\nDeprecatedIn12 = DeprecationWarning\n\n\ndef read_only_property(name):\n return property(lambda self: getattr(self, name))\n\n\ndef register_interface(iface):\n def register_decorator(klass):\n verify_interface(iface, klass)\n iface.register(klass)\n return klass\n return register_decorator\n\n\nif hasattr(int, \"from_bytes\"):\n int_from_bytes = int.from_bytes\nelse:\n def int_from_bytes(data, byteorder, signed=False):\n assert byteorder == 'big'\n assert not signed\n\n if len(data) % 4 != 0:\n data = (b'\\x00' * (4 - (len(data) % 4))) + data\n\n result = 0\n\n while len(data) > 0:\n digit, = struct.unpack('>I', data[:4])\n result = (result << 32) + digit\n # TODO: this is quadratic in the length of data\n data = data[4:]\n\n return result\n\n\ndef int_to_bytes(integer, length=None):\n hex_string = '%x' % integer\n if length is None:\n n = len(hex_string)\n else:\n n = length * 2\n return binascii.unhexlify(hex_string.zfill(n + (n & 1)))\n\n\nclass InterfaceNotImplemented(Exception):\n pass\n\n\nif hasattr(inspect, \"signature\"):\n signature = inspect.signature\nelse:\n signature = inspect.getargspec\n\n\ndef verify_interface(iface, klass):\n for method in iface.__abstractmethods__:\n if not hasattr(klass, method):\n raise InterfaceNotImplemented(\n \"{0} is missing a {1!r} method\".format(klass, method)\n )\n if isinstance(getattr(iface, method), abc.abstractproperty):\n # Can't properly verify these yet.\n continue\n sig = signature(getattr(iface, method))\n actual = signature(getattr(klass, method))\n if sig != actual:\n raise InterfaceNotImplemented(\n \"{0}.{1}'s signature differs from the expected. Expected: \"\n \"{2!r}. 
Received: {3!r}\".format(\n klass, method, sig, actual\n )\n )\n\n\nif sys.version_info >= (2, 7):\n def bit_length(x):\n return x.bit_length()\nelse:\n def bit_length(x):\n return len(bin(x)) - (2 + (x <= 0))\n\n\nclass _DeprecatedValue(object):\n def __init__(self, value, message, warning_class):\n self.value = value\n self.message = message\n self.warning_class = warning_class\n\n\nclass _ModuleWithDeprecations(object):\n def __init__(self, module):\n self.__dict__[\"_module\"] = module\n\n def __getattr__(self, attr):\n obj = getattr(self._module, attr)\n if isinstance(obj, _DeprecatedValue):\n warnings.warn(obj.message, obj.warning_class, stacklevel=2)\n obj = obj.value\n return obj\n\n def __setattr__(self, attr, value):\n setattr(self._module, attr, value)\n\n def __delattr__(self, attr):\n obj = getattr(self._module, attr)\n if isinstance(obj, _DeprecatedValue):\n warnings.warn(obj.message, obj.warning_class, stacklevel=2)\n\n delattr(self._module, attr)\n\n def __dir__(self):\n return [\"_module\"] + dir(self._module)\n\n\ndef deprecated(value, module_name, message, warning_class):\n module = sys.modules[module_name]\n if not isinstance(module, _ModuleWithDeprecations):\n sys.modules[module_name] = module = _ModuleWithDeprecations(module)\n return _DeprecatedValue(value, message, warning_class)\n", "path": "src/cryptography/utils.py"}]}
1524
148
gh_patches_debug_6466
rasdani/github-patches
git_diff
plone__Products.CMFPlone-1417
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Return HTTP errors in proper format Proposer: Eric Brehault Seconder: # Motivation When a page does not exist, or has an error, or is not allowed for the user, Plone returns the appropriate HTTP error (404, 500, ...), and the response is a human readable page, properly skinned, which nice for the user. And if the requested resource is not a page (an image, a JS file, an AJAX call, etc.), Plone also returns this human readable page. It is useless because the page will not be displayed, and it produces many problems: - the response is very heavy, - it involves a lot of processing (Plone will render an entire page for nothing), - for AJAX call, the response cannot be easily interperted, - it might produce a cascade of errors (for instance: the regular response is not supposed to be rendered via Diazo, as it is not an HTML page, but the error is rendered by Diazo, and it might produce another error). # Proposed solution We could display the human readable error page only if the current request `HTTP_ACCEPT` parameter contains `text/html`, in other cases, we would just return a simple JSON error reponse. # Proposed implementation Test the `HTTP_ACCEPT` value in `Products/CMFPlone/skins/plone_templates/standard_error_message.py`, and call the existing template or make a JSON response accordingly. # Risks No identified risks. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `Products/CMFPlone/skins/plone_templates/standard_error_message.py` Content: ``` 1 ## Script (Python) "standard_error_message" 2 ##bind container=container 3 ##bind context=context 4 ##bind namespace= 5 ##bind script=script 6 ##bind subpath=traverse_subpath 7 ##parameters=**kwargs 8 ##title=Dispatches to relevant error view 9 10 ## by default we handle everything in 1 PageTemplate. 11 # you could easily check for the error_type and 12 # dispatch to an appropriate PageTemplate. 13 14 # Check if the object is traversable, if not it might be a view, get its parent 15 # because we need to render the error on an actual content object 16 from AccessControl import Unauthorized 17 try: 18 while not hasattr(context.aq_explicit, 'restrictedTraverse'): 19 context = context.aq_parent 20 except (Unauthorized, AttributeError): 21 context = context.portal_url.getPortalObject() 22 23 error_type = kwargs.get('error_type', None) 24 error_message = kwargs.get('error_message', None) 25 error_log_url = kwargs.get('error_log_url', None) 26 error_tb = kwargs.get('error_tb', None) 27 error_traceback = kwargs.get('error_traceback', None) 28 error_value = kwargs.get('error_value', None) 29 30 if error_log_url: 31 error_log_id = error_log_url.split('?id=')[1] 32 else: 33 error_log_id = None 34 35 36 no_actions = {'folder': [], 'user': [], 'global': [], 'workflow': []} 37 error_page = context.default_error_message( 38 error_type=error_type, 39 error_message=error_message, 40 error_tb=error_tb, 41 error_value=error_value, 42 error_log_url=error_log_url, 43 error_log_id=error_log_id, 44 no_portlets=True, 45 actions=no_actions) 46 47 return error_page 48 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/Products/CMFPlone/skins/plone_templates/standard_error_message.py b/Products/CMFPlone/skins/plone_templates/standard_error_message.py --- a/Products/CMFPlone/skins/plone_templates/standard_error_message.py +++ b/Products/CMFPlone/skins/plone_templates/standard_error_message.py @@ -27,6 +27,10 @@ error_traceback = kwargs.get('error_traceback', None) error_value = kwargs.get('error_value', None) +if "text/html" not in context.REQUEST.getHeader('Accept', ''): + context.REQUEST.RESPONSE.setHeader("Content-Type", "application/json") + return '{"error_type": "{0:s}"}'.format(error_type) + if error_log_url: error_log_id = error_log_url.split('?id=')[1] else:
{"golden_diff": "diff --git a/Products/CMFPlone/skins/plone_templates/standard_error_message.py b/Products/CMFPlone/skins/plone_templates/standard_error_message.py\n--- a/Products/CMFPlone/skins/plone_templates/standard_error_message.py\n+++ b/Products/CMFPlone/skins/plone_templates/standard_error_message.py\n@@ -27,6 +27,10 @@\n error_traceback = kwargs.get('error_traceback', None)\n error_value = kwargs.get('error_value', None)\n \n+if \"text/html\" not in context.REQUEST.getHeader('Accept', ''):\n+ context.REQUEST.RESPONSE.setHeader(\"Content-Type\", \"application/json\")\n+ return '{\"error_type\": \"{0:s}\"}'.format(error_type)\n+\n if error_log_url:\n error_log_id = error_log_url.split('?id=')[1]\n else:\n", "issue": "Return HTTP errors in proper format\nProposer: Eric Brehault\nSeconder:\n# Motivation\n\nWhen a page does not exist, or has an error, or is not allowed for the user, Plone returns the appropriate HTTP error (404, 500, ...), and the response is a human readable page, properly skinned, which nice for the user.\nAnd if the requested resource is not a page (an image, a JS file, an AJAX call, etc.), Plone also returns this human readable page.\nIt is useless because the page will not be displayed, and it produces many problems:\n- the response is very heavy,\n- it involves a lot of processing (Plone will render an entire page for nothing),\n- for AJAX call, the response cannot be easily interperted,\n- it might produce a cascade of errors (for instance: the regular response is not supposed to be rendered via Diazo, as it is not an HTML page, but the error is rendered by Diazo, and it might produce another error).\n# Proposed solution\n\nWe could display the human readable error page only if the current request `HTTP_ACCEPT` parameter contains `text/html`, in other cases, we would just return a simple JSON error reponse.\n# Proposed implementation\n\nTest the `HTTP_ACCEPT` value in `Products/CMFPlone/skins/plone_templates/standard_error_message.py`, and call the existing template or make a JSON response accordingly.\n# Risks\n\nNo identified risks.\n\n", "before_files": [{"content": "## Script (Python) \"standard_error_message\"\n##bind container=container\n##bind context=context\n##bind namespace=\n##bind script=script\n##bind subpath=traverse_subpath\n##parameters=**kwargs\n##title=Dispatches to relevant error view\n\n## by default we handle everything in 1 PageTemplate.\n# you could easily check for the error_type and\n# dispatch to an appropriate PageTemplate.\n\n# Check if the object is traversable, if not it might be a view, get its parent\n# because we need to render the error on an actual content object\nfrom AccessControl import Unauthorized\ntry:\n while not hasattr(context.aq_explicit, 'restrictedTraverse'):\n context = context.aq_parent\nexcept (Unauthorized, AttributeError):\n context = context.portal_url.getPortalObject()\n\nerror_type = kwargs.get('error_type', None)\nerror_message = kwargs.get('error_message', None)\nerror_log_url = kwargs.get('error_log_url', None)\nerror_tb = kwargs.get('error_tb', None)\nerror_traceback = kwargs.get('error_traceback', None)\nerror_value = kwargs.get('error_value', None)\n\nif error_log_url:\n error_log_id = error_log_url.split('?id=')[1]\nelse:\n error_log_id = None\n\n\nno_actions = {'folder': [], 'user': [], 'global': [], 'workflow': []}\nerror_page = context.default_error_message(\n error_type=error_type,\n error_message=error_message,\n error_tb=error_tb,\n error_value=error_value,\n error_log_url=error_log_url,\n 
error_log_id=error_log_id,\n no_portlets=True,\n actions=no_actions)\n\nreturn error_page\n", "path": "Products/CMFPlone/skins/plone_templates/standard_error_message.py"}], "after_files": [{"content": "## Script (Python) \"standard_error_message\"\n##bind container=container\n##bind context=context\n##bind namespace=\n##bind script=script\n##bind subpath=traverse_subpath\n##parameters=**kwargs\n##title=Dispatches to relevant error view\n\n## by default we handle everything in 1 PageTemplate.\n# you could easily check for the error_type and\n# dispatch to an appropriate PageTemplate.\n\n# Check if the object is traversable, if not it might be a view, get its parent\n# because we need to render the error on an actual content object\nfrom AccessControl import Unauthorized\ntry:\n while not hasattr(context.aq_explicit, 'restrictedTraverse'):\n context = context.aq_parent\nexcept (Unauthorized, AttributeError):\n context = context.portal_url.getPortalObject()\n\nerror_type = kwargs.get('error_type', None)\nerror_message = kwargs.get('error_message', None)\nerror_log_url = kwargs.get('error_log_url', None)\nerror_tb = kwargs.get('error_tb', None)\nerror_traceback = kwargs.get('error_traceback', None)\nerror_value = kwargs.get('error_value', None)\n\nif \"text/html\" not in context.REQUEST.getHeader('Accept', ''):\n context.REQUEST.RESPONSE.setHeader(\"Content-Type\", \"application/json\")\n return '{\"error_type\": \"{0:s}\"}'.format(error_type)\n\nif error_log_url:\n error_log_id = error_log_url.split('?id=')[1]\nelse:\n error_log_id = None\n\n\nno_actions = {'folder': [], 'user': [], 'global': [], 'workflow': []}\nerror_page = context.default_error_message(\n error_type=error_type,\n error_message=error_message,\n error_tb=error_tb,\n error_value=error_value,\n error_log_url=error_log_url,\n error_log_id=error_log_id,\n no_portlets=True,\n actions=no_actions)\n\nreturn error_page\n", "path": "Products/CMFPlone/skins/plone_templates/standard_error_message.py"}]}
1034
190
gh_patches_debug_3520
rasdani/github-patches
git_diff
encode__uvicorn-1328
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- No `python_requires` defined ### Checklist - [X] The bug is reproducible against the latest release or `master`. - [X] There are no similar issues or pull requests to fix it yet. ### Describe the bug It seems that no `python_requires` is defined for the `uvicorn` package, which in turn results in the latest version being installed in a Python 3.6 (CI) environment (that subsequently fails). If `python_requires` were defined to restrict the package to supported versions of the interpreter, I would have got an older version (that supported `py36`) instead. ### Steps to reproduce the bug In a `py36` environment ``` pip install uvicorn # Run uvicorn # ... ``` ### Expected behavior An older version is installed that works. ### Actual behavior `uvicorn` errors out, says `py36` is unsupported. ### Debugging material _No response_ ### Environment CPython 3.6 ### Additional context _No response_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 4 import os 5 import re 6 7 from setuptools import setup 8 9 10 def get_version(package): 11 """ 12 Return package version as listed in `__version__` in `init.py`. 13 """ 14 path = os.path.join(package, "__init__.py") 15 init_py = open(path, "r", encoding="utf8").read() 16 return re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1) 17 18 19 def get_long_description(): 20 """ 21 Return the README. 22 """ 23 return open("README.md", "r", encoding="utf8").read() 24 25 26 def get_packages(package): 27 """ 28 Return root package and all sub-packages. 
29 """ 30 return [ 31 dirpath 32 for dirpath, dirnames, filenames in os.walk(package) 33 if os.path.exists(os.path.join(dirpath, "__init__.py")) 34 ] 35 36 37 env_marker_cpython = ( 38 "sys_platform != 'win32'" 39 " and (sys_platform != 'cygwin'" 40 " and platform_python_implementation != 'PyPy')" 41 ) 42 43 env_marker_win = "sys_platform == 'win32'" 44 env_marker_below_38 = "python_version < '3.8'" 45 46 minimal_requirements = [ 47 "asgiref>=3.4.0", 48 "click>=7.0", 49 "h11>=0.8", 50 "typing-extensions;" + env_marker_below_38, 51 ] 52 53 54 extra_requirements = [ 55 "websockets>=10.0", 56 "httptools>=0.2.0,<0.4.0", 57 "uvloop>=0.14.0,!=0.15.0,!=0.15.1; " + env_marker_cpython, 58 "colorama>=0.4;" + env_marker_win, 59 "watchgod>=0.6", 60 "python-dotenv>=0.13", 61 "PyYAML>=5.1", 62 ] 63 64 65 setup( 66 name="uvicorn", 67 version=get_version("uvicorn"), 68 url="https://www.uvicorn.org/", 69 license="BSD", 70 description="The lightning-fast ASGI server.", 71 long_description=get_long_description(), 72 long_description_content_type="text/markdown", 73 author="Tom Christie", 74 author_email="[email protected]", 75 packages=get_packages("uvicorn"), 76 install_requires=minimal_requirements, 77 extras_require={"standard": extra_requirements}, 78 include_package_data=True, 79 classifiers=[ 80 "Development Status :: 4 - Beta", 81 "Environment :: Web Environment", 82 "Intended Audience :: Developers", 83 "License :: OSI Approved :: BSD License", 84 "Operating System :: OS Independent", 85 "Topic :: Internet :: WWW/HTTP", 86 "Programming Language :: Python :: 3", 87 "Programming Language :: Python :: 3.7", 88 "Programming Language :: Python :: 3.8", 89 "Programming Language :: Python :: 3.9", 90 "Programming Language :: Python :: 3.10", 91 "Programming Language :: Python :: Implementation :: CPython", 92 "Programming Language :: Python :: Implementation :: PyPy", 93 ], 94 entry_points=""" 95 [console_scripts] 96 uvicorn=uvicorn.main:main 97 """, 98 project_urls={ 99 "Funding": "https://github.com/sponsors/encode", 100 "Source": "https://github.com/encode/uvicorn", 101 "Changelog": "https://github.com/encode/uvicorn/blob/master/CHANGELOG.md", 102 }, 103 ) 104 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -73,6 +73,7 @@ author="Tom Christie", author_email="[email protected]", packages=get_packages("uvicorn"), + python_requires=">=3.7", install_requires=minimal_requirements, extras_require={"standard": extra_requirements}, include_package_data=True,
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -73,6 +73,7 @@\n author=\"Tom Christie\",\n author_email=\"[email protected]\",\n packages=get_packages(\"uvicorn\"),\n+ python_requires=\">=3.7\",\n install_requires=minimal_requirements,\n extras_require={\"standard\": extra_requirements},\n include_package_data=True,\n", "issue": "No `python_requires` defined\n### Checklist\r\n\r\n- [X] The bug is reproducible against the latest release or `master`.\r\n- [X] There are no similar issues or pull requests to fix it yet.\r\n\r\n### Describe the bug\r\n\r\nIt seems that no `python_requires` is defined for the `uvicorn` package, which in turn results in the latest version being installed in a Python 3.6 (CI) environment (that subsequently fails).\r\n\r\nIf `python_requires` were defined to restrict the package to supported versions of the interpreter, I would have got an older version (that supported `py36`) instead.\r\n\r\n### Steps to reproduce the bug\r\n\r\nIn a `py36` environment\r\n```\r\npip install uvicorn\r\n# Run uvicorn\r\n# ...\r\n```\r\n\r\n### Expected behavior\r\n\r\nAn older version is installed that works.\r\n\r\n### Actual behavior\r\n\r\n`uvicorn` errors out, says `py36` is unsupported.\r\n\r\n### Debugging material\r\n\r\n_No response_\r\n\r\n### Environment\r\n\r\nCPython 3.6\r\n\r\n### Additional context\r\n\r\n_No response_\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport re\n\nfrom setuptools import setup\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n path = os.path.join(package, \"__init__.py\")\n init_py = open(path, \"r\", encoding=\"utf8\").read()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py).group(1)\n\n\ndef get_long_description():\n \"\"\"\n Return the README.\n \"\"\"\n return open(\"README.md\", \"r\", encoding=\"utf8\").read()\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [\n dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, \"__init__.py\"))\n ]\n\n\nenv_marker_cpython = (\n \"sys_platform != 'win32'\"\n \" and (sys_platform != 'cygwin'\"\n \" and platform_python_implementation != 'PyPy')\"\n)\n\nenv_marker_win = \"sys_platform == 'win32'\"\nenv_marker_below_38 = \"python_version < '3.8'\"\n\nminimal_requirements = [\n \"asgiref>=3.4.0\",\n \"click>=7.0\",\n \"h11>=0.8\",\n \"typing-extensions;\" + env_marker_below_38,\n]\n\n\nextra_requirements = [\n \"websockets>=10.0\",\n \"httptools>=0.2.0,<0.4.0\",\n \"uvloop>=0.14.0,!=0.15.0,!=0.15.1; \" + env_marker_cpython,\n \"colorama>=0.4;\" + env_marker_win,\n \"watchgod>=0.6\",\n \"python-dotenv>=0.13\",\n \"PyYAML>=5.1\",\n]\n\n\nsetup(\n name=\"uvicorn\",\n version=get_version(\"uvicorn\"),\n url=\"https://www.uvicorn.org/\",\n license=\"BSD\",\n description=\"The lightning-fast ASGI server.\",\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n author=\"Tom Christie\",\n author_email=\"[email protected]\",\n packages=get_packages(\"uvicorn\"),\n install_requires=minimal_requirements,\n extras_require={\"standard\": extra_requirements},\n include_package_data=True,\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS 
Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n ],\n entry_points=\"\"\"\n [console_scripts]\n uvicorn=uvicorn.main:main\n \"\"\",\n project_urls={\n \"Funding\": \"https://github.com/sponsors/encode\",\n \"Source\": \"https://github.com/encode/uvicorn\",\n \"Changelog\": \"https://github.com/encode/uvicorn/blob/master/CHANGELOG.md\",\n },\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nimport re\n\nfrom setuptools import setup\n\n\ndef get_version(package):\n \"\"\"\n Return package version as listed in `__version__` in `init.py`.\n \"\"\"\n path = os.path.join(package, \"__init__.py\")\n init_py = open(path, \"r\", encoding=\"utf8\").read()\n return re.search(\"__version__ = ['\\\"]([^'\\\"]+)['\\\"]\", init_py).group(1)\n\n\ndef get_long_description():\n \"\"\"\n Return the README.\n \"\"\"\n return open(\"README.md\", \"r\", encoding=\"utf8\").read()\n\n\ndef get_packages(package):\n \"\"\"\n Return root package and all sub-packages.\n \"\"\"\n return [\n dirpath\n for dirpath, dirnames, filenames in os.walk(package)\n if os.path.exists(os.path.join(dirpath, \"__init__.py\"))\n ]\n\n\nenv_marker_cpython = (\n \"sys_platform != 'win32'\"\n \" and (sys_platform != 'cygwin'\"\n \" and platform_python_implementation != 'PyPy')\"\n)\n\nenv_marker_win = \"sys_platform == 'win32'\"\nenv_marker_below_38 = \"python_version < '3.8'\"\n\nminimal_requirements = [\n \"asgiref>=3.4.0\",\n \"click>=7.0\",\n \"h11>=0.8\",\n \"typing-extensions;\" + env_marker_below_38,\n]\n\n\nextra_requirements = [\n \"websockets>=10.0\",\n \"httptools>=0.2.0,<0.4.0\",\n \"uvloop>=0.14.0,!=0.15.0,!=0.15.1; \" + env_marker_cpython,\n \"colorama>=0.4;\" + env_marker_win,\n \"watchgod>=0.6\",\n \"python-dotenv>=0.13\",\n \"PyYAML>=5.1\",\n]\n\n\nsetup(\n name=\"uvicorn\",\n version=get_version(\"uvicorn\"),\n url=\"https://www.uvicorn.org/\",\n license=\"BSD\",\n description=\"The lightning-fast ASGI server.\",\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n author=\"Tom Christie\",\n author_email=\"[email protected]\",\n packages=get_packages(\"uvicorn\"),\n python_requires=\">=3.7\",\n install_requires=minimal_requirements,\n extras_require={\"standard\": extra_requirements},\n include_package_data=True,\n classifiers=[\n \"Development Status :: 4 - Beta\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: BSD License\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: Implementation :: CPython\",\n \"Programming Language :: Python :: Implementation :: PyPy\",\n ],\n entry_points=\"\"\"\n [console_scripts]\n uvicorn=uvicorn.main:main\n \"\"\",\n project_urls={\n \"Funding\": \"https://github.com/sponsors/encode\",\n \"Source\": \"https://github.com/encode/uvicorn\",\n \"Changelog\": 
\"https://github.com/encode/uvicorn/blob/master/CHANGELOG.md\",\n },\n)\n", "path": "setup.py"}]}
1450
89
gh_patches_debug_19035
rasdani/github-patches
git_diff
opsdroid__opsdroid-960
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Should we drop Python 3.5 support It has been raised a few times in the Matrix chat that we should consider dropping Python 3.5 support. There are a number of improvements in [Python 3.6](https://docs.python.org/3/whatsnew/3.6.html) which we could benefit from. This would also be a breaking change for users still on 3.5 and will require them to upgrade to continue using opsdroid. The reason this has cropped up for me again is because there appears to be a bug still in 3.5.7 which causes problems with catching exceptions within coroutines. This problem is not present in 3.6.8. I would rather not have to work around this bug if I can help it. We decided to support 3.5+ because that is the default version which comes pre-installed on the latest release of Debian and variations like Raspbian. This should ensure good support for many users without them having to tinker with their Python and provide a good beginner experience. As this is an open source software project with a motivation around self hosting and privacy it isn't possible to collect user metrics on Python versions being used. Otherwise we could use this data to asses the impact of dropping 3.5. [Home Assistant](https://www.home-assistant.io/) (the project which inspired much of opsdroid's community practices) are moving to only supporting the two most recent minor versions of Python, which means they will also be dropping 3.5 in the near future. My proposal would be to follow their lead. This does put a responsibility on us to keep an eye on Python releases (3.8 is [pencilled in](https://www.python.org/dev/peps/pep-0569/) for the end of the year) and remove support as versions are released. I would love thoughts and feedback from the community. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 #!/usr/bin/env python3 2 import os 3 from setuptools import setup, find_packages 4 from setuptools.command.build_py import build_py 5 from setuptools.command.sdist import sdist 6 from setuptools.command.develop import develop 7 import versioneer 8 9 PACKAGE_NAME = 'opsdroid' 10 HERE = os.path.abspath(os.path.dirname(__file__)) 11 README = open(os.path.join(HERE, 'README.md'), encoding="utf8").read() 12 13 PACKAGES = find_packages(exclude=['tests', 'tests.*', 'modules', 14 'modules.*', 'docs', 'docs.*']) 15 16 17 # For now we simply define the install_requires based on the contents 18 # of requirements.txt. In the future, install_requires may become much 19 # looser than the (automatically) resolved requirements.txt. 
20 with open(os.path.join(HERE, 'requirements.txt'), 'r') as fh: 21 REQUIRES = [line.strip() for line in fh] 22 23 24 class Develop(develop): 25 """Custom `develop` command to always build mo files on install -e.""" 26 27 def run(self): 28 self.run_command('compile_catalog') 29 develop.run(self) # old style class 30 31 32 class BuildPy(build_py): 33 """Custom `build_py` command to always build mo files for wheels.""" 34 35 def run(self): 36 self.run_command('compile_catalog') 37 build_py.run(self) # old style class 38 39 40 class Sdist(sdist): 41 """Custom `sdist` command to ensure that mo files are always created.""" 42 43 def run(self): 44 self.run_command('compile_catalog') 45 sdist.run(self) # old style class 46 47 48 setup( 49 name=PACKAGE_NAME, 50 version=versioneer.get_version(), 51 license='Apache License 2.0', 52 url='https://opsdroid.github.io/', 53 download_url='https://github.com/opsdroid/opsdroid/releases', 54 author='Jacob Tomlinson', 55 author_email='[email protected]', 56 description='An open source ChatOps bot framework.', 57 long_description=README, 58 long_description_content_type='text/markdown', 59 packages=PACKAGES, 60 include_package_data=True, 61 zip_safe=False, 62 platforms='any', 63 classifiers=[ 64 'Development Status :: 4 - Beta', 65 'Environment :: Console', 66 'Framework :: AsyncIO', 67 'Intended Audience :: Developers', 68 'Intended Audience :: System Administrators', 69 'Intended Audience :: Information Technology', 70 'License :: OSI Approved :: Apache Software License', 71 'Programming Language :: Python', 72 'Programming Language :: Python :: 3', 73 'Programming Language :: Python :: 3 :: Only', 74 'Programming Language :: Python :: 3.5', 75 'Programming Language :: Python :: 3.6', 76 'Programming Language :: Python :: 3.7', 77 'Topic :: Communications :: Chat', 78 'Topic :: Scientific/Engineering :: Artificial Intelligence', 79 'Topic :: Software Development :: Libraries :: Python Modules' 80 ], 81 install_requires=REQUIRES, 82 test_suite='tests', 83 keywords=[ 84 'bot', 85 'bot-framework', 86 'opsdroid', 87 'botkit', 88 'python3', 89 'asyncio', 90 'chatops', 91 'devops', 92 'nlu' 93 ], 94 setup_requires=['Babel'], 95 cmdclass=versioneer.get_cmdclass({'sdist': Sdist, 96 'build_py': BuildPy, 97 'develop': Develop}), 98 entry_points={ 99 'console_scripts': [ 100 'opsdroid = opsdroid.__main__:main' 101 ] 102 }, 103 ) 104 ``` Path: `opsdroid/__main__.py` Content: ``` 1 """Starts opsdroid.""" 2 3 import os 4 import subprocess 5 import sys 6 import logging 7 import gettext 8 import time 9 import contextlib 10 11 import click 12 13 from opsdroid import __version__ 14 from opsdroid.core import OpsDroid 15 from opsdroid.loader import Loader 16 from opsdroid.const import DEFAULT_LOG_FILENAME, LOCALE_DIR, \ 17 EXAMPLE_CONFIG_FILE, DEFAULT_LANGUAGE, DEFAULT_CONFIG_PATH 18 19 20 gettext.install('opsdroid') 21 _LOGGER = logging.getLogger("opsdroid") 22 23 24 def configure_lang(config): 25 """Configure app language based on user config. 26 27 Args: 28 config: Language Configuration and it uses ISO 639-1 code. 
29 for more info https://en.m.wikipedia.org/wiki/List_of_ISO_639-1_codes 30 31 32 """ 33 lang_code = config.get("lang", DEFAULT_LANGUAGE) 34 if lang_code != DEFAULT_LANGUAGE: 35 lang = gettext.translation( 36 'opsdroid', LOCALE_DIR, (lang_code,), fallback=True) 37 lang.install() 38 39 40 def configure_logging(config): 41 """Configure the root logger based on user config.""" 42 rootlogger = logging.getLogger() 43 while rootlogger.handlers: 44 rootlogger.handlers.pop() 45 46 try: 47 if config["logging"]["path"]: 48 logfile_path = os.path.expanduser(config["logging"]["path"]) 49 else: 50 logfile_path = config["logging"]["path"] 51 except KeyError: 52 logfile_path = DEFAULT_LOG_FILENAME 53 54 try: 55 log_level = get_logging_level( 56 config["logging"]["level"]) 57 except KeyError: 58 log_level = logging.INFO 59 60 rootlogger.setLevel(log_level) 61 formatter = logging.Formatter('%(levelname)s %(name)s: %(message)s') 62 63 console_handler = logging.StreamHandler() 64 console_handler.setLevel(log_level) 65 console_handler.setFormatter(formatter) 66 rootlogger.addHandler(console_handler) 67 68 with contextlib.suppress(KeyError): 69 if not config["logging"]["console"]: 70 console_handler.setLevel(logging.CRITICAL) 71 72 if logfile_path: 73 logdir = os.path.dirname(os.path.realpath(logfile_path)) 74 if not os.path.isdir(logdir): 75 os.makedirs(logdir) 76 file_handler = logging.FileHandler(logfile_path) 77 file_handler.setLevel(log_level) 78 file_handler.setFormatter(formatter) 79 rootlogger.addHandler(file_handler) 80 _LOGGER.info("="*40) 81 _LOGGER.info(_("Started opsdroid %s"), __version__) 82 83 84 def get_logging_level(logging_level): 85 """Get the logger level based on the user configuration. 86 87 Args: 88 logging_level: logging level from config file 89 90 Returns: 91 logging LEVEL -> 92 CRITICAL = 50 93 FATAL = CRITICAL 94 ERROR = 40 95 WARNING = 30 96 WARN = WARNING 97 INFO = 20 98 DEBUG = 10 99 NOTSET = 0 100 101 """ 102 if logging_level == 'critical': 103 return logging.CRITICAL 104 105 if logging_level == 'error': 106 return logging.ERROR 107 if logging_level == 'warning': 108 return logging.WARNING 109 110 if logging_level == 'debug': 111 return logging.DEBUG 112 113 return logging.INFO 114 115 116 def check_dependencies(): 117 """Check for system dependencies required by opsdroid.""" 118 if sys.version_info.major < 3 or sys.version_info.minor < 5: 119 logging.critical(_("Whoops! opsdroid requires python 3.5 or above.")) 120 sys.exit(1) 121 122 123 def print_version(ctx, param, value): 124 """Print out the version of opsdroid that is installed.""" 125 if not value or ctx.resilient_parsing: 126 return 127 click.echo('opsdroid {version}'.format(version=__version__)) 128 ctx.exit(0) 129 130 131 def print_example_config(ctx, param, value): 132 """Print out the example config.""" 133 if not value or ctx.resilient_parsing: 134 return 135 with open(EXAMPLE_CONFIG_FILE, 'r') as conf: 136 click.echo(conf.read()) 137 ctx.exit(0) 138 139 140 def edit_files(ctx, param, value): 141 """Open config/log file with favourite editor.""" 142 if value == 'config': 143 file = DEFAULT_CONFIG_PATH 144 elif value == 'log': 145 file = DEFAULT_LOG_FILENAME 146 else: 147 return 148 149 editor = os.environ.get('EDITOR', 'vi') 150 if editor == 'vi': 151 click.echo('You are about to edit a file in vim. 
\n' 152 'Read the tutorial on vim at: https://bit.ly/2HRvvrB') 153 time.sleep(3) 154 155 subprocess.run([editor, file]) 156 ctx.exit(0) 157 158 159 def welcome_message(config): 160 """Add welcome message if set to true in configuration. 161 162 Args: 163 config: config loaded by Loader 164 165 Raises: 166 KeyError: If 'welcome-message' key is not found in configuration file 167 168 """ 169 try: 170 if config['welcome-message']: 171 _LOGGER.info("=" * 40) 172 _LOGGER.info(_("You can customise your opsdroid by modifying " 173 "your configuration.yaml")) 174 _LOGGER.info(_("Read more at: " 175 "http://opsdroid.readthedocs.io/#configuration")) 176 _LOGGER.info(_("Watch the Get Started Videos at: " 177 "http://bit.ly/2fnC0Fh")) 178 _LOGGER.info(_("Install Opsdroid Desktop at: \n" 179 "https://github.com/opsdroid/opsdroid-desktop/" 180 "releases")) 181 _LOGGER.info("=" * 40) 182 except KeyError: 183 _LOGGER.warning(_("'welcome-message: true/false' is missing in " 184 "configuration.yaml")) 185 186 187 @click.command() 188 @click.option('--gen-config', is_flag=True, callback=print_example_config, 189 expose_value=False, default=False, 190 help='Print an example config and exit.') 191 @click.option('--version', '-v', is_flag=True, callback=print_version, 192 expose_value=False, default=False, is_eager=True, 193 help='Print the version and exit.') 194 @click.option('--edit-config', '-e', is_flag=True, callback=edit_files, 195 default=False, flag_value='config', expose_value=False, 196 help='Opens configuration.yaml with your favorite editor' 197 ' and exits.') 198 @click.option('--view-log', '-l', is_flag=True, callback=edit_files, 199 default=False, flag_value='log', expose_value=False, 200 help='Opens opsdroid logs with your favorite editor' 201 ' and exits.') 202 def main(): 203 """Opsdroid is a chat bot framework written in Python. 204 205 It is designed to be extendable, scalable and simple. 206 See https://opsdroid.github.io/ for more information. 207 """ 208 check_dependencies() 209 210 config = Loader.load_config_file([ 211 "configuration.yaml", 212 DEFAULT_CONFIG_PATH, 213 "/etc/opsdroid/configuration.yaml" 214 ]) 215 configure_lang(config) 216 configure_logging(config) 217 welcome_message(config) 218 219 with OpsDroid(config=config) as opsdroid: 220 opsdroid.load() 221 opsdroid.run() 222 223 224 def init(): 225 """Enter the application.""" 226 if __name__ == "__main__": 227 main() 228 229 230 init() 231 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/opsdroid/__main__.py b/opsdroid/__main__.py --- a/opsdroid/__main__.py +++ b/opsdroid/__main__.py @@ -115,8 +115,8 @@ def check_dependencies(): """Check for system dependencies required by opsdroid.""" - if sys.version_info.major < 3 or sys.version_info.minor < 5: - logging.critical(_("Whoops! opsdroid requires python 3.5 or above.")) + if sys.version_info.major < 3 or sys.version_info.minor < 6: + logging.critical(_("Whoops! opsdroid requires python 3.6 or above.")) sys.exit(1) diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -71,7 +71,6 @@ 'Programming Language :: Python', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3 :: Only', - 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Topic :: Communications :: Chat',
{"golden_diff": "diff --git a/opsdroid/__main__.py b/opsdroid/__main__.py\n--- a/opsdroid/__main__.py\n+++ b/opsdroid/__main__.py\n@@ -115,8 +115,8 @@\n \n def check_dependencies():\n \"\"\"Check for system dependencies required by opsdroid.\"\"\"\n- if sys.version_info.major < 3 or sys.version_info.minor < 5:\n- logging.critical(_(\"Whoops! opsdroid requires python 3.5 or above.\"))\n+ if sys.version_info.major < 3 or sys.version_info.minor < 6:\n+ logging.critical(_(\"Whoops! opsdroid requires python 3.6 or above.\"))\n sys.exit(1)\n \n \ndiff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -71,7 +71,6 @@\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3 :: Only',\n- 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Communications :: Chat',\n", "issue": "Should we drop Python 3.5 support\nIt has been raised a few times in the Matrix chat that we should consider dropping Python 3.5 support.\r\n\r\nThere are a number of improvements in [Python 3.6](https://docs.python.org/3/whatsnew/3.6.html) which we could benefit from. This would also be a breaking change for users still on 3.5 and will require them to upgrade to continue using opsdroid.\r\n\r\nThe reason this has cropped up for me again is because there appears to be a bug still in 3.5.7 which causes problems with catching exceptions within coroutines. This problem is not present in 3.6.8. I would rather not have to work around this bug if I can help it.\r\n\r\nWe decided to support 3.5+ because that is the default version which comes pre-installed on the latest release of Debian and variations like Raspbian. This should ensure good support for many users without them having to tinker with their Python and provide a good beginner experience.\r\n\r\nAs this is an open source software project with a motivation around self hosting and privacy it isn't possible to collect user metrics on Python versions being used. Otherwise we could use this data to asses the impact of dropping 3.5.\r\n\r\n[Home Assistant](https://www.home-assistant.io/) (the project which inspired much of opsdroid's community practices) are moving to only supporting the two most recent minor versions of Python, which means they will also be dropping 3.5 in the near future.\r\n\r\nMy proposal would be to follow their lead. This does put a responsibility on us to keep an eye on Python releases (3.8 is [pencilled in](https://www.python.org/dev/peps/pep-0569/) for the end of the year) and remove support as versions are released.\r\n\r\nI would love thoughts and feedback from the community.\n", "before_files": [{"content": "#!/usr/bin/env python3\nimport os\nfrom setuptools import setup, find_packages\nfrom setuptools.command.build_py import build_py\nfrom setuptools.command.sdist import sdist\nfrom setuptools.command.develop import develop\nimport versioneer\n\nPACKAGE_NAME = 'opsdroid'\nHERE = os.path.abspath(os.path.dirname(__file__))\nREADME = open(os.path.join(HERE, 'README.md'), encoding=\"utf8\").read()\n\nPACKAGES = find_packages(exclude=['tests', 'tests.*', 'modules',\n 'modules.*', 'docs', 'docs.*'])\n\n\n# For now we simply define the install_requires based on the contents\n# of requirements.txt. 
In the future, install_requires may become much\n# looser than the (automatically) resolved requirements.txt.\nwith open(os.path.join(HERE, 'requirements.txt'), 'r') as fh:\n REQUIRES = [line.strip() for line in fh]\n\n\nclass Develop(develop):\n \"\"\"Custom `develop` command to always build mo files on install -e.\"\"\"\n\n def run(self):\n self.run_command('compile_catalog')\n develop.run(self) # old style class\n\n\nclass BuildPy(build_py):\n \"\"\"Custom `build_py` command to always build mo files for wheels.\"\"\"\n\n def run(self):\n self.run_command('compile_catalog')\n build_py.run(self) # old style class\n\n\nclass Sdist(sdist):\n \"\"\"Custom `sdist` command to ensure that mo files are always created.\"\"\"\n\n def run(self):\n self.run_command('compile_catalog')\n sdist.run(self) # old style class\n\n\nsetup(\n name=PACKAGE_NAME,\n version=versioneer.get_version(),\n license='Apache License 2.0',\n url='https://opsdroid.github.io/',\n download_url='https://github.com/opsdroid/opsdroid/releases',\n author='Jacob Tomlinson',\n author_email='[email protected]',\n description='An open source ChatOps bot framework.',\n long_description=README,\n long_description_content_type='text/markdown',\n packages=PACKAGES,\n include_package_data=True,\n zip_safe=False,\n platforms='any',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Framework :: AsyncIO',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Intended Audience :: Information Technology',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3 :: Only',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Communications :: Chat',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development :: Libraries :: Python Modules'\n ],\n install_requires=REQUIRES,\n test_suite='tests',\n keywords=[\n 'bot',\n 'bot-framework',\n 'opsdroid',\n 'botkit',\n 'python3',\n 'asyncio',\n 'chatops',\n 'devops',\n 'nlu'\n ],\n setup_requires=['Babel'],\n cmdclass=versioneer.get_cmdclass({'sdist': Sdist,\n 'build_py': BuildPy,\n 'develop': Develop}),\n entry_points={\n 'console_scripts': [\n 'opsdroid = opsdroid.__main__:main'\n ]\n },\n)\n", "path": "setup.py"}, {"content": "\"\"\"Starts opsdroid.\"\"\"\n\nimport os\nimport subprocess\nimport sys\nimport logging\nimport gettext\nimport time\nimport contextlib\n\nimport click\n\nfrom opsdroid import __version__\nfrom opsdroid.core import OpsDroid\nfrom opsdroid.loader import Loader\nfrom opsdroid.const import DEFAULT_LOG_FILENAME, LOCALE_DIR, \\\n EXAMPLE_CONFIG_FILE, DEFAULT_LANGUAGE, DEFAULT_CONFIG_PATH\n\n\ngettext.install('opsdroid')\n_LOGGER = logging.getLogger(\"opsdroid\")\n\n\ndef configure_lang(config):\n \"\"\"Configure app language based on user config.\n\n Args:\n config: Language Configuration and it uses ISO 639-1 code.\n for more info https://en.m.wikipedia.org/wiki/List_of_ISO_639-1_codes\n\n\n \"\"\"\n lang_code = config.get(\"lang\", DEFAULT_LANGUAGE)\n if lang_code != DEFAULT_LANGUAGE:\n lang = gettext.translation(\n 'opsdroid', LOCALE_DIR, (lang_code,), fallback=True)\n lang.install()\n\n\ndef configure_logging(config):\n \"\"\"Configure the root logger based on user config.\"\"\"\n rootlogger = logging.getLogger()\n while rootlogger.handlers:\n rootlogger.handlers.pop()\n\n 
try:\n if config[\"logging\"][\"path\"]:\n logfile_path = os.path.expanduser(config[\"logging\"][\"path\"])\n else:\n logfile_path = config[\"logging\"][\"path\"]\n except KeyError:\n logfile_path = DEFAULT_LOG_FILENAME\n\n try:\n log_level = get_logging_level(\n config[\"logging\"][\"level\"])\n except KeyError:\n log_level = logging.INFO\n\n rootlogger.setLevel(log_level)\n formatter = logging.Formatter('%(levelname)s %(name)s: %(message)s')\n\n console_handler = logging.StreamHandler()\n console_handler.setLevel(log_level)\n console_handler.setFormatter(formatter)\n rootlogger.addHandler(console_handler)\n\n with contextlib.suppress(KeyError):\n if not config[\"logging\"][\"console\"]:\n console_handler.setLevel(logging.CRITICAL)\n\n if logfile_path:\n logdir = os.path.dirname(os.path.realpath(logfile_path))\n if not os.path.isdir(logdir):\n os.makedirs(logdir)\n file_handler = logging.FileHandler(logfile_path)\n file_handler.setLevel(log_level)\n file_handler.setFormatter(formatter)\n rootlogger.addHandler(file_handler)\n _LOGGER.info(\"=\"*40)\n _LOGGER.info(_(\"Started opsdroid %s\"), __version__)\n\n\ndef get_logging_level(logging_level):\n \"\"\"Get the logger level based on the user configuration.\n\n Args:\n logging_level: logging level from config file\n\n Returns:\n logging LEVEL ->\n CRITICAL = 50\n FATAL = CRITICAL\n ERROR = 40\n WARNING = 30\n WARN = WARNING\n INFO = 20\n DEBUG = 10\n NOTSET = 0\n\n \"\"\"\n if logging_level == 'critical':\n return logging.CRITICAL\n\n if logging_level == 'error':\n return logging.ERROR\n if logging_level == 'warning':\n return logging.WARNING\n\n if logging_level == 'debug':\n return logging.DEBUG\n\n return logging.INFO\n\n\ndef check_dependencies():\n \"\"\"Check for system dependencies required by opsdroid.\"\"\"\n if sys.version_info.major < 3 or sys.version_info.minor < 5:\n logging.critical(_(\"Whoops! opsdroid requires python 3.5 or above.\"))\n sys.exit(1)\n\n\ndef print_version(ctx, param, value):\n \"\"\"Print out the version of opsdroid that is installed.\"\"\"\n if not value or ctx.resilient_parsing:\n return\n click.echo('opsdroid {version}'.format(version=__version__))\n ctx.exit(0)\n\n\ndef print_example_config(ctx, param, value):\n \"\"\"Print out the example config.\"\"\"\n if not value or ctx.resilient_parsing:\n return\n with open(EXAMPLE_CONFIG_FILE, 'r') as conf:\n click.echo(conf.read())\n ctx.exit(0)\n\n\ndef edit_files(ctx, param, value):\n \"\"\"Open config/log file with favourite editor.\"\"\"\n if value == 'config':\n file = DEFAULT_CONFIG_PATH\n elif value == 'log':\n file = DEFAULT_LOG_FILENAME\n else:\n return\n\n editor = os.environ.get('EDITOR', 'vi')\n if editor == 'vi':\n click.echo('You are about to edit a file in vim. 
\\n'\n 'Read the tutorial on vim at: https://bit.ly/2HRvvrB')\n time.sleep(3)\n\n subprocess.run([editor, file])\n ctx.exit(0)\n\n\ndef welcome_message(config):\n \"\"\"Add welcome message if set to true in configuration.\n\n Args:\n config: config loaded by Loader\n\n Raises:\n KeyError: If 'welcome-message' key is not found in configuration file\n\n \"\"\"\n try:\n if config['welcome-message']:\n _LOGGER.info(\"=\" * 40)\n _LOGGER.info(_(\"You can customise your opsdroid by modifying \"\n \"your configuration.yaml\"))\n _LOGGER.info(_(\"Read more at: \"\n \"http://opsdroid.readthedocs.io/#configuration\"))\n _LOGGER.info(_(\"Watch the Get Started Videos at: \"\n \"http://bit.ly/2fnC0Fh\"))\n _LOGGER.info(_(\"Install Opsdroid Desktop at: \\n\"\n \"https://github.com/opsdroid/opsdroid-desktop/\"\n \"releases\"))\n _LOGGER.info(\"=\" * 40)\n except KeyError:\n _LOGGER.warning(_(\"'welcome-message: true/false' is missing in \"\n \"configuration.yaml\"))\n\n\[email protected]()\[email protected]('--gen-config', is_flag=True, callback=print_example_config,\n expose_value=False, default=False,\n help='Print an example config and exit.')\[email protected]('--version', '-v', is_flag=True, callback=print_version,\n expose_value=False, default=False, is_eager=True,\n help='Print the version and exit.')\[email protected]('--edit-config', '-e', is_flag=True, callback=edit_files,\n default=False, flag_value='config', expose_value=False,\n help='Opens configuration.yaml with your favorite editor'\n ' and exits.')\[email protected]('--view-log', '-l', is_flag=True, callback=edit_files,\n default=False, flag_value='log', expose_value=False,\n help='Opens opsdroid logs with your favorite editor'\n ' and exits.')\ndef main():\n \"\"\"Opsdroid is a chat bot framework written in Python.\n\n It is designed to be extendable, scalable and simple.\n See https://opsdroid.github.io/ for more information.\n \"\"\"\n check_dependencies()\n\n config = Loader.load_config_file([\n \"configuration.yaml\",\n DEFAULT_CONFIG_PATH,\n \"/etc/opsdroid/configuration.yaml\"\n ])\n configure_lang(config)\n configure_logging(config)\n welcome_message(config)\n\n with OpsDroid(config=config) as opsdroid:\n opsdroid.load()\n opsdroid.run()\n\n\ndef init():\n \"\"\"Enter the application.\"\"\"\n if __name__ == \"__main__\":\n main()\n\n\ninit()\n", "path": "opsdroid/__main__.py"}], "after_files": [{"content": "#!/usr/bin/env python3\nimport os\nfrom setuptools import setup, find_packages\nfrom setuptools.command.build_py import build_py\nfrom setuptools.command.sdist import sdist\nfrom setuptools.command.develop import develop\nimport versioneer\n\nPACKAGE_NAME = 'opsdroid'\nHERE = os.path.abspath(os.path.dirname(__file__))\nREADME = open(os.path.join(HERE, 'README.md'), encoding=\"utf8\").read()\n\nPACKAGES = find_packages(exclude=['tests', 'tests.*', 'modules',\n 'modules.*', 'docs', 'docs.*'])\n\n\n# For now we simply define the install_requires based on the contents\n# of requirements.txt. 
In the future, install_requires may become much\n# looser than the (automatically) resolved requirements.txt.\nwith open(os.path.join(HERE, 'requirements.txt'), 'r') as fh:\n REQUIRES = [line.strip() for line in fh]\n\n\nclass Develop(develop):\n \"\"\"Custom `develop` command to always build mo files on install -e.\"\"\"\n\n def run(self):\n self.run_command('compile_catalog')\n develop.run(self) # old style class\n\n\nclass BuildPy(build_py):\n \"\"\"Custom `build_py` command to always build mo files for wheels.\"\"\"\n\n def run(self):\n self.run_command('compile_catalog')\n build_py.run(self) # old style class\n\n\nclass Sdist(sdist):\n \"\"\"Custom `sdist` command to ensure that mo files are always created.\"\"\"\n\n def run(self):\n self.run_command('compile_catalog')\n sdist.run(self) # old style class\n\n\nsetup(\n name=PACKAGE_NAME,\n version=versioneer.get_version(),\n license='Apache License 2.0',\n url='https://opsdroid.github.io/',\n download_url='https://github.com/opsdroid/opsdroid/releases',\n author='Jacob Tomlinson',\n author_email='[email protected]',\n description='An open source ChatOps bot framework.',\n long_description=README,\n long_description_content_type='text/markdown',\n packages=PACKAGES,\n include_package_data=True,\n zip_safe=False,\n platforms='any',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Environment :: Console',\n 'Framework :: AsyncIO',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Intended Audience :: Information Technology',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3 :: Only',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Communications :: Chat',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development :: Libraries :: Python Modules'\n ],\n install_requires=REQUIRES,\n test_suite='tests',\n keywords=[\n 'bot',\n 'bot-framework',\n 'opsdroid',\n 'botkit',\n 'python3',\n 'asyncio',\n 'chatops',\n 'devops',\n 'nlu'\n ],\n setup_requires=['Babel'],\n cmdclass=versioneer.get_cmdclass({'sdist': Sdist,\n 'build_py': BuildPy,\n 'develop': Develop}),\n entry_points={\n 'console_scripts': [\n 'opsdroid = opsdroid.__main__:main'\n ]\n },\n)\n", "path": "setup.py"}, {"content": "\"\"\"Starts opsdroid.\"\"\"\n\nimport os\nimport subprocess\nimport sys\nimport logging\nimport gettext\nimport time\nimport contextlib\n\nimport click\n\nfrom opsdroid import __version__\nfrom opsdroid.core import OpsDroid\nfrom opsdroid.loader import Loader\nfrom opsdroid.const import DEFAULT_LOG_FILENAME, LOCALE_DIR, \\\n EXAMPLE_CONFIG_FILE, DEFAULT_LANGUAGE, DEFAULT_CONFIG_PATH\n\n\ngettext.install('opsdroid')\n_LOGGER = logging.getLogger(\"opsdroid\")\n\n\ndef configure_lang(config):\n \"\"\"Configure app language based on user config.\n\n Args:\n config: Language Configuration and it uses ISO 639-1 code.\n for more info https://en.m.wikipedia.org/wiki/List_of_ISO_639-1_codes\n\n\n \"\"\"\n lang_code = config.get(\"lang\", DEFAULT_LANGUAGE)\n if lang_code != DEFAULT_LANGUAGE:\n lang = gettext.translation(\n 'opsdroid', LOCALE_DIR, (lang_code,), fallback=True)\n lang.install()\n\n\ndef configure_logging(config):\n \"\"\"Configure the root logger based on user config.\"\"\"\n rootlogger = logging.getLogger()\n while rootlogger.handlers:\n rootlogger.handlers.pop()\n\n try:\n if config[\"logging\"][\"path\"]:\n 
logfile_path = os.path.expanduser(config[\"logging\"][\"path\"])\n else:\n logfile_path = config[\"logging\"][\"path\"]\n except KeyError:\n logfile_path = DEFAULT_LOG_FILENAME\n\n try:\n log_level = get_logging_level(\n config[\"logging\"][\"level\"])\n except KeyError:\n log_level = logging.INFO\n\n rootlogger.setLevel(log_level)\n formatter = logging.Formatter('%(levelname)s %(name)s: %(message)s')\n\n console_handler = logging.StreamHandler()\n console_handler.setLevel(log_level)\n console_handler.setFormatter(formatter)\n rootlogger.addHandler(console_handler)\n\n with contextlib.suppress(KeyError):\n if not config[\"logging\"][\"console\"]:\n console_handler.setLevel(logging.CRITICAL)\n\n if logfile_path:\n logdir = os.path.dirname(os.path.realpath(logfile_path))\n if not os.path.isdir(logdir):\n os.makedirs(logdir)\n file_handler = logging.FileHandler(logfile_path)\n file_handler.setLevel(log_level)\n file_handler.setFormatter(formatter)\n rootlogger.addHandler(file_handler)\n _LOGGER.info(\"=\"*40)\n _LOGGER.info(_(\"Started opsdroid %s\"), __version__)\n\n\ndef get_logging_level(logging_level):\n \"\"\"Get the logger level based on the user configuration.\n\n Args:\n logging_level: logging level from config file\n\n Returns:\n logging LEVEL ->\n CRITICAL = 50\n FATAL = CRITICAL\n ERROR = 40\n WARNING = 30\n WARN = WARNING\n INFO = 20\n DEBUG = 10\n NOTSET = 0\n\n \"\"\"\n if logging_level == 'critical':\n return logging.CRITICAL\n\n if logging_level == 'error':\n return logging.ERROR\n if logging_level == 'warning':\n return logging.WARNING\n\n if logging_level == 'debug':\n return logging.DEBUG\n\n return logging.INFO\n\n\ndef check_dependencies():\n \"\"\"Check for system dependencies required by opsdroid.\"\"\"\n if sys.version_info.major < 3 or sys.version_info.minor < 6:\n logging.critical(_(\"Whoops! opsdroid requires python 3.6 or above.\"))\n sys.exit(1)\n\n\ndef print_version(ctx, param, value):\n \"\"\"Print out the version of opsdroid that is installed.\"\"\"\n if not value or ctx.resilient_parsing:\n return\n click.echo('opsdroid {version}'.format(version=__version__))\n ctx.exit(0)\n\n\ndef print_example_config(ctx, param, value):\n \"\"\"Print out the example config.\"\"\"\n if not value or ctx.resilient_parsing:\n return\n with open(EXAMPLE_CONFIG_FILE, 'r') as conf:\n click.echo(conf.read())\n ctx.exit(0)\n\n\ndef edit_files(ctx, param, value):\n \"\"\"Open config/log file with favourite editor.\"\"\"\n if value == 'config':\n file = DEFAULT_CONFIG_PATH\n elif value == 'log':\n file = DEFAULT_LOG_FILENAME\n else:\n return\n\n editor = os.environ.get('EDITOR', 'vi')\n if editor == 'vi':\n click.echo('You are about to edit a file in vim. 
\\n'\n 'Read the tutorial on vim at: https://bit.ly/2HRvvrB')\n time.sleep(3)\n\n subprocess.run([editor, file])\n ctx.exit(0)\n\n\ndef welcome_message(config):\n \"\"\"Add welcome message if set to true in configuration.\n\n Args:\n config: config loaded by Loader\n\n Raises:\n KeyError: If 'welcome-message' key is not found in configuration file\n\n \"\"\"\n try:\n if config['welcome-message']:\n _LOGGER.info(\"=\" * 40)\n _LOGGER.info(_(\"You can customise your opsdroid by modifying \"\n \"your configuration.yaml\"))\n _LOGGER.info(_(\"Read more at: \"\n \"http://opsdroid.readthedocs.io/#configuration\"))\n _LOGGER.info(_(\"Watch the Get Started Videos at: \"\n \"http://bit.ly/2fnC0Fh\"))\n _LOGGER.info(_(\"Install Opsdroid Desktop at: \\n\"\n \"https://github.com/opsdroid/opsdroid-desktop/\"\n \"releases\"))\n _LOGGER.info(\"=\" * 40)\n except KeyError:\n _LOGGER.warning(_(\"'welcome-message: true/false' is missing in \"\n \"configuration.yaml\"))\n\n\[email protected]()\[email protected]('--gen-config', is_flag=True, callback=print_example_config,\n expose_value=False, default=False,\n help='Print an example config and exit.')\[email protected]('--version', '-v', is_flag=True, callback=print_version,\n expose_value=False, default=False, is_eager=True,\n help='Print the version and exit.')\[email protected]('--edit-config', '-e', is_flag=True, callback=edit_files,\n default=False, flag_value='config', expose_value=False,\n help='Opens configuration.yaml with your favorite editor'\n ' and exits.')\[email protected]('--view-log', '-l', is_flag=True, callback=edit_files,\n default=False, flag_value='log', expose_value=False,\n help='Opens opsdroid logs with your favorite editor'\n ' and exits.')\ndef main():\n \"\"\"Opsdroid is a chat bot framework written in Python.\n\n It is designed to be extendable, scalable and simple.\n See https://opsdroid.github.io/ for more information.\n \"\"\"\n check_dependencies()\n\n config = Loader.load_config_file([\n \"configuration.yaml\",\n DEFAULT_CONFIG_PATH,\n \"/etc/opsdroid/configuration.yaml\"\n ])\n configure_lang(config)\n configure_logging(config)\n welcome_message(config)\n\n with OpsDroid(config=config) as opsdroid:\n opsdroid.load()\n opsdroid.run()\n\n\ndef init():\n \"\"\"Enter the application.\"\"\"\n if __name__ == \"__main__\":\n main()\n\n\ninit()\n", "path": "opsdroid/__main__.py"}]}
3,760
272
gh_patches_debug_36295
rasdani/github-patches
git_diff
fossasia__open-event-server-2278
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Tab Sales in Admin section with relevant information Add tab "Sales" in admin section at `https://open-event-dev.herokuapp.com/admin/` We need to display ticket sales in a suitable way to understand the status of the system. How can this be best achieved. Useful information includes: - [x] Sales by event - [x] Sales by organizer, and email of organizer - [x] Sales by location - [x] Sale depending on date and period (maybe search option) - [x] Fees by event - [x] Status/list of (automatic) emails sent to organizer. - [x] Status of fees paid (invoice to be sent, pending, late) - [ ] What other information is useful? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `app/models/event_invoice.py` Content: ``` 1 import uuid 2 from datetime import datetime 3 import time 4 5 from app.helpers.helpers import get_count 6 from . import db 7 8 def get_new_identifier(): 9 identifier = str(uuid.uuid4()) 10 count = get_count(EventInvoice.query.filter_by(identifier=identifier)) 11 if count == 0: 12 return identifier 13 else: 14 return get_new_identifier() 15 16 class EventInvoice(db.Model): 17 """ 18 Stripe authorization information for an event. 19 """ 20 __tablename__ = 'event_invoices' 21 22 id = db.Column(db.Integer, primary_key=True) 23 identifier = db.Column(db.String, unique=True) 24 amount = db.Column(db.Float) 25 address = db.Column(db.String) 26 city = db.Column(db.String) 27 state = db.Column(db.String) 28 country = db.Column(db.String) 29 zipcode = db.Column(db.String) 30 31 user_id = db.Column(db.Integer, db.ForeignKey('user.id')) 32 event_id = db.Column(db.Integer, db.ForeignKey('events.id', ondelete='SET NULL')) 33 34 created_at = db.Column(db.DateTime) 35 completed_at = db.Column(db.DateTime, nullable=True, default=None) 36 transaction_id = db.Column(db.String) 37 paid_via = db.Column(db.String) 38 payment_mode = db.Column(db.String) 39 brand = db.Column(db.String) 40 exp_month = db.Column(db.Integer) 41 exp_year = db.Column(db.Integer) 42 last4 = db.Column(db.String) 43 stripe_token = db.Column(db.String) 44 paypal_token = db.Column(db.String) 45 status = db.Column(db.String) 46 47 event = db.relationship('Event', backref='invoices') 48 user = db.relationship('User', backref='invoices') 49 50 def __init__(self, 51 amount=None, 52 address=None, 53 city=city, 54 state=None, 55 country=None, 56 zipcode=None, 57 transaction_id=None, 58 paid_via=None, 59 user_id=None, 60 event_id=None): 61 self.identifier = get_new_identifier() 62 self.amount = amount 63 self.address = address 64 self.state = state 65 self.country = country 66 self.zipcode = zipcode 67 self.user_id = user_id 68 self.event_id = event_id 69 self.transaction_id = transaction_id 70 self.paid_via = paid_via 71 self.created_at = datetime.utcnow() 72 73 def get_invoice_number(self): 74 return 'I' + str(int(time.mktime(self.created_at.timetuple()))) + '-' + str(self.id) 75 76 def __repr__(self): 77 return '<EventInvoice %r>' % self.stripe_user_id 78 79 def __str__(self): 80 return unicode(self).encode('utf-8') 81 82 def __unicode__(self): 83 return self.stripe_user_id 84 ``` Path: `app/views/admin/super_admin/sales.py` Content: ``` 1 import copy 2 from datetime import datetime 3 4 from flask import request 5 from flask import url_for 6 from flask_admin import expose 7 from werkzeug.exceptions import abort 8 from werkzeug.utils 
import redirect 9 10 from app import forex 11 from app.helpers.data_getter import DataGetter 12 from app.helpers.payment import get_fee 13 from app.views.admin.super_admin.super_admin_base import SuperAdminBaseView, SALES 14 from app.helpers.ticketing import TicketingManager 15 16 class SuperAdminSalesView(SuperAdminBaseView): 17 PANEL_NAME = SALES 18 display_currency = 'USD' 19 20 @expose('/') 21 def index(self): 22 return redirect(url_for('.sales_by_events_view', path='events')) 23 24 @expose('/fees/') 25 def fees_by_events_view(self): 26 from_date = request.args.get('from_date') 27 to_date = request.args.get('to_date') 28 29 if ('from_date' in request.args and not from_date) or ('to_date' in request.args and not to_date) or \ 30 ('from_date' in request.args and 'to_date' not in request.args) or \ 31 ('to_date' in request.args and 'from_date' not in request.args): 32 33 return redirect(url_for('.fees_by_events_view')) 34 35 if from_date and to_date: 36 orders = TicketingManager.get_orders( 37 from_date=datetime.strptime(from_date, '%d/%m/%Y'), 38 to_date=datetime.strptime(to_date, '%d/%m/%Y'), 39 status='completed' 40 ) 41 else: 42 orders = TicketingManager.get_orders(status='completed') 43 44 events = DataGetter.get_all_events() 45 46 fee_summary = {} 47 for event in events: 48 fee_summary[str(event.id)] = { 49 'name': event.name, 50 'payment_currency': event.payment_currency, 51 'fee_rate': get_fee(event.payment_currency), 52 'fee_amount': 0, 53 'tickets_count': 0 54 } 55 56 fee_total = 0 57 tickets_total = 0 58 59 for order in orders: 60 for order_ticket in order.tickets: 61 fee_summary[str(order.event.id)]['tickets_count'] += order_ticket.quantity 62 tickets_total += order_ticket.quantity 63 ticket = TicketingManager.get_ticket(order_ticket.ticket_id) 64 if order.paid_via != 'free' and order.amount > 0 and ticket.price > 0: 65 fee = ticket.price * (get_fee(order.event.payment_currency)/100) 66 fee = forex(order.event.payment_currency, self.display_currency, fee) 67 fee_summary[str(order.event.id)]['fee_amount'] += fee 68 fee_total += fee 69 70 return self.render('/gentelella/admin/super_admin/sales/fees.html', 71 fee_summary=fee_summary, 72 display_currency=self.display_currency, 73 from_date=from_date, 74 to_date=to_date, 75 tickets_total=tickets_total, 76 fee_total=fee_total) 77 78 @expose('/<path>/') 79 def sales_by_events_view(self, path): 80 81 from_date = request.args.get('from_date') 82 to_date = request.args.get('to_date') 83 84 if ('from_date' in request.args and not from_date) or ('to_date' in request.args and not to_date) or \ 85 ('from_date' in request.args and 'to_date' not in request.args) or \ 86 ('to_date' in request.args and 'from_date' not in request.args): 87 88 return redirect(url_for('.sales_by_events_view', path=path)) 89 90 if from_date and to_date: 91 orders = TicketingManager.get_orders( 92 from_date=datetime.strptime(from_date, '%d/%m/%Y'), 93 to_date=datetime.strptime(to_date, '%d/%m/%Y') 94 ) 95 else: 96 orders = TicketingManager.get_orders() 97 98 events = DataGetter.get_all_events() 99 100 completed_count = 0 101 completed_amount = 0 102 tickets_count = 0 103 104 orders_summary = { 105 'completed': { 106 'class': 'success', 107 'tickets_count': 0, 108 'orders_count': 0, 109 'total_sales': 0 110 }, 111 'pending': { 112 'class': 'warning', 113 'tickets_count': 0, 114 'orders_count': 0, 115 'total_sales': 0 116 }, 117 'expired': { 118 'class': 'danger', 119 'tickets_count': 0, 120 'orders_count': 0, 121 'total_sales': 0 122 } 123 } 124 125 
tickets_summary_event_wise = {} 126 tickets_summary_organizer_wise = {} 127 tickets_summary_location_wise = {} 128 for event in events: 129 tickets_summary_event_wise[str(event.id)] = { 130 'name': event.name, 131 'payment_currency': event.payment_currency, 132 'completed': { 133 'tickets_count': 0, 134 'sales': 0 135 }, 136 'pending': { 137 'tickets_count': 0, 138 'sales': 0 139 }, 140 'expired': { 141 'class': 'danger', 142 'tickets_count': 0, 143 'sales': 0 144 } 145 } 146 tickets_summary_organizer_wise[str(event.creator_id)] = \ 147 copy.deepcopy(tickets_summary_event_wise[str(event.id)]) 148 if event.creator: 149 tickets_summary_organizer_wise[str(event.creator_id)]['name'] = event.creator.email 150 151 tickets_summary_location_wise[unicode(event.searchable_location_name)] = \ 152 copy.deepcopy(tickets_summary_event_wise[str(event.id)]) 153 tickets_summary_location_wise[unicode(event.searchable_location_name)]['name'] = \ 154 event.searchable_location_name 155 156 for order in orders: 157 if order.status == 'initialized': 158 order.status = 'pending' 159 orders_summary[str(order.status)]['orders_count'] += 1 160 orders_summary[str(order.status)]['total_sales'] += forex(order.event.payment_currency, 161 self.display_currency, order.amount) 162 for order_ticket in order.tickets: 163 orders_summary[str(order.status)]['tickets_count'] += order_ticket.quantity 164 ticket = TicketingManager.get_ticket(order_ticket.ticket_id) 165 tickets_summary_event_wise[str(order.event_id)][str(order.status)]['tickets_count'] \ 166 += order_ticket.quantity 167 tickets_summary_organizer_wise[str(order.event.creator_id)][str(order.status)]['tickets_count'] \ 168 += order_ticket.quantity 169 tickets_summary_location_wise[str(order 170 .event.searchable_location_name)][str(order 171 .status)]['tickets_count'] \ 172 += order_ticket.quantity 173 174 if order.paid_via != 'free' and order.amount > 0: 175 tickets_summary_event_wise[str(order.event_id)][str(order.status)]['sales'] += \ 176 order_ticket.quantity * ticket.price 177 tickets_summary_organizer_wise[str(order.event.creator_id)][str(order.status)]['sales'] += \ 178 order_ticket.quantity * ticket.price 179 tickets_summary_location_wise[str(order.event. 180 searchable_location_name)][str(order. 181 status)]['sales'] += \ 182 order_ticket.quantity * ticket.price 183 184 if path == 'events': 185 return self.render('/gentelella/admin/super_admin/sales/by_events.html', 186 tickets_summary=tickets_summary_event_wise, 187 display_currency=self.display_currency, 188 from_date=from_date, 189 to_date=to_date, 190 orders_summary=orders_summary) 191 elif path == 'organizers': 192 return self.render('/gentelella/admin/super_admin/sales/by_organizer.html', 193 tickets_summary=tickets_summary_organizer_wise, 194 display_currency=self.display_currency, 195 from_date=from_date, 196 to_date=to_date, 197 orders_summary=orders_summary) 198 elif path == 'locations': 199 return self.render('/gentelella/admin/super_admin/sales/by_location.html', 200 tickets_summary=tickets_summary_location_wise, 201 display_currency=self.display_currency, 202 from_date=from_date, 203 to_date=to_date, 204 orders_summary=orders_summary) 205 206 else: 207 abort(404) 208 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/app/models/event_invoice.py b/app/models/event_invoice.py --- a/app/models/event_invoice.py +++ b/app/models/event_invoice.py @@ -69,6 +69,7 @@ self.transaction_id = transaction_id self.paid_via = paid_via self.created_at = datetime.utcnow() + self.status = 'pending' def get_invoice_number(self): return 'I' + str(int(time.mktime(self.created_at.timetuple()))) + '-' + str(self.id) diff --git a/app/views/admin/super_admin/sales.py b/app/views/admin/super_admin/sales.py --- a/app/views/admin/super_admin/sales.py +++ b/app/views/admin/super_admin/sales.py @@ -1,5 +1,5 @@ import copy -from datetime import datetime +from datetime import datetime, timedelta from flask import request from flask import url_for @@ -12,6 +12,7 @@ from app.helpers.payment import get_fee from app.views.admin.super_admin.super_admin_base import SuperAdminBaseView, SALES from app.helpers.ticketing import TicketingManager +from app.helpers.invoicing import InvoicingManager class SuperAdminSalesView(SuperAdminBaseView): PANEL_NAME = SALES @@ -75,6 +76,33 @@ tickets_total=tickets_total, fee_total=fee_total) + @expose('/fees/status/') + def fees_status_view(self): + from_date = request.args.get('from_date') + to_date = request.args.get('to_date') + + if ('from_date' in request.args and not from_date) or ('to_date' in request.args and not to_date) or \ + ('from_date' in request.args and 'to_date' not in request.args) or \ + ('to_date' in request.args and 'from_date' not in request.args): + + return redirect(url_for('.fees_status_view')) + + if from_date and to_date: + invoices = InvoicingManager.get_invoices( + from_date=datetime.strptime(from_date, '%d/%m/%Y'), + to_date=datetime.strptime(to_date, '%d/%m/%Y'), + ) + else: + invoices = InvoicingManager.get_invoices() + + return self.render('/gentelella/admin/super_admin/sales/fees_status.html', + display_currency=self.display_currency, + from_date=from_date, + current_date=datetime.now(), + overdue_date=datetime.now() + timedelta(days=15), + invoices=invoices, + to_date=to_date) + @expose('/<path>/') def sales_by_events_view(self, path):
{"golden_diff": "diff --git a/app/models/event_invoice.py b/app/models/event_invoice.py\n--- a/app/models/event_invoice.py\n+++ b/app/models/event_invoice.py\n@@ -69,6 +69,7 @@\n self.transaction_id = transaction_id\n self.paid_via = paid_via\n self.created_at = datetime.utcnow()\n+ self.status = 'pending'\n \n def get_invoice_number(self):\n return 'I' + str(int(time.mktime(self.created_at.timetuple()))) + '-' + str(self.id)\ndiff --git a/app/views/admin/super_admin/sales.py b/app/views/admin/super_admin/sales.py\n--- a/app/views/admin/super_admin/sales.py\n+++ b/app/views/admin/super_admin/sales.py\n@@ -1,5 +1,5 @@\n import copy\n-from datetime import datetime\n+from datetime import datetime, timedelta\n \n from flask import request\n from flask import url_for\n@@ -12,6 +12,7 @@\n from app.helpers.payment import get_fee\n from app.views.admin.super_admin.super_admin_base import SuperAdminBaseView, SALES\n from app.helpers.ticketing import TicketingManager\n+from app.helpers.invoicing import InvoicingManager\n \n class SuperAdminSalesView(SuperAdminBaseView):\n PANEL_NAME = SALES\n@@ -75,6 +76,33 @@\n tickets_total=tickets_total,\n fee_total=fee_total)\n \n+ @expose('/fees/status/')\n+ def fees_status_view(self):\n+ from_date = request.args.get('from_date')\n+ to_date = request.args.get('to_date')\n+\n+ if ('from_date' in request.args and not from_date) or ('to_date' in request.args and not to_date) or \\\n+ ('from_date' in request.args and 'to_date' not in request.args) or \\\n+ ('to_date' in request.args and 'from_date' not in request.args):\n+\n+ return redirect(url_for('.fees_status_view'))\n+\n+ if from_date and to_date:\n+ invoices = InvoicingManager.get_invoices(\n+ from_date=datetime.strptime(from_date, '%d/%m/%Y'),\n+ to_date=datetime.strptime(to_date, '%d/%m/%Y'),\n+ )\n+ else:\n+ invoices = InvoicingManager.get_invoices()\n+\n+ return self.render('/gentelella/admin/super_admin/sales/fees_status.html',\n+ display_currency=self.display_currency,\n+ from_date=from_date,\n+ current_date=datetime.now(),\n+ overdue_date=datetime.now() + timedelta(days=15),\n+ invoices=invoices,\n+ to_date=to_date)\n+\n @expose('/<path>/')\n def sales_by_events_view(self, path):\n", "issue": "Tab Sales in Admin section with relevant information\nAdd tab \"Sales\" in admin section at `https://open-event-dev.herokuapp.com/admin/`\n\nWe need to display ticket sales in a suitable way to understand the status of the system. How can this be best achieved. Useful information includes:\n- [x] Sales by event\n- [x] Sales by organizer, and email of organizer\n- [x] Sales by location\n- [x] Sale depending on date and period (maybe search option)\n- [x] Fees by event\n- [x] Status/list of (automatic) emails sent to organizer.\n- [x] Status of fees paid (invoice to be sent, pending, late)\n- [ ] What other information is useful?\n\n", "before_files": [{"content": "import uuid\nfrom datetime import datetime\nimport time\n\nfrom app.helpers.helpers import get_count\nfrom . 
import db\n\ndef get_new_identifier():\n identifier = str(uuid.uuid4())\n count = get_count(EventInvoice.query.filter_by(identifier=identifier))\n if count == 0:\n return identifier\n else:\n return get_new_identifier()\n\nclass EventInvoice(db.Model):\n \"\"\"\n Stripe authorization information for an event.\n \"\"\"\n __tablename__ = 'event_invoices'\n\n id = db.Column(db.Integer, primary_key=True)\n identifier = db.Column(db.String, unique=True)\n amount = db.Column(db.Float)\n address = db.Column(db.String)\n city = db.Column(db.String)\n state = db.Column(db.String)\n country = db.Column(db.String)\n zipcode = db.Column(db.String)\n\n user_id = db.Column(db.Integer, db.ForeignKey('user.id'))\n event_id = db.Column(db.Integer, db.ForeignKey('events.id', ondelete='SET NULL'))\n\n created_at = db.Column(db.DateTime)\n completed_at = db.Column(db.DateTime, nullable=True, default=None)\n transaction_id = db.Column(db.String)\n paid_via = db.Column(db.String)\n payment_mode = db.Column(db.String)\n brand = db.Column(db.String)\n exp_month = db.Column(db.Integer)\n exp_year = db.Column(db.Integer)\n last4 = db.Column(db.String)\n stripe_token = db.Column(db.String)\n paypal_token = db.Column(db.String)\n status = db.Column(db.String)\n\n event = db.relationship('Event', backref='invoices')\n user = db.relationship('User', backref='invoices')\n\n def __init__(self,\n amount=None,\n address=None,\n city=city,\n state=None,\n country=None,\n zipcode=None,\n transaction_id=None,\n paid_via=None,\n user_id=None,\n event_id=None):\n self.identifier = get_new_identifier()\n self.amount = amount\n self.address = address\n self.state = state\n self.country = country\n self.zipcode = zipcode\n self.user_id = user_id\n self.event_id = event_id\n self.transaction_id = transaction_id\n self.paid_via = paid_via\n self.created_at = datetime.utcnow()\n\n def get_invoice_number(self):\n return 'I' + str(int(time.mktime(self.created_at.timetuple()))) + '-' + str(self.id)\n\n def __repr__(self):\n return '<EventInvoice %r>' % self.stripe_user_id\n\n def __str__(self):\n return unicode(self).encode('utf-8')\n\n def __unicode__(self):\n return self.stripe_user_id\n", "path": "app/models/event_invoice.py"}, {"content": "import copy\nfrom datetime import datetime\n\nfrom flask import request\nfrom flask import url_for\nfrom flask_admin import expose\nfrom werkzeug.exceptions import abort\nfrom werkzeug.utils import redirect\n\nfrom app import forex\nfrom app.helpers.data_getter import DataGetter\nfrom app.helpers.payment import get_fee\nfrom app.views.admin.super_admin.super_admin_base import SuperAdminBaseView, SALES\nfrom app.helpers.ticketing import TicketingManager\n\nclass SuperAdminSalesView(SuperAdminBaseView):\n PANEL_NAME = SALES\n display_currency = 'USD'\n\n @expose('/')\n def index(self):\n return redirect(url_for('.sales_by_events_view', path='events'))\n\n @expose('/fees/')\n def fees_by_events_view(self):\n from_date = request.args.get('from_date')\n to_date = request.args.get('to_date')\n\n if ('from_date' in request.args and not from_date) or ('to_date' in request.args and not to_date) or \\\n ('from_date' in request.args and 'to_date' not in request.args) or \\\n ('to_date' in request.args and 'from_date' not in request.args):\n\n return redirect(url_for('.fees_by_events_view'))\n\n if from_date and to_date:\n orders = TicketingManager.get_orders(\n from_date=datetime.strptime(from_date, '%d/%m/%Y'),\n to_date=datetime.strptime(to_date, '%d/%m/%Y'),\n status='completed'\n )\n else:\n orders = 
TicketingManager.get_orders(status='completed')\n\n events = DataGetter.get_all_events()\n\n fee_summary = {}\n for event in events:\n fee_summary[str(event.id)] = {\n 'name': event.name,\n 'payment_currency': event.payment_currency,\n 'fee_rate': get_fee(event.payment_currency),\n 'fee_amount': 0,\n 'tickets_count': 0\n }\n\n fee_total = 0\n tickets_total = 0\n\n for order in orders:\n for order_ticket in order.tickets:\n fee_summary[str(order.event.id)]['tickets_count'] += order_ticket.quantity\n tickets_total += order_ticket.quantity\n ticket = TicketingManager.get_ticket(order_ticket.ticket_id)\n if order.paid_via != 'free' and order.amount > 0 and ticket.price > 0:\n fee = ticket.price * (get_fee(order.event.payment_currency)/100)\n fee = forex(order.event.payment_currency, self.display_currency, fee)\n fee_summary[str(order.event.id)]['fee_amount'] += fee\n fee_total += fee\n\n return self.render('/gentelella/admin/super_admin/sales/fees.html',\n fee_summary=fee_summary,\n display_currency=self.display_currency,\n from_date=from_date,\n to_date=to_date,\n tickets_total=tickets_total,\n fee_total=fee_total)\n\n @expose('/<path>/')\n def sales_by_events_view(self, path):\n\n from_date = request.args.get('from_date')\n to_date = request.args.get('to_date')\n\n if ('from_date' in request.args and not from_date) or ('to_date' in request.args and not to_date) or \\\n ('from_date' in request.args and 'to_date' not in request.args) or \\\n ('to_date' in request.args and 'from_date' not in request.args):\n\n return redirect(url_for('.sales_by_events_view', path=path))\n\n if from_date and to_date:\n orders = TicketingManager.get_orders(\n from_date=datetime.strptime(from_date, '%d/%m/%Y'),\n to_date=datetime.strptime(to_date, '%d/%m/%Y')\n )\n else:\n orders = TicketingManager.get_orders()\n\n events = DataGetter.get_all_events()\n\n completed_count = 0\n completed_amount = 0\n tickets_count = 0\n\n orders_summary = {\n 'completed': {\n 'class': 'success',\n 'tickets_count': 0,\n 'orders_count': 0,\n 'total_sales': 0\n },\n 'pending': {\n 'class': 'warning',\n 'tickets_count': 0,\n 'orders_count': 0,\n 'total_sales': 0\n },\n 'expired': {\n 'class': 'danger',\n 'tickets_count': 0,\n 'orders_count': 0,\n 'total_sales': 0\n }\n }\n\n tickets_summary_event_wise = {}\n tickets_summary_organizer_wise = {}\n tickets_summary_location_wise = {}\n for event in events:\n tickets_summary_event_wise[str(event.id)] = {\n 'name': event.name,\n 'payment_currency': event.payment_currency,\n 'completed': {\n 'tickets_count': 0,\n 'sales': 0\n },\n 'pending': {\n 'tickets_count': 0,\n 'sales': 0\n },\n 'expired': {\n 'class': 'danger',\n 'tickets_count': 0,\n 'sales': 0\n }\n }\n tickets_summary_organizer_wise[str(event.creator_id)] = \\\n copy.deepcopy(tickets_summary_event_wise[str(event.id)])\n if event.creator:\n tickets_summary_organizer_wise[str(event.creator_id)]['name'] = event.creator.email\n\n tickets_summary_location_wise[unicode(event.searchable_location_name)] = \\\n copy.deepcopy(tickets_summary_event_wise[str(event.id)])\n tickets_summary_location_wise[unicode(event.searchable_location_name)]['name'] = \\\n event.searchable_location_name\n\n for order in orders:\n if order.status == 'initialized':\n order.status = 'pending'\n orders_summary[str(order.status)]['orders_count'] += 1\n orders_summary[str(order.status)]['total_sales'] += forex(order.event.payment_currency,\n self.display_currency, order.amount)\n for order_ticket in order.tickets:\n orders_summary[str(order.status)]['tickets_count'] 
+= order_ticket.quantity\n ticket = TicketingManager.get_ticket(order_ticket.ticket_id)\n tickets_summary_event_wise[str(order.event_id)][str(order.status)]['tickets_count'] \\\n += order_ticket.quantity\n tickets_summary_organizer_wise[str(order.event.creator_id)][str(order.status)]['tickets_count'] \\\n += order_ticket.quantity\n tickets_summary_location_wise[str(order\n .event.searchable_location_name)][str(order\n .status)]['tickets_count'] \\\n += order_ticket.quantity\n\n if order.paid_via != 'free' and order.amount > 0:\n tickets_summary_event_wise[str(order.event_id)][str(order.status)]['sales'] += \\\n order_ticket.quantity * ticket.price\n tickets_summary_organizer_wise[str(order.event.creator_id)][str(order.status)]['sales'] += \\\n order_ticket.quantity * ticket.price\n tickets_summary_location_wise[str(order.event.\n searchable_location_name)][str(order.\n status)]['sales'] += \\\n order_ticket.quantity * ticket.price\n\n if path == 'events':\n return self.render('/gentelella/admin/super_admin/sales/by_events.html',\n tickets_summary=tickets_summary_event_wise,\n display_currency=self.display_currency,\n from_date=from_date,\n to_date=to_date,\n orders_summary=orders_summary)\n elif path == 'organizers':\n return self.render('/gentelella/admin/super_admin/sales/by_organizer.html',\n tickets_summary=tickets_summary_organizer_wise,\n display_currency=self.display_currency,\n from_date=from_date,\n to_date=to_date,\n orders_summary=orders_summary)\n elif path == 'locations':\n return self.render('/gentelella/admin/super_admin/sales/by_location.html',\n tickets_summary=tickets_summary_location_wise,\n display_currency=self.display_currency,\n from_date=from_date,\n to_date=to_date,\n orders_summary=orders_summary)\n\n else:\n abort(404)\n", "path": "app/views/admin/super_admin/sales.py"}], "after_files": [{"content": "import uuid\nfrom datetime import datetime\nimport time\n\nfrom app.helpers.helpers import get_count\nfrom . 
import db\n\ndef get_new_identifier():\n identifier = str(uuid.uuid4())\n count = get_count(EventInvoice.query.filter_by(identifier=identifier))\n if count == 0:\n return identifier\n else:\n return get_new_identifier()\n\nclass EventInvoice(db.Model):\n \"\"\"\n Stripe authorization information for an event.\n \"\"\"\n __tablename__ = 'event_invoices'\n\n id = db.Column(db.Integer, primary_key=True)\n identifier = db.Column(db.String, unique=True)\n amount = db.Column(db.Float)\n address = db.Column(db.String)\n city = db.Column(db.String)\n state = db.Column(db.String)\n country = db.Column(db.String)\n zipcode = db.Column(db.String)\n\n user_id = db.Column(db.Integer, db.ForeignKey('user.id'))\n event_id = db.Column(db.Integer, db.ForeignKey('events.id', ondelete='SET NULL'))\n\n created_at = db.Column(db.DateTime)\n completed_at = db.Column(db.DateTime, nullable=True, default=None)\n transaction_id = db.Column(db.String)\n paid_via = db.Column(db.String)\n payment_mode = db.Column(db.String)\n brand = db.Column(db.String)\n exp_month = db.Column(db.Integer)\n exp_year = db.Column(db.Integer)\n last4 = db.Column(db.String)\n stripe_token = db.Column(db.String)\n paypal_token = db.Column(db.String)\n status = db.Column(db.String)\n\n event = db.relationship('Event', backref='invoices')\n user = db.relationship('User', backref='invoices')\n\n def __init__(self,\n amount=None,\n address=None,\n city=city,\n state=None,\n country=None,\n zipcode=None,\n transaction_id=None,\n paid_via=None,\n user_id=None,\n event_id=None):\n self.identifier = get_new_identifier()\n self.amount = amount\n self.address = address\n self.state = state\n self.country = country\n self.zipcode = zipcode\n self.user_id = user_id\n self.event_id = event_id\n self.transaction_id = transaction_id\n self.paid_via = paid_via\n self.created_at = datetime.utcnow()\n self.status = 'pending'\n\n def get_invoice_number(self):\n return 'I' + str(int(time.mktime(self.created_at.timetuple()))) + '-' + str(self.id)\n\n def __repr__(self):\n return '<EventInvoice %r>' % self.stripe_user_id\n\n def __str__(self):\n return unicode(self).encode('utf-8')\n\n def __unicode__(self):\n return self.stripe_user_id\n", "path": "app/models/event_invoice.py"}, {"content": "import copy\nfrom datetime import datetime, timedelta\n\nfrom flask import request\nfrom flask import url_for\nfrom flask_admin import expose\nfrom werkzeug.exceptions import abort\nfrom werkzeug.utils import redirect\n\nfrom app import forex\nfrom app.helpers.data_getter import DataGetter\nfrom app.helpers.payment import get_fee\nfrom app.views.admin.super_admin.super_admin_base import SuperAdminBaseView, SALES\nfrom app.helpers.ticketing import TicketingManager\nfrom app.helpers.invoicing import InvoicingManager\n\nclass SuperAdminSalesView(SuperAdminBaseView):\n PANEL_NAME = SALES\n display_currency = 'USD'\n\n @expose('/')\n def index(self):\n return redirect(url_for('.sales_by_events_view', path='events'))\n\n @expose('/fees/')\n def fees_by_events_view(self):\n from_date = request.args.get('from_date')\n to_date = request.args.get('to_date')\n\n if ('from_date' in request.args and not from_date) or ('to_date' in request.args and not to_date) or \\\n ('from_date' in request.args and 'to_date' not in request.args) or \\\n ('to_date' in request.args and 'from_date' not in request.args):\n\n return redirect(url_for('.fees_by_events_view'))\n\n if from_date and to_date:\n orders = TicketingManager.get_orders(\n from_date=datetime.strptime(from_date, '%d/%m/%Y'),\n 
to_date=datetime.strptime(to_date, '%d/%m/%Y'),\n status='completed'\n )\n else:\n orders = TicketingManager.get_orders(status='completed')\n\n events = DataGetter.get_all_events()\n\n fee_summary = {}\n for event in events:\n fee_summary[str(event.id)] = {\n 'name': event.name,\n 'payment_currency': event.payment_currency,\n 'fee_rate': get_fee(event.payment_currency),\n 'fee_amount': 0,\n 'tickets_count': 0\n }\n\n fee_total = 0\n tickets_total = 0\n\n for order in orders:\n for order_ticket in order.tickets:\n fee_summary[str(order.event.id)]['tickets_count'] += order_ticket.quantity\n tickets_total += order_ticket.quantity\n ticket = TicketingManager.get_ticket(order_ticket.ticket_id)\n if order.paid_via != 'free' and order.amount > 0 and ticket.price > 0:\n fee = ticket.price * (get_fee(order.event.payment_currency)/100)\n fee = forex(order.event.payment_currency, self.display_currency, fee)\n fee_summary[str(order.event.id)]['fee_amount'] += fee\n fee_total += fee\n\n return self.render('/gentelella/admin/super_admin/sales/fees.html',\n fee_summary=fee_summary,\n display_currency=self.display_currency,\n from_date=from_date,\n to_date=to_date,\n tickets_total=tickets_total,\n fee_total=fee_total)\n\n @expose('/fees/status/')\n def fees_status_view(self):\n from_date = request.args.get('from_date')\n to_date = request.args.get('to_date')\n\n if ('from_date' in request.args and not from_date) or ('to_date' in request.args and not to_date) or \\\n ('from_date' in request.args and 'to_date' not in request.args) or \\\n ('to_date' in request.args and 'from_date' not in request.args):\n\n return redirect(url_for('.fees_status_view'))\n\n if from_date and to_date:\n invoices = InvoicingManager.get_invoices(\n from_date=datetime.strptime(from_date, '%d/%m/%Y'),\n to_date=datetime.strptime(to_date, '%d/%m/%Y'),\n )\n else:\n invoices = InvoicingManager.get_invoices()\n\n return self.render('/gentelella/admin/super_admin/sales/fees_status.html',\n display_currency=self.display_currency,\n from_date=from_date,\n current_date=datetime.now(),\n overdue_date=datetime.now() + timedelta(days=15),\n invoices=invoices,\n to_date=to_date)\n\n @expose('/<path>/')\n def sales_by_events_view(self, path):\n\n from_date = request.args.get('from_date')\n to_date = request.args.get('to_date')\n\n if ('from_date' in request.args and not from_date) or ('to_date' in request.args and not to_date) or \\\n ('from_date' in request.args and 'to_date' not in request.args) or \\\n ('to_date' in request.args and 'from_date' not in request.args):\n\n return redirect(url_for('.sales_by_events_view', path=path))\n\n if from_date and to_date:\n orders = TicketingManager.get_orders(\n from_date=datetime.strptime(from_date, '%d/%m/%Y'),\n to_date=datetime.strptime(to_date, '%d/%m/%Y')\n )\n else:\n orders = TicketingManager.get_orders()\n\n events = DataGetter.get_all_events()\n\n completed_count = 0\n completed_amount = 0\n tickets_count = 0\n\n orders_summary = {\n 'completed': {\n 'class': 'success',\n 'tickets_count': 0,\n 'orders_count': 0,\n 'total_sales': 0\n },\n 'pending': {\n 'class': 'warning',\n 'tickets_count': 0,\n 'orders_count': 0,\n 'total_sales': 0\n },\n 'expired': {\n 'class': 'danger',\n 'tickets_count': 0,\n 'orders_count': 0,\n 'total_sales': 0\n }\n }\n\n tickets_summary_event_wise = {}\n tickets_summary_organizer_wise = {}\n tickets_summary_location_wise = {}\n for event in events:\n tickets_summary_event_wise[str(event.id)] = {\n 'name': event.name,\n 'payment_currency': event.payment_currency,\n 
'completed': {\n 'tickets_count': 0,\n 'sales': 0\n },\n 'pending': {\n 'tickets_count': 0,\n 'sales': 0\n },\n 'expired': {\n 'class': 'danger',\n 'tickets_count': 0,\n 'sales': 0\n }\n }\n tickets_summary_organizer_wise[str(event.creator_id)] = \\\n copy.deepcopy(tickets_summary_event_wise[str(event.id)])\n if event.creator:\n tickets_summary_organizer_wise[str(event.creator_id)]['name'] = event.creator.email\n\n tickets_summary_location_wise[unicode(event.searchable_location_name)] = \\\n copy.deepcopy(tickets_summary_event_wise[str(event.id)])\n tickets_summary_location_wise[unicode(event.searchable_location_name)]['name'] = \\\n event.searchable_location_name\n\n for order in orders:\n if order.status == 'initialized':\n order.status = 'pending'\n orders_summary[str(order.status)]['orders_count'] += 1\n orders_summary[str(order.status)]['total_sales'] += forex(order.event.payment_currency,\n self.display_currency, order.amount)\n for order_ticket in order.tickets:\n orders_summary[str(order.status)]['tickets_count'] += order_ticket.quantity\n ticket = TicketingManager.get_ticket(order_ticket.ticket_id)\n tickets_summary_event_wise[str(order.event_id)][str(order.status)]['tickets_count'] \\\n += order_ticket.quantity\n tickets_summary_organizer_wise[str(order.event.creator_id)][str(order.status)]['tickets_count'] \\\n += order_ticket.quantity\n tickets_summary_location_wise[str(order\n .event.searchable_location_name)][str(order\n .status)]['tickets_count'] \\\n += order_ticket.quantity\n\n if order.paid_via != 'free' and order.amount > 0:\n tickets_summary_event_wise[str(order.event_id)][str(order.status)]['sales'] += \\\n order_ticket.quantity * ticket.price\n tickets_summary_organizer_wise[str(order.event.creator_id)][str(order.status)]['sales'] += \\\n order_ticket.quantity * ticket.price\n tickets_summary_location_wise[str(order.event.\n searchable_location_name)][str(order.\n status)]['sales'] += \\\n order_ticket.quantity * ticket.price\n\n if path == 'events':\n return self.render('/gentelella/admin/super_admin/sales/by_events.html',\n tickets_summary=tickets_summary_event_wise,\n display_currency=self.display_currency,\n from_date=from_date,\n to_date=to_date,\n orders_summary=orders_summary)\n elif path == 'organizers':\n return self.render('/gentelella/admin/super_admin/sales/by_organizer.html',\n tickets_summary=tickets_summary_organizer_wise,\n display_currency=self.display_currency,\n from_date=from_date,\n to_date=to_date,\n orders_summary=orders_summary)\n elif path == 'locations':\n return self.render('/gentelella/admin/super_admin/sales/by_location.html',\n tickets_summary=tickets_summary_location_wise,\n display_currency=self.display_currency,\n from_date=from_date,\n to_date=to_date,\n orders_summary=orders_summary)\n\n else:\n abort(404)\n", "path": "app/views/admin/super_admin/sales.py"}]}
3,401
595
gh_patches_debug_39867
rasdani/github-patches
git_diff
localstack__localstack-2313
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Docker and es::CreateElasticsearchDomain / elasticsearch endpoint Hey all! I'm using localstack for a project where I consume data from kinesis, process it and put it in elasticsearch. In the later versions of localstack 'elasticsearch' was removed and is now lazily loaded after call to es::CreateElasticsearchDomain (at least that's how I understand it from #1881, #1865, #1870). I'm running localstack in a docker using testcontainers (which gives me dynamic port mappings etc). My processing code is running on localhost. The problem I'm facing now is that the response from es::CreateElasticsearchDomain returns an elasticsearch endpoint like: http://[docker container id]:port. With my set up, this gives me two issues: 1. [docker container id] host is not (I believe) reachable from localhost unless I add it to /etc/hosts or something like that. I can surely "hack" my way around this by just replacing [docker container id] with localhost. 2. It says here: https://github.com/localstack/localstack/issues/1881#issuecomment-567495056 that we should not assume that elasticsearch will run on a static port in the future. When running localstack in a docker, how would I know which port to expose then in order to reach elasticsearch from localhost? I currently have my tests running using 1.10.7 but there I just make a dummy call to es::CreateElasticsearchDomain and hardcode elasticsearch endpoint to http://localhost:4571. (It seems like it always chooses 4571 as of now?) Thanks Stefan --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `localstack/services/es/es_api.py` Content: ``` 1 import json 2 import time 3 import logging 4 from random import randint 5 from flask import Flask, jsonify, request, make_response 6 from localstack.utils import persistence 7 from localstack.services import generic_proxy, install 8 from localstack.utils.aws import aws_stack 9 from localstack.constants import TEST_AWS_ACCOUNT_ID, ELASTICSEARCH_DEFAULT_VERSION 10 from localstack.utils.common import to_str 11 from localstack.utils.tagging import TaggingService 12 from localstack.utils.analytics import event_publisher 13 from localstack.services.plugins import check_infra 14 15 LOG = logging.getLogger(__name__) 16 17 APP_NAME = 'es_api' 18 API_PREFIX = '/2015-01-01' 19 20 DEFAULT_ES_VERSION = '7.1' 21 22 ES_DOMAINS = {} 23 24 TAGS = TaggingService() 25 26 app = Flask(APP_NAME) 27 app.url_map.strict_slashes = False 28 29 30 def error_response(error_type, code=400, message='Unknown error.'): 31 if not message: 32 if error_type == 'ResourceNotFoundException': 33 message = 'Resource not found.' 34 elif error_type == 'ResourceAlreadyExistsException': 35 message = 'Resource already exists.' 
36 response = make_response(jsonify({'error': message})) 37 response.headers['x-amzn-errortype'] = error_type 38 return response, code 39 40 41 def get_domain_config_status(): 42 return { 43 'CreationDate': '%.2f' % time.time(), 44 'PendingDeletion': False, 45 'State': 'Active', 46 'UpdateDate': '%.2f' % time.time(), 47 'UpdateVersion': randint(1, 100) 48 } 49 50 51 def get_domain_config(domain_name): 52 config_status = get_domain_config_status() 53 return { 54 'DomainConfig': { 55 'AccessPolicies': { 56 'Options': '{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":"arn:aws:iam::%s:root"},"Action":"es:*","Resource":"arn:aws:es:%s:%s:domain/%s/*"}]}' % (TEST_AWS_ACCOUNT_ID, aws_stack.get_region(), TEST_AWS_ACCOUNT_ID, domain_name), # noqa: E501 57 'Status': config_status 58 }, 59 'AdvancedOptions': { 60 'Options': { 61 'indices.fielddata.cache.size': '', 62 'rest.action.multi.allow_explicit_index': 'true' 63 }, 64 'Status': config_status 65 }, 66 'EBSOptions': { 67 'Options': { 68 'EBSEnabled': True, 69 'EncryptionEnabled': False, 70 'Iops': 0, 71 'VolumeSize': 10, 72 'VolumeType': 'gp2' 73 }, 74 'Status': config_status 75 }, 76 'ElasticsearchClusterConfig': { 77 'Options': { 78 'DedicatedMasterCount': 1, 79 'DedicatedMasterEnabled': True, 80 'DedicatedMasterType': 'm3.medium.elasticsearch', 81 'InstanceCount': 1, 82 'InstanceType': 'm3.medium.elasticsearch', 83 'ZoneAwarenessEnabled': False 84 }, 85 'Status': config_status 86 }, 87 'CognitoOptions': { 88 'Enabled': False 89 }, 90 'ElasticsearchVersion': { 91 'Options': '5.3', 92 'Status': config_status 93 }, 94 'EncryptionAtRestOptions': { 95 'Options': { 96 'Enabled': False, 97 'KmsKeyId': '' 98 }, 99 'Status': config_status 100 }, 101 'LogPublishingOptions': { 102 'Options': { 103 'INDEX_SLOW_LOGS': { 104 'CloudWatchLogsLogGroupArn': 'arn:aws:logs:%s:%s:log-group:sample-domain' % (aws_stack.get_region(), TEST_AWS_ACCOUNT_ID), # noqa: E501 105 'Enabled': False 106 }, 107 'SEARCH_SLOW_LOGS': { 108 'CloudWatchLogsLogGroupArn': 'arn:aws:logs:%s:%s:log-group:sample-domain' % (aws_stack.get_region(), TEST_AWS_ACCOUNT_ID), # noqa: E501 109 'Enabled': False, 110 } 111 }, 112 'Status': config_status 113 }, 114 'SnapshotOptions': { 115 'Options': { 116 'AutomatedSnapshotStartHour': randint(0, 23) 117 }, 118 'Status': config_status 119 }, 120 'VPCOptions': { 121 'Options': { 122 'AvailabilityZones': [ 123 'us-east-1b' 124 ], 125 'SecurityGroupIds': [ 126 'sg-12345678' 127 ], 128 'SubnetIds': [ 129 'subnet-12345678' 130 ], 131 'VPCId': 'vpc-12345678' 132 }, 133 'Status': config_status 134 } 135 } 136 } 137 138 139 def get_domain_status(domain_name, deleted=False): 140 status = ES_DOMAINS.get(domain_name) or {} 141 return { 142 'DomainStatus': { 143 'ARN': 'arn:aws:es:%s:%s:domain/%s' % (aws_stack.get_region(), TEST_AWS_ACCOUNT_ID, domain_name), 144 'Created': True, 145 'Deleted': deleted, 146 'DomainId': '%s/%s' % (TEST_AWS_ACCOUNT_ID, domain_name), 147 'DomainName': domain_name, 148 'ElasticsearchClusterConfig': { 149 'DedicatedMasterCount': 1, 150 'DedicatedMasterEnabled': True, 151 'DedicatedMasterType': 'm3.medium.elasticsearch', 152 'InstanceCount': 1, 153 'InstanceType': 'm3.medium.elasticsearch', 154 'ZoneAwarenessEnabled': False 155 }, 156 'ElasticsearchVersion': status.get('ElasticsearchVersion') or DEFAULT_ES_VERSION, 157 'Endpoint': aws_stack.get_elasticsearch_endpoint(domain_name), 158 'Processing': False, 159 'EBSOptions': { 160 'EBSEnabled': True, 161 'VolumeType': 'gp2', 162 'VolumeSize': 10, 163 'Iops': 0 164 
}, 165 'CognitoOptions': { 166 'Enabled': False 167 }, 168 } 169 } 170 171 172 def get_install_version_for_api_version(version): 173 result = ELASTICSEARCH_DEFAULT_VERSION 174 if version.startswith('6.'): 175 result = '6.7.0' 176 elif version == '7.4': 177 result = '7.4.0' 178 if not result.startswith(result): 179 LOG.info('Elasticsearch version %s not yet supported, defaulting to %s' % (version, result)) 180 return result 181 182 183 def start_elasticsearch_instance(version): 184 # Note: keep imports here to avoid circular dependencies 185 from localstack.services.es import es_starter 186 187 # install ES version 188 install_version = get_install_version_for_api_version(version) 189 install.install_elasticsearch(install_version) 190 191 t1 = es_starter.start_elasticsearch(asynchronous=True, version=install_version) 192 # sleep some time to give Elasticsearch enough time to come up 193 time.sleep(8) 194 # ensure that all infra components are up and running 195 check_infra(apis=[], additional_checks=[es_starter.check_elasticsearch]) 196 return t1 197 198 199 def cleanup_elasticsearch_instance(status): 200 # Note: keep imports here to avoid circular dependencies 201 from localstack.services.es import es_starter 202 es_starter.stop_elasticsearch() 203 204 205 @app.route('%s/domain' % API_PREFIX, methods=['GET']) 206 def list_domain_names(): 207 result = { 208 'DomainNames': [{'DomainName': name} for name in ES_DOMAINS.keys()] 209 } 210 return jsonify(result) 211 212 213 @app.route('%s/es/domain' % API_PREFIX, methods=['POST']) 214 def create_domain(): 215 data = json.loads(to_str(request.data)) 216 domain_name = data['DomainName'] 217 if domain_name in ES_DOMAINS: 218 return error_response(error_type='ResourceAlreadyExistsException') 219 ES_DOMAINS[domain_name] = data 220 # start actual Elasticsearch instance 221 version = data.get('ElasticsearchVersion') or DEFAULT_ES_VERSION 222 start_elasticsearch_instance(version=version) 223 result = get_domain_status(domain_name) 224 225 # record event 226 event_publisher.fire_event(event_publisher.EVENT_ES_CREATE_DOMAIN, 227 payload={'n': event_publisher.get_hash(domain_name)}) 228 persistence.record('es', request=request) 229 230 return jsonify(result) 231 232 233 @app.route('%s/es/domain/<domain_name>' % API_PREFIX, methods=['GET']) 234 def describe_domain(domain_name): 235 if domain_name not in ES_DOMAINS: 236 return error_response(error_type='ResourceNotFoundException') 237 result = get_domain_status(domain_name) 238 return jsonify(result) 239 240 241 @app.route('%s/es/domain-info' % API_PREFIX, methods=['POST']) 242 def describe_domains(): 243 data = json.loads(to_str(request.data)) 244 result = [] 245 domain_names = data.get('DomainNames', []) 246 for domain_name in ES_DOMAINS: 247 if domain_name in domain_names: 248 status = get_domain_status(domain_name) 249 status = status.get('DomainStatus') or status 250 result.append(status) 251 result = {'DomainStatusList': result} 252 return jsonify(result) 253 254 255 @app.route('%s/es/domain/<domain_name>/config' % API_PREFIX, methods=['GET', 'POST']) 256 def domain_config(domain_name): 257 config = get_domain_config(domain_name) 258 return jsonify(config) 259 260 261 @app.route('%s/es/domain/<domain_name>' % API_PREFIX, methods=['DELETE']) 262 def delete_domain(domain_name): 263 if domain_name not in ES_DOMAINS: 264 return error_response(error_type='ResourceNotFoundException') 265 result = get_domain_status(domain_name, deleted=True) 266 status = ES_DOMAINS.pop(domain_name) 267 if not ES_DOMAINS: 268 
cleanup_elasticsearch_instance(status) 269 270 # record event 271 event_publisher.fire_event(event_publisher.EVENT_ES_DELETE_DOMAIN, 272 payload={'n': event_publisher.get_hash(domain_name)}) 273 persistence.record('es', request=request) 274 275 return jsonify(result) 276 277 278 @app.route('%s/es/compatibleVersions' % API_PREFIX, methods=['GET']) 279 def get_compatible_versions(): 280 result = [{ 281 'SourceVersion': '6.5', 282 'TargetVersions': ['6.7', '6.8'] 283 }, { 284 'SourceVersion': '6.7', 285 'TargetVersions': ['6.8'] 286 }, { 287 'SourceVersion': '6.8', 288 'TargetVersions': ['7.1'] 289 }] 290 return jsonify({'CompatibleElasticsearchVersions': result}) 291 292 293 @app.route('%s/tags' % API_PREFIX, methods=['GET', 'POST']) 294 def add_list_tags(): 295 if request.method == 'POST': 296 data = json.loads(to_str(request.data) or '{}') 297 arn = data.get('ARN') 298 TAGS.tag_resource(arn, data.get('TagList', [])) 299 if request.method == 'GET' and request.args.get('arn'): 300 arn = request.args.get('arn') 301 tags = TAGS.list_tags_for_resource(arn) 302 response = { 303 'TagList': tags.get('Tags') 304 } 305 return jsonify(response) 306 307 return jsonify({}) 308 309 310 def serve(port, quiet=True): 311 generic_proxy.serve_flask_app(app=app, port=port, quiet=quiet) 312 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/localstack/services/es/es_api.py b/localstack/services/es/es_api.py --- a/localstack/services/es/es_api.py +++ b/localstack/services/es/es_api.py @@ -3,11 +3,12 @@ import logging from random import randint from flask import Flask, jsonify, request, make_response +from localstack import config from localstack.utils import persistence from localstack.services import generic_proxy, install from localstack.utils.aws import aws_stack from localstack.constants import TEST_AWS_ACCOUNT_ID, ELASTICSEARCH_DEFAULT_VERSION -from localstack.utils.common import to_str +from localstack.utils.common import to_str, FuncThread, get_service_protocol from localstack.utils.tagging import TaggingService from localstack.utils.analytics import event_publisher from localstack.services.plugins import check_infra @@ -138,10 +139,11 @@ def get_domain_status(domain_name, deleted=False): status = ES_DOMAINS.get(domain_name) or {} + endpoint = '%s://%s:%s' % (get_service_protocol(), config.HOSTNAME_EXTERNAL, config.PORT_ELASTICSEARCH) return { 'DomainStatus': { 'ARN': 'arn:aws:es:%s:%s:domain/%s' % (aws_stack.get_region(), TEST_AWS_ACCOUNT_ID, domain_name), - 'Created': True, + 'Created': status.get('Created', True), 'Deleted': deleted, 'DomainId': '%s/%s' % (TEST_AWS_ACCOUNT_ID, domain_name), 'DomainName': domain_name, @@ -154,7 +156,7 @@ 'ZoneAwarenessEnabled': False }, 'ElasticsearchVersion': status.get('ElasticsearchVersion') or DEFAULT_ES_VERSION, - 'Endpoint': aws_stack.get_elasticsearch_endpoint(domain_name), + 'Endpoint': endpoint, 'Processing': False, 'EBSOptions': { 'EBSEnabled': True, @@ -217,9 +219,18 @@ if domain_name in ES_DOMAINS: return error_response(error_type='ResourceAlreadyExistsException') ES_DOMAINS[domain_name] = data - # start actual Elasticsearch instance - version = data.get('ElasticsearchVersion') or DEFAULT_ES_VERSION - start_elasticsearch_instance(version=version) + data['Created'] = False + + def do_start(*args): + # start actual Elasticsearch instance + version = data.get('ElasticsearchVersion') or DEFAULT_ES_VERSION + start_elasticsearch_instance(version=version) + data['Created'] = True + + # start ES instance in the background + FuncThread(do_start).start() + # sleep a short while, then return + time.sleep(5) result = get_domain_status(domain_name) # record event
{"golden_diff": "diff --git a/localstack/services/es/es_api.py b/localstack/services/es/es_api.py\n--- a/localstack/services/es/es_api.py\n+++ b/localstack/services/es/es_api.py\n@@ -3,11 +3,12 @@\n import logging\n from random import randint\n from flask import Flask, jsonify, request, make_response\n+from localstack import config\n from localstack.utils import persistence\n from localstack.services import generic_proxy, install\n from localstack.utils.aws import aws_stack\n from localstack.constants import TEST_AWS_ACCOUNT_ID, ELASTICSEARCH_DEFAULT_VERSION\n-from localstack.utils.common import to_str\n+from localstack.utils.common import to_str, FuncThread, get_service_protocol\n from localstack.utils.tagging import TaggingService\n from localstack.utils.analytics import event_publisher\n from localstack.services.plugins import check_infra\n@@ -138,10 +139,11 @@\n \n def get_domain_status(domain_name, deleted=False):\n status = ES_DOMAINS.get(domain_name) or {}\n+ endpoint = '%s://%s:%s' % (get_service_protocol(), config.HOSTNAME_EXTERNAL, config.PORT_ELASTICSEARCH)\n return {\n 'DomainStatus': {\n 'ARN': 'arn:aws:es:%s:%s:domain/%s' % (aws_stack.get_region(), TEST_AWS_ACCOUNT_ID, domain_name),\n- 'Created': True,\n+ 'Created': status.get('Created', True),\n 'Deleted': deleted,\n 'DomainId': '%s/%s' % (TEST_AWS_ACCOUNT_ID, domain_name),\n 'DomainName': domain_name,\n@@ -154,7 +156,7 @@\n 'ZoneAwarenessEnabled': False\n },\n 'ElasticsearchVersion': status.get('ElasticsearchVersion') or DEFAULT_ES_VERSION,\n- 'Endpoint': aws_stack.get_elasticsearch_endpoint(domain_name),\n+ 'Endpoint': endpoint,\n 'Processing': False,\n 'EBSOptions': {\n 'EBSEnabled': True,\n@@ -217,9 +219,18 @@\n if domain_name in ES_DOMAINS:\n return error_response(error_type='ResourceAlreadyExistsException')\n ES_DOMAINS[domain_name] = data\n- # start actual Elasticsearch instance\n- version = data.get('ElasticsearchVersion') or DEFAULT_ES_VERSION\n- start_elasticsearch_instance(version=version)\n+ data['Created'] = False\n+\n+ def do_start(*args):\n+ # start actual Elasticsearch instance\n+ version = data.get('ElasticsearchVersion') or DEFAULT_ES_VERSION\n+ start_elasticsearch_instance(version=version)\n+ data['Created'] = True\n+\n+ # start ES instance in the background\n+ FuncThread(do_start).start()\n+ # sleep a short while, then return\n+ time.sleep(5)\n result = get_domain_status(domain_name)\n \n # record event\n", "issue": "Docker and es::CreateElasticsearchDomain / elasticsearch endpoint\nHey all!\r\n\r\nI'm using localstack for a project where I consume data from kinesis, process it and put it in elasticsearch.\r\n\r\nIn the later versions of localstack 'elasticsearch' was removed and is now lazily loaded after call to es::CreateElasticsearchDomain (at least that's how I understand it from #1881, #1865, #1870).\r\n\r\nI'm running localstack in a docker using testcontainers (which gives me dynamic port mappings etc). My processing code is running on localhost.\r\n\r\nThe problem I'm facing now is that the response from es::CreateElasticsearchDomain returns an elasticsearch endpoint like: http://[docker container id]:port.\r\nWith my set up, this gives me two issues:\r\n1. [docker container id] host is not (I believe) reachable from localhost unless I add it to /etc/hosts or something like that. I can surely \"hack\" my way around this by just replacing [docker container id] with localhost.\r\n2. 
It says here: https://github.com/localstack/localstack/issues/1881#issuecomment-567495056 that we should not assume that elasticsearch will run on a static port in the future. When running localstack in a docker, how would I know which port to expose then in order to reach elasticsearch from localhost?\r\n\r\nI currently have my tests running using 1.10.7 but there I just make a dummy call to es::CreateElasticsearchDomain and hardcode elasticsearch endpoint to http://localhost:4571. (It seems like it always chooses 4571 as of now?)\r\n\r\nThanks\r\nStefan\n", "before_files": [{"content": "import json\nimport time\nimport logging\nfrom random import randint\nfrom flask import Flask, jsonify, request, make_response\nfrom localstack.utils import persistence\nfrom localstack.services import generic_proxy, install\nfrom localstack.utils.aws import aws_stack\nfrom localstack.constants import TEST_AWS_ACCOUNT_ID, ELASTICSEARCH_DEFAULT_VERSION\nfrom localstack.utils.common import to_str\nfrom localstack.utils.tagging import TaggingService\nfrom localstack.utils.analytics import event_publisher\nfrom localstack.services.plugins import check_infra\n\nLOG = logging.getLogger(__name__)\n\nAPP_NAME = 'es_api'\nAPI_PREFIX = '/2015-01-01'\n\nDEFAULT_ES_VERSION = '7.1'\n\nES_DOMAINS = {}\n\nTAGS = TaggingService()\n\napp = Flask(APP_NAME)\napp.url_map.strict_slashes = False\n\n\ndef error_response(error_type, code=400, message='Unknown error.'):\n if not message:\n if error_type == 'ResourceNotFoundException':\n message = 'Resource not found.'\n elif error_type == 'ResourceAlreadyExistsException':\n message = 'Resource already exists.'\n response = make_response(jsonify({'error': message}))\n response.headers['x-amzn-errortype'] = error_type\n return response, code\n\n\ndef get_domain_config_status():\n return {\n 'CreationDate': '%.2f' % time.time(),\n 'PendingDeletion': False,\n 'State': 'Active',\n 'UpdateDate': '%.2f' % time.time(),\n 'UpdateVersion': randint(1, 100)\n }\n\n\ndef get_domain_config(domain_name):\n config_status = get_domain_config_status()\n return {\n 'DomainConfig': {\n 'AccessPolicies': {\n 'Options': '{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::%s:root\"},\"Action\":\"es:*\",\"Resource\":\"arn:aws:es:%s:%s:domain/%s/*\"}]}' % (TEST_AWS_ACCOUNT_ID, aws_stack.get_region(), TEST_AWS_ACCOUNT_ID, domain_name), # noqa: E501\n 'Status': config_status\n },\n 'AdvancedOptions': {\n 'Options': {\n 'indices.fielddata.cache.size': '',\n 'rest.action.multi.allow_explicit_index': 'true'\n },\n 'Status': config_status\n },\n 'EBSOptions': {\n 'Options': {\n 'EBSEnabled': True,\n 'EncryptionEnabled': False,\n 'Iops': 0,\n 'VolumeSize': 10,\n 'VolumeType': 'gp2'\n },\n 'Status': config_status\n },\n 'ElasticsearchClusterConfig': {\n 'Options': {\n 'DedicatedMasterCount': 1,\n 'DedicatedMasterEnabled': True,\n 'DedicatedMasterType': 'm3.medium.elasticsearch',\n 'InstanceCount': 1,\n 'InstanceType': 'm3.medium.elasticsearch',\n 'ZoneAwarenessEnabled': False\n },\n 'Status': config_status\n },\n 'CognitoOptions': {\n 'Enabled': False\n },\n 'ElasticsearchVersion': {\n 'Options': '5.3',\n 'Status': config_status\n },\n 'EncryptionAtRestOptions': {\n 'Options': {\n 'Enabled': False,\n 'KmsKeyId': ''\n },\n 'Status': config_status\n },\n 'LogPublishingOptions': {\n 'Options': {\n 'INDEX_SLOW_LOGS': {\n 'CloudWatchLogsLogGroupArn': 'arn:aws:logs:%s:%s:log-group:sample-domain' % (aws_stack.get_region(), TEST_AWS_ACCOUNT_ID), # noqa: E501\n 
'Enabled': False\n },\n 'SEARCH_SLOW_LOGS': {\n 'CloudWatchLogsLogGroupArn': 'arn:aws:logs:%s:%s:log-group:sample-domain' % (aws_stack.get_region(), TEST_AWS_ACCOUNT_ID), # noqa: E501\n 'Enabled': False,\n }\n },\n 'Status': config_status\n },\n 'SnapshotOptions': {\n 'Options': {\n 'AutomatedSnapshotStartHour': randint(0, 23)\n },\n 'Status': config_status\n },\n 'VPCOptions': {\n 'Options': {\n 'AvailabilityZones': [\n 'us-east-1b'\n ],\n 'SecurityGroupIds': [\n 'sg-12345678'\n ],\n 'SubnetIds': [\n 'subnet-12345678'\n ],\n 'VPCId': 'vpc-12345678'\n },\n 'Status': config_status\n }\n }\n }\n\n\ndef get_domain_status(domain_name, deleted=False):\n status = ES_DOMAINS.get(domain_name) or {}\n return {\n 'DomainStatus': {\n 'ARN': 'arn:aws:es:%s:%s:domain/%s' % (aws_stack.get_region(), TEST_AWS_ACCOUNT_ID, domain_name),\n 'Created': True,\n 'Deleted': deleted,\n 'DomainId': '%s/%s' % (TEST_AWS_ACCOUNT_ID, domain_name),\n 'DomainName': domain_name,\n 'ElasticsearchClusterConfig': {\n 'DedicatedMasterCount': 1,\n 'DedicatedMasterEnabled': True,\n 'DedicatedMasterType': 'm3.medium.elasticsearch',\n 'InstanceCount': 1,\n 'InstanceType': 'm3.medium.elasticsearch',\n 'ZoneAwarenessEnabled': False\n },\n 'ElasticsearchVersion': status.get('ElasticsearchVersion') or DEFAULT_ES_VERSION,\n 'Endpoint': aws_stack.get_elasticsearch_endpoint(domain_name),\n 'Processing': False,\n 'EBSOptions': {\n 'EBSEnabled': True,\n 'VolumeType': 'gp2',\n 'VolumeSize': 10,\n 'Iops': 0\n },\n 'CognitoOptions': {\n 'Enabled': False\n },\n }\n }\n\n\ndef get_install_version_for_api_version(version):\n result = ELASTICSEARCH_DEFAULT_VERSION\n if version.startswith('6.'):\n result = '6.7.0'\n elif version == '7.4':\n result = '7.4.0'\n if not result.startswith(result):\n LOG.info('Elasticsearch version %s not yet supported, defaulting to %s' % (version, result))\n return result\n\n\ndef start_elasticsearch_instance(version):\n # Note: keep imports here to avoid circular dependencies\n from localstack.services.es import es_starter\n\n # install ES version\n install_version = get_install_version_for_api_version(version)\n install.install_elasticsearch(install_version)\n\n t1 = es_starter.start_elasticsearch(asynchronous=True, version=install_version)\n # sleep some time to give Elasticsearch enough time to come up\n time.sleep(8)\n # ensure that all infra components are up and running\n check_infra(apis=[], additional_checks=[es_starter.check_elasticsearch])\n return t1\n\n\ndef cleanup_elasticsearch_instance(status):\n # Note: keep imports here to avoid circular dependencies\n from localstack.services.es import es_starter\n es_starter.stop_elasticsearch()\n\n\[email protected]('%s/domain' % API_PREFIX, methods=['GET'])\ndef list_domain_names():\n result = {\n 'DomainNames': [{'DomainName': name} for name in ES_DOMAINS.keys()]\n }\n return jsonify(result)\n\n\[email protected]('%s/es/domain' % API_PREFIX, methods=['POST'])\ndef create_domain():\n data = json.loads(to_str(request.data))\n domain_name = data['DomainName']\n if domain_name in ES_DOMAINS:\n return error_response(error_type='ResourceAlreadyExistsException')\n ES_DOMAINS[domain_name] = data\n # start actual Elasticsearch instance\n version = data.get('ElasticsearchVersion') or DEFAULT_ES_VERSION\n start_elasticsearch_instance(version=version)\n result = get_domain_status(domain_name)\n\n # record event\n event_publisher.fire_event(event_publisher.EVENT_ES_CREATE_DOMAIN,\n payload={'n': event_publisher.get_hash(domain_name)})\n persistence.record('es', 
request=request)\n\n return jsonify(result)\n\n\[email protected]('%s/es/domain/<domain_name>' % API_PREFIX, methods=['GET'])\ndef describe_domain(domain_name):\n if domain_name not in ES_DOMAINS:\n return error_response(error_type='ResourceNotFoundException')\n result = get_domain_status(domain_name)\n return jsonify(result)\n\n\[email protected]('%s/es/domain-info' % API_PREFIX, methods=['POST'])\ndef describe_domains():\n data = json.loads(to_str(request.data))\n result = []\n domain_names = data.get('DomainNames', [])\n for domain_name in ES_DOMAINS:\n if domain_name in domain_names:\n status = get_domain_status(domain_name)\n status = status.get('DomainStatus') or status\n result.append(status)\n result = {'DomainStatusList': result}\n return jsonify(result)\n\n\[email protected]('%s/es/domain/<domain_name>/config' % API_PREFIX, methods=['GET', 'POST'])\ndef domain_config(domain_name):\n config = get_domain_config(domain_name)\n return jsonify(config)\n\n\[email protected]('%s/es/domain/<domain_name>' % API_PREFIX, methods=['DELETE'])\ndef delete_domain(domain_name):\n if domain_name not in ES_DOMAINS:\n return error_response(error_type='ResourceNotFoundException')\n result = get_domain_status(domain_name, deleted=True)\n status = ES_DOMAINS.pop(domain_name)\n if not ES_DOMAINS:\n cleanup_elasticsearch_instance(status)\n\n # record event\n event_publisher.fire_event(event_publisher.EVENT_ES_DELETE_DOMAIN,\n payload={'n': event_publisher.get_hash(domain_name)})\n persistence.record('es', request=request)\n\n return jsonify(result)\n\n\[email protected]('%s/es/compatibleVersions' % API_PREFIX, methods=['GET'])\ndef get_compatible_versions():\n result = [{\n 'SourceVersion': '6.5',\n 'TargetVersions': ['6.7', '6.8']\n }, {\n 'SourceVersion': '6.7',\n 'TargetVersions': ['6.8']\n }, {\n 'SourceVersion': '6.8',\n 'TargetVersions': ['7.1']\n }]\n return jsonify({'CompatibleElasticsearchVersions': result})\n\n\[email protected]('%s/tags' % API_PREFIX, methods=['GET', 'POST'])\ndef add_list_tags():\n if request.method == 'POST':\n data = json.loads(to_str(request.data) or '{}')\n arn = data.get('ARN')\n TAGS.tag_resource(arn, data.get('TagList', []))\n if request.method == 'GET' and request.args.get('arn'):\n arn = request.args.get('arn')\n tags = TAGS.list_tags_for_resource(arn)\n response = {\n 'TagList': tags.get('Tags')\n }\n return jsonify(response)\n\n return jsonify({})\n\n\ndef serve(port, quiet=True):\n generic_proxy.serve_flask_app(app=app, port=port, quiet=quiet)\n", "path": "localstack/services/es/es_api.py"}], "after_files": [{"content": "import json\nimport time\nimport logging\nfrom random import randint\nfrom flask import Flask, jsonify, request, make_response\nfrom localstack import config\nfrom localstack.utils import persistence\nfrom localstack.services import generic_proxy, install\nfrom localstack.utils.aws import aws_stack\nfrom localstack.constants import TEST_AWS_ACCOUNT_ID, ELASTICSEARCH_DEFAULT_VERSION\nfrom localstack.utils.common import to_str, FuncThread, get_service_protocol\nfrom localstack.utils.tagging import TaggingService\nfrom localstack.utils.analytics import event_publisher\nfrom localstack.services.plugins import check_infra\n\nLOG = logging.getLogger(__name__)\n\nAPP_NAME = 'es_api'\nAPI_PREFIX = '/2015-01-01'\n\nDEFAULT_ES_VERSION = '7.1'\n\nES_DOMAINS = {}\n\nTAGS = TaggingService()\n\napp = Flask(APP_NAME)\napp.url_map.strict_slashes = False\n\n\ndef error_response(error_type, code=400, message='Unknown error.'):\n if not message:\n if error_type == 
'ResourceNotFoundException':\n message = 'Resource not found.'\n elif error_type == 'ResourceAlreadyExistsException':\n message = 'Resource already exists.'\n response = make_response(jsonify({'error': message}))\n response.headers['x-amzn-errortype'] = error_type\n return response, code\n\n\ndef get_domain_config_status():\n return {\n 'CreationDate': '%.2f' % time.time(),\n 'PendingDeletion': False,\n 'State': 'Active',\n 'UpdateDate': '%.2f' % time.time(),\n 'UpdateVersion': randint(1, 100)\n }\n\n\ndef get_domain_config(domain_name):\n config_status = get_domain_config_status()\n return {\n 'DomainConfig': {\n 'AccessPolicies': {\n 'Options': '{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"AWS\":\"arn:aws:iam::%s:root\"},\"Action\":\"es:*\",\"Resource\":\"arn:aws:es:%s:%s:domain/%s/*\"}]}' % (TEST_AWS_ACCOUNT_ID, aws_stack.get_region(), TEST_AWS_ACCOUNT_ID, domain_name), # noqa: E501\n 'Status': config_status\n },\n 'AdvancedOptions': {\n 'Options': {\n 'indices.fielddata.cache.size': '',\n 'rest.action.multi.allow_explicit_index': 'true'\n },\n 'Status': config_status\n },\n 'EBSOptions': {\n 'Options': {\n 'EBSEnabled': True,\n 'EncryptionEnabled': False,\n 'Iops': 0,\n 'VolumeSize': 10,\n 'VolumeType': 'gp2'\n },\n 'Status': config_status\n },\n 'ElasticsearchClusterConfig': {\n 'Options': {\n 'DedicatedMasterCount': 1,\n 'DedicatedMasterEnabled': True,\n 'DedicatedMasterType': 'm3.medium.elasticsearch',\n 'InstanceCount': 1,\n 'InstanceType': 'm3.medium.elasticsearch',\n 'ZoneAwarenessEnabled': False\n },\n 'Status': config_status\n },\n 'CognitoOptions': {\n 'Enabled': False\n },\n 'ElasticsearchVersion': {\n 'Options': '5.3',\n 'Status': config_status\n },\n 'EncryptionAtRestOptions': {\n 'Options': {\n 'Enabled': False,\n 'KmsKeyId': ''\n },\n 'Status': config_status\n },\n 'LogPublishingOptions': {\n 'Options': {\n 'INDEX_SLOW_LOGS': {\n 'CloudWatchLogsLogGroupArn': 'arn:aws:logs:%s:%s:log-group:sample-domain' % (aws_stack.get_region(), TEST_AWS_ACCOUNT_ID), # noqa: E501\n 'Enabled': False\n },\n 'SEARCH_SLOW_LOGS': {\n 'CloudWatchLogsLogGroupArn': 'arn:aws:logs:%s:%s:log-group:sample-domain' % (aws_stack.get_region(), TEST_AWS_ACCOUNT_ID), # noqa: E501\n 'Enabled': False,\n }\n },\n 'Status': config_status\n },\n 'SnapshotOptions': {\n 'Options': {\n 'AutomatedSnapshotStartHour': randint(0, 23)\n },\n 'Status': config_status\n },\n 'VPCOptions': {\n 'Options': {\n 'AvailabilityZones': [\n 'us-east-1b'\n ],\n 'SecurityGroupIds': [\n 'sg-12345678'\n ],\n 'SubnetIds': [\n 'subnet-12345678'\n ],\n 'VPCId': 'vpc-12345678'\n },\n 'Status': config_status\n }\n }\n }\n\n\ndef get_domain_status(domain_name, deleted=False):\n status = ES_DOMAINS.get(domain_name) or {}\n endpoint = '%s://%s:%s' % (get_service_protocol(), config.HOSTNAME_EXTERNAL, config.PORT_ELASTICSEARCH)\n return {\n 'DomainStatus': {\n 'ARN': 'arn:aws:es:%s:%s:domain/%s' % (aws_stack.get_region(), TEST_AWS_ACCOUNT_ID, domain_name),\n 'Created': status.get('Created', True),\n 'Deleted': deleted,\n 'DomainId': '%s/%s' % (TEST_AWS_ACCOUNT_ID, domain_name),\n 'DomainName': domain_name,\n 'ElasticsearchClusterConfig': {\n 'DedicatedMasterCount': 1,\n 'DedicatedMasterEnabled': True,\n 'DedicatedMasterType': 'm3.medium.elasticsearch',\n 'InstanceCount': 1,\n 'InstanceType': 'm3.medium.elasticsearch',\n 'ZoneAwarenessEnabled': False\n },\n 'ElasticsearchVersion': status.get('ElasticsearchVersion') or DEFAULT_ES_VERSION,\n 'Endpoint': endpoint,\n 'Processing': False,\n 'EBSOptions': {\n 
'EBSEnabled': True,\n 'VolumeType': 'gp2',\n 'VolumeSize': 10,\n 'Iops': 0\n },\n 'CognitoOptions': {\n 'Enabled': False\n },\n }\n }\n\n\ndef get_install_version_for_api_version(version):\n result = ELASTICSEARCH_DEFAULT_VERSION\n if version.startswith('6.'):\n result = '6.7.0'\n elif version == '7.4':\n result = '7.4.0'\n if not result.startswith(result):\n LOG.info('Elasticsearch version %s not yet supported, defaulting to %s' % (version, result))\n return result\n\n\ndef start_elasticsearch_instance(version):\n # Note: keep imports here to avoid circular dependencies\n from localstack.services.es import es_starter\n\n # install ES version\n install_version = get_install_version_for_api_version(version)\n install.install_elasticsearch(install_version)\n\n t1 = es_starter.start_elasticsearch(asynchronous=True, version=install_version)\n # sleep some time to give Elasticsearch enough time to come up\n time.sleep(8)\n # ensure that all infra components are up and running\n check_infra(apis=[], additional_checks=[es_starter.check_elasticsearch])\n return t1\n\n\ndef cleanup_elasticsearch_instance(status):\n # Note: keep imports here to avoid circular dependencies\n from localstack.services.es import es_starter\n es_starter.stop_elasticsearch()\n\n\[email protected]('%s/domain' % API_PREFIX, methods=['GET'])\ndef list_domain_names():\n result = {\n 'DomainNames': [{'DomainName': name} for name in ES_DOMAINS.keys()]\n }\n return jsonify(result)\n\n\[email protected]('%s/es/domain' % API_PREFIX, methods=['POST'])\ndef create_domain():\n data = json.loads(to_str(request.data))\n domain_name = data['DomainName']\n if domain_name in ES_DOMAINS:\n return error_response(error_type='ResourceAlreadyExistsException')\n ES_DOMAINS[domain_name] = data\n data['Created'] = False\n\n def do_start(*args):\n # start actual Elasticsearch instance\n version = data.get('ElasticsearchVersion') or DEFAULT_ES_VERSION\n start_elasticsearch_instance(version=version)\n data['Created'] = True\n\n # start ES instance in the background\n FuncThread(do_start).start()\n # sleep a short while, then return\n time.sleep(5)\n result = get_domain_status(domain_name)\n\n # record event\n event_publisher.fire_event(event_publisher.EVENT_ES_CREATE_DOMAIN,\n payload={'n': event_publisher.get_hash(domain_name)})\n persistence.record('es', request=request)\n\n return jsonify(result)\n\n\[email protected]('%s/es/domain/<domain_name>' % API_PREFIX, methods=['GET'])\ndef describe_domain(domain_name):\n if domain_name not in ES_DOMAINS:\n return error_response(error_type='ResourceNotFoundException')\n result = get_domain_status(domain_name)\n return jsonify(result)\n\n\[email protected]('%s/es/domain-info' % API_PREFIX, methods=['POST'])\ndef describe_domains():\n data = json.loads(to_str(request.data))\n result = []\n domain_names = data.get('DomainNames', [])\n for domain_name in ES_DOMAINS:\n if domain_name in domain_names:\n status = get_domain_status(domain_name)\n status = status.get('DomainStatus') or status\n result.append(status)\n result = {'DomainStatusList': result}\n return jsonify(result)\n\n\[email protected]('%s/es/domain/<domain_name>/config' % API_PREFIX, methods=['GET', 'POST'])\ndef domain_config(domain_name):\n config = get_domain_config(domain_name)\n return jsonify(config)\n\n\[email protected]('%s/es/domain/<domain_name>' % API_PREFIX, methods=['DELETE'])\ndef delete_domain(domain_name):\n if domain_name not in ES_DOMAINS:\n return error_response(error_type='ResourceNotFoundException')\n result = 
get_domain_status(domain_name, deleted=True)\n status = ES_DOMAINS.pop(domain_name)\n if not ES_DOMAINS:\n cleanup_elasticsearch_instance(status)\n\n # record event\n event_publisher.fire_event(event_publisher.EVENT_ES_DELETE_DOMAIN,\n payload={'n': event_publisher.get_hash(domain_name)})\n persistence.record('es', request=request)\n\n return jsonify(result)\n\n\[email protected]('%s/es/compatibleVersions' % API_PREFIX, methods=['GET'])\ndef get_compatible_versions():\n result = [{\n 'SourceVersion': '6.5',\n 'TargetVersions': ['6.7', '6.8']\n }, {\n 'SourceVersion': '6.7',\n 'TargetVersions': ['6.8']\n }, {\n 'SourceVersion': '6.8',\n 'TargetVersions': ['7.1']\n }]\n return jsonify({'CompatibleElasticsearchVersions': result})\n\n\[email protected]('%s/tags' % API_PREFIX, methods=['GET', 'POST'])\ndef add_list_tags():\n if request.method == 'POST':\n data = json.loads(to_str(request.data) or '{}')\n arn = data.get('ARN')\n TAGS.tag_resource(arn, data.get('TagList', []))\n if request.method == 'GET' and request.args.get('arn'):\n arn = request.args.get('arn')\n tags = TAGS.list_tags_for_resource(arn)\n response = {\n 'TagList': tags.get('Tags')\n }\n return jsonify(response)\n\n return jsonify({})\n\n\ndef serve(port, quiet=True):\n generic_proxy.serve_flask_app(app=app, port=port, quiet=quiet)\n", "path": "localstack/services/es/es_api.py"}]}
3,905
631
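The fix in the golden diff above comes down to two moves: advertise an endpoint built from externally reachable configuration (`HOSTNAME_EXTERNAL` and `PORT_ELASTICSEARCH`) rather than the container-internal hostname, and run the slow Elasticsearch startup in a background thread so the create-domain call no longer blocks until the backend is fully up. The sketch below shows that pattern in isolation; it is not LocalStack code, and the constants, the `start_backend` helper and the sleep are stand-ins chosen for the demo.

```python
import threading
import time

# Stand-ins for configuration the caller can actually reach (assumed values).
HOSTNAME_EXTERNAL = "localhost"
PORT_ELASTICSEARCH = 4571

DOMAINS = {}  # domain_name -> mutable state shared with the background thread


def start_backend(version):
    """Placeholder for the slow part: install + boot of the real backend."""
    time.sleep(2)


def create_domain(domain_name, version="7.1"):
    state = {"ElasticsearchVersion": version, "Created": False}
    DOMAINS[domain_name] = state

    def do_start():
        start_backend(version)
        state["Created"] = True  # flips only once the backend is really up

    threading.Thread(target=do_start, daemon=True).start()

    # Respond without waiting; the endpoint uses external host/port config,
    # never the container id the service happens to run under.
    return {
        "DomainStatus": {
            "DomainName": domain_name,
            "Created": state["Created"],
            "Processing": not state["Created"],
            "Endpoint": "http://%s:%s" % (HOSTNAME_EXTERNAL, PORT_ELASTICSEARCH),
        }
    }


if __name__ == "__main__":
    print(create_domain("demo"))       # returns at once with Created=False
    time.sleep(3)
    print(DOMAINS["demo"]["Created"])  # True after the background start finished
```

The trade-off is the one the diff accepts as well: the API answers quickly, but callers have to poll the domain status until `Created` turns true before the endpoint is actually usable.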
gh_patches_debug_7776
rasdani/github-patches
git_diff
secdev__scapy-4349
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Incorrect RTCP SR + RR parsing ### Brief description The RTCP parser fails to handle a packet that contains both Sender Report and Received Report, which is is the most common data for a two-way session. It seems that the "sender_info" info contain a payload, this should be parsed as a ReceptionReport info Incorrect behavior demonstrated in UTS here: https://github.com/secdev/scapy/commit/0bb9db2932d91d2f6e057caea60db78a2ad54f96 ### Scapy version main ### Python version 3.10 ### Operating system Linux 5.15.146 ### Additional environment information _No response_ ### How to reproduce Run tests on provided branch: `test/run_tests -P "load_contrib('rtcp')" -t test/contrib/rtcp.uts -F` ### Actual result Demo test should fail. ReceptionReport after SenderInfo should be parsed. SenderInfo should never have a payload, it's a fixed-sized struct ### Expected result The commented asserts should pass instead ### Related resources https://datatracker.ietf.org/doc/html/rfc3550 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `scapy/contrib/rtcp.py` Content: ``` 1 # SPDX-License-Identifier: GPL-2.0-only 2 # This file is part of Scapy 3 # See https://scapy.net/ for more information 4 # Copyright (C) Pavel Oborin <[email protected]> 5 6 # RFC 3550 7 # scapy.contrib.description = Real-Time Transport Control Protocol 8 # scapy.contrib.status = loads 9 10 """ 11 RTCP (rfc 3550) 12 13 Use bind_layers(UDP, RTCP, dport=...) to start using it 14 """ 15 16 import struct 17 18 from scapy.packet import Packet 19 from scapy.fields import ( 20 BitField, 21 BitFieldLenField, 22 ByteEnumField, 23 ByteField, 24 ConditionalField, 25 FieldLenField, 26 IntField, 27 LenField, 28 LongField, 29 PacketField, 30 PacketListField, 31 StrLenField, 32 X3BytesField, 33 ) 34 35 36 _rtcp_packet_types = { 37 200: 'Sender report', 38 201: 'Receiver report', 39 202: 'Source description', 40 203: 'BYE', 41 204: 'APP' 42 } 43 44 45 class SenderInfo(Packet): 46 name = "Sender info" 47 fields_desc = [ 48 LongField('ntp_timestamp', None), 49 IntField('rtp_timestamp', None), 50 IntField('sender_packet_count', None), 51 IntField('sender_octet_count', None) 52 ] 53 54 55 class ReceptionReport(Packet): 56 name = "Reception report" 57 fields_desc = [ 58 IntField('sourcesync', None), 59 ByteField('fraction_lost', None), 60 X3BytesField('cumulative_lost', None), 61 IntField('highest_seqnum_recv', None), 62 IntField('interarrival_jitter', None), 63 IntField('last_SR_timestamp', None), 64 IntField('delay_since_last_SR', None) 65 ] 66 67 68 _sdes_chunk_types = { 69 0: "END", 70 1: "CNAME", 71 2: "NAME", 72 3: "EMAIL", 73 4: "PHONE", 74 5: "LOC", 75 6: "TOOL", 76 7: "NOTE", 77 8: "PRIV" 78 } 79 80 81 class SDESItem(Packet): 82 name = "SDES item" 83 fields_desc = [ 84 ByteEnumField('chunk_type', None, _sdes_chunk_types), 85 FieldLenField('length', None, fmt='!b', length_of='value'), 86 StrLenField('value', None, length_from=lambda pkt: pkt.length) 87 ] 88 89 def extract_padding(self, p): 90 return "", p 91 92 93 class SDESChunk(Packet): 94 name = "SDES chunk" 95 fields_desc = [ 96 IntField('sourcesync', None), 97 PacketListField( 98 'items', None, 99 next_cls_cb=( 100 lambda x, y, p, z: None if (p and p.chunk_type == 0) else SDESItem 101 ) 102 ) 103 ] 104 105 106 class RTCP(Packet): 107 name = "RTCP" 108 109 fields_desc = [ 110 # HEADER 111 
BitField('version', 2, 2), 112 BitField('padding', 0, 1), 113 BitFieldLenField('count', 0, 5, count_of='report_blocks'), 114 ByteEnumField('packet_type', 0, _rtcp_packet_types), 115 LenField('length', None, fmt='!h'), 116 # SR/RR 117 ConditionalField( 118 IntField('sourcesync', 0), 119 lambda pkt: pkt.packet_type in (200, 201) 120 ), 121 ConditionalField( 122 PacketField('sender_info', SenderInfo(), SenderInfo), 123 lambda pkt: pkt.packet_type == 200 124 ), 125 ConditionalField( 126 PacketListField('report_blocks', None, pkt_cls=ReceptionReport, 127 count_from=lambda pkt: pkt.count), 128 lambda pkt: pkt.packet_type in (200, 201) 129 ), 130 # SDES 131 ConditionalField( 132 PacketListField('sdes_chunks', None, pkt_cls=SDESChunk, 133 count_from=lambda pkt: pkt.count), 134 lambda pkt: pkt.packet_type == 202 135 ), 136 ] 137 138 def post_build(self, pkt, pay): 139 pkt += pay 140 if self.length is None: 141 pkt = pkt[:2] + struct.pack("!h", len(pkt) // 4 - 1) + pkt[4:] 142 return pkt 143 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scapy/contrib/rtcp.py b/scapy/contrib/rtcp.py --- a/scapy/contrib/rtcp.py +++ b/scapy/contrib/rtcp.py @@ -51,6 +51,9 @@ IntField('sender_octet_count', None) ] + def extract_padding(self, p): + return "", p + class ReceptionReport(Packet): name = "Reception report" @@ -64,6 +67,9 @@ IntField('delay_since_last_SR', None) ] + def extract_padding(self, p): + return "", p + _sdes_chunk_types = { 0: "END",
{"golden_diff": "diff --git a/scapy/contrib/rtcp.py b/scapy/contrib/rtcp.py\n--- a/scapy/contrib/rtcp.py\n+++ b/scapy/contrib/rtcp.py\n@@ -51,6 +51,9 @@\n IntField('sender_octet_count', None)\n ]\n \n+ def extract_padding(self, p):\n+ return \"\", p\n+\n \n class ReceptionReport(Packet):\n name = \"Reception report\"\n@@ -64,6 +67,9 @@\n IntField('delay_since_last_SR', None)\n ]\n \n+ def extract_padding(self, p):\n+ return \"\", p\n+\n \n _sdes_chunk_types = {\n 0: \"END\",\n", "issue": "Incorrect RTCP SR + RR parsing\n### Brief description\n\nThe RTCP parser fails to handle a packet that contains both Sender Report and Received Report, which is is the most common data for a two-way session.\r\n\r\nIt seems that the \"sender_info\" info contain a payload, this should be parsed as a ReceptionReport info\r\n\r\nIncorrect behavior demonstrated in UTS here: https://github.com/secdev/scapy/commit/0bb9db2932d91d2f6e057caea60db78a2ad54f96\n\n### Scapy version\n\nmain\n\n### Python version\n\n3.10\n\n### Operating system\n\nLinux 5.15.146\n\n### Additional environment information\n\n_No response_\n\n### How to reproduce\n\nRun tests on provided branch:\r\n\r\n`test/run_tests -P \"load_contrib('rtcp')\" -t test/contrib/rtcp.uts -F`\r\n\r\n\n\n### Actual result\n\nDemo test should fail.\r\n\r\nReceptionReport after SenderInfo should be parsed. SenderInfo should never have a payload, it's a fixed-sized struct\n\n### Expected result\n\nThe commented asserts should pass instead\n\n### Related resources\n\nhttps://datatracker.ietf.org/doc/html/rfc3550\n", "before_files": [{"content": "# SPDX-License-Identifier: GPL-2.0-only\n# This file is part of Scapy\n# See https://scapy.net/ for more information\n# Copyright (C) Pavel Oborin <[email protected]>\n\n# RFC 3550\n# scapy.contrib.description = Real-Time Transport Control Protocol\n# scapy.contrib.status = loads\n\n\"\"\"\nRTCP (rfc 3550)\n\nUse bind_layers(UDP, RTCP, dport=...) 
to start using it\n\"\"\"\n\nimport struct\n\nfrom scapy.packet import Packet\nfrom scapy.fields import (\n BitField,\n BitFieldLenField,\n ByteEnumField,\n ByteField,\n ConditionalField,\n FieldLenField,\n IntField,\n LenField,\n LongField,\n PacketField,\n PacketListField,\n StrLenField,\n X3BytesField,\n)\n\n\n_rtcp_packet_types = {\n 200: 'Sender report',\n 201: 'Receiver report',\n 202: 'Source description',\n 203: 'BYE',\n 204: 'APP'\n}\n\n\nclass SenderInfo(Packet):\n name = \"Sender info\"\n fields_desc = [\n LongField('ntp_timestamp', None),\n IntField('rtp_timestamp', None),\n IntField('sender_packet_count', None),\n IntField('sender_octet_count', None)\n ]\n\n\nclass ReceptionReport(Packet):\n name = \"Reception report\"\n fields_desc = [\n IntField('sourcesync', None),\n ByteField('fraction_lost', None),\n X3BytesField('cumulative_lost', None),\n IntField('highest_seqnum_recv', None),\n IntField('interarrival_jitter', None),\n IntField('last_SR_timestamp', None),\n IntField('delay_since_last_SR', None)\n ]\n\n\n_sdes_chunk_types = {\n 0: \"END\",\n 1: \"CNAME\",\n 2: \"NAME\",\n 3: \"EMAIL\",\n 4: \"PHONE\",\n 5: \"LOC\",\n 6: \"TOOL\",\n 7: \"NOTE\",\n 8: \"PRIV\"\n}\n\n\nclass SDESItem(Packet):\n name = \"SDES item\"\n fields_desc = [\n ByteEnumField('chunk_type', None, _sdes_chunk_types),\n FieldLenField('length', None, fmt='!b', length_of='value'),\n StrLenField('value', None, length_from=lambda pkt: pkt.length)\n ]\n\n def extract_padding(self, p):\n return \"\", p\n\n\nclass SDESChunk(Packet):\n name = \"SDES chunk\"\n fields_desc = [\n IntField('sourcesync', None),\n PacketListField(\n 'items', None,\n next_cls_cb=(\n lambda x, y, p, z: None if (p and p.chunk_type == 0) else SDESItem\n )\n )\n ]\n\n\nclass RTCP(Packet):\n name = \"RTCP\"\n\n fields_desc = [\n # HEADER\n BitField('version', 2, 2),\n BitField('padding', 0, 1),\n BitFieldLenField('count', 0, 5, count_of='report_blocks'),\n ByteEnumField('packet_type', 0, _rtcp_packet_types),\n LenField('length', None, fmt='!h'),\n # SR/RR\n ConditionalField(\n IntField('sourcesync', 0),\n lambda pkt: pkt.packet_type in (200, 201)\n ),\n ConditionalField(\n PacketField('sender_info', SenderInfo(), SenderInfo),\n lambda pkt: pkt.packet_type == 200\n ),\n ConditionalField(\n PacketListField('report_blocks', None, pkt_cls=ReceptionReport,\n count_from=lambda pkt: pkt.count),\n lambda pkt: pkt.packet_type in (200, 201)\n ),\n # SDES\n ConditionalField(\n PacketListField('sdes_chunks', None, pkt_cls=SDESChunk,\n count_from=lambda pkt: pkt.count),\n lambda pkt: pkt.packet_type == 202\n ),\n ]\n\n def post_build(self, pkt, pay):\n pkt += pay\n if self.length is None:\n pkt = pkt[:2] + struct.pack(\"!h\", len(pkt) // 4 - 1) + pkt[4:]\n return pkt\n", "path": "scapy/contrib/rtcp.py"}], "after_files": [{"content": "# SPDX-License-Identifier: GPL-2.0-only\n# This file is part of Scapy\n# See https://scapy.net/ for more information\n# Copyright (C) Pavel Oborin <[email protected]>\n\n# RFC 3550\n# scapy.contrib.description = Real-Time Transport Control Protocol\n# scapy.contrib.status = loads\n\n\"\"\"\nRTCP (rfc 3550)\n\nUse bind_layers(UDP, RTCP, dport=...) 
to start using it\n\"\"\"\n\nimport struct\n\nfrom scapy.packet import Packet\nfrom scapy.fields import (\n BitField,\n BitFieldLenField,\n ByteEnumField,\n ByteField,\n ConditionalField,\n FieldLenField,\n IntField,\n LenField,\n LongField,\n PacketField,\n PacketListField,\n StrLenField,\n X3BytesField,\n)\n\n\n_rtcp_packet_types = {\n 200: 'Sender report',\n 201: 'Receiver report',\n 202: 'Source description',\n 203: 'BYE',\n 204: 'APP'\n}\n\n\nclass SenderInfo(Packet):\n name = \"Sender info\"\n fields_desc = [\n LongField('ntp_timestamp', None),\n IntField('rtp_timestamp', None),\n IntField('sender_packet_count', None),\n IntField('sender_octet_count', None)\n ]\n\n def extract_padding(self, p):\n return \"\", p\n\n\nclass ReceptionReport(Packet):\n name = \"Reception report\"\n fields_desc = [\n IntField('sourcesync', None),\n ByteField('fraction_lost', None),\n X3BytesField('cumulative_lost', None),\n IntField('highest_seqnum_recv', None),\n IntField('interarrival_jitter', None),\n IntField('last_SR_timestamp', None),\n IntField('delay_since_last_SR', None)\n ]\n\n def extract_padding(self, p):\n return \"\", p\n\n\n_sdes_chunk_types = {\n 0: \"END\",\n 1: \"CNAME\",\n 2: \"NAME\",\n 3: \"EMAIL\",\n 4: \"PHONE\",\n 5: \"LOC\",\n 6: \"TOOL\",\n 7: \"NOTE\",\n 8: \"PRIV\"\n}\n\n\nclass SDESItem(Packet):\n name = \"SDES item\"\n fields_desc = [\n ByteEnumField('chunk_type', None, _sdes_chunk_types),\n FieldLenField('length', None, fmt='!b', length_of='value'),\n StrLenField('value', None, length_from=lambda pkt: pkt.length)\n ]\n\n def extract_padding(self, p):\n return \"\", p\n\n\nclass SDESChunk(Packet):\n name = \"SDES chunk\"\n fields_desc = [\n IntField('sourcesync', None),\n PacketListField(\n 'items', None,\n next_cls_cb=(\n lambda x, y, p, z: None if (p and p.chunk_type == 0) else SDESItem\n )\n )\n ]\n\n\nclass RTCP(Packet):\n name = \"RTCP\"\n\n fields_desc = [\n # HEADER\n BitField('version', 2, 2),\n BitField('padding', 0, 1),\n BitFieldLenField('count', 0, 5, count_of='report_blocks'),\n ByteEnumField('packet_type', 0, _rtcp_packet_types),\n LenField('length', None, fmt='!h'),\n # SR/RR\n ConditionalField(\n IntField('sourcesync', 0),\n lambda pkt: pkt.packet_type in (200, 201)\n ),\n ConditionalField(\n PacketField('sender_info', SenderInfo(), SenderInfo),\n lambda pkt: pkt.packet_type == 200\n ),\n ConditionalField(\n PacketListField('report_blocks', None, pkt_cls=ReceptionReport,\n count_from=lambda pkt: pkt.count),\n lambda pkt: pkt.packet_type in (200, 201)\n ),\n # SDES\n ConditionalField(\n PacketListField('sdes_chunks', None, pkt_cls=SDESChunk,\n count_from=lambda pkt: pkt.count),\n lambda pkt: pkt.packet_type == 202\n ),\n ]\n\n def post_build(self, pkt, pay):\n pkt += pay\n if self.length is None:\n pkt = pkt[:2] + struct.pack(\"!h\", len(pkt) // 4 - 1) + pkt[4:]\n return pkt\n", "path": "scapy/contrib/rtcp.py"}]}
1,824
157
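The one-line `extract_padding` override added by the golden diff above is the usual Scapy idiom for fixed-size sub-records: returning `("", s)` tells Scapy the sub-packet has no payload of its own and hands the leftover bytes back to the parent, so the fields that follow (here, the reception reports) can still be dissected. The classes below are invented for the demonstration, not the real RTCP layers; the mechanism is the same one applied to `SenderInfo` and `ReceptionReport`.

```python
from scapy.fields import IntField, PacketField
from scapy.packet import Packet


class FixedInfo(Packet):
    """A fixed-size 8-byte sub-record, similar in spirit to SenderInfo."""
    name = "FixedInfo"
    fields_desc = [IntField("a", 0), IntField("b", 0)]

    def extract_padding(self, s):
        # Without this override, every byte after the 8 fixed ones would be
        # kept as this sub-packet's payload. Returning (b"", s) gives the
        # remainder back to the parent packet for its next fields.
        return b"", s


class Container(Packet):
    name = "Container"
    fields_desc = [
        PacketField("info", None, FixedInfo),
        IntField("trailer", 0),  # a field that comes after the sub-record
    ]


raw = bytes(FixedInfo(a=1, b=2)) + (42).to_bytes(4, "big")
pkt = Container(raw)
assert pkt.info.a == 1 and pkt.trailer == 42  # trailer parsed, not swallowed
```

Drop the `extract_padding` method and the four trailing bytes stay inside `FixedInfo` as raw payload instead of reaching `trailer` — essentially how the combined SR+RR packet lost its reception report.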
gh_patches_debug_4463
rasdani/github-patches
git_diff
getsentry__sentry-python-464
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- logging breadcrumb collection time is incorrect ![sentry](https://user-images.githubusercontent.com/28345280/61774444-71f0a800-ae29-11e9-8074-f3c88154b432.png) I found the breadcrumbs timestamp converted to the local time zone datetime.datetime.fromtimestamp(record.created) > ```python > # /sentry_sdk/integrations/logging.py:90 > def _breadcrumb_from_record(record): > # type: (LogRecord) -> Dict[str, Any] > return { > "ty": "log", > "level": _logging_to_event_level(record.levelname), > "category": record.name, > "message": record.message, > "timestamp": datetime.datetime.fromtimestamp(record.created), > "data": _extra_from_record(record), > } > ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `sentry_sdk/integrations/logging.py` Content: ``` 1 from __future__ import absolute_import 2 3 import logging 4 import datetime 5 6 from sentry_sdk.hub import Hub 7 from sentry_sdk.utils import ( 8 to_string, 9 event_from_exception, 10 current_stacktrace, 11 capture_internal_exceptions, 12 ) 13 from sentry_sdk.integrations import Integration 14 from sentry_sdk._compat import iteritems 15 16 from sentry_sdk._types import MYPY 17 18 if MYPY: 19 from logging import LogRecord 20 from typing import Any 21 from typing import Dict 22 from typing import Optional 23 24 DEFAULT_LEVEL = logging.INFO 25 DEFAULT_EVENT_LEVEL = logging.ERROR 26 27 _IGNORED_LOGGERS = set(["sentry_sdk.errors"]) 28 29 30 def ignore_logger(name): 31 # type: (str) -> None 32 """This disables recording (both in breadcrumbs and as events) calls to 33 a logger of a specific name. Among other uses, many of our integrations 34 use this to prevent their actions being recorded as breadcrumbs. Exposed 35 to users as a way to quiet spammy loggers. 36 """ 37 _IGNORED_LOGGERS.add(name) 38 39 40 class LoggingIntegration(Integration): 41 identifier = "logging" 42 43 def __init__(self, level=DEFAULT_LEVEL, event_level=DEFAULT_EVENT_LEVEL): 44 # type: (Optional[int], Optional[int]) -> None 45 self._handler = None 46 self._breadcrumb_handler = None 47 48 if level is not None: 49 self._breadcrumb_handler = BreadcrumbHandler(level=level) 50 51 if event_level is not None: 52 self._handler = EventHandler(level=event_level) 53 54 def _handle_record(self, record): 55 # type: (LogRecord) -> None 56 if self._handler is not None and record.levelno >= self._handler.level: 57 self._handler.handle(record) 58 59 if ( 60 self._breadcrumb_handler is not None 61 and record.levelno >= self._breadcrumb_handler.level 62 ): 63 self._breadcrumb_handler.handle(record) 64 65 @staticmethod 66 def setup_once(): 67 # type: () -> None 68 old_callhandlers = logging.Logger.callHandlers # type: ignore 69 70 def sentry_patched_callhandlers(self, record): 71 # type: (Any, LogRecord) -> Any 72 try: 73 return old_callhandlers(self, record) 74 finally: 75 # This check is done twice, once also here before we even get 76 # the integration. Otherwise we have a high chance of getting 77 # into a recursion error when the integration is resolved 78 # (this also is slower). 
79 if record.name not in _IGNORED_LOGGERS: 80 integration = Hub.current.get_integration(LoggingIntegration) 81 if integration is not None: 82 integration._handle_record(record) 83 84 logging.Logger.callHandlers = sentry_patched_callhandlers # type: ignore 85 86 87 def _can_record(record): 88 # type: (LogRecord) -> bool 89 return record.name not in _IGNORED_LOGGERS 90 91 92 def _breadcrumb_from_record(record): 93 # type: (LogRecord) -> Dict[str, Any] 94 return { 95 "ty": "log", 96 "level": _logging_to_event_level(record.levelname), 97 "category": record.name, 98 "message": record.message, 99 "timestamp": datetime.datetime.fromtimestamp(record.created), 100 "data": _extra_from_record(record), 101 } 102 103 104 def _logging_to_event_level(levelname): 105 # type: (str) -> str 106 return {"critical": "fatal"}.get(levelname.lower(), levelname.lower()) 107 108 109 COMMON_RECORD_ATTRS = frozenset( 110 ( 111 "args", 112 "created", 113 "exc_info", 114 "exc_text", 115 "filename", 116 "funcName", 117 "levelname", 118 "levelno", 119 "linenno", 120 "lineno", 121 "message", 122 "module", 123 "msecs", 124 "msg", 125 "name", 126 "pathname", 127 "process", 128 "processName", 129 "relativeCreated", 130 "stack", 131 "tags", 132 "thread", 133 "threadName", 134 ) 135 ) 136 137 138 def _extra_from_record(record): 139 # type: (LogRecord) -> Dict[str, None] 140 return { 141 k: v 142 for k, v in iteritems(vars(record)) 143 if k not in COMMON_RECORD_ATTRS 144 and (not isinstance(k, str) or not k.startswith("_")) 145 } 146 147 148 class EventHandler(logging.Handler, object): 149 def emit(self, record): 150 # type: (LogRecord) -> Any 151 with capture_internal_exceptions(): 152 self.format(record) 153 return self._emit(record) 154 155 def _emit(self, record): 156 # type: (LogRecord) -> None 157 if not _can_record(record): 158 return 159 160 hub = Hub.current 161 if hub.client is None: 162 return 163 164 client_options = hub.client.options 165 166 # exc_info might be None or (None, None, None) 167 if record.exc_info is not None and record.exc_info[0] is not None: 168 event, hint = event_from_exception( 169 record.exc_info, 170 client_options=client_options, 171 mechanism={"type": "logging", "handled": True}, 172 ) 173 elif record.exc_info and record.exc_info[0] is None: 174 event = {} 175 hint = {} 176 with capture_internal_exceptions(): 177 event["threads"] = { 178 "values": [ 179 { 180 "stacktrace": current_stacktrace( 181 client_options["with_locals"] 182 ), 183 "crashed": False, 184 "current": True, 185 } 186 ] 187 } 188 else: 189 event = {} 190 hint = {} 191 192 hint["log_record"] = record 193 194 event["level"] = _logging_to_event_level(record.levelname) 195 event["logger"] = record.name 196 event["logentry"] = {"message": to_string(record.msg), "params": record.args} 197 event["extra"] = _extra_from_record(record) 198 199 hub.capture_event(event, hint=hint) 200 201 202 # Legacy name 203 SentryHandler = EventHandler 204 205 206 class BreadcrumbHandler(logging.Handler, object): 207 def emit(self, record): 208 # type: (LogRecord) -> Any 209 with capture_internal_exceptions(): 210 self.format(record) 211 return self._emit(record) 212 213 def _emit(self, record): 214 # type: (LogRecord) -> None 215 if not _can_record(record): 216 return 217 218 Hub.current.add_breadcrumb( 219 _breadcrumb_from_record(record), hint={"log_record": record} 220 ) 221 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sentry_sdk/integrations/logging.py b/sentry_sdk/integrations/logging.py --- a/sentry_sdk/integrations/logging.py +++ b/sentry_sdk/integrations/logging.py @@ -96,7 +96,7 @@ "level": _logging_to_event_level(record.levelname), "category": record.name, "message": record.message, - "timestamp": datetime.datetime.fromtimestamp(record.created), + "timestamp": datetime.datetime.utcfromtimestamp(record.created), "data": _extra_from_record(record), }
{"golden_diff": "diff --git a/sentry_sdk/integrations/logging.py b/sentry_sdk/integrations/logging.py\n--- a/sentry_sdk/integrations/logging.py\n+++ b/sentry_sdk/integrations/logging.py\n@@ -96,7 +96,7 @@\n \"level\": _logging_to_event_level(record.levelname),\n \"category\": record.name,\n \"message\": record.message,\n- \"timestamp\": datetime.datetime.fromtimestamp(record.created),\n+ \"timestamp\": datetime.datetime.utcfromtimestamp(record.created),\n \"data\": _extra_from_record(record),\n }\n", "issue": "logging breadcrumb collection time is incorrect\n\r\n![sentry](https://user-images.githubusercontent.com/28345280/61774444-71f0a800-ae29-11e9-8074-f3c88154b432.png)\r\n\r\nI found the breadcrumbs timestamp converted to the local time zone\r\ndatetime.datetime.fromtimestamp(record.created)\r\n\r\n> ```python\r\n> # /sentry_sdk/integrations/logging.py:90\r\n> def _breadcrumb_from_record(record):\r\n> # type: (LogRecord) -> Dict[str, Any]\r\n> return {\r\n> \"ty\": \"log\",\r\n> \"level\": _logging_to_event_level(record.levelname),\r\n> \"category\": record.name,\r\n> \"message\": record.message,\r\n> \"timestamp\": datetime.datetime.fromtimestamp(record.created),\r\n> \"data\": _extra_from_record(record),\r\n> }\r\n> ```\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport logging\nimport datetime\n\nfrom sentry_sdk.hub import Hub\nfrom sentry_sdk.utils import (\n to_string,\n event_from_exception,\n current_stacktrace,\n capture_internal_exceptions,\n)\nfrom sentry_sdk.integrations import Integration\nfrom sentry_sdk._compat import iteritems\n\nfrom sentry_sdk._types import MYPY\n\nif MYPY:\n from logging import LogRecord\n from typing import Any\n from typing import Dict\n from typing import Optional\n\nDEFAULT_LEVEL = logging.INFO\nDEFAULT_EVENT_LEVEL = logging.ERROR\n\n_IGNORED_LOGGERS = set([\"sentry_sdk.errors\"])\n\n\ndef ignore_logger(name):\n # type: (str) -> None\n \"\"\"This disables recording (both in breadcrumbs and as events) calls to\n a logger of a specific name. Among other uses, many of our integrations\n use this to prevent their actions being recorded as breadcrumbs. Exposed\n to users as a way to quiet spammy loggers.\n \"\"\"\n _IGNORED_LOGGERS.add(name)\n\n\nclass LoggingIntegration(Integration):\n identifier = \"logging\"\n\n def __init__(self, level=DEFAULT_LEVEL, event_level=DEFAULT_EVENT_LEVEL):\n # type: (Optional[int], Optional[int]) -> None\n self._handler = None\n self._breadcrumb_handler = None\n\n if level is not None:\n self._breadcrumb_handler = BreadcrumbHandler(level=level)\n\n if event_level is not None:\n self._handler = EventHandler(level=event_level)\n\n def _handle_record(self, record):\n # type: (LogRecord) -> None\n if self._handler is not None and record.levelno >= self._handler.level:\n self._handler.handle(record)\n\n if (\n self._breadcrumb_handler is not None\n and record.levelno >= self._breadcrumb_handler.level\n ):\n self._breadcrumb_handler.handle(record)\n\n @staticmethod\n def setup_once():\n # type: () -> None\n old_callhandlers = logging.Logger.callHandlers # type: ignore\n\n def sentry_patched_callhandlers(self, record):\n # type: (Any, LogRecord) -> Any\n try:\n return old_callhandlers(self, record)\n finally:\n # This check is done twice, once also here before we even get\n # the integration. 
Otherwise we have a high chance of getting\n # into a recursion error when the integration is resolved\n # (this also is slower).\n if record.name not in _IGNORED_LOGGERS:\n integration = Hub.current.get_integration(LoggingIntegration)\n if integration is not None:\n integration._handle_record(record)\n\n logging.Logger.callHandlers = sentry_patched_callhandlers # type: ignore\n\n\ndef _can_record(record):\n # type: (LogRecord) -> bool\n return record.name not in _IGNORED_LOGGERS\n\n\ndef _breadcrumb_from_record(record):\n # type: (LogRecord) -> Dict[str, Any]\n return {\n \"ty\": \"log\",\n \"level\": _logging_to_event_level(record.levelname),\n \"category\": record.name,\n \"message\": record.message,\n \"timestamp\": datetime.datetime.fromtimestamp(record.created),\n \"data\": _extra_from_record(record),\n }\n\n\ndef _logging_to_event_level(levelname):\n # type: (str) -> str\n return {\"critical\": \"fatal\"}.get(levelname.lower(), levelname.lower())\n\n\nCOMMON_RECORD_ATTRS = frozenset(\n (\n \"args\",\n \"created\",\n \"exc_info\",\n \"exc_text\",\n \"filename\",\n \"funcName\",\n \"levelname\",\n \"levelno\",\n \"linenno\",\n \"lineno\",\n \"message\",\n \"module\",\n \"msecs\",\n \"msg\",\n \"name\",\n \"pathname\",\n \"process\",\n \"processName\",\n \"relativeCreated\",\n \"stack\",\n \"tags\",\n \"thread\",\n \"threadName\",\n )\n)\n\n\ndef _extra_from_record(record):\n # type: (LogRecord) -> Dict[str, None]\n return {\n k: v\n for k, v in iteritems(vars(record))\n if k not in COMMON_RECORD_ATTRS\n and (not isinstance(k, str) or not k.startswith(\"_\"))\n }\n\n\nclass EventHandler(logging.Handler, object):\n def emit(self, record):\n # type: (LogRecord) -> Any\n with capture_internal_exceptions():\n self.format(record)\n return self._emit(record)\n\n def _emit(self, record):\n # type: (LogRecord) -> None\n if not _can_record(record):\n return\n\n hub = Hub.current\n if hub.client is None:\n return\n\n client_options = hub.client.options\n\n # exc_info might be None or (None, None, None)\n if record.exc_info is not None and record.exc_info[0] is not None:\n event, hint = event_from_exception(\n record.exc_info,\n client_options=client_options,\n mechanism={\"type\": \"logging\", \"handled\": True},\n )\n elif record.exc_info and record.exc_info[0] is None:\n event = {}\n hint = {}\n with capture_internal_exceptions():\n event[\"threads\"] = {\n \"values\": [\n {\n \"stacktrace\": current_stacktrace(\n client_options[\"with_locals\"]\n ),\n \"crashed\": False,\n \"current\": True,\n }\n ]\n }\n else:\n event = {}\n hint = {}\n\n hint[\"log_record\"] = record\n\n event[\"level\"] = _logging_to_event_level(record.levelname)\n event[\"logger\"] = record.name\n event[\"logentry\"] = {\"message\": to_string(record.msg), \"params\": record.args}\n event[\"extra\"] = _extra_from_record(record)\n\n hub.capture_event(event, hint=hint)\n\n\n# Legacy name\nSentryHandler = EventHandler\n\n\nclass BreadcrumbHandler(logging.Handler, object):\n def emit(self, record):\n # type: (LogRecord) -> Any\n with capture_internal_exceptions():\n self.format(record)\n return self._emit(record)\n\n def _emit(self, record):\n # type: (LogRecord) -> None\n if not _can_record(record):\n return\n\n Hub.current.add_breadcrumb(\n _breadcrumb_from_record(record), hint={\"log_record\": record}\n )\n", "path": "sentry_sdk/integrations/logging.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nimport logging\nimport datetime\n\nfrom sentry_sdk.hub import Hub\nfrom sentry_sdk.utils import (\n 
to_string,\n event_from_exception,\n current_stacktrace,\n capture_internal_exceptions,\n)\nfrom sentry_sdk.integrations import Integration\nfrom sentry_sdk._compat import iteritems\n\nfrom sentry_sdk._types import MYPY\n\nif MYPY:\n from logging import LogRecord\n from typing import Any\n from typing import Dict\n from typing import Optional\n\nDEFAULT_LEVEL = logging.INFO\nDEFAULT_EVENT_LEVEL = logging.ERROR\n\n_IGNORED_LOGGERS = set([\"sentry_sdk.errors\"])\n\n\ndef ignore_logger(name):\n # type: (str) -> None\n \"\"\"This disables recording (both in breadcrumbs and as events) calls to\n a logger of a specific name. Among other uses, many of our integrations\n use this to prevent their actions being recorded as breadcrumbs. Exposed\n to users as a way to quiet spammy loggers.\n \"\"\"\n _IGNORED_LOGGERS.add(name)\n\n\nclass LoggingIntegration(Integration):\n identifier = \"logging\"\n\n def __init__(self, level=DEFAULT_LEVEL, event_level=DEFAULT_EVENT_LEVEL):\n # type: (Optional[int], Optional[int]) -> None\n self._handler = None\n self._breadcrumb_handler = None\n\n if level is not None:\n self._breadcrumb_handler = BreadcrumbHandler(level=level)\n\n if event_level is not None:\n self._handler = EventHandler(level=event_level)\n\n def _handle_record(self, record):\n # type: (LogRecord) -> None\n if self._handler is not None and record.levelno >= self._handler.level:\n self._handler.handle(record)\n\n if (\n self._breadcrumb_handler is not None\n and record.levelno >= self._breadcrumb_handler.level\n ):\n self._breadcrumb_handler.handle(record)\n\n @staticmethod\n def setup_once():\n # type: () -> None\n old_callhandlers = logging.Logger.callHandlers # type: ignore\n\n def sentry_patched_callhandlers(self, record):\n # type: (Any, LogRecord) -> Any\n try:\n return old_callhandlers(self, record)\n finally:\n # This check is done twice, once also here before we even get\n # the integration. 
Otherwise we have a high chance of getting\n # into a recursion error when the integration is resolved\n # (this also is slower).\n if record.name not in _IGNORED_LOGGERS:\n integration = Hub.current.get_integration(LoggingIntegration)\n if integration is not None:\n integration._handle_record(record)\n\n logging.Logger.callHandlers = sentry_patched_callhandlers # type: ignore\n\n\ndef _can_record(record):\n # type: (LogRecord) -> bool\n return record.name not in _IGNORED_LOGGERS\n\n\ndef _breadcrumb_from_record(record):\n # type: (LogRecord) -> Dict[str, Any]\n return {\n \"ty\": \"log\",\n \"level\": _logging_to_event_level(record.levelname),\n \"category\": record.name,\n \"message\": record.message,\n \"timestamp\": datetime.datetime.utcfromtimestamp(record.created),\n \"data\": _extra_from_record(record),\n }\n\n\ndef _logging_to_event_level(levelname):\n # type: (str) -> str\n return {\"critical\": \"fatal\"}.get(levelname.lower(), levelname.lower())\n\n\nCOMMON_RECORD_ATTRS = frozenset(\n (\n \"args\",\n \"created\",\n \"exc_info\",\n \"exc_text\",\n \"filename\",\n \"funcName\",\n \"levelname\",\n \"levelno\",\n \"linenno\",\n \"lineno\",\n \"message\",\n \"module\",\n \"msecs\",\n \"msg\",\n \"name\",\n \"pathname\",\n \"process\",\n \"processName\",\n \"relativeCreated\",\n \"stack\",\n \"tags\",\n \"thread\",\n \"threadName\",\n )\n)\n\n\ndef _extra_from_record(record):\n # type: (LogRecord) -> Dict[str, None]\n return {\n k: v\n for k, v in iteritems(vars(record))\n if k not in COMMON_RECORD_ATTRS\n and (not isinstance(k, str) or not k.startswith(\"_\"))\n }\n\n\nclass EventHandler(logging.Handler, object):\n def emit(self, record):\n # type: (LogRecord) -> Any\n with capture_internal_exceptions():\n self.format(record)\n return self._emit(record)\n\n def _emit(self, record):\n # type: (LogRecord) -> None\n if not _can_record(record):\n return\n\n hub = Hub.current\n if hub.client is None:\n return\n\n client_options = hub.client.options\n\n # exc_info might be None or (None, None, None)\n if record.exc_info is not None and record.exc_info[0] is not None:\n event, hint = event_from_exception(\n record.exc_info,\n client_options=client_options,\n mechanism={\"type\": \"logging\", \"handled\": True},\n )\n elif record.exc_info and record.exc_info[0] is None:\n event = {}\n hint = {}\n with capture_internal_exceptions():\n event[\"threads\"] = {\n \"values\": [\n {\n \"stacktrace\": current_stacktrace(\n client_options[\"with_locals\"]\n ),\n \"crashed\": False,\n \"current\": True,\n }\n ]\n }\n else:\n event = {}\n hint = {}\n\n hint[\"log_record\"] = record\n\n event[\"level\"] = _logging_to_event_level(record.levelname)\n event[\"logger\"] = record.name\n event[\"logentry\"] = {\"message\": to_string(record.msg), \"params\": record.args}\n event[\"extra\"] = _extra_from_record(record)\n\n hub.capture_event(event, hint=hint)\n\n\n# Legacy name\nSentryHandler = EventHandler\n\n\nclass BreadcrumbHandler(logging.Handler, object):\n def emit(self, record):\n # type: (LogRecord) -> Any\n with capture_internal_exceptions():\n self.format(record)\n return self._emit(record)\n\n def _emit(self, record):\n # type: (LogRecord) -> None\n if not _can_record(record):\n return\n\n Hub.current.add_breadcrumb(\n _breadcrumb_from_record(record), hint={\"log_record\": record}\n )\n", "path": "sentry_sdk/integrations/logging.py"}]}
2,421
120
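
The golden diff in this row swaps `datetime.datetime.fromtimestamp` for `utcfromtimestamp` so breadcrumb timestamps stop drifting by the machine's UTC offset. A small standalone illustration of the difference, with no `sentry_sdk` dependency; the epoch value is an assumed stand-in for `LogRecord.created`:

```python
import datetime

created = 1563774000.0  # stand-in for LogRecord.created (seconds since the epoch)

local_ts = datetime.datetime.fromtimestamp(created)   # buggy: interpreted in the local timezone
utc_ts = datetime.datetime.utcfromtimestamp(created)  # fixed: naive UTC, what the server expects

# On any host whose local offset is non-zero, the two differ by exactly that offset.
print(local_ts.isoformat(), utc_ts.isoformat())
```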
gh_patches_debug_40732
rasdani/github-patches
git_diff
InternLM__lmdeploy-416
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- bug-fix: when using stream is False, continuous batching doesn't work ## Motivation bug fix : when using stream=False, the continuous batching is not working. See details in this issue: https://github.com/InternLM/lmdeploy/issues/308 ## Modification In non-streaming mode, the main thread which is responsible for receiving request is stuck in the following code since self.que.get() will wait until the queue is not empty. But when using non-stream mode, the queue is always empty unless a request is finished. So the newly coming request won't be processed before the last request is done. So the batching doesn't work. ```python while True: while self.que.qsize() > 1: self.que.get() finish, tm_outputs = self.que.get() outputs = _tm_dict_to_torch_dict(tm_outputs) ``` I modify here to avoid the long wait. just see the commits. I'm not sure if there is a better way to fix this. If any, please comment. I have a question, what is this line for? Won't it cause output tokens being lost??? ```python while self.que.qsize() > 1: self.que.get() ``` ## BC-breaking (Optional) Does the modification introduce changes that break the backward-compatibility of the downstream repositories? If so, please describe how it breaks the compatibility and how the downstream projects should modify their code to keep compatibility with this PR. ## Use cases (Optional) If this PR introduces a new feature, it is better to list some use cases here, and update the documentation. ## Checklist 1. Pre-commit or other linting tools are used to fix the potential lint issues. 2. The modification is covered by complete unit tests. If not, please add more unit tests to ensure the correctness. 3. If the modification has a dependency on downstream projects of a newer version, this PR should be tested with all supported versions of downstream projects. 4. The documentation has been modified accordingly, like docstring or example tutorials. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `lmdeploy/serve/openai/api_server.py` Content: ``` 1 # Copyright (c) OpenMMLab. All rights reserved. 2 import json 3 import os 4 import time 5 from http import HTTPStatus 6 from typing import AsyncGenerator, Optional 7 8 import fire 9 import uvicorn 10 from fastapi import BackgroundTasks, FastAPI, Request 11 from fastapi.responses import JSONResponse, StreamingResponse 12 13 from lmdeploy.serve.async_engine import AsyncEngine 14 from lmdeploy.serve.openai.protocol import ( # noqa: E501 15 ChatCompletionRequest, ChatCompletionResponse, 16 ChatCompletionResponseChoice, ChatCompletionResponseStreamChoice, 17 ChatCompletionStreamResponse, ChatMessage, DeltaMessage, EmbeddingsRequest, 18 EmbeddingsResponse, ErrorResponse, GenerateRequest, ModelCard, ModelList, 19 ModelPermission, UsageInfo) 20 21 os.environ['TM_LOG_LEVEL'] = 'ERROR' 22 23 24 class VariableInterface: 25 """A IO interface maintaining variables.""" 26 async_engine: AsyncEngine = None 27 request_hosts = [] 28 29 30 app = FastAPI(docs_url='/') 31 32 33 def get_model_list(): 34 """Available models. 35 36 Only provided one now. 
37 """ 38 return [VariableInterface.async_engine.tm_model.model_name] 39 40 41 @app.get('/v1/models') 42 def available_models(): 43 """Show available models.""" 44 model_cards = [] 45 for model_name in get_model_list(): 46 model_cards.append( 47 ModelCard(id=model_name, 48 root=model_name, 49 permission=[ModelPermission()])) 50 return ModelList(data=model_cards) 51 52 53 def create_error_response(status: HTTPStatus, message: str): 54 """Create error response according to http status and message. 55 56 Args: 57 status (HTTPStatus): HTTP status codes and reason phrases 58 message (str): error message 59 """ 60 return JSONResponse(ErrorResponse(message=message, 61 type='invalid_request_error').dict(), 62 status_code=status.value) 63 64 65 async def check_request(request) -> Optional[JSONResponse]: 66 """Check if a request is valid.""" 67 if request.model in get_model_list(): 68 return 69 ret = create_error_response( 70 HTTPStatus.NOT_FOUND, f'The model `{request.model}` does not exist.') 71 return ret 72 73 74 @app.post('/v1/chat/completions') 75 async def chat_completions_v1(request: ChatCompletionRequest, 76 raw_request: Request = None): 77 """Completion API similar to OpenAI's API. 78 79 Refer to `https://platform.openai.com/docs/api-reference/chat/create` 80 for the API specification. 81 82 The request should be a JSON object with the following fields: 83 - model: model name. Available from /v1/models. 84 - messages: string prompt or chat history in OpenAI format. 85 - temperature (float): to modulate the next token probability 86 - top_p (float): If set to float < 1, only the smallest set of most 87 probable tokens with probabilities that add up to top_p or higher 88 are kept for generation. 89 - n (int): How many chat completion choices to generate for each input 90 message. Only support one here. 91 - stream: whether to stream the results or not. Default to false. 92 - max_tokens (int): output token nums 93 - repetition_penalty (float): The parameter for repetition penalty. 94 1.0 means no penalty 95 96 Additional arguments supported by LMDeploy: 97 - renew_session (bool): Whether renew the session. Can be used when the 98 session length is exceeded. 
99 - ignore_eos (bool): indicator for ignoring eos 100 101 Currently we do not support the following features: 102 - function_call (Users should implement this by themselves) 103 - logit_bias (not supported yet) 104 - presence_penalty (replaced with repetition_penalty) 105 - frequency_penalty (replaced with repetition_penalty) 106 """ 107 instance_id = int(raw_request.client.host.replace('.', '')) 108 109 error_check_ret = await check_request(request) 110 if error_check_ret is not None: 111 return error_check_ret 112 113 model_name = request.model 114 request_id = str(instance_id) 115 created_time = int(time.time()) 116 117 result_generator = VariableInterface.async_engine.generate_openai( 118 request.messages, 119 instance_id, 120 request.stream, 121 request.renew_session, 122 request_output_len=request.max_tokens if request.max_tokens else 512, 123 stop=request.stop, 124 top_p=request.top_p, 125 temperature=request.temperature, 126 repetition_penalty=request.repetition_penalty, 127 ignore_eos=request.ignore_eos) 128 129 async def abort_request() -> None: 130 async for _ in VariableInterface.async_engine.generate_openai( 131 request.messages, 132 instance_id, 133 request.stream, 134 request.renew_session, 135 stop=True): 136 pass 137 138 def create_stream_response_json( 139 index: int, 140 text: str, 141 finish_reason: Optional[str] = None, 142 ) -> str: 143 choice_data = ChatCompletionResponseStreamChoice( 144 index=index, 145 delta=DeltaMessage(role='assistant', content=text), 146 finish_reason=finish_reason, 147 ) 148 response = ChatCompletionStreamResponse( 149 id=request_id, 150 created=created_time, 151 model=model_name, 152 choices=[choice_data], 153 ) 154 response_json = response.json(ensure_ascii=False) 155 156 return response_json 157 158 async def completion_stream_generator() -> AsyncGenerator[str, None]: 159 # First chunk with role 160 for i in range(request.n): 161 choice_data = ChatCompletionResponseStreamChoice( 162 index=i, 163 delta=DeltaMessage(role='assistant'), 164 finish_reason=None, 165 ) 166 chunk = ChatCompletionStreamResponse(id=request_id, 167 choices=[choice_data], 168 model=model_name) 169 data = chunk.json(exclude_unset=True, ensure_ascii=False) 170 yield f'data: {data}\n\n' 171 172 async for res in result_generator: 173 response_json = create_stream_response_json( 174 index=0, 175 text=res.response, 176 ) 177 yield f'data: {response_json}\n\n' 178 yield 'data: [DONE]\n\n' 179 180 # Streaming response 181 if request.stream: 182 background_tasks = BackgroundTasks() 183 # Abort the request if the client disconnects. 184 background_tasks.add_task(abort_request) 185 return StreamingResponse(completion_stream_generator(), 186 media_type='text/event-stream', 187 background=background_tasks) 188 189 # Non-streaming response 190 final_res = None 191 async for res in result_generator: 192 if await raw_request.is_disconnected(): 193 # Abort the request if the client disconnects. 
194 await abort_request() 195 return create_error_response(HTTPStatus.BAD_REQUEST, 196 'Client disconnected') 197 final_res = res 198 assert final_res is not None 199 choices = [] 200 choice_data = ChatCompletionResponseChoice( 201 index=0, 202 message=ChatMessage(role='assistant', content=final_res.response), 203 finish_reason=final_res.finish_reason, 204 ) 205 choices.append(choice_data) 206 207 total_tokens = sum([ 208 final_res.history_token_len, final_res.input_token_len, 209 final_res.generate_token_len 210 ]) 211 usage = UsageInfo( 212 prompt_tokens=final_res.input_token_len, 213 completion_tokens=final_res.generate_token_len, 214 total_tokens=total_tokens, 215 ) 216 response = ChatCompletionResponse( 217 id=request_id, 218 created=created_time, 219 model=model_name, 220 choices=choices, 221 usage=usage, 222 ) 223 224 return response 225 226 227 @app.post('/v1/embeddings') 228 async def create_embeddings(request: EmbeddingsRequest, 229 raw_request: Request = None): 230 """Creates embeddings for the text.""" 231 error_check_ret = await check_request(request) 232 if error_check_ret is not None: 233 return error_check_ret 234 235 embedding = await VariableInterface.async_engine.get_embeddings( 236 request.input) 237 data = [{'object': 'embedding', 'embedding': embedding, 'index': 0}] 238 token_num = len(embedding) 239 return EmbeddingsResponse( 240 data=data, 241 model=request.model, 242 usage=UsageInfo( 243 prompt_tokens=token_num, 244 total_tokens=token_num, 245 completion_tokens=None, 246 ), 247 ).dict(exclude_none=True) 248 249 250 @app.post('/generate') 251 async def generate(request: GenerateRequest, raw_request: Request = None): 252 """Generate completion for the request. 253 254 The request should be a JSON object with the following fields: 255 - prompt: the prompt to use for the generation. 256 - instance_id: determine which instance will be called. If not specified 257 with a value other than -1, using host ip directly. 258 - sequence_start (bool): indicator for starting a sequence. 259 - sequence_end (bool): indicator for ending a sequence 260 - stream: whether to stream the results or not. 261 - stop: whether to stop the session response or not. 262 - request_output_len (int): output token nums 263 - step (int): the offset of the k/v cache 264 - top_p (float): If set to float < 1, only the smallest set of most 265 probable tokens with probabilities that add up to top_p or higher 266 are kept for generation. 267 - top_k (int): The number of the highest probability vocabulary 268 tokens to keep for top-k-filtering 269 - temperature (float): to modulate the next token probability 270 - repetition_penalty (float): The parameter for repetition penalty. 
271 1.0 means no penalty 272 - ignore_eos (bool): indicator for ignoring eos 273 """ 274 if request.instance_id == -1: 275 instance_id = int(raw_request.client.host.replace('.', '')) 276 request.instance_id = instance_id 277 278 generation = VariableInterface.async_engine.generate( 279 request.prompt, 280 request.instance_id, 281 stream_response=True, # always use stream to enable batching 282 sequence_start=request.sequence_start, 283 sequence_end=request.sequence_end, 284 request_output_len=request.request_output_len, 285 top_p=request.top_p, 286 top_k=request.top_k, 287 stop=request.stop, 288 temperature=request.temperature, 289 repetition_penalty=request.repetition_penalty, 290 ignore_eos=request.ignore_eos) 291 292 # Streaming case 293 async def stream_results() -> AsyncGenerator[bytes, None]: 294 async for out in generation: 295 ret = { 296 'text': out.response, 297 'tokens': out.generate_token_len, 298 'finish_reason': out.finish_reason 299 } 300 yield (json.dumps(ret) + '\0').encode('utf-8') 301 302 if request.stream: 303 return StreamingResponse(stream_results()) 304 else: 305 ret = {} 306 text = '' 307 tokens = 0 308 finish_reason = None 309 async for out in generation: 310 text += out.response 311 tokens += out.generate_token_len 312 finish_reason = out.finish_reason 313 ret = {'text': text, 'tokens': tokens, 'finish_reason': finish_reason} 314 return JSONResponse(ret) 315 316 317 def main(model_path: str, 318 server_name: str = 'localhost', 319 server_port: int = 23333, 320 instance_num: int = 32, 321 tp: int = 1): 322 """An example to perform model inference through the command line 323 interface. 324 325 Args: 326 model_path (str): the path of the deployed model 327 server_name (str): host ip for serving 328 server_port (int): server port 329 instance_num (int): number of instances of turbomind model 330 tp (int): tensor parallel 331 """ 332 VariableInterface.async_engine = AsyncEngine(model_path=model_path, 333 instance_num=instance_num, 334 tp=tp) 335 uvicorn.run(app=app, host=server_name, port=server_port, log_level='info') 336 337 338 if __name__ == '__main__': 339 fire.Fire(main) 340 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/lmdeploy/serve/openai/api_server.py b/lmdeploy/serve/openai/api_server.py --- a/lmdeploy/serve/openai/api_server.py +++ b/lmdeploy/serve/openai/api_server.py @@ -57,9 +57,10 @@ status (HTTPStatus): HTTP status codes and reason phrases message (str): error message """ - return JSONResponse(ErrorResponse(message=message, - type='invalid_request_error').dict(), - status_code=status.value) + return JSONResponse( + ErrorResponse(message=message, + type='invalid_request_error', + code=status.value).dict()) async def check_request(request) -> Optional[JSONResponse]: @@ -117,7 +118,7 @@ result_generator = VariableInterface.async_engine.generate_openai( request.messages, instance_id, - request.stream, + True, # always use stream to enable batching request.renew_session, request_output_len=request.max_tokens if request.max_tokens else 512, stop=request.stop, @@ -130,7 +131,7 @@ async for _ in VariableInterface.async_engine.generate_openai( request.messages, instance_id, - request.stream, + True, request.renew_session, stop=True): pass @@ -188,6 +189,7 @@ # Non-streaming response final_res = None + text = '' async for res in result_generator: if await raw_request.is_disconnected(): # Abort the request if the client disconnects. @@ -195,11 +197,12 @@ return create_error_response(HTTPStatus.BAD_REQUEST, 'Client disconnected') final_res = res + text += res.response assert final_res is not None choices = [] choice_data = ChatCompletionResponseChoice( index=0, - message=ChatMessage(role='assistant', content=final_res.response), + message=ChatMessage(role='assistant', content=text), finish_reason=final_res.finish_reason, ) choices.append(choice_data) @@ -308,7 +311,7 @@ finish_reason = None async for out in generation: text += out.response - tokens += out.generate_token_len + tokens = out.generate_token_len finish_reason = out.finish_reason ret = {'text': text, 'tokens': tokens, 'finish_reason': finish_reason} return JSONResponse(ret)
{"golden_diff": "diff --git a/lmdeploy/serve/openai/api_server.py b/lmdeploy/serve/openai/api_server.py\n--- a/lmdeploy/serve/openai/api_server.py\n+++ b/lmdeploy/serve/openai/api_server.py\n@@ -57,9 +57,10 @@\n status (HTTPStatus): HTTP status codes and reason phrases\n message (str): error message\n \"\"\"\n- return JSONResponse(ErrorResponse(message=message,\n- type='invalid_request_error').dict(),\n- status_code=status.value)\n+ return JSONResponse(\n+ ErrorResponse(message=message,\n+ type='invalid_request_error',\n+ code=status.value).dict())\n \n \n async def check_request(request) -> Optional[JSONResponse]:\n@@ -117,7 +118,7 @@\n result_generator = VariableInterface.async_engine.generate_openai(\n request.messages,\n instance_id,\n- request.stream,\n+ True, # always use stream to enable batching\n request.renew_session,\n request_output_len=request.max_tokens if request.max_tokens else 512,\n stop=request.stop,\n@@ -130,7 +131,7 @@\n async for _ in VariableInterface.async_engine.generate_openai(\n request.messages,\n instance_id,\n- request.stream,\n+ True,\n request.renew_session,\n stop=True):\n pass\n@@ -188,6 +189,7 @@\n \n # Non-streaming response\n final_res = None\n+ text = ''\n async for res in result_generator:\n if await raw_request.is_disconnected():\n # Abort the request if the client disconnects.\n@@ -195,11 +197,12 @@\n return create_error_response(HTTPStatus.BAD_REQUEST,\n 'Client disconnected')\n final_res = res\n+ text += res.response\n assert final_res is not None\n choices = []\n choice_data = ChatCompletionResponseChoice(\n index=0,\n- message=ChatMessage(role='assistant', content=final_res.response),\n+ message=ChatMessage(role='assistant', content=text),\n finish_reason=final_res.finish_reason,\n )\n choices.append(choice_data)\n@@ -308,7 +311,7 @@\n finish_reason = None\n async for out in generation:\n text += out.response\n- tokens += out.generate_token_len\n+ tokens = out.generate_token_len\n finish_reason = out.finish_reason\n ret = {'text': text, 'tokens': tokens, 'finish_reason': finish_reason}\n return JSONResponse(ret)\n", "issue": "bug-fix: when using stream is False, continuous batching doesn't work\n## Motivation\r\n\r\nbug fix : when using stream=False, the continuous batching is not working.\r\nSee details in this issue: https://github.com/InternLM/lmdeploy/issues/308\r\n\r\n## Modification\r\n\r\nIn non-streaming mode, the main thread which is responsible for receiving request is stuck in the following code since self.que.get() will wait until the queue is not empty.\r\n\r\nBut when using non-stream mode, the queue is always empty unless a request is finished.\r\n\r\nSo the newly coming request won't be processed before the last request is done. So the batching doesn't work.\r\n\r\n```python\r\n while True:\r\n while self.que.qsize() > 1:\r\n self.que.get()\r\n finish, tm_outputs = self.que.get()\r\n outputs = _tm_dict_to_torch_dict(tm_outputs)\r\n```\r\n\r\nI modify here to avoid the long wait. just see the commits.\r\n\r\nI'm not sure if there is a better way to fix this. If any, please comment.\r\n\r\nI have a question, what is this line for? 
Won't it cause output tokens being lost???\r\n```python\r\nwhile self.que.qsize() > 1: self.que.get()\r\n```\r\n\r\n## BC-breaking (Optional)\r\n\r\nDoes the modification introduce changes that break the backward-compatibility of the downstream repositories?\r\nIf so, please describe how it breaks the compatibility and how the downstream projects should modify their code to keep compatibility with this PR.\r\n\r\n## Use cases (Optional)\r\n\r\nIf this PR introduces a new feature, it is better to list some use cases here, and update the documentation.\r\n\r\n## Checklist\r\n\r\n1. Pre-commit or other linting tools are used to fix the potential lint issues.\r\n2. The modification is covered by complete unit tests. If not, please add more unit tests to ensure the correctness.\r\n3. If the modification has a dependency on downstream projects of a newer version, this PR should be tested with all supported versions of downstream projects.\r\n4. The documentation has been modified accordingly, like docstring or example tutorials.\r\n\n", "before_files": [{"content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport json\nimport os\nimport time\nfrom http import HTTPStatus\nfrom typing import AsyncGenerator, Optional\n\nimport fire\nimport uvicorn\nfrom fastapi import BackgroundTasks, FastAPI, Request\nfrom fastapi.responses import JSONResponse, StreamingResponse\n\nfrom lmdeploy.serve.async_engine import AsyncEngine\nfrom lmdeploy.serve.openai.protocol import ( # noqa: E501\n ChatCompletionRequest, ChatCompletionResponse,\n ChatCompletionResponseChoice, ChatCompletionResponseStreamChoice,\n ChatCompletionStreamResponse, ChatMessage, DeltaMessage, EmbeddingsRequest,\n EmbeddingsResponse, ErrorResponse, GenerateRequest, ModelCard, ModelList,\n ModelPermission, UsageInfo)\n\nos.environ['TM_LOG_LEVEL'] = 'ERROR'\n\n\nclass VariableInterface:\n \"\"\"A IO interface maintaining variables.\"\"\"\n async_engine: AsyncEngine = None\n request_hosts = []\n\n\napp = FastAPI(docs_url='/')\n\n\ndef get_model_list():\n \"\"\"Available models.\n\n Only provided one now.\n \"\"\"\n return [VariableInterface.async_engine.tm_model.model_name]\n\n\[email protected]('/v1/models')\ndef available_models():\n \"\"\"Show available models.\"\"\"\n model_cards = []\n for model_name in get_model_list():\n model_cards.append(\n ModelCard(id=model_name,\n root=model_name,\n permission=[ModelPermission()]))\n return ModelList(data=model_cards)\n\n\ndef create_error_response(status: HTTPStatus, message: str):\n \"\"\"Create error response according to http status and message.\n\n Args:\n status (HTTPStatus): HTTP status codes and reason phrases\n message (str): error message\n \"\"\"\n return JSONResponse(ErrorResponse(message=message,\n type='invalid_request_error').dict(),\n status_code=status.value)\n\n\nasync def check_request(request) -> Optional[JSONResponse]:\n \"\"\"Check if a request is valid.\"\"\"\n if request.model in get_model_list():\n return\n ret = create_error_response(\n HTTPStatus.NOT_FOUND, f'The model `{request.model}` does not exist.')\n return ret\n\n\[email protected]('/v1/chat/completions')\nasync def chat_completions_v1(request: ChatCompletionRequest,\n raw_request: Request = None):\n \"\"\"Completion API similar to OpenAI's API.\n\n Refer to `https://platform.openai.com/docs/api-reference/chat/create`\n for the API specification.\n\n The request should be a JSON object with the following fields:\n - model: model name. 
Available from /v1/models.\n - messages: string prompt or chat history in OpenAI format.\n - temperature (float): to modulate the next token probability\n - top_p (float): If set to float < 1, only the smallest set of most\n probable tokens with probabilities that add up to top_p or higher\n are kept for generation.\n - n (int): How many chat completion choices to generate for each input\n message. Only support one here.\n - stream: whether to stream the results or not. Default to false.\n - max_tokens (int): output token nums\n - repetition_penalty (float): The parameter for repetition penalty.\n 1.0 means no penalty\n\n Additional arguments supported by LMDeploy:\n - renew_session (bool): Whether renew the session. Can be used when the\n session length is exceeded.\n - ignore_eos (bool): indicator for ignoring eos\n\n Currently we do not support the following features:\n - function_call (Users should implement this by themselves)\n - logit_bias (not supported yet)\n - presence_penalty (replaced with repetition_penalty)\n - frequency_penalty (replaced with repetition_penalty)\n \"\"\"\n instance_id = int(raw_request.client.host.replace('.', ''))\n\n error_check_ret = await check_request(request)\n if error_check_ret is not None:\n return error_check_ret\n\n model_name = request.model\n request_id = str(instance_id)\n created_time = int(time.time())\n\n result_generator = VariableInterface.async_engine.generate_openai(\n request.messages,\n instance_id,\n request.stream,\n request.renew_session,\n request_output_len=request.max_tokens if request.max_tokens else 512,\n stop=request.stop,\n top_p=request.top_p,\n temperature=request.temperature,\n repetition_penalty=request.repetition_penalty,\n ignore_eos=request.ignore_eos)\n\n async def abort_request() -> None:\n async for _ in VariableInterface.async_engine.generate_openai(\n request.messages,\n instance_id,\n request.stream,\n request.renew_session,\n stop=True):\n pass\n\n def create_stream_response_json(\n index: int,\n text: str,\n finish_reason: Optional[str] = None,\n ) -> str:\n choice_data = ChatCompletionResponseStreamChoice(\n index=index,\n delta=DeltaMessage(role='assistant', content=text),\n finish_reason=finish_reason,\n )\n response = ChatCompletionStreamResponse(\n id=request_id,\n created=created_time,\n model=model_name,\n choices=[choice_data],\n )\n response_json = response.json(ensure_ascii=False)\n\n return response_json\n\n async def completion_stream_generator() -> AsyncGenerator[str, None]:\n # First chunk with role\n for i in range(request.n):\n choice_data = ChatCompletionResponseStreamChoice(\n index=i,\n delta=DeltaMessage(role='assistant'),\n finish_reason=None,\n )\n chunk = ChatCompletionStreamResponse(id=request_id,\n choices=[choice_data],\n model=model_name)\n data = chunk.json(exclude_unset=True, ensure_ascii=False)\n yield f'data: {data}\\n\\n'\n\n async for res in result_generator:\n response_json = create_stream_response_json(\n index=0,\n text=res.response,\n )\n yield f'data: {response_json}\\n\\n'\n yield 'data: [DONE]\\n\\n'\n\n # Streaming response\n if request.stream:\n background_tasks = BackgroundTasks()\n # Abort the request if the client disconnects.\n background_tasks.add_task(abort_request)\n return StreamingResponse(completion_stream_generator(),\n media_type='text/event-stream',\n background=background_tasks)\n\n # Non-streaming response\n final_res = None\n async for res in result_generator:\n if await raw_request.is_disconnected():\n # Abort the request if the client disconnects.\n 
await abort_request()\n return create_error_response(HTTPStatus.BAD_REQUEST,\n 'Client disconnected')\n final_res = res\n assert final_res is not None\n choices = []\n choice_data = ChatCompletionResponseChoice(\n index=0,\n message=ChatMessage(role='assistant', content=final_res.response),\n finish_reason=final_res.finish_reason,\n )\n choices.append(choice_data)\n\n total_tokens = sum([\n final_res.history_token_len, final_res.input_token_len,\n final_res.generate_token_len\n ])\n usage = UsageInfo(\n prompt_tokens=final_res.input_token_len,\n completion_tokens=final_res.generate_token_len,\n total_tokens=total_tokens,\n )\n response = ChatCompletionResponse(\n id=request_id,\n created=created_time,\n model=model_name,\n choices=choices,\n usage=usage,\n )\n\n return response\n\n\[email protected]('/v1/embeddings')\nasync def create_embeddings(request: EmbeddingsRequest,\n raw_request: Request = None):\n \"\"\"Creates embeddings for the text.\"\"\"\n error_check_ret = await check_request(request)\n if error_check_ret is not None:\n return error_check_ret\n\n embedding = await VariableInterface.async_engine.get_embeddings(\n request.input)\n data = [{'object': 'embedding', 'embedding': embedding, 'index': 0}]\n token_num = len(embedding)\n return EmbeddingsResponse(\n data=data,\n model=request.model,\n usage=UsageInfo(\n prompt_tokens=token_num,\n total_tokens=token_num,\n completion_tokens=None,\n ),\n ).dict(exclude_none=True)\n\n\[email protected]('/generate')\nasync def generate(request: GenerateRequest, raw_request: Request = None):\n \"\"\"Generate completion for the request.\n\n The request should be a JSON object with the following fields:\n - prompt: the prompt to use for the generation.\n - instance_id: determine which instance will be called. 
If not specified\n with a value other than -1, using host ip directly.\n - sequence_start (bool): indicator for starting a sequence.\n - sequence_end (bool): indicator for ending a sequence\n - stream: whether to stream the results or not.\n - stop: whether to stop the session response or not.\n - request_output_len (int): output token nums\n - step (int): the offset of the k/v cache\n - top_p (float): If set to float < 1, only the smallest set of most\n probable tokens with probabilities that add up to top_p or higher\n are kept for generation.\n - top_k (int): The number of the highest probability vocabulary\n tokens to keep for top-k-filtering\n - temperature (float): to modulate the next token probability\n - repetition_penalty (float): The parameter for repetition penalty.\n 1.0 means no penalty\n - ignore_eos (bool): indicator for ignoring eos\n \"\"\"\n if request.instance_id == -1:\n instance_id = int(raw_request.client.host.replace('.', ''))\n request.instance_id = instance_id\n\n generation = VariableInterface.async_engine.generate(\n request.prompt,\n request.instance_id,\n stream_response=True, # always use stream to enable batching\n sequence_start=request.sequence_start,\n sequence_end=request.sequence_end,\n request_output_len=request.request_output_len,\n top_p=request.top_p,\n top_k=request.top_k,\n stop=request.stop,\n temperature=request.temperature,\n repetition_penalty=request.repetition_penalty,\n ignore_eos=request.ignore_eos)\n\n # Streaming case\n async def stream_results() -> AsyncGenerator[bytes, None]:\n async for out in generation:\n ret = {\n 'text': out.response,\n 'tokens': out.generate_token_len,\n 'finish_reason': out.finish_reason\n }\n yield (json.dumps(ret) + '\\0').encode('utf-8')\n\n if request.stream:\n return StreamingResponse(stream_results())\n else:\n ret = {}\n text = ''\n tokens = 0\n finish_reason = None\n async for out in generation:\n text += out.response\n tokens += out.generate_token_len\n finish_reason = out.finish_reason\n ret = {'text': text, 'tokens': tokens, 'finish_reason': finish_reason}\n return JSONResponse(ret)\n\n\ndef main(model_path: str,\n server_name: str = 'localhost',\n server_port: int = 23333,\n instance_num: int = 32,\n tp: int = 1):\n \"\"\"An example to perform model inference through the command line\n interface.\n\n Args:\n model_path (str): the path of the deployed model\n server_name (str): host ip for serving\n server_port (int): server port\n instance_num (int): number of instances of turbomind model\n tp (int): tensor parallel\n \"\"\"\n VariableInterface.async_engine = AsyncEngine(model_path=model_path,\n instance_num=instance_num,\n tp=tp)\n uvicorn.run(app=app, host=server_name, port=server_port, log_level='info')\n\n\nif __name__ == '__main__':\n fire.Fire(main)\n", "path": "lmdeploy/serve/openai/api_server.py"}], "after_files": [{"content": "# Copyright (c) OpenMMLab. 
All rights reserved.\nimport json\nimport os\nimport time\nfrom http import HTTPStatus\nfrom typing import AsyncGenerator, Optional\n\nimport fire\nimport uvicorn\nfrom fastapi import BackgroundTasks, FastAPI, Request\nfrom fastapi.responses import JSONResponse, StreamingResponse\n\nfrom lmdeploy.serve.async_engine import AsyncEngine\nfrom lmdeploy.serve.openai.protocol import ( # noqa: E501\n ChatCompletionRequest, ChatCompletionResponse,\n ChatCompletionResponseChoice, ChatCompletionResponseStreamChoice,\n ChatCompletionStreamResponse, ChatMessage, DeltaMessage, EmbeddingsRequest,\n EmbeddingsResponse, ErrorResponse, GenerateRequest, ModelCard, ModelList,\n ModelPermission, UsageInfo)\n\nos.environ['TM_LOG_LEVEL'] = 'ERROR'\n\n\nclass VariableInterface:\n \"\"\"A IO interface maintaining variables.\"\"\"\n async_engine: AsyncEngine = None\n request_hosts = []\n\n\napp = FastAPI(docs_url='/')\n\n\ndef get_model_list():\n \"\"\"Available models.\n\n Only provided one now.\n \"\"\"\n return [VariableInterface.async_engine.tm_model.model_name]\n\n\[email protected]('/v1/models')\ndef available_models():\n \"\"\"Show available models.\"\"\"\n model_cards = []\n for model_name in get_model_list():\n model_cards.append(\n ModelCard(id=model_name,\n root=model_name,\n permission=[ModelPermission()]))\n return ModelList(data=model_cards)\n\n\ndef create_error_response(status: HTTPStatus, message: str):\n \"\"\"Create error response according to http status and message.\n\n Args:\n status (HTTPStatus): HTTP status codes and reason phrases\n message (str): error message\n \"\"\"\n return JSONResponse(\n ErrorResponse(message=message,\n type='invalid_request_error',\n code=status.value).dict())\n\n\nasync def check_request(request) -> Optional[JSONResponse]:\n \"\"\"Check if a request is valid.\"\"\"\n if request.model in get_model_list():\n return\n ret = create_error_response(\n HTTPStatus.NOT_FOUND, f'The model `{request.model}` does not exist.')\n return ret\n\n\[email protected]('/v1/chat/completions')\nasync def chat_completions_v1(request: ChatCompletionRequest,\n raw_request: Request = None):\n \"\"\"Completion API similar to OpenAI's API.\n\n Refer to `https://platform.openai.com/docs/api-reference/chat/create`\n for the API specification.\n\n The request should be a JSON object with the following fields:\n - model: model name. Available from /v1/models.\n - messages: string prompt or chat history in OpenAI format.\n - temperature (float): to modulate the next token probability\n - top_p (float): If set to float < 1, only the smallest set of most\n probable tokens with probabilities that add up to top_p or higher\n are kept for generation.\n - n (int): How many chat completion choices to generate for each input\n message. Only support one here.\n - stream: whether to stream the results or not. Default to false.\n - max_tokens (int): output token nums\n - repetition_penalty (float): The parameter for repetition penalty.\n 1.0 means no penalty\n\n Additional arguments supported by LMDeploy:\n - renew_session (bool): Whether renew the session. 
Can be used when the\n session length is exceeded.\n - ignore_eos (bool): indicator for ignoring eos\n\n Currently we do not support the following features:\n - function_call (Users should implement this by themselves)\n - logit_bias (not supported yet)\n - presence_penalty (replaced with repetition_penalty)\n - frequency_penalty (replaced with repetition_penalty)\n \"\"\"\n instance_id = int(raw_request.client.host.replace('.', ''))\n\n error_check_ret = await check_request(request)\n if error_check_ret is not None:\n return error_check_ret\n\n model_name = request.model\n request_id = str(instance_id)\n created_time = int(time.time())\n\n result_generator = VariableInterface.async_engine.generate_openai(\n request.messages,\n instance_id,\n True, # always use stream to enable batching\n request.renew_session,\n request_output_len=request.max_tokens if request.max_tokens else 512,\n stop=request.stop,\n top_p=request.top_p,\n temperature=request.temperature,\n repetition_penalty=request.repetition_penalty,\n ignore_eos=request.ignore_eos)\n\n async def abort_request() -> None:\n async for _ in VariableInterface.async_engine.generate_openai(\n request.messages,\n instance_id,\n True,\n request.renew_session,\n stop=True):\n pass\n\n def create_stream_response_json(\n index: int,\n text: str,\n finish_reason: Optional[str] = None,\n ) -> str:\n choice_data = ChatCompletionResponseStreamChoice(\n index=index,\n delta=DeltaMessage(role='assistant', content=text),\n finish_reason=finish_reason,\n )\n response = ChatCompletionStreamResponse(\n id=request_id,\n created=created_time,\n model=model_name,\n choices=[choice_data],\n )\n response_json = response.json(ensure_ascii=False)\n\n return response_json\n\n async def completion_stream_generator() -> AsyncGenerator[str, None]:\n # First chunk with role\n for i in range(request.n):\n choice_data = ChatCompletionResponseStreamChoice(\n index=i,\n delta=DeltaMessage(role='assistant'),\n finish_reason=None,\n )\n chunk = ChatCompletionStreamResponse(id=request_id,\n choices=[choice_data],\n model=model_name)\n data = chunk.json(exclude_unset=True, ensure_ascii=False)\n yield f'data: {data}\\n\\n'\n\n async for res in result_generator:\n response_json = create_stream_response_json(\n index=0,\n text=res.response,\n )\n yield f'data: {response_json}\\n\\n'\n yield 'data: [DONE]\\n\\n'\n\n # Streaming response\n if request.stream:\n background_tasks = BackgroundTasks()\n # Abort the request if the client disconnects.\n background_tasks.add_task(abort_request)\n return StreamingResponse(completion_stream_generator(),\n media_type='text/event-stream',\n background=background_tasks)\n\n # Non-streaming response\n final_res = None\n text = ''\n async for res in result_generator:\n if await raw_request.is_disconnected():\n # Abort the request if the client disconnects.\n await abort_request()\n return create_error_response(HTTPStatus.BAD_REQUEST,\n 'Client disconnected')\n final_res = res\n text += res.response\n assert final_res is not None\n choices = []\n choice_data = ChatCompletionResponseChoice(\n index=0,\n message=ChatMessage(role='assistant', content=text),\n finish_reason=final_res.finish_reason,\n )\n choices.append(choice_data)\n\n total_tokens = sum([\n final_res.history_token_len, final_res.input_token_len,\n final_res.generate_token_len\n ])\n usage = UsageInfo(\n prompt_tokens=final_res.input_token_len,\n completion_tokens=final_res.generate_token_len,\n total_tokens=total_tokens,\n )\n response = ChatCompletionResponse(\n id=request_id,\n 
created=created_time,\n model=model_name,\n choices=choices,\n usage=usage,\n )\n\n return response\n\n\[email protected]('/v1/embeddings')\nasync def create_embeddings(request: EmbeddingsRequest,\n raw_request: Request = None):\n \"\"\"Creates embeddings for the text.\"\"\"\n error_check_ret = await check_request(request)\n if error_check_ret is not None:\n return error_check_ret\n\n embedding = await VariableInterface.async_engine.get_embeddings(\n request.input)\n data = [{'object': 'embedding', 'embedding': embedding, 'index': 0}]\n token_num = len(embedding)\n return EmbeddingsResponse(\n data=data,\n model=request.model,\n usage=UsageInfo(\n prompt_tokens=token_num,\n total_tokens=token_num,\n completion_tokens=None,\n ),\n ).dict(exclude_none=True)\n\n\[email protected]('/generate')\nasync def generate(request: GenerateRequest, raw_request: Request = None):\n \"\"\"Generate completion for the request.\n\n The request should be a JSON object with the following fields:\n - prompt: the prompt to use for the generation.\n - instance_id: determine which instance will be called. If not specified\n with a value other than -1, using host ip directly.\n - sequence_start (bool): indicator for starting a sequence.\n - sequence_end (bool): indicator for ending a sequence\n - stream: whether to stream the results or not.\n - stop: whether to stop the session response or not.\n - request_output_len (int): output token nums\n - step (int): the offset of the k/v cache\n - top_p (float): If set to float < 1, only the smallest set of most\n probable tokens with probabilities that add up to top_p or higher\n are kept for generation.\n - top_k (int): The number of the highest probability vocabulary\n tokens to keep for top-k-filtering\n - temperature (float): to modulate the next token probability\n - repetition_penalty (float): The parameter for repetition penalty.\n 1.0 means no penalty\n - ignore_eos (bool): indicator for ignoring eos\n \"\"\"\n if request.instance_id == -1:\n instance_id = int(raw_request.client.host.replace('.', ''))\n request.instance_id = instance_id\n\n generation = VariableInterface.async_engine.generate(\n request.prompt,\n request.instance_id,\n stream_response=True, # always use stream to enable batching\n sequence_start=request.sequence_start,\n sequence_end=request.sequence_end,\n request_output_len=request.request_output_len,\n top_p=request.top_p,\n top_k=request.top_k,\n stop=request.stop,\n temperature=request.temperature,\n repetition_penalty=request.repetition_penalty,\n ignore_eos=request.ignore_eos)\n\n # Streaming case\n async def stream_results() -> AsyncGenerator[bytes, None]:\n async for out in generation:\n ret = {\n 'text': out.response,\n 'tokens': out.generate_token_len,\n 'finish_reason': out.finish_reason\n }\n yield (json.dumps(ret) + '\\0').encode('utf-8')\n\n if request.stream:\n return StreamingResponse(stream_results())\n else:\n ret = {}\n text = ''\n tokens = 0\n finish_reason = None\n async for out in generation:\n text += out.response\n tokens = out.generate_token_len\n finish_reason = out.finish_reason\n ret = {'text': text, 'tokens': tokens, 'finish_reason': finish_reason}\n return JSONResponse(ret)\n\n\ndef main(model_path: str,\n server_name: str = 'localhost',\n server_port: int = 23333,\n instance_num: int = 32,\n tp: int = 1):\n \"\"\"An example to perform model inference through the command line\n interface.\n\n Args:\n model_path (str): the path of the deployed model\n server_name (str): host ip for serving\n server_port (int): server 
port\n instance_num (int): number of instances of turbomind model\n tp (int): tensor parallel\n \"\"\"\n VariableInterface.async_engine = AsyncEngine(model_path=model_path,\n instance_num=instance_num,\n tp=tp)\n uvicorn.run(app=app, host=server_name, port=server_port, log_level='info')\n\n\nif __name__ == '__main__':\n fire.Fire(main)\n", "path": "lmdeploy/serve/openai/api_server.py"}]}
4,087
555
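As an aside to the record above, the `/generate` route in `api_server.py` can be exercised with a plain HTTP client. The sketch below assumes the `main()` defaults shown in the file (`localhost:23333`) and that any `GenerateRequest` fields left unspecified fall back to defaults defined in the protocol module, which is not shown in the record.

```python
# Illustrative, non-streaming call to the /generate endpoint from the record.
# Host/port follow the main() defaults; instance_id=-1 lets the server derive
# an id from the client address, exactly as the handler above does.
import requests

resp = requests.post(
    "http://localhost:23333/generate",
    json={"prompt": "Hello, world", "instance_id": -1, "stream": False},
)
body = resp.json()
print(body["text"], body["tokens"], body["finish_reason"])
```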
gh_patches_debug_1016
rasdani/github-patches
git_diff
scikit-hep__pyhf-2068
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- docs build failing on Pygments lexter warning Hm. Something related to https://github.com/spatialaudio/nbsphinx/issues/24 is breaking the docs build. We're getting ```pytb WARNING: Pygments lexer name 'ipython3' is not known ``` for all the notebooks during the docs build and we fail on warnings. _Originally posted by @matthewfeickert in https://github.com/scikit-hep/pyhf/issues/2066#issuecomment-1329937208_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 from setuptools import setup 2 3 extras_require = { 4 'shellcomplete': ['click_completion'], 5 'tensorflow': [ 6 'tensorflow>=2.7.0', # c.f. PR #1962 7 'tensorflow-probability>=0.11.0', # c.f. PR #1657 8 ], 9 'torch': ['torch>=1.10.0'], # c.f. PR #1657 10 'jax': ['jax>=0.2.10', 'jaxlib>=0.1.61,!=0.1.68'], # c.f. PR #1962, Issue #1501 11 'xmlio': ['uproot>=4.1.1'], # c.f. PR #1567 12 'minuit': ['iminuit>=2.7.0'], # c.f. PR #1895 13 } 14 extras_require['backends'] = sorted( 15 set( 16 extras_require['tensorflow'] 17 + extras_require['torch'] 18 + extras_require['jax'] 19 + extras_require['minuit'] 20 ) 21 ) 22 extras_require['contrib'] = sorted({'matplotlib', 'requests'}) 23 extras_require['test'] = sorted( 24 set( 25 extras_require['backends'] 26 + extras_require['xmlio'] 27 + extras_require['contrib'] 28 + extras_require['shellcomplete'] 29 + [ 30 'scikit-hep-testdata>=0.4.11', 31 'pytest>=6.0', 32 'coverage[toml]>=6.0.0', 33 'pytest-mock', 34 'requests-mock>=1.9.0', 35 'pytest-benchmark[histogram]', 36 'pytest-console-scripts', 37 'pytest-mpl', 38 'pydocstyle', 39 'papermill~=2.3.4', 40 'scrapbook~=0.5.0', 41 'jupyter', 42 'graphviz', 43 'pytest-socket>=0.2.0', # c.f. PR #1917 44 ] 45 ) 46 ) 47 extras_require['docs'] = sorted( 48 set( 49 extras_require['xmlio'] 50 + extras_require['contrib'] 51 + [ 52 'sphinx>=5.1.1', # c.f. https://github.com/scikit-hep/pyhf/pull/1926 53 'sphinxcontrib-bibtex~=2.1', 54 'sphinx-click', 55 'sphinx_rtd_theme', 56 'nbsphinx!=0.8.8', # c.f. https://github.com/spatialaudio/nbsphinx/issues/620 57 'ipywidgets', 58 'sphinx-issues', 59 'sphinx-copybutton>=0.3.2', 60 'sphinx-togglebutton>=0.3.0', 61 ] 62 ) 63 ) 64 extras_require['develop'] = sorted( 65 set( 66 extras_require['docs'] 67 + extras_require['test'] 68 + [ 69 'nbdime', 70 'tbump>=6.7.0', 71 'ipython', 72 'pre-commit', 73 'nox', 74 'check-manifest', 75 'codemetapy>=2.3.0', 76 'twine', 77 ] 78 ) 79 ) 80 extras_require['complete'] = sorted(set(sum(extras_require.values(), []))) 81 82 83 setup( 84 extras_require=extras_require, 85 use_scm_version=lambda: {'local_scheme': lambda version: ''}, 86 ) 87 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -58,6 +58,7 @@ 'sphinx-issues', 'sphinx-copybutton>=0.3.2', 'sphinx-togglebutton>=0.3.0', + 'ipython!=8.7.0', # c.f. https://github.com/scikit-hep/pyhf/pull/2068 ] ) )
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -58,6 +58,7 @@\n 'sphinx-issues',\n 'sphinx-copybutton>=0.3.2',\n 'sphinx-togglebutton>=0.3.0',\n+ 'ipython!=8.7.0', # c.f. https://github.com/scikit-hep/pyhf/pull/2068\n ]\n )\n )\n", "issue": "docs build failing on Pygments lexter warning\nHm. Something related to https://github.com/spatialaudio/nbsphinx/issues/24 is breaking the docs build. We're getting\r\n\r\n```pytb\r\nWARNING: Pygments lexer name 'ipython3' is not known\r\n```\r\n\r\nfor all the notebooks during the docs build and we fail on warnings.\r\n\r\n_Originally posted by @matthewfeickert in https://github.com/scikit-hep/pyhf/issues/2066#issuecomment-1329937208_\r\n \n", "before_files": [{"content": "from setuptools import setup\n\nextras_require = {\n 'shellcomplete': ['click_completion'],\n 'tensorflow': [\n 'tensorflow>=2.7.0', # c.f. PR #1962\n 'tensorflow-probability>=0.11.0', # c.f. PR #1657\n ],\n 'torch': ['torch>=1.10.0'], # c.f. PR #1657\n 'jax': ['jax>=0.2.10', 'jaxlib>=0.1.61,!=0.1.68'], # c.f. PR #1962, Issue #1501\n 'xmlio': ['uproot>=4.1.1'], # c.f. PR #1567\n 'minuit': ['iminuit>=2.7.0'], # c.f. PR #1895\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted({'matplotlib', 'requests'})\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + extras_require['shellcomplete']\n + [\n 'scikit-hep-testdata>=0.4.11',\n 'pytest>=6.0',\n 'coverage[toml]>=6.0.0',\n 'pytest-mock',\n 'requests-mock>=1.9.0',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'papermill~=2.3.4',\n 'scrapbook~=0.5.0',\n 'jupyter',\n 'graphviz',\n 'pytest-socket>=0.2.0', # c.f. PR #1917\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n extras_require['xmlio']\n + extras_require['contrib']\n + [\n 'sphinx>=5.1.1', # c.f. https://github.com/scikit-hep/pyhf/pull/1926\n 'sphinxcontrib-bibtex~=2.1',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx!=0.8.8', # c.f. https://github.com/spatialaudio/nbsphinx/issues/620\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>=0.3.2',\n 'sphinx-togglebutton>=0.3.0',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['test']\n + [\n 'nbdime',\n 'tbump>=6.7.0',\n 'ipython',\n 'pre-commit',\n 'nox',\n 'check-manifest',\n 'codemetapy>=2.3.0',\n 'twine',\n ]\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n extras_require=extras_require,\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import setup\n\nextras_require = {\n 'shellcomplete': ['click_completion'],\n 'tensorflow': [\n 'tensorflow>=2.7.0', # c.f. PR #1962\n 'tensorflow-probability>=0.11.0', # c.f. PR #1657\n ],\n 'torch': ['torch>=1.10.0'], # c.f. PR #1657\n 'jax': ['jax>=0.2.10', 'jaxlib>=0.1.61,!=0.1.68'], # c.f. PR #1962, Issue #1501\n 'xmlio': ['uproot>=4.1.1'], # c.f. PR #1567\n 'minuit': ['iminuit>=2.7.0'], # c.f. 
PR #1895\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted({'matplotlib', 'requests'})\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + extras_require['shellcomplete']\n + [\n 'scikit-hep-testdata>=0.4.11',\n 'pytest>=6.0',\n 'coverage[toml]>=6.0.0',\n 'pytest-mock',\n 'requests-mock>=1.9.0',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'papermill~=2.3.4',\n 'scrapbook~=0.5.0',\n 'jupyter',\n 'graphviz',\n 'pytest-socket>=0.2.0', # c.f. PR #1917\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n extras_require['xmlio']\n + extras_require['contrib']\n + [\n 'sphinx>=5.1.1', # c.f. https://github.com/scikit-hep/pyhf/pull/1926\n 'sphinxcontrib-bibtex~=2.1',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx!=0.8.8', # c.f. https://github.com/spatialaudio/nbsphinx/issues/620\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>=0.3.2',\n 'sphinx-togglebutton>=0.3.0',\n 'ipython!=8.7.0', # c.f. https://github.com/scikit-hep/pyhf/pull/2068\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['test']\n + [\n 'nbdime',\n 'tbump>=6.7.0',\n 'ipython',\n 'pre-commit',\n 'nox',\n 'check-manifest',\n 'codemetapy>=2.3.0',\n 'twine',\n ]\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n extras_require=extras_require,\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}]}
1,285
105
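The one-line pin in the patch above works because nbsphinx asks Pygments for an `ipython3` lexer, which IPython normally registers as a Pygments plugin; that registration appears to have been broken in the 8.7.0 release, hence the `ipython!=8.7.0` exclusion. A quick, illustrative way to check whether the lexer resolves in a given docs environment:

```python
# Illustrative check: can Pygments resolve the 'ipython3' lexer that nbsphinx
# uses for notebook code cells? If not, the Sphinx build emits the
# "Pygments lexer name 'ipython3' is not known" warning quoted in the issue.
from pygments.lexers import get_lexer_by_name
from pygments.util import ClassNotFound

try:
    get_lexer_by_name("ipython3")
    print("ipython3 lexer found -- the docs build should not warn")
except ClassNotFound:
    print("ipython3 lexer missing -- expect the warning from the issue")
```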
gh_patches_debug_9948
rasdani/github-patches
git_diff
DataDog__dd-trace-py-481
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- DatabaseError with ddtrace ``` Traceback (most recent call last): File "/opt/prezi/signup/versions/318-db1a27e562e267603c55ea13e8db46b29d715834/virtualenv/local/lib/python2.7/site-packages/django/core/handlers/base.py", line 132, in get_response response = wrapped_callback(request, *callback_args, **callback_kwargs) File "/opt/prezi/signup/versions/318-db1a27e562e267603c55ea13e8db46b29d715834/signup/views/signup.py", line 199, in edu_school_reg token = EDUVerifyToken.objects.create_token(email=email, domain=domain, name=data['name'], plan_family=data['plan_family'], action=action, upgrade_email=upgrade_email, user_id=user_id, school_data=school_data) File "/opt/prezi/signup/versions/318-db1a27e562e267603c55ea13e8db46b29d715834/signup/models.py", line 69, in create_token token.save() File "/opt/prezi/signup/versions/318-db1a27e562e267603c55ea13e8db46b29d715834/signup/models.py", line 99, in save super(EDUVerifyToken, self).save(**kwargs) File "/opt/prezi/signup/versions/318-db1a27e562e267603c55ea13e8db46b29d715834/virtualenv/local/lib/python2.7/site-packages/django/db/models/base.py", line 734, in save force_update=force_update, update_fields=update_fields) File "/opt/prezi/signup/versions/318-db1a27e562e267603c55ea13e8db46b29d715834/virtualenv/local/lib/python2.7/site-packages/django/db/models/base.py", line 759, in save_base with transaction.atomic(using=using, savepoint=False): File "/opt/prezi/signup/versions/318-db1a27e562e267603c55ea13e8db46b29d715834/virtualenv/local/lib/python2.7/site-packages/django/db/transaction.py", line 150, in __enter__ if not connection.get_autocommit(): File "/opt/prezi/signup/versions/318-db1a27e562e267603c55ea13e8db46b29d715834/virtualenv/local/lib/python2.7/site-packages/django/db/backends/base/base.py", line 286, in get_autocommit self.ensure_connection() File "/opt/prezi/signup/versions/318-db1a27e562e267603c55ea13e8db46b29d715834/virtualenv/local/lib/python2.7/site-packages/django/db/backends/base/base.py", line 130, in ensure_connection self.connect() File "/opt/prezi/signup/versions/318-db1a27e562e267603c55ea13e8db46b29d715834/virtualenv/local/lib/python2.7/site-packages/django/db/backends/base/base.py", line 121, in connect self.init_connection_state() File "/opt/prezi/signup/versions/318-db1a27e562e267603c55ea13e8db46b29d715834/virtualenv/local/lib/python2.7/site-packages/django/db/backends/mysql/base.py", line 282, in init_connection_state with self.cursor() as cursor: File "/opt/prezi/signup/versions/318-db1a27e562e267603c55ea13e8db46b29d715834/virtualenv/local/lib/python2.7/site-packages/ddtrace/contrib/django/db.py", line 35, in cursor return TracedCursor(tracer, conn, conn._datadog_original_cursor()) File "/opt/prezi/signup/versions/318-db1a27e562e267603c55ea13e8db46b29d715834/virtualenv/local/lib/python2.7/site-packages/django/db/backends/base/base.py", line 160, in cursor self.validate_thread_sharing() File "/opt/prezi/signup/versions/318-db1a27e562e267603c55ea13e8db46b29d715834/virtualenv/local/lib/python2.7/site-packages/django/db/backends/base/base.py", line 421, in validate_thread_sharing % (self.alias, self._thread_ident, thread.get_ident())) DatabaseError: DatabaseWrapper objects created in a thread can only be used in that same thread. The object with alias 'default' was created in thread id 40345840 and this is thread id 76693744. ``` ddtrace created a `DatabaseWrapper` in a different thread. 
And an exception is raised when the save called on the model. Current dependencies ``` gunicorn==19.3 MySQL-python==1.2.5 ddtrace==0.11.0 datadog==0.19.0 ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ddtrace/contrib/django/conf.py` Content: ``` 1 """ 2 Settings for Datadog tracer are all namespaced in the DATADOG_TRACE setting. 3 For example your project's `settings.py` file might look like this: 4 5 DATADOG_TRACE = { 6 'TRACER': 'myapp.tracer', 7 } 8 9 This module provides the `setting` object, that is used to access 10 Datadog settings, checking for user settings first, then falling 11 back to the defaults. 12 """ 13 from __future__ import unicode_literals 14 15 import os 16 import importlib 17 import logging 18 19 from django.conf import settings as django_settings 20 21 from django.test.signals import setting_changed 22 23 24 log = logging.getLogger(__name__) 25 26 # List of available settings with their defaults 27 DEFAULTS = { 28 'AGENT_HOSTNAME': 'localhost', 29 'AGENT_PORT': 8126, 30 'AUTO_INSTRUMENT': True, 31 'INSTRUMENT_CACHE': True, 32 'INSTRUMENT_DATABASE': True, 33 'INSTRUMENT_TEMPLATE': True, 34 'DEFAULT_DATABASE_PREFIX': '', 35 'DEFAULT_SERVICE': 'django', 36 'ENABLED': True, 37 'DISTRIBUTED_TRACING': False, 38 'TAGS': {}, 39 'TRACER': 'ddtrace.tracer', 40 } 41 42 # List of settings that may be in string import notation. 43 IMPORT_STRINGS = ( 44 'TRACER', 45 ) 46 47 # List of settings that have been removed 48 REMOVED_SETTINGS = () 49 50 51 def import_from_string(val, setting_name): 52 """ 53 Attempt to import a class from a string representation. 54 """ 55 try: 56 # Nod to tastypie's use of importlib. 57 parts = val.split('.') 58 module_path, class_name = '.'.join(parts[:-1]), parts[-1] 59 module = importlib.import_module(module_path) 60 return getattr(module, class_name) 61 except (ImportError, AttributeError) as e: 62 msg = 'Could not import "{}" for setting "{}". {}: {}.'.format( 63 val, setting_name, 64 e.__class__.__name__, e 65 ) 66 67 raise ImportError(msg) 68 69 70 class DatadogSettings(object): 71 """ 72 A settings object, that allows Datadog settings to be accessed as properties. 73 For example: 74 75 from ddtrace.contrib.django.conf import settings 76 77 tracer = settings.TRACER 78 79 Any setting with string import paths will be automatically resolved 80 and return the class, rather than the string literal. 
81 """ 82 def __init__(self, user_settings=None, defaults=None, import_strings=None): 83 if user_settings: 84 self._user_settings = self.__check_user_settings(user_settings) 85 86 self.defaults = defaults or DEFAULTS 87 if os.environ.get('DATADOG_ENV'): 88 self.defaults['TAGS'].update({'env': os.environ.get('DATADOG_ENV')}) 89 if os.environ.get('DATADOG_SERVICE_NAME'): 90 self.defaults['DEFAULT_SERVICE'] = os.environ.get('DATADOG_SERVICE_NAME') 91 if os.environ.get('DATADOG_TRACE_AGENT_HOSTNAME'): 92 self.defaults['AGENT_HOSTNAME'] = os.environ.get('DATADOG_TRACE_AGENT_HOSTNAME') 93 if os.environ.get('DATADOG_TRACE_AGENT_PORT'): 94 # if the agent port is a string, the underlying library that creates the socket 95 # stops working 96 try: 97 port = int(os.environ.get('DATADOG_TRACE_AGENT_PORT')) 98 except ValueError: 99 log.warning('DATADOG_TRACE_AGENT_PORT is not an integer value; default to 8126') 100 else: 101 self.defaults['AGENT_PORT'] = port 102 103 self.import_strings = import_strings or IMPORT_STRINGS 104 105 @property 106 def user_settings(self): 107 if not hasattr(self, '_user_settings'): 108 self._user_settings = getattr(django_settings, 'DATADOG_TRACE', {}) 109 110 # TODO[manu]: prevents docs import errors; provide a better implementation 111 if 'ENABLED' not in self._user_settings: 112 self._user_settings['ENABLED'] = not django_settings.DEBUG 113 return self._user_settings 114 115 def __getattr__(self, attr): 116 if attr not in self.defaults: 117 raise AttributeError('Invalid setting: "{}"'.format(attr)) 118 119 try: 120 # Check if present in user settings 121 val = self.user_settings[attr] 122 except KeyError: 123 # Otherwise, fall back to defaults 124 val = self.defaults[attr] 125 126 # Coerce import strings into classes 127 if attr in self.import_strings: 128 val = import_from_string(val, attr) 129 130 # Cache the result 131 setattr(self, attr, val) 132 return val 133 134 def __check_user_settings(self, user_settings): 135 SETTINGS_DOC = 'http://pypi.datadoghq.com/trace/docs/#module-ddtrace.contrib.django' 136 for setting in REMOVED_SETTINGS: 137 if setting in user_settings: 138 raise RuntimeError( 139 'The "{}" setting has been removed, check "{}".'.format(setting, SETTINGS_DOC) 140 ) 141 return user_settings 142 143 144 settings = DatadogSettings(None, DEFAULTS, IMPORT_STRINGS) 145 146 147 def reload_settings(*args, **kwargs): 148 """ 149 Triggers a reload when Django emits the reloading signal 150 """ 151 global settings 152 setting, value = kwargs['setting'], kwargs['value'] 153 if setting == 'DATADOG_TRACE': 154 settings = DatadogSettings(value, DEFAULTS, IMPORT_STRINGS) 155 156 157 setting_changed.connect(reload_settings) 158 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ddtrace/contrib/django/conf.py b/ddtrace/contrib/django/conf.py --- a/ddtrace/contrib/django/conf.py +++ b/ddtrace/contrib/django/conf.py @@ -18,8 +18,6 @@ from django.conf import settings as django_settings -from django.test.signals import setting_changed - log = logging.getLogger(__name__) @@ -152,6 +150,3 @@ setting, value = kwargs['setting'], kwargs['value'] if setting == 'DATADOG_TRACE': settings = DatadogSettings(value, DEFAULTS, IMPORT_STRINGS) - - -setting_changed.connect(reload_settings)
{"golden_diff": "diff --git a/ddtrace/contrib/django/conf.py b/ddtrace/contrib/django/conf.py\n--- a/ddtrace/contrib/django/conf.py\n+++ b/ddtrace/contrib/django/conf.py\n@@ -18,8 +18,6 @@\n \n from django.conf import settings as django_settings\n \n-from django.test.signals import setting_changed\n-\n \n log = logging.getLogger(__name__)\n \n@@ -152,6 +150,3 @@\n setting, value = kwargs['setting'], kwargs['value']\n if setting == 'DATADOG_TRACE':\n settings = DatadogSettings(value, DEFAULTS, IMPORT_STRINGS)\n-\n-\n-setting_changed.connect(reload_settings)\n", "issue": "DatabaseError with ddtrace\n```\r\nTraceback (most recent call last):\r\n File \"/opt/prezi/signup/versions/318-db1a27e562e267603c55ea13e8db46b29d715834/virtualenv/local/lib/python2.7/site-packages/django/core/handlers/base.py\", line 132, in get_response\r\n response = wrapped_callback(request, *callback_args, **callback_kwargs)\r\n File \"/opt/prezi/signup/versions/318-db1a27e562e267603c55ea13e8db46b29d715834/signup/views/signup.py\", line 199, in edu_school_reg\r\n token = EDUVerifyToken.objects.create_token(email=email, domain=domain, name=data['name'], plan_family=data['plan_family'], action=action, upgrade_email=upgrade_email, user_id=user_id, school_data=school_data)\r\n File \"/opt/prezi/signup/versions/318-db1a27e562e267603c55ea13e8db46b29d715834/signup/models.py\", line 69, in create_token\r\n token.save()\r\n File \"/opt/prezi/signup/versions/318-db1a27e562e267603c55ea13e8db46b29d715834/signup/models.py\", line 99, in save\r\n super(EDUVerifyToken, self).save(**kwargs)\r\n File \"/opt/prezi/signup/versions/318-db1a27e562e267603c55ea13e8db46b29d715834/virtualenv/local/lib/python2.7/site-packages/django/db/models/base.py\", line 734, in save\r\n force_update=force_update, update_fields=update_fields)\r\n File \"/opt/prezi/signup/versions/318-db1a27e562e267603c55ea13e8db46b29d715834/virtualenv/local/lib/python2.7/site-packages/django/db/models/base.py\", line 759, in save_base\r\n with transaction.atomic(using=using, savepoint=False):\r\n File \"/opt/prezi/signup/versions/318-db1a27e562e267603c55ea13e8db46b29d715834/virtualenv/local/lib/python2.7/site-packages/django/db/transaction.py\", line 150, in __enter__\r\n if not connection.get_autocommit():\r\n File \"/opt/prezi/signup/versions/318-db1a27e562e267603c55ea13e8db46b29d715834/virtualenv/local/lib/python2.7/site-packages/django/db/backends/base/base.py\", line 286, in get_autocommit\r\n self.ensure_connection()\r\n File \"/opt/prezi/signup/versions/318-db1a27e562e267603c55ea13e8db46b29d715834/virtualenv/local/lib/python2.7/site-packages/django/db/backends/base/base.py\", line 130, in ensure_connection\r\n self.connect()\r\n File \"/opt/prezi/signup/versions/318-db1a27e562e267603c55ea13e8db46b29d715834/virtualenv/local/lib/python2.7/site-packages/django/db/backends/base/base.py\", line 121, in connect\r\n self.init_connection_state()\r\n File \"/opt/prezi/signup/versions/318-db1a27e562e267603c55ea13e8db46b29d715834/virtualenv/local/lib/python2.7/site-packages/django/db/backends/mysql/base.py\", line 282, in init_connection_state\r\n with self.cursor() as cursor:\r\n File \"/opt/prezi/signup/versions/318-db1a27e562e267603c55ea13e8db46b29d715834/virtualenv/local/lib/python2.7/site-packages/ddtrace/contrib/django/db.py\", line 35, in cursor\r\n return TracedCursor(tracer, conn, conn._datadog_original_cursor())\r\n File 
\"/opt/prezi/signup/versions/318-db1a27e562e267603c55ea13e8db46b29d715834/virtualenv/local/lib/python2.7/site-packages/django/db/backends/base/base.py\", line 160, in cursor\r\n self.validate_thread_sharing()\r\n File \"/opt/prezi/signup/versions/318-db1a27e562e267603c55ea13e8db46b29d715834/virtualenv/local/lib/python2.7/site-packages/django/db/backends/base/base.py\", line 421, in validate_thread_sharing\r\n % (self.alias, self._thread_ident, thread.get_ident()))\r\nDatabaseError: DatabaseWrapper objects created in a thread can only be used in that same thread. The object with alias 'default' was created in thread id 40345840 and this is thread id 76693744.\r\n```\r\n\r\nddtrace created a `DatabaseWrapper` in a different thread. And an exception is raised when the save called on the model.\r\n\r\nCurrent dependencies\r\n```\r\ngunicorn==19.3\r\nMySQL-python==1.2.5\r\nddtrace==0.11.0\r\ndatadog==0.19.0\r\n```\n", "before_files": [{"content": "\"\"\"\nSettings for Datadog tracer are all namespaced in the DATADOG_TRACE setting.\nFor example your project's `settings.py` file might look like this:\n\nDATADOG_TRACE = {\n 'TRACER': 'myapp.tracer',\n}\n\nThis module provides the `setting` object, that is used to access\nDatadog settings, checking for user settings first, then falling\nback to the defaults.\n\"\"\"\nfrom __future__ import unicode_literals\n\nimport os\nimport importlib\nimport logging\n\nfrom django.conf import settings as django_settings\n\nfrom django.test.signals import setting_changed\n\n\nlog = logging.getLogger(__name__)\n\n# List of available settings with their defaults\nDEFAULTS = {\n 'AGENT_HOSTNAME': 'localhost',\n 'AGENT_PORT': 8126,\n 'AUTO_INSTRUMENT': True,\n 'INSTRUMENT_CACHE': True,\n 'INSTRUMENT_DATABASE': True,\n 'INSTRUMENT_TEMPLATE': True,\n 'DEFAULT_DATABASE_PREFIX': '',\n 'DEFAULT_SERVICE': 'django',\n 'ENABLED': True,\n 'DISTRIBUTED_TRACING': False,\n 'TAGS': {},\n 'TRACER': 'ddtrace.tracer',\n}\n\n# List of settings that may be in string import notation.\nIMPORT_STRINGS = (\n 'TRACER',\n)\n\n# List of settings that have been removed\nREMOVED_SETTINGS = ()\n\n\ndef import_from_string(val, setting_name):\n \"\"\"\n Attempt to import a class from a string representation.\n \"\"\"\n try:\n # Nod to tastypie's use of importlib.\n parts = val.split('.')\n module_path, class_name = '.'.join(parts[:-1]), parts[-1]\n module = importlib.import_module(module_path)\n return getattr(module, class_name)\n except (ImportError, AttributeError) as e:\n msg = 'Could not import \"{}\" for setting \"{}\". 
{}: {}.'.format(\n val, setting_name,\n e.__class__.__name__, e\n )\n\n raise ImportError(msg)\n\n\nclass DatadogSettings(object):\n \"\"\"\n A settings object, that allows Datadog settings to be accessed as properties.\n For example:\n\n from ddtrace.contrib.django.conf import settings\n\n tracer = settings.TRACER\n\n Any setting with string import paths will be automatically resolved\n and return the class, rather than the string literal.\n \"\"\"\n def __init__(self, user_settings=None, defaults=None, import_strings=None):\n if user_settings:\n self._user_settings = self.__check_user_settings(user_settings)\n\n self.defaults = defaults or DEFAULTS\n if os.environ.get('DATADOG_ENV'):\n self.defaults['TAGS'].update({'env': os.environ.get('DATADOG_ENV')})\n if os.environ.get('DATADOG_SERVICE_NAME'):\n self.defaults['DEFAULT_SERVICE'] = os.environ.get('DATADOG_SERVICE_NAME')\n if os.environ.get('DATADOG_TRACE_AGENT_HOSTNAME'):\n self.defaults['AGENT_HOSTNAME'] = os.environ.get('DATADOG_TRACE_AGENT_HOSTNAME')\n if os.environ.get('DATADOG_TRACE_AGENT_PORT'):\n # if the agent port is a string, the underlying library that creates the socket\n # stops working\n try:\n port = int(os.environ.get('DATADOG_TRACE_AGENT_PORT'))\n except ValueError:\n log.warning('DATADOG_TRACE_AGENT_PORT is not an integer value; default to 8126')\n else:\n self.defaults['AGENT_PORT'] = port\n\n self.import_strings = import_strings or IMPORT_STRINGS\n\n @property\n def user_settings(self):\n if not hasattr(self, '_user_settings'):\n self._user_settings = getattr(django_settings, 'DATADOG_TRACE', {})\n\n # TODO[manu]: prevents docs import errors; provide a better implementation\n if 'ENABLED' not in self._user_settings:\n self._user_settings['ENABLED'] = not django_settings.DEBUG\n return self._user_settings\n\n def __getattr__(self, attr):\n if attr not in self.defaults:\n raise AttributeError('Invalid setting: \"{}\"'.format(attr))\n\n try:\n # Check if present in user settings\n val = self.user_settings[attr]\n except KeyError:\n # Otherwise, fall back to defaults\n val = self.defaults[attr]\n\n # Coerce import strings into classes\n if attr in self.import_strings:\n val = import_from_string(val, attr)\n\n # Cache the result\n setattr(self, attr, val)\n return val\n\n def __check_user_settings(self, user_settings):\n SETTINGS_DOC = 'http://pypi.datadoghq.com/trace/docs/#module-ddtrace.contrib.django'\n for setting in REMOVED_SETTINGS:\n if setting in user_settings:\n raise RuntimeError(\n 'The \"{}\" setting has been removed, check \"{}\".'.format(setting, SETTINGS_DOC)\n )\n return user_settings\n\n\nsettings = DatadogSettings(None, DEFAULTS, IMPORT_STRINGS)\n\n\ndef reload_settings(*args, **kwargs):\n \"\"\"\n Triggers a reload when Django emits the reloading signal\n \"\"\"\n global settings\n setting, value = kwargs['setting'], kwargs['value']\n if setting == 'DATADOG_TRACE':\n settings = DatadogSettings(value, DEFAULTS, IMPORT_STRINGS)\n\n\nsetting_changed.connect(reload_settings)\n", "path": "ddtrace/contrib/django/conf.py"}], "after_files": [{"content": "\"\"\"\nSettings for Datadog tracer are all namespaced in the DATADOG_TRACE setting.\nFor example your project's `settings.py` file might look like this:\n\nDATADOG_TRACE = {\n 'TRACER': 'myapp.tracer',\n}\n\nThis module provides the `setting` object, that is used to access\nDatadog settings, checking for user settings first, then falling\nback to the defaults.\n\"\"\"\nfrom __future__ import unicode_literals\n\nimport os\nimport importlib\nimport 
logging\n\nfrom django.conf import settings as django_settings\n\n\nlog = logging.getLogger(__name__)\n\n# List of available settings with their defaults\nDEFAULTS = {\n 'AGENT_HOSTNAME': 'localhost',\n 'AGENT_PORT': 8126,\n 'AUTO_INSTRUMENT': True,\n 'INSTRUMENT_CACHE': True,\n 'INSTRUMENT_DATABASE': True,\n 'INSTRUMENT_TEMPLATE': True,\n 'DEFAULT_DATABASE_PREFIX': '',\n 'DEFAULT_SERVICE': 'django',\n 'ENABLED': True,\n 'DISTRIBUTED_TRACING': False,\n 'TAGS': {},\n 'TRACER': 'ddtrace.tracer',\n}\n\n# List of settings that may be in string import notation.\nIMPORT_STRINGS = (\n 'TRACER',\n)\n\n# List of settings that have been removed\nREMOVED_SETTINGS = ()\n\n\ndef import_from_string(val, setting_name):\n \"\"\"\n Attempt to import a class from a string representation.\n \"\"\"\n try:\n # Nod to tastypie's use of importlib.\n parts = val.split('.')\n module_path, class_name = '.'.join(parts[:-1]), parts[-1]\n module = importlib.import_module(module_path)\n return getattr(module, class_name)\n except (ImportError, AttributeError) as e:\n msg = 'Could not import \"{}\" for setting \"{}\". {}: {}.'.format(\n val, setting_name,\n e.__class__.__name__, e\n )\n\n raise ImportError(msg)\n\n\nclass DatadogSettings(object):\n \"\"\"\n A settings object, that allows Datadog settings to be accessed as properties.\n For example:\n\n from ddtrace.contrib.django.conf import settings\n\n tracer = settings.TRACER\n\n Any setting with string import paths will be automatically resolved\n and return the class, rather than the string literal.\n \"\"\"\n def __init__(self, user_settings=None, defaults=None, import_strings=None):\n if user_settings:\n self._user_settings = self.__check_user_settings(user_settings)\n\n self.defaults = defaults or DEFAULTS\n if os.environ.get('DATADOG_ENV'):\n self.defaults['TAGS'].update({'env': os.environ.get('DATADOG_ENV')})\n if os.environ.get('DATADOG_SERVICE_NAME'):\n self.defaults['DEFAULT_SERVICE'] = os.environ.get('DATADOG_SERVICE_NAME')\n if os.environ.get('DATADOG_TRACE_AGENT_HOSTNAME'):\n self.defaults['AGENT_HOSTNAME'] = os.environ.get('DATADOG_TRACE_AGENT_HOSTNAME')\n if os.environ.get('DATADOG_TRACE_AGENT_PORT'):\n # if the agent port is a string, the underlying library that creates the socket\n # stops working\n try:\n port = int(os.environ.get('DATADOG_TRACE_AGENT_PORT'))\n except ValueError:\n log.warning('DATADOG_TRACE_AGENT_PORT is not an integer value; default to 8126')\n else:\n self.defaults['AGENT_PORT'] = port\n\n self.import_strings = import_strings or IMPORT_STRINGS\n\n @property\n def user_settings(self):\n if not hasattr(self, '_user_settings'):\n self._user_settings = getattr(django_settings, 'DATADOG_TRACE', {})\n\n # TODO[manu]: prevents docs import errors; provide a better implementation\n if 'ENABLED' not in self._user_settings:\n self._user_settings['ENABLED'] = not django_settings.DEBUG\n return self._user_settings\n\n def __getattr__(self, attr):\n if attr not in self.defaults:\n raise AttributeError('Invalid setting: \"{}\"'.format(attr))\n\n try:\n # Check if present in user settings\n val = self.user_settings[attr]\n except KeyError:\n # Otherwise, fall back to defaults\n val = self.defaults[attr]\n\n # Coerce import strings into classes\n if attr in self.import_strings:\n val = import_from_string(val, attr)\n\n # Cache the result\n setattr(self, attr, val)\n return val\n\n def __check_user_settings(self, user_settings):\n SETTINGS_DOC = 'http://pypi.datadoghq.com/trace/docs/#module-ddtrace.contrib.django'\n for setting in 
REMOVED_SETTINGS:\n if setting in user_settings:\n raise RuntimeError(\n 'The \"{}\" setting has been removed, check \"{}\".'.format(setting, SETTINGS_DOC)\n )\n return user_settings\n\n\nsettings = DatadogSettings(None, DEFAULTS, IMPORT_STRINGS)\n\n\ndef reload_settings(*args, **kwargs):\n \"\"\"\n Triggers a reload when Django emits the reloading signal\n \"\"\"\n global settings\n setting, value = kwargs['setting'], kwargs['value']\n if setting == 'DATADOG_TRACE':\n settings = DatadogSettings(value, DEFAULTS, IMPORT_STRINGS)\n", "path": "ddtrace/contrib/django/conf.py"}]}
3,176
146
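The traceback in the record above is Django's own guard at work: each `DatabaseWrapper` records the thread that created it, and `validate_thread_sharing()` raises `DatabaseError` when the same wrapper instance is touched from another thread. Below is a minimal sketch of that failure mode, assuming an already-configured Django project with a `default` database; it illustrates the error class only, not the ddtrace code path.

```python
# Sketch of the error from the traceback: a DatabaseWrapper created in one
# thread refuses to hand out cursors in another. Assumes
# DJANGO_SETTINGS_MODULE is set and django.setup() has already run.
import threading
from django.db import connections

conn = connections["default"]      # wrapper instantiated in the main thread

def worker():
    conn.cursor()                  # same object, different thread -> DatabaseError

t = threading.Thread(target=worker)
t.start()
t.join()
```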
gh_patches_debug_22693
rasdani/github-patches
git_diff
googleapis__google-cloud-python-1440
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Pubsub.list_topics fails when there are no topics [Offending line](https://github.com/GoogleCloudPlatform/gcloud-python/blob/0910f9979a45af8cc2826dd4c6ff38d9efa5ccec/gcloud/pubsub/client.py#L80). Reproduce via: ``` python client = pubsub.Client() >>> client.list_topics() Traceback (most recent call last): File "<stdin>", line 1, in <module> File "gcloud/pubsub/client.py", line 80, in list_topics for resource in resp['topics']] KeyError: 'topics' ``` @tseaver ISTM we should locate all instances where we assume a key is present and just protect against this. The time between releases behooves us to be "protective" of users. (I realize that we've usually done it this way based on documented outputs.) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `gcloud/pubsub/client.py` Content: ``` 1 # Copyright 2015 Google Inc. All rights reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """Client for interacting with the Google Cloud Pub/Sub API.""" 16 17 18 from gcloud.client import JSONClient 19 from gcloud.pubsub.connection import Connection 20 from gcloud.pubsub.subscription import Subscription 21 from gcloud.pubsub.topic import Topic 22 23 24 class Client(JSONClient): 25 """Client to bundle configuration needed for API requests. 26 27 :type project: string 28 :param project: the project which the client acts on behalf of. Will be 29 passed when creating a topic. If not passed, 30 falls back to the default inferred from the environment. 31 32 :type credentials: :class:`oauth2client.client.OAuth2Credentials` or 33 :class:`NoneType` 34 :param credentials: The OAuth2 Credentials to use for the connection 35 owned by this client. If not passed (and if no ``http`` 36 object is passed), falls back to the default inferred 37 from the environment. 38 39 :type http: :class:`httplib2.Http` or class that defines ``request()``. 40 :param http: An optional HTTP object to make requests. If not passed, an 41 ``http`` object is created that is bound to the 42 ``credentials`` for the current object. 43 """ 44 45 _connection_class = Connection 46 47 def list_topics(self, page_size=None, page_token=None): 48 """List topics for the project associated with this client. 49 50 See: 51 https://cloud.google.com/pubsub/reference/rest/v1/projects.topics/list 52 53 :type page_size: int 54 :param page_size: maximum number of topics to return, If not passed, 55 defaults to a value set by the API. 56 57 :type page_token: string 58 :param page_token: opaque marker for the next "page" of topics. If not 59 passed, the API will return the first page of 60 topics. 61 62 :rtype: tuple, (list, str) 63 :returns: list of :class:`gcloud.pubsub.topic.Topic`, plus a 64 "next page token" string: if not None, indicates that 65 more topics can be retrieved with another call (pass that 66 value as ``page_token``). 
67 """ 68 params = {} 69 70 if page_size is not None: 71 params['pageSize'] = page_size 72 73 if page_token is not None: 74 params['pageToken'] = page_token 75 76 path = '/projects/%s/topics' % (self.project,) 77 resp = self.connection.api_request(method='GET', path=path, 78 query_params=params) 79 topics = [Topic.from_api_repr(resource, self) 80 for resource in resp['topics']] 81 return topics, resp.get('nextPageToken') 82 83 def list_subscriptions(self, page_size=None, page_token=None, 84 topic_name=None): 85 """List subscriptions for the project associated with this client. 86 87 See: 88 https://cloud.google.com/pubsub/reference/rest/v1/projects.topics/list 89 90 and (where ``topic_name`` is passed): 91 https://cloud.google.com/pubsub/reference/rest/v1/projects.topics.subscriptions/list 92 93 :type page_size: int 94 :param page_size: maximum number of topics to return, If not passed, 95 defaults to a value set by the API. 96 97 :type page_token: string 98 :param page_token: opaque marker for the next "page" of topics. If not 99 passed, the API will return the first page of 100 topics. 101 102 :type topic_name: string 103 :param topic_name: limit results to subscriptions bound to the given 104 topic. 105 106 :rtype: tuple, (list, str) 107 :returns: list of :class:`gcloud.pubsub.subscription.Subscription`, 108 plus a "next page token" string: if not None, indicates that 109 more topics can be retrieved with another call (pass that 110 value as ``page_token``). 111 """ 112 params = {} 113 114 if page_size is not None: 115 params['pageSize'] = page_size 116 117 if page_token is not None: 118 params['pageToken'] = page_token 119 120 if topic_name is None: 121 path = '/projects/%s/subscriptions' % (self.project,) 122 else: 123 path = '/projects/%s/topics/%s/subscriptions' % (self.project, 124 topic_name) 125 126 resp = self.connection.api_request(method='GET', path=path, 127 query_params=params) 128 topics = {} 129 subscriptions = [Subscription.from_api_repr(resource, self, 130 topics=topics) 131 for resource in resp['subscriptions']] 132 return subscriptions, resp.get('nextPageToken') 133 134 def topic(self, name, timestamp_messages=False): 135 """Creates a topic bound to the current client. 136 137 :type name: string 138 :param name: the name of the topic to be constructed. 139 140 :type timestamp_messages: boolean 141 :param timestamp_messages: To be passed to ``Topic`` constructor. 142 143 :rtype: :class:`gcloud.pubsub.topic.Topic` 144 :returns: Topic created with the current client. 145 """ 146 return Topic(name, client=self, timestamp_messages=timestamp_messages) 147 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/gcloud/pubsub/client.py b/gcloud/pubsub/client.py --- a/gcloud/pubsub/client.py +++ b/gcloud/pubsub/client.py @@ -77,7 +77,7 @@ resp = self.connection.api_request(method='GET', path=path, query_params=params) topics = [Topic.from_api_repr(resource, self) - for resource in resp['topics']] + for resource in resp.get('topics', ())] return topics, resp.get('nextPageToken') def list_subscriptions(self, page_size=None, page_token=None, @@ -128,7 +128,7 @@ topics = {} subscriptions = [Subscription.from_api_repr(resource, self, topics=topics) - for resource in resp['subscriptions']] + for resource in resp.get('subscriptions', ())] return subscriptions, resp.get('nextPageToken') def topic(self, name, timestamp_messages=False):
{"golden_diff": "diff --git a/gcloud/pubsub/client.py b/gcloud/pubsub/client.py\n--- a/gcloud/pubsub/client.py\n+++ b/gcloud/pubsub/client.py\n@@ -77,7 +77,7 @@\n resp = self.connection.api_request(method='GET', path=path,\n query_params=params)\n topics = [Topic.from_api_repr(resource, self)\n- for resource in resp['topics']]\n+ for resource in resp.get('topics', ())]\n return topics, resp.get('nextPageToken')\n \n def list_subscriptions(self, page_size=None, page_token=None,\n@@ -128,7 +128,7 @@\n topics = {}\n subscriptions = [Subscription.from_api_repr(resource, self,\n topics=topics)\n- for resource in resp['subscriptions']]\n+ for resource in resp.get('subscriptions', ())]\n return subscriptions, resp.get('nextPageToken')\n \n def topic(self, name, timestamp_messages=False):\n", "issue": "Pubsub.list_topics fails when there are no topics\n[Offending line](https://github.com/GoogleCloudPlatform/gcloud-python/blob/0910f9979a45af8cc2826dd4c6ff38d9efa5ccec/gcloud/pubsub/client.py#L80). Reproduce via:\n\n``` python\nclient = pubsub.Client()\n>>> client.list_topics()\nTraceback (most recent call last):\n File \"<stdin>\", line 1, in <module>\n File \"gcloud/pubsub/client.py\", line 80, in list_topics\n for resource in resp['topics']]\nKeyError: 'topics'\n```\n\n@tseaver ISTM we should locate all instances where we assume a key is present and just protect against this. The time between releases behooves us to be \"protective\" of users. (I realize that we've usually done it this way based on documented outputs.)\n\n", "before_files": [{"content": "# Copyright 2015 Google Inc. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Client for interacting with the Google Cloud Pub/Sub API.\"\"\"\n\n\nfrom gcloud.client import JSONClient\nfrom gcloud.pubsub.connection import Connection\nfrom gcloud.pubsub.subscription import Subscription\nfrom gcloud.pubsub.topic import Topic\n\n\nclass Client(JSONClient):\n \"\"\"Client to bundle configuration needed for API requests.\n\n :type project: string\n :param project: the project which the client acts on behalf of. Will be\n passed when creating a topic. If not passed,\n falls back to the default inferred from the environment.\n\n :type credentials: :class:`oauth2client.client.OAuth2Credentials` or\n :class:`NoneType`\n :param credentials: The OAuth2 Credentials to use for the connection\n owned by this client. If not passed (and if no ``http``\n object is passed), falls back to the default inferred\n from the environment.\n\n :type http: :class:`httplib2.Http` or class that defines ``request()``.\n :param http: An optional HTTP object to make requests. 
If not passed, an\n ``http`` object is created that is bound to the\n ``credentials`` for the current object.\n \"\"\"\n\n _connection_class = Connection\n\n def list_topics(self, page_size=None, page_token=None):\n \"\"\"List topics for the project associated with this client.\n\n See:\n https://cloud.google.com/pubsub/reference/rest/v1/projects.topics/list\n\n :type page_size: int\n :param page_size: maximum number of topics to return, If not passed,\n defaults to a value set by the API.\n\n :type page_token: string\n :param page_token: opaque marker for the next \"page\" of topics. If not\n passed, the API will return the first page of\n topics.\n\n :rtype: tuple, (list, str)\n :returns: list of :class:`gcloud.pubsub.topic.Topic`, plus a\n \"next page token\" string: if not None, indicates that\n more topics can be retrieved with another call (pass that\n value as ``page_token``).\n \"\"\"\n params = {}\n\n if page_size is not None:\n params['pageSize'] = page_size\n\n if page_token is not None:\n params['pageToken'] = page_token\n\n path = '/projects/%s/topics' % (self.project,)\n resp = self.connection.api_request(method='GET', path=path,\n query_params=params)\n topics = [Topic.from_api_repr(resource, self)\n for resource in resp['topics']]\n return topics, resp.get('nextPageToken')\n\n def list_subscriptions(self, page_size=None, page_token=None,\n topic_name=None):\n \"\"\"List subscriptions for the project associated with this client.\n\n See:\n https://cloud.google.com/pubsub/reference/rest/v1/projects.topics/list\n\n and (where ``topic_name`` is passed):\n https://cloud.google.com/pubsub/reference/rest/v1/projects.topics.subscriptions/list\n\n :type page_size: int\n :param page_size: maximum number of topics to return, If not passed,\n defaults to a value set by the API.\n\n :type page_token: string\n :param page_token: opaque marker for the next \"page\" of topics. If not\n passed, the API will return the first page of\n topics.\n\n :type topic_name: string\n :param topic_name: limit results to subscriptions bound to the given\n topic.\n\n :rtype: tuple, (list, str)\n :returns: list of :class:`gcloud.pubsub.subscription.Subscription`,\n plus a \"next page token\" string: if not None, indicates that\n more topics can be retrieved with another call (pass that\n value as ``page_token``).\n \"\"\"\n params = {}\n\n if page_size is not None:\n params['pageSize'] = page_size\n\n if page_token is not None:\n params['pageToken'] = page_token\n\n if topic_name is None:\n path = '/projects/%s/subscriptions' % (self.project,)\n else:\n path = '/projects/%s/topics/%s/subscriptions' % (self.project,\n topic_name)\n\n resp = self.connection.api_request(method='GET', path=path,\n query_params=params)\n topics = {}\n subscriptions = [Subscription.from_api_repr(resource, self,\n topics=topics)\n for resource in resp['subscriptions']]\n return subscriptions, resp.get('nextPageToken')\n\n def topic(self, name, timestamp_messages=False):\n \"\"\"Creates a topic bound to the current client.\n\n :type name: string\n :param name: the name of the topic to be constructed.\n\n :type timestamp_messages: boolean\n :param timestamp_messages: To be passed to ``Topic`` constructor.\n\n :rtype: :class:`gcloud.pubsub.topic.Topic`\n :returns: Topic created with the current client.\n \"\"\"\n return Topic(name, client=self, timestamp_messages=timestamp_messages)\n", "path": "gcloud/pubsub/client.py"}], "after_files": [{"content": "# Copyright 2015 Google Inc. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Client for interacting with the Google Cloud Pub/Sub API.\"\"\"\n\n\nfrom gcloud.client import JSONClient\nfrom gcloud.pubsub.connection import Connection\nfrom gcloud.pubsub.subscription import Subscription\nfrom gcloud.pubsub.topic import Topic\n\n\nclass Client(JSONClient):\n \"\"\"Client to bundle configuration needed for API requests.\n\n :type project: string\n :param project: the project which the client acts on behalf of. Will be\n passed when creating a topic. If not passed,\n falls back to the default inferred from the environment.\n\n :type credentials: :class:`oauth2client.client.OAuth2Credentials` or\n :class:`NoneType`\n :param credentials: The OAuth2 Credentials to use for the connection\n owned by this client. If not passed (and if no ``http``\n object is passed), falls back to the default inferred\n from the environment.\n\n :type http: :class:`httplib2.Http` or class that defines ``request()``.\n :param http: An optional HTTP object to make requests. If not passed, an\n ``http`` object is created that is bound to the\n ``credentials`` for the current object.\n \"\"\"\n\n _connection_class = Connection\n\n def list_topics(self, page_size=None, page_token=None):\n \"\"\"List topics for the project associated with this client.\n\n See:\n https://cloud.google.com/pubsub/reference/rest/v1/projects.topics/list\n\n :type page_size: int\n :param page_size: maximum number of topics to return, If not passed,\n defaults to a value set by the API.\n\n :type page_token: string\n :param page_token: opaque marker for the next \"page\" of topics. If not\n passed, the API will return the first page of\n topics.\n\n :rtype: tuple, (list, str)\n :returns: list of :class:`gcloud.pubsub.topic.Topic`, plus a\n \"next page token\" string: if not None, indicates that\n more topics can be retrieved with another call (pass that\n value as ``page_token``).\n \"\"\"\n params = {}\n\n if page_size is not None:\n params['pageSize'] = page_size\n\n if page_token is not None:\n params['pageToken'] = page_token\n\n path = '/projects/%s/topics' % (self.project,)\n resp = self.connection.api_request(method='GET', path=path,\n query_params=params)\n topics = [Topic.from_api_repr(resource, self)\n for resource in resp.get('topics', ())]\n return topics, resp.get('nextPageToken')\n\n def list_subscriptions(self, page_size=None, page_token=None,\n topic_name=None):\n \"\"\"List subscriptions for the project associated with this client.\n\n See:\n https://cloud.google.com/pubsub/reference/rest/v1/projects.topics/list\n\n and (where ``topic_name`` is passed):\n https://cloud.google.com/pubsub/reference/rest/v1/projects.topics.subscriptions/list\n\n :type page_size: int\n :param page_size: maximum number of topics to return, If not passed,\n defaults to a value set by the API.\n\n :type page_token: string\n :param page_token: opaque marker for the next \"page\" of topics. 
If not\n passed, the API will return the first page of\n topics.\n\n :type topic_name: string\n :param topic_name: limit results to subscriptions bound to the given\n topic.\n\n :rtype: tuple, (list, str)\n :returns: list of :class:`gcloud.pubsub.subscription.Subscription`,\n plus a \"next page token\" string: if not None, indicates that\n more topics can be retrieved with another call (pass that\n value as ``page_token``).\n \"\"\"\n params = {}\n\n if page_size is not None:\n params['pageSize'] = page_size\n\n if page_token is not None:\n params['pageToken'] = page_token\n\n if topic_name is None:\n path = '/projects/%s/subscriptions' % (self.project,)\n else:\n path = '/projects/%s/topics/%s/subscriptions' % (self.project,\n topic_name)\n\n resp = self.connection.api_request(method='GET', path=path,\n query_params=params)\n topics = {}\n subscriptions = [Subscription.from_api_repr(resource, self,\n topics=topics)\n for resource in resp.get('subscriptions', ())]\n return subscriptions, resp.get('nextPageToken')\n\n def topic(self, name, timestamp_messages=False):\n \"\"\"Creates a topic bound to the current client.\n\n :type name: string\n :param name: the name of the topic to be constructed.\n\n :type timestamp_messages: boolean\n :param timestamp_messages: To be passed to ``Topic`` constructor.\n\n :rtype: :class:`gcloud.pubsub.topic.Topic`\n :returns: Topic created with the current client.\n \"\"\"\n return Topic(name, client=self, timestamp_messages=timestamp_messages)\n", "path": "gcloud/pubsub/client.py"}]}
2,022
206
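The fix above is the standard defensive pattern for optional keys: the traceback in the issue shows the list response carries no `topics` entry when the project has no topics, so direct indexing blows up while `dict.get()` with a default degrades to an empty page. A minimal illustration:

```python
# Minimal illustration of the patched behaviour: an empty list_topics response
# simply omits the 'topics' key, so .get() with a default avoids the KeyError.
resp = {}  # response body when the project has no topics

try:
    topics = [r for r in resp["topics"]]          # pre-patch: raises KeyError
except KeyError:
    topics = None

assert topics is None                             # the crash path from the issue
topics = [r for r in resp.get("topics", ())]      # patched: just an empty list
assert topics == []
```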
gh_patches_debug_22083
rasdani/github-patches
git_diff
crytic__slither-414
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Spelling mistake in detector output The `uninitialized-local` and `uninitialized-storage` detectors each have a minor spelling mistake in their output. The word *initiali~a~zed* is misspelled. **Current output** ```(tests/uninitialized_local_variable.sol#4) is a local variable never initialiazed``` ```(tests/uninitialized_storage_pointer.sol#10) is a storage variable never initialiazed``` **Expected output** ```(tests/uninitialized_local_variable.sol#4) is a local variable never initialized``` ```(tests/uninitialized_storage_pointer.sol#10) is a storage variable never initialized``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `slither/detectors/variables/uninitialized_storage_variables.py` Content: ``` 1 """ 2 Module detecting uninitialized storage variables 3 4 Recursively explore the CFG to only report uninitialized storage variables that are 5 written before being read 6 """ 7 8 from slither.detectors.abstract_detector import AbstractDetector, DetectorClassification 9 10 11 class UninitializedStorageVars(AbstractDetector): 12 """ 13 """ 14 15 ARGUMENT = 'uninitialized-storage' 16 HELP = 'Uninitialized storage variables' 17 IMPACT = DetectorClassification.HIGH 18 CONFIDENCE = DetectorClassification.HIGH 19 20 WIKI = 'https://github.com/crytic/slither/wiki/Detector-Documentation#uninitialized-storage-variables' 21 22 WIKI_TITLE = 'Uninitialized storage variables' 23 WIKI_DESCRIPTION = 'An uinitialized storage variable will act as a reference to the first state variable, and can override a critical variable.' 24 WIKI_EXPLOIT_SCENARIO = ''' 25 ```solidity 26 contract Uninitialized{ 27 address owner = msg.sender; 28 29 struct St{ 30 uint a; 31 } 32 33 function func() { 34 St st; 35 st.a = 0x0; 36 } 37 } 38 ``` 39 Bob calls `func`. As a result, `owner` is override to 0. 40 ''' 41 42 WIKI_RECOMMENDATION = 'Initialize all the storage variables.' 
43 44 # node.context[self.key] contains the uninitialized storage variables 45 key = "UNINITIALIZEDSTORAGE" 46 47 def _detect_uninitialized(self, function, node, visited): 48 if node in visited: 49 return 50 51 visited = visited + [node] 52 53 fathers_context = [] 54 55 for father in node.fathers: 56 if self.key in father.context: 57 fathers_context += father.context[self.key] 58 59 # Exclude paths that dont bring further information 60 if node in self.visited_all_paths: 61 if all(f_c in self.visited_all_paths[node] for f_c in fathers_context): 62 return 63 else: 64 self.visited_all_paths[node] = [] 65 66 self.visited_all_paths[node] = list(set(self.visited_all_paths[node] + fathers_context)) 67 68 if self.key in node.context: 69 fathers_context += node.context[self.key] 70 71 variables_read = node.variables_read 72 for uninitialized_storage_variable in fathers_context: 73 if uninitialized_storage_variable in variables_read: 74 self.results.append((function, uninitialized_storage_variable)) 75 76 # Only save the storage variables that are not yet written 77 uninitialized_storage_variables = list(set(fathers_context) - set(node.variables_written)) 78 node.context[self.key] = uninitialized_storage_variables 79 80 for son in node.sons: 81 self._detect_uninitialized(function, son, visited) 82 83 84 def _detect(self): 85 """ Detect uninitialized storage variables 86 87 Recursively visit the calls 88 Returns: 89 dict: [contract name] = set(storage variable uninitialized) 90 """ 91 results = [] 92 93 self.results = [] 94 self.visited_all_paths = {} 95 96 for contract in self.slither.contracts: 97 for function in contract.functions: 98 if function.is_implemented: 99 uninitialized_storage_variables = [v for v in function.local_variables if v.is_storage and v.uninitialized] 100 function.entry_point.context[self.key] = uninitialized_storage_variables 101 self._detect_uninitialized(function, function.entry_point, []) 102 103 for(function, uninitialized_storage_variable) in self.results: 104 info = [uninitialized_storage_variable, " is a storage variable never initialiazed\n"] 105 json = self.generate_result(info) 106 results.append(json) 107 108 return results 109 ``` Path: `slither/detectors/variables/uninitialized_local_variables.py` Content: ``` 1 """ 2 Module detecting uninitialized local variables 3 4 Recursively explore the CFG to only report uninitialized local variables that are 5 read before being written 6 """ 7 8 from slither.detectors.abstract_detector import AbstractDetector, DetectorClassification 9 10 11 class UninitializedLocalVars(AbstractDetector): 12 """ 13 """ 14 15 ARGUMENT = 'uninitialized-local' 16 HELP = 'Uninitialized local variables' 17 IMPACT = DetectorClassification.MEDIUM 18 CONFIDENCE = DetectorClassification.MEDIUM 19 20 WIKI = 'https://github.com/crytic/slither/wiki/Detector-Documentation#uninitialized-local-variables' 21 22 23 WIKI_TITLE = 'Uninitialized local variables' 24 WIKI_DESCRIPTION = 'Uninitialized local variables.' 25 WIKI_EXPLOIT_SCENARIO = ''' 26 ```solidity 27 contract Uninitialized is Owner{ 28 function withdraw() payable public onlyOwner{ 29 address to; 30 to.transfer(this.balance) 31 } 32 } 33 ``` 34 Bob calls `transfer`. As a result, the ethers are sent to the address 0x0 and are lost.''' 35 36 WIKI_RECOMMENDATION = 'Initialize all the variables. If a variable is meant to be initialized to zero, explicitly set it to zero.' 
37 38 key = "UNINITIALIZEDLOCAL" 39 40 def _detect_uninitialized(self, function, node, visited): 41 if node in visited: 42 return 43 44 visited = visited + [node] 45 46 fathers_context = [] 47 48 for father in node.fathers: 49 if self.key in father.context: 50 fathers_context += father.context[self.key] 51 52 # Exclude path that dont bring further information 53 if node in self.visited_all_paths: 54 if all(f_c in self.visited_all_paths[node] for f_c in fathers_context): 55 return 56 else: 57 self.visited_all_paths[node] = [] 58 59 self.visited_all_paths[node] = list(set(self.visited_all_paths[node] + fathers_context)) 60 61 if self.key in node.context: 62 fathers_context += node.context[self.key] 63 64 variables_read = node.variables_read 65 for uninitialized_local_variable in fathers_context: 66 if uninitialized_local_variable in variables_read: 67 self.results.append((function, uninitialized_local_variable)) 68 69 # Only save the local variables that are not yet written 70 uninitialized_local_variables = list(set(fathers_context) - set(node.variables_written)) 71 node.context[self.key] = uninitialized_local_variables 72 73 for son in node.sons: 74 self._detect_uninitialized(function, son, visited) 75 76 77 def _detect(self): 78 """ Detect uninitialized local variables 79 80 Recursively visit the calls 81 Returns: 82 dict: [contract name] = set(local variable uninitialized) 83 """ 84 results = [] 85 86 self.results = [] 87 self.visited_all_paths = {} 88 89 for contract in self.slither.contracts: 90 for function in contract.functions: 91 if function.is_implemented and function.contract_declarer == contract: 92 if function.contains_assembly: 93 continue 94 # dont consider storage variable, as they are detected by another detector 95 uninitialized_local_variables = [v for v in function.local_variables if not v.is_storage and v.uninitialized] 96 function.entry_point.context[self.key] = uninitialized_local_variables 97 self._detect_uninitialized(function, function.entry_point, []) 98 all_results = list(set(self.results)) 99 for(function, uninitialized_local_variable) in all_results: 100 101 info = [uninitialized_local_variable, " is a local variable never initialiazed\n"] 102 json = self.generate_result(info) 103 results.append(json) 104 105 return results 106 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/slither/detectors/variables/uninitialized_local_variables.py b/slither/detectors/variables/uninitialized_local_variables.py --- a/slither/detectors/variables/uninitialized_local_variables.py +++ b/slither/detectors/variables/uninitialized_local_variables.py @@ -98,7 +98,7 @@ all_results = list(set(self.results)) for(function, uninitialized_local_variable) in all_results: - info = [uninitialized_local_variable, " is a local variable never initialiazed\n"] + info = [uninitialized_local_variable, " is a local variable never initialized\n"] json = self.generate_result(info) results.append(json) diff --git a/slither/detectors/variables/uninitialized_storage_variables.py b/slither/detectors/variables/uninitialized_storage_variables.py --- a/slither/detectors/variables/uninitialized_storage_variables.py +++ b/slither/detectors/variables/uninitialized_storage_variables.py @@ -101,7 +101,7 @@ self._detect_uninitialized(function, function.entry_point, []) for(function, uninitialized_storage_variable) in self.results: - info = [uninitialized_storage_variable, " is a storage variable never initialiazed\n"] + info = [uninitialized_storage_variable, " is a storage variable never initialized\n"] json = self.generate_result(info) results.append(json)
{"golden_diff": "diff --git a/slither/detectors/variables/uninitialized_local_variables.py b/slither/detectors/variables/uninitialized_local_variables.py\n--- a/slither/detectors/variables/uninitialized_local_variables.py\n+++ b/slither/detectors/variables/uninitialized_local_variables.py\n@@ -98,7 +98,7 @@\n all_results = list(set(self.results))\n for(function, uninitialized_local_variable) in all_results:\n \n- info = [uninitialized_local_variable, \" is a local variable never initialiazed\\n\"]\n+ info = [uninitialized_local_variable, \" is a local variable never initialized\\n\"]\n json = self.generate_result(info)\n results.append(json)\n \ndiff --git a/slither/detectors/variables/uninitialized_storage_variables.py b/slither/detectors/variables/uninitialized_storage_variables.py\n--- a/slither/detectors/variables/uninitialized_storage_variables.py\n+++ b/slither/detectors/variables/uninitialized_storage_variables.py\n@@ -101,7 +101,7 @@\n self._detect_uninitialized(function, function.entry_point, [])\n \n for(function, uninitialized_storage_variable) in self.results:\n- info = [uninitialized_storage_variable, \" is a storage variable never initialiazed\\n\"]\n+ info = [uninitialized_storage_variable, \" is a storage variable never initialized\\n\"]\n json = self.generate_result(info)\n results.append(json)\n", "issue": "Spelling mistake in detector output\nThe `uninitialized-local` and `uninitialized-storage` detectors each have a minor spelling mistake in their output. The word *initiali~a~zed* is misspelled.\r\n\r\n**Current output**\r\n\r\n```(tests/uninitialized_local_variable.sol#4) is a local variable never initialiazed```\r\n```(tests/uninitialized_storage_pointer.sol#10) is a storage variable never initialiazed```\r\n\r\n**Expected output**\r\n\r\n```(tests/uninitialized_local_variable.sol#4) is a local variable never initialized```\r\n```(tests/uninitialized_storage_pointer.sol#10) is a storage variable never initialized```\n", "before_files": [{"content": "\"\"\"\n Module detecting uninitialized storage variables\n\n Recursively explore the CFG to only report uninitialized storage variables that are\n written before being read\n\"\"\"\n\nfrom slither.detectors.abstract_detector import AbstractDetector, DetectorClassification\n\n\nclass UninitializedStorageVars(AbstractDetector):\n \"\"\"\n \"\"\"\n\n ARGUMENT = 'uninitialized-storage'\n HELP = 'Uninitialized storage variables'\n IMPACT = DetectorClassification.HIGH\n CONFIDENCE = DetectorClassification.HIGH\n\n WIKI = 'https://github.com/crytic/slither/wiki/Detector-Documentation#uninitialized-storage-variables'\n\n WIKI_TITLE = 'Uninitialized storage variables'\n WIKI_DESCRIPTION = 'An uinitialized storage variable will act as a reference to the first state variable, and can override a critical variable.'\n WIKI_EXPLOIT_SCENARIO = '''\n```solidity\ncontract Uninitialized{\n address owner = msg.sender;\n\n struct St{\n uint a;\n }\n\n function func() {\n St st;\n st.a = 0x0;\n }\n}\n```\nBob calls `func`. 
As a result, `owner` is override to 0.\n'''\n\n WIKI_RECOMMENDATION = 'Initialize all the storage variables.'\n\n # node.context[self.key] contains the uninitialized storage variables\n key = \"UNINITIALIZEDSTORAGE\"\n\n def _detect_uninitialized(self, function, node, visited):\n if node in visited:\n return\n\n visited = visited + [node]\n\n fathers_context = []\n\n for father in node.fathers:\n if self.key in father.context:\n fathers_context += father.context[self.key]\n\n # Exclude paths that dont bring further information\n if node in self.visited_all_paths:\n if all(f_c in self.visited_all_paths[node] for f_c in fathers_context):\n return\n else:\n self.visited_all_paths[node] = []\n\n self.visited_all_paths[node] = list(set(self.visited_all_paths[node] + fathers_context))\n\n if self.key in node.context:\n fathers_context += node.context[self.key]\n\n variables_read = node.variables_read\n for uninitialized_storage_variable in fathers_context:\n if uninitialized_storage_variable in variables_read:\n self.results.append((function, uninitialized_storage_variable))\n\n # Only save the storage variables that are not yet written\n uninitialized_storage_variables = list(set(fathers_context) - set(node.variables_written))\n node.context[self.key] = uninitialized_storage_variables\n\n for son in node.sons:\n self._detect_uninitialized(function, son, visited)\n\n\n def _detect(self):\n \"\"\" Detect uninitialized storage variables\n\n Recursively visit the calls\n Returns:\n dict: [contract name] = set(storage variable uninitialized)\n \"\"\"\n results = []\n\n self.results = []\n self.visited_all_paths = {}\n\n for contract in self.slither.contracts:\n for function in contract.functions:\n if function.is_implemented:\n uninitialized_storage_variables = [v for v in function.local_variables if v.is_storage and v.uninitialized]\n function.entry_point.context[self.key] = uninitialized_storage_variables\n self._detect_uninitialized(function, function.entry_point, [])\n\n for(function, uninitialized_storage_variable) in self.results:\n info = [uninitialized_storage_variable, \" is a storage variable never initialiazed\\n\"]\n json = self.generate_result(info)\n results.append(json)\n\n return results\n", "path": "slither/detectors/variables/uninitialized_storage_variables.py"}, {"content": "\"\"\"\n Module detecting uninitialized local variables\n\n Recursively explore the CFG to only report uninitialized local variables that are\n read before being written\n\"\"\"\n\nfrom slither.detectors.abstract_detector import AbstractDetector, DetectorClassification\n\n\nclass UninitializedLocalVars(AbstractDetector):\n \"\"\"\n \"\"\"\n\n ARGUMENT = 'uninitialized-local'\n HELP = 'Uninitialized local variables'\n IMPACT = DetectorClassification.MEDIUM\n CONFIDENCE = DetectorClassification.MEDIUM\n\n WIKI = 'https://github.com/crytic/slither/wiki/Detector-Documentation#uninitialized-local-variables'\n\n\n WIKI_TITLE = 'Uninitialized local variables'\n WIKI_DESCRIPTION = 'Uninitialized local variables.'\n WIKI_EXPLOIT_SCENARIO = '''\n```solidity\ncontract Uninitialized is Owner{\n function withdraw() payable public onlyOwner{\n address to;\n to.transfer(this.balance)\n }\n}\n```\nBob calls `transfer`. As a result, the ethers are sent to the address 0x0 and are lost.'''\n\n WIKI_RECOMMENDATION = 'Initialize all the variables. 
If a variable is meant to be initialized to zero, explicitly set it to zero.'\n\n key = \"UNINITIALIZEDLOCAL\"\n\n def _detect_uninitialized(self, function, node, visited):\n if node in visited:\n return\n\n visited = visited + [node]\n\n fathers_context = []\n\n for father in node.fathers:\n if self.key in father.context:\n fathers_context += father.context[self.key]\n\n # Exclude path that dont bring further information\n if node in self.visited_all_paths:\n if all(f_c in self.visited_all_paths[node] for f_c in fathers_context):\n return\n else:\n self.visited_all_paths[node] = []\n\n self.visited_all_paths[node] = list(set(self.visited_all_paths[node] + fathers_context))\n\n if self.key in node.context:\n fathers_context += node.context[self.key]\n\n variables_read = node.variables_read\n for uninitialized_local_variable in fathers_context:\n if uninitialized_local_variable in variables_read:\n self.results.append((function, uninitialized_local_variable))\n\n # Only save the local variables that are not yet written\n uninitialized_local_variables = list(set(fathers_context) - set(node.variables_written))\n node.context[self.key] = uninitialized_local_variables\n\n for son in node.sons:\n self._detect_uninitialized(function, son, visited)\n\n\n def _detect(self):\n \"\"\" Detect uninitialized local variables\n\n Recursively visit the calls\n Returns:\n dict: [contract name] = set(local variable uninitialized)\n \"\"\"\n results = []\n\n self.results = []\n self.visited_all_paths = {}\n\n for contract in self.slither.contracts:\n for function in contract.functions:\n if function.is_implemented and function.contract_declarer == contract:\n if function.contains_assembly:\n continue\n # dont consider storage variable, as they are detected by another detector\n uninitialized_local_variables = [v for v in function.local_variables if not v.is_storage and v.uninitialized]\n function.entry_point.context[self.key] = uninitialized_local_variables\n self._detect_uninitialized(function, function.entry_point, [])\n all_results = list(set(self.results))\n for(function, uninitialized_local_variable) in all_results:\n\n info = [uninitialized_local_variable, \" is a local variable never initialiazed\\n\"]\n json = self.generate_result(info)\n results.append(json)\n\n return results\n", "path": "slither/detectors/variables/uninitialized_local_variables.py"}], "after_files": [{"content": "\"\"\"\n Module detecting uninitialized storage variables\n\n Recursively explore the CFG to only report uninitialized storage variables that are\n written before being read\n\"\"\"\n\nfrom slither.detectors.abstract_detector import AbstractDetector, DetectorClassification\n\n\nclass UninitializedStorageVars(AbstractDetector):\n \"\"\"\n \"\"\"\n\n ARGUMENT = 'uninitialized-storage'\n HELP = 'Uninitialized storage variables'\n IMPACT = DetectorClassification.HIGH\n CONFIDENCE = DetectorClassification.HIGH\n\n WIKI = 'https://github.com/crytic/slither/wiki/Detector-Documentation#uninitialized-storage-variables'\n\n WIKI_TITLE = 'Uninitialized storage variables'\n WIKI_DESCRIPTION = 'An uinitialized storage variable will act as a reference to the first state variable, and can override a critical variable.'\n WIKI_EXPLOIT_SCENARIO = '''\n```solidity\ncontract Uninitialized{\n address owner = msg.sender;\n\n struct St{\n uint a;\n }\n\n function func() {\n St st;\n st.a = 0x0;\n }\n}\n```\nBob calls `func`. 
As a result, `owner` is override to 0.\n'''\n\n WIKI_RECOMMENDATION = 'Initialize all the storage variables.'\n\n # node.context[self.key] contains the uninitialized storage variables\n key = \"UNINITIALIZEDSTORAGE\"\n\n def _detect_uninitialized(self, function, node, visited):\n if node in visited:\n return\n\n visited = visited + [node]\n\n fathers_context = []\n\n for father in node.fathers:\n if self.key in father.context:\n fathers_context += father.context[self.key]\n\n # Exclude paths that dont bring further information\n if node in self.visited_all_paths:\n if all(f_c in self.visited_all_paths[node] for f_c in fathers_context):\n return\n else:\n self.visited_all_paths[node] = []\n\n self.visited_all_paths[node] = list(set(self.visited_all_paths[node] + fathers_context))\n\n if self.key in node.context:\n fathers_context += node.context[self.key]\n\n variables_read = node.variables_read\n for uninitialized_storage_variable in fathers_context:\n if uninitialized_storage_variable in variables_read:\n self.results.append((function, uninitialized_storage_variable))\n\n # Only save the storage variables that are not yet written\n uninitialized_storage_variables = list(set(fathers_context) - set(node.variables_written))\n node.context[self.key] = uninitialized_storage_variables\n\n for son in node.sons:\n self._detect_uninitialized(function, son, visited)\n\n\n def _detect(self):\n \"\"\" Detect uninitialized storage variables\n\n Recursively visit the calls\n Returns:\n dict: [contract name] = set(storage variable uninitialized)\n \"\"\"\n results = []\n\n self.results = []\n self.visited_all_paths = {}\n\n for contract in self.slither.contracts:\n for function in contract.functions:\n if function.is_implemented:\n uninitialized_storage_variables = [v for v in function.local_variables if v.is_storage and v.uninitialized]\n function.entry_point.context[self.key] = uninitialized_storage_variables\n self._detect_uninitialized(function, function.entry_point, [])\n\n for(function, uninitialized_storage_variable) in self.results:\n info = [uninitialized_storage_variable, \" is a storage variable never initialized\\n\"]\n json = self.generate_result(info)\n results.append(json)\n\n return results\n", "path": "slither/detectors/variables/uninitialized_storage_variables.py"}, {"content": "\"\"\"\n Module detecting uninitialized local variables\n\n Recursively explore the CFG to only report uninitialized local variables that are\n read before being written\n\"\"\"\n\nfrom slither.detectors.abstract_detector import AbstractDetector, DetectorClassification\n\n\nclass UninitializedLocalVars(AbstractDetector):\n \"\"\"\n \"\"\"\n\n ARGUMENT = 'uninitialized-local'\n HELP = 'Uninitialized local variables'\n IMPACT = DetectorClassification.MEDIUM\n CONFIDENCE = DetectorClassification.MEDIUM\n\n WIKI = 'https://github.com/crytic/slither/wiki/Detector-Documentation#uninitialized-local-variables'\n\n\n WIKI_TITLE = 'Uninitialized local variables'\n WIKI_DESCRIPTION = 'Uninitialized local variables.'\n WIKI_EXPLOIT_SCENARIO = '''\n```solidity\ncontract Uninitialized is Owner{\n function withdraw() payable public onlyOwner{\n address to;\n to.transfer(this.balance)\n }\n}\n```\nBob calls `transfer`. As a result, the ethers are sent to the address 0x0 and are lost.'''\n\n WIKI_RECOMMENDATION = 'Initialize all the variables. 
If a variable is meant to be initialized to zero, explicitly set it to zero.'\n\n key = \"UNINITIALIZEDLOCAL\"\n\n def _detect_uninitialized(self, function, node, visited):\n if node in visited:\n return\n\n visited = visited + [node]\n\n fathers_context = []\n\n for father in node.fathers:\n if self.key in father.context:\n fathers_context += father.context[self.key]\n\n # Exclude path that dont bring further information\n if node in self.visited_all_paths:\n if all(f_c in self.visited_all_paths[node] for f_c in fathers_context):\n return\n else:\n self.visited_all_paths[node] = []\n\n self.visited_all_paths[node] = list(set(self.visited_all_paths[node] + fathers_context))\n\n if self.key in node.context:\n fathers_context += node.context[self.key]\n\n variables_read = node.variables_read\n for uninitialized_local_variable in fathers_context:\n if uninitialized_local_variable in variables_read:\n self.results.append((function, uninitialized_local_variable))\n\n # Only save the local variables that are not yet written\n uninitialized_local_variables = list(set(fathers_context) - set(node.variables_written))\n node.context[self.key] = uninitialized_local_variables\n\n for son in node.sons:\n self._detect_uninitialized(function, son, visited)\n\n\n def _detect(self):\n \"\"\" Detect uninitialized local variables\n\n Recursively visit the calls\n Returns:\n dict: [contract name] = set(local variable uninitialized)\n \"\"\"\n results = []\n\n self.results = []\n self.visited_all_paths = {}\n\n for contract in self.slither.contracts:\n for function in contract.functions:\n if function.is_implemented and function.contract_declarer == contract:\n if function.contains_assembly:\n continue\n # dont consider storage variable, as they are detected by another detector\n uninitialized_local_variables = [v for v in function.local_variables if not v.is_storage and v.uninitialized]\n function.entry_point.context[self.key] = uninitialized_local_variables\n self._detect_uninitialized(function, function.entry_point, [])\n all_results = list(set(self.results))\n for(function, uninitialized_local_variable) in all_results:\n\n info = [uninitialized_local_variable, \" is a local variable never initialized\\n\"]\n json = self.generate_result(info)\n results.append(json)\n\n return results\n", "path": "slither/detectors/variables/uninitialized_local_variables.py"}]}
2,346
301
gh_patches_debug_4560
rasdani/github-patches
git_diff
comic__grand-challenge.org-1012
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- "Run algorithm" page is missing the breadcrumb bar Maybe this is on purpose, but I find it confusing that the page that allows users to upload images and run an algorithm does not show the breadcrumb bar to get back to the algorithm overview page. Example: https://grand-challenge.org/algorithms/vertebra-segmentation/run/ To be consistent with the job list the title of that page could also read "Run this algorithm" instead "Run an algorithm" --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `app/grandchallenge/reader_studies/views.py` Content: ``` 1 import csv 2 import re 3 4 from dal import autocomplete 5 from django.conf import settings 6 from django.contrib import messages 7 from django.contrib.auth import get_user_model 8 from django.contrib.auth.mixins import ( 9 PermissionRequiredMixin, 10 UserPassesTestMixin, 11 ) 12 from django.contrib.messages.views import SuccessMessageMixin 13 from django.http import Http404, HttpResponse 14 from django.views.generic import ( 15 CreateView, 16 DetailView, 17 FormView, 18 ListView, 19 UpdateView, 20 ) 21 from guardian.mixins import ( 22 LoginRequiredMixin, 23 PermissionListMixin, 24 PermissionRequiredMixin as ObjectPermissionRequiredMixin, 25 ) 26 from guardian.shortcuts import get_perms 27 from rest_framework.decorators import action 28 from rest_framework.mixins import ( 29 CreateModelMixin, 30 ListModelMixin, 31 RetrieveModelMixin, 32 ) 33 from rest_framework.permissions import DjangoObjectPermissions 34 from rest_framework.response import Response 35 from rest_framework.viewsets import ( 36 GenericViewSet, 37 ReadOnlyModelViewSet, 38 ) 39 from rest_framework_guardian.filters import ObjectPermissionsFilter 40 41 from grandchallenge.cases.forms import UploadRawImagesForm 42 from grandchallenge.cases.models import RawImageUploadSession 43 from grandchallenge.core.permissions.rest_framework import ( 44 DjangoObjectOnlyPermissions, 45 ) 46 from grandchallenge.reader_studies.forms import ( 47 EditorsForm, 48 QuestionForm, 49 ReaderStudyCreateForm, 50 ReaderStudyUpdateForm, 51 ReadersForm, 52 ) 53 from grandchallenge.reader_studies.models import Answer, Question, ReaderStudy 54 from grandchallenge.reader_studies.serializers import ( 55 AnswerSerializer, 56 QuestionSerializer, 57 ReaderStudySerializer, 58 ) 59 60 61 class ReaderStudyList(LoginRequiredMixin, PermissionListMixin, ListView): 62 model = ReaderStudy 63 permission_required = ( 64 f"{ReaderStudy._meta.app_label}.view_{ReaderStudy._meta.model_name}" 65 ) 66 67 68 class ReaderStudyCreate( 69 LoginRequiredMixin, PermissionRequiredMixin, CreateView 70 ): 71 model = ReaderStudy 72 form_class = ReaderStudyCreateForm 73 permission_required = ( 74 f"{ReaderStudy._meta.app_label}.add_{ReaderStudy._meta.model_name}" 75 ) 76 77 def get_form_kwargs(self): 78 kwargs = super().get_form_kwargs() 79 kwargs.update({"user": self.request.user}) 80 return kwargs 81 82 def form_valid(self, form): 83 response = super().form_valid(form) 84 self.object.add_editor(self.request.user) 85 return response 86 87 88 class ReaderStudyDetail( 89 LoginRequiredMixin, ObjectPermissionRequiredMixin, DetailView 90 ): 91 model = ReaderStudy 92 permission_required = ( 93 f"{ReaderStudy._meta.app_label}.view_{ReaderStudy._meta.model_name}" 94 ) 95 raise_exception = True 96 97 def get_context_data(self, **kwargs): 98 context = 
super().get_context_data(**kwargs) 99 change_perm = f"change_{ReaderStudy._meta.model_name}" 100 if change_perm in get_perms(self.request.user, self.object): 101 readers = [ 102 { 103 "obj": reader, 104 "progress": self.object.get_progress_for_user(reader), 105 } 106 for reader in self.object.readers_group.user_set.all() 107 ] 108 context.update({"readers": readers}) 109 else: 110 user_progress = self.object.get_progress_for_user( 111 self.request.user 112 ) 113 context.update({"progress": user_progress}) 114 context.update( 115 {"user_is_reader": self.object.is_reader(user=self.request.user)} 116 ) 117 return context 118 119 120 class ReaderStudyUpdate( 121 LoginRequiredMixin, ObjectPermissionRequiredMixin, UpdateView 122 ): 123 model = ReaderStudy 124 form_class = ReaderStudyUpdateForm 125 permission_required = ( 126 f"{ReaderStudy._meta.app_label}.change_{ReaderStudy._meta.model_name}" 127 ) 128 raise_exception = True 129 130 def get_form_kwargs(self): 131 kwargs = super().get_form_kwargs() 132 kwargs.update({"user": self.request.user}) 133 return kwargs 134 135 136 class QuestionUpdate( 137 LoginRequiredMixin, ObjectPermissionRequiredMixin, UpdateView 138 ): 139 model = Question 140 form_class = QuestionForm 141 template_name = "reader_studies/readerstudy_update_object.html" 142 permission_required = ( 143 f"{ReaderStudy._meta.app_label}.change_{ReaderStudy._meta.model_name}" 144 ) 145 raise_exception = True 146 147 def get_permission_object(self): 148 return self.reader_study 149 150 @property 151 def reader_study(self): 152 return ReaderStudy.objects.get(slug=self.kwargs["slug"]) 153 154 def get_context_data(self, **kwargs): 155 context = super().get_context_data(**kwargs) 156 form_fields = context["form"].fields 157 for field_name in self.object.read_only_fields: 158 form_fields[field_name].required = False 159 form_fields[field_name].disabled = True 160 161 return context 162 163 def get_success_url(self): 164 return self.object.reader_study.get_absolute_url() 165 166 167 class AddObjectToReaderStudyMixin( 168 LoginRequiredMixin, ObjectPermissionRequiredMixin, CreateView 169 ): 170 """ 171 Mixin that adds an object that has a foreign key to a reader study and a 172 creator. The url to this view must include a slug that points to the slug 173 of the reader study. 174 175 Must be placed to the left of ObjectPermissionRequiredMixin. 
176 """ 177 178 permission_required = ( 179 f"{ReaderStudy._meta.app_label}.change_{ReaderStudy._meta.model_name}" 180 ) 181 raise_exception = True 182 183 def get_permission_object(self): 184 return self.reader_study 185 186 @property 187 def reader_study(self): 188 return ReaderStudy.objects.get(slug=self.kwargs["slug"]) 189 190 def get_context_data(self, **kwargs): 191 context = super().get_context_data(**kwargs) 192 context.update( 193 {"object": self.reader_study, "type_to_add": self.type_to_add} 194 ) 195 return context 196 197 def form_valid(self, form): 198 form.instance.creator = self.request.user 199 form.instance.reader_study = self.reader_study 200 return super().form_valid(form) 201 202 def get_success_url(self): 203 return self.object.reader_study.get_absolute_url() 204 205 206 class AddImagesToReaderStudy(AddObjectToReaderStudyMixin): 207 model = RawImageUploadSession 208 form_class = UploadRawImagesForm 209 template_name = "reader_studies/readerstudy_add_object.html" 210 type_to_add = "images" 211 212 def get_form_kwargs(self): 213 kwargs = super().get_form_kwargs() 214 kwargs.update({"user": self.request.user}) 215 return kwargs 216 217 218 class AddQuestionToReaderStudy(AddObjectToReaderStudyMixin): 219 model = Question 220 form_class = QuestionForm 221 template_name = "reader_studies/readerstudy_add_object.html" 222 type_to_add = "question" 223 224 225 class ReaderStudyUserAutocomplete( 226 LoginRequiredMixin, UserPassesTestMixin, autocomplete.Select2QuerySetView 227 ): 228 def test_func(self): 229 group_pks = ( 230 ReaderStudy.objects.all() 231 .select_related("editors_group") 232 .values_list("editors_group__pk", flat=True) 233 ) 234 return ( 235 self.request.user.is_superuser 236 or self.request.user.groups.filter(pk__in=group_pks).exists() 237 ) 238 239 def get_queryset(self): 240 qs = ( 241 get_user_model() 242 .objects.all() 243 .order_by("username") 244 .exclude(username=settings.ANONYMOUS_USER_NAME) 245 ) 246 247 if self.q: 248 qs = qs.filter(username__istartswith=self.q) 249 250 return qs 251 252 253 class ReaderStudyUserGroupUpdateMixin( 254 LoginRequiredMixin, 255 ObjectPermissionRequiredMixin, 256 SuccessMessageMixin, 257 FormView, 258 ): 259 template_name = "reader_studies/readerstudy_user_groups_form.html" 260 permission_required = ( 261 f"{ReaderStudy._meta.app_label}.change_{ReaderStudy._meta.model_name}" 262 ) 263 raise_exception = True 264 265 def get_permission_object(self): 266 return self.reader_study 267 268 @property 269 def reader_study(self): 270 return ReaderStudy.objects.get(slug=self.kwargs["slug"]) 271 272 def get_context_data(self, **kwargs): 273 context = super().get_context_data(**kwargs) 274 context.update( 275 {"object": self.reader_study, "role": self.get_form().role} 276 ) 277 return context 278 279 def get_success_url(self): 280 return self.reader_study.get_absolute_url() 281 282 def form_valid(self, form): 283 form.add_or_remove_user(reader_study=self.reader_study) 284 return super().form_valid(form) 285 286 287 class EditorsUpdate(ReaderStudyUserGroupUpdateMixin): 288 form_class = EditorsForm 289 success_message = "Editors successfully updated" 290 291 292 class ReadersUpdate(ReaderStudyUserGroupUpdateMixin): 293 form_class = ReadersForm 294 success_message = "Readers successfully updated" 295 296 297 class ExportCSVMixin(object): 298 def _create_dicts(self, headers, data): 299 return map(lambda x: dict(zip(headers, x)), data) 300 301 def _preprocess_data(self, data): 302 processed = [] 303 for entry in data: 304 processed.append( 
305 map(lambda x: re.sub(r"[\n\r\t]", " ", str(x)), entry) 306 ) 307 return processed 308 309 def _create_csv_response(self, data, headers, filename="export.csv"): 310 response = HttpResponse(content_type="text/csv") 311 response["Content-Disposition"] = f'attachment; filename="{filename}"' 312 writer = csv.DictWriter( 313 response, 314 quoting=csv.QUOTE_ALL, 315 escapechar="\\", 316 fieldnames=headers, 317 ) 318 writer.writeheader() 319 csv_dict = self._create_dicts(headers, self._preprocess_data(data)) 320 writer.writerows(csv_dict) 321 322 return response 323 324 325 class ReaderStudyViewSet(ExportCSVMixin, ReadOnlyModelViewSet): 326 serializer_class = ReaderStudySerializer 327 queryset = ReaderStudy.objects.all().prefetch_related( 328 "images", "questions" 329 ) 330 permission_classes = [DjangoObjectOnlyPermissions] 331 filter_backends = [ObjectPermissionsFilter] 332 change_permission = ( 333 f"{ReaderStudy._meta.app_label}.change_{ReaderStudy._meta.model_name}" 334 ) 335 336 def _check_change_perms(self, user, obj): 337 if not (user and user.has_perm(self.change_permission, obj)): 338 raise Http404() 339 340 @action(detail=True) 341 def export_answers(self, request, pk=None): 342 reader_study = self.get_object() 343 self._check_change_perms(request.user, reader_study) 344 345 data = [ 346 answer.csv_values 347 for answer in Answer.objects.select_related( 348 "question__reader_study" 349 ) 350 .select_related("creator") 351 .prefetch_related("images") 352 .filter(question__reader_study=reader_study) 353 ] 354 355 return self._create_csv_response( 356 data, 357 Answer.csv_headers, 358 filename=f"{reader_study.slug}-answers.csv", 359 ) 360 361 @action(detail=True, methods=["patch"]) 362 def generate_hanging_list(self, request, pk=None): 363 reader_study = self.get_object() 364 reader_study.generate_hanging_list() 365 messages.add_message( 366 request, messages.SUCCESS, "Hanging list re-generated." 367 ) 368 return Response({"status": "Hanging list generated."},) 369 370 371 class QuestionViewSet(ReadOnlyModelViewSet): 372 serializer_class = QuestionSerializer 373 queryset = Question.objects.all().select_related("reader_study") 374 permission_classes = [DjangoObjectPermissions] 375 filter_backends = [ObjectPermissionsFilter] 376 377 378 class AnswerViewSet( 379 CreateModelMixin, RetrieveModelMixin, ListModelMixin, GenericViewSet 380 ): 381 serializer_class = AnswerSerializer 382 queryset = ( 383 Answer.objects.all() 384 .select_related("creator") 385 .prefetch_related("images") 386 ) 387 permission_classes = [DjangoObjectPermissions] 388 filter_backends = [ObjectPermissionsFilter] 389 390 def perform_create(self, serializer): 391 serializer.save(creator=self.request.user) 392 393 @action(detail=False) 394 def mine(self, request): 395 """ 396 An endpoint that returns the questions that have been answered by 397 the current user. 398 """ 399 queryset = self.filter_queryset( 400 self.get_queryset().filter(creator=request.user) 401 ) 402 403 page = self.paginate_queryset(queryset) 404 if page is not None: 405 serializer = self.get_serializer(page, many=True) 406 return self.get_paginated_response(serializer.data) 407 408 serializer = self.get_serializer(queryset, many=True) 409 return Response(serializer.data) 410 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/app/grandchallenge/reader_studies/views.py b/app/grandchallenge/reader_studies/views.py --- a/app/grandchallenge/reader_studies/views.py +++ b/app/grandchallenge/reader_studies/views.py @@ -157,7 +157,7 @@ for field_name in self.object.read_only_fields: form_fields[field_name].required = False form_fields[field_name].disabled = True - + context.update({"reader_study": self.reader_study}) return context def get_success_url(self):
{"golden_diff": "diff --git a/app/grandchallenge/reader_studies/views.py b/app/grandchallenge/reader_studies/views.py\n--- a/app/grandchallenge/reader_studies/views.py\n+++ b/app/grandchallenge/reader_studies/views.py\n@@ -157,7 +157,7 @@\n for field_name in self.object.read_only_fields:\n form_fields[field_name].required = False\n form_fields[field_name].disabled = True\n-\n+ context.update({\"reader_study\": self.reader_study})\n return context\n \n def get_success_url(self):\n", "issue": "\"Run algorithm\" page is missing the breadcrumb bar\nMaybe this is on purpose, but I find it confusing that the page that allows users to upload images and run an algorithm does not show the breadcrumb bar to get back to the algorithm overview page.\r\n\r\nExample: https://grand-challenge.org/algorithms/vertebra-segmentation/run/\r\n\r\nTo be consistent with the job list the title of that page could also read \"Run this algorithm\" instead \"Run an algorithm\"\n", "before_files": [{"content": "import csv\nimport re\n\nfrom dal import autocomplete\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.mixins import (\n PermissionRequiredMixin,\n UserPassesTestMixin,\n)\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.http import Http404, HttpResponse\nfrom django.views.generic import (\n CreateView,\n DetailView,\n FormView,\n ListView,\n UpdateView,\n)\nfrom guardian.mixins import (\n LoginRequiredMixin,\n PermissionListMixin,\n PermissionRequiredMixin as ObjectPermissionRequiredMixin,\n)\nfrom guardian.shortcuts import get_perms\nfrom rest_framework.decorators import action\nfrom rest_framework.mixins import (\n CreateModelMixin,\n ListModelMixin,\n RetrieveModelMixin,\n)\nfrom rest_framework.permissions import DjangoObjectPermissions\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import (\n GenericViewSet,\n ReadOnlyModelViewSet,\n)\nfrom rest_framework_guardian.filters import ObjectPermissionsFilter\n\nfrom grandchallenge.cases.forms import UploadRawImagesForm\nfrom grandchallenge.cases.models import RawImageUploadSession\nfrom grandchallenge.core.permissions.rest_framework import (\n DjangoObjectOnlyPermissions,\n)\nfrom grandchallenge.reader_studies.forms import (\n EditorsForm,\n QuestionForm,\n ReaderStudyCreateForm,\n ReaderStudyUpdateForm,\n ReadersForm,\n)\nfrom grandchallenge.reader_studies.models import Answer, Question, ReaderStudy\nfrom grandchallenge.reader_studies.serializers import (\n AnswerSerializer,\n QuestionSerializer,\n ReaderStudySerializer,\n)\n\n\nclass ReaderStudyList(LoginRequiredMixin, PermissionListMixin, ListView):\n model = ReaderStudy\n permission_required = (\n f\"{ReaderStudy._meta.app_label}.view_{ReaderStudy._meta.model_name}\"\n )\n\n\nclass ReaderStudyCreate(\n LoginRequiredMixin, PermissionRequiredMixin, CreateView\n):\n model = ReaderStudy\n form_class = ReaderStudyCreateForm\n permission_required = (\n f\"{ReaderStudy._meta.app_label}.add_{ReaderStudy._meta.model_name}\"\n )\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs.update({\"user\": self.request.user})\n return kwargs\n\n def form_valid(self, form):\n response = super().form_valid(form)\n self.object.add_editor(self.request.user)\n return response\n\n\nclass ReaderStudyDetail(\n LoginRequiredMixin, ObjectPermissionRequiredMixin, DetailView\n):\n model = ReaderStudy\n permission_required = (\n 
f\"{ReaderStudy._meta.app_label}.view_{ReaderStudy._meta.model_name}\"\n )\n raise_exception = True\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n change_perm = f\"change_{ReaderStudy._meta.model_name}\"\n if change_perm in get_perms(self.request.user, self.object):\n readers = [\n {\n \"obj\": reader,\n \"progress\": self.object.get_progress_for_user(reader),\n }\n for reader in self.object.readers_group.user_set.all()\n ]\n context.update({\"readers\": readers})\n else:\n user_progress = self.object.get_progress_for_user(\n self.request.user\n )\n context.update({\"progress\": user_progress})\n context.update(\n {\"user_is_reader\": self.object.is_reader(user=self.request.user)}\n )\n return context\n\n\nclass ReaderStudyUpdate(\n LoginRequiredMixin, ObjectPermissionRequiredMixin, UpdateView\n):\n model = ReaderStudy\n form_class = ReaderStudyUpdateForm\n permission_required = (\n f\"{ReaderStudy._meta.app_label}.change_{ReaderStudy._meta.model_name}\"\n )\n raise_exception = True\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs.update({\"user\": self.request.user})\n return kwargs\n\n\nclass QuestionUpdate(\n LoginRequiredMixin, ObjectPermissionRequiredMixin, UpdateView\n):\n model = Question\n form_class = QuestionForm\n template_name = \"reader_studies/readerstudy_update_object.html\"\n permission_required = (\n f\"{ReaderStudy._meta.app_label}.change_{ReaderStudy._meta.model_name}\"\n )\n raise_exception = True\n\n def get_permission_object(self):\n return self.reader_study\n\n @property\n def reader_study(self):\n return ReaderStudy.objects.get(slug=self.kwargs[\"slug\"])\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n form_fields = context[\"form\"].fields\n for field_name in self.object.read_only_fields:\n form_fields[field_name].required = False\n form_fields[field_name].disabled = True\n\n return context\n\n def get_success_url(self):\n return self.object.reader_study.get_absolute_url()\n\n\nclass AddObjectToReaderStudyMixin(\n LoginRequiredMixin, ObjectPermissionRequiredMixin, CreateView\n):\n \"\"\"\n Mixin that adds an object that has a foreign key to a reader study and a\n creator. 
The url to this view must include a slug that points to the slug\n of the reader study.\n\n Must be placed to the left of ObjectPermissionRequiredMixin.\n \"\"\"\n\n permission_required = (\n f\"{ReaderStudy._meta.app_label}.change_{ReaderStudy._meta.model_name}\"\n )\n raise_exception = True\n\n def get_permission_object(self):\n return self.reader_study\n\n @property\n def reader_study(self):\n return ReaderStudy.objects.get(slug=self.kwargs[\"slug\"])\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context.update(\n {\"object\": self.reader_study, \"type_to_add\": self.type_to_add}\n )\n return context\n\n def form_valid(self, form):\n form.instance.creator = self.request.user\n form.instance.reader_study = self.reader_study\n return super().form_valid(form)\n\n def get_success_url(self):\n return self.object.reader_study.get_absolute_url()\n\n\nclass AddImagesToReaderStudy(AddObjectToReaderStudyMixin):\n model = RawImageUploadSession\n form_class = UploadRawImagesForm\n template_name = \"reader_studies/readerstudy_add_object.html\"\n type_to_add = \"images\"\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs.update({\"user\": self.request.user})\n return kwargs\n\n\nclass AddQuestionToReaderStudy(AddObjectToReaderStudyMixin):\n model = Question\n form_class = QuestionForm\n template_name = \"reader_studies/readerstudy_add_object.html\"\n type_to_add = \"question\"\n\n\nclass ReaderStudyUserAutocomplete(\n LoginRequiredMixin, UserPassesTestMixin, autocomplete.Select2QuerySetView\n):\n def test_func(self):\n group_pks = (\n ReaderStudy.objects.all()\n .select_related(\"editors_group\")\n .values_list(\"editors_group__pk\", flat=True)\n )\n return (\n self.request.user.is_superuser\n or self.request.user.groups.filter(pk__in=group_pks).exists()\n )\n\n def get_queryset(self):\n qs = (\n get_user_model()\n .objects.all()\n .order_by(\"username\")\n .exclude(username=settings.ANONYMOUS_USER_NAME)\n )\n\n if self.q:\n qs = qs.filter(username__istartswith=self.q)\n\n return qs\n\n\nclass ReaderStudyUserGroupUpdateMixin(\n LoginRequiredMixin,\n ObjectPermissionRequiredMixin,\n SuccessMessageMixin,\n FormView,\n):\n template_name = \"reader_studies/readerstudy_user_groups_form.html\"\n permission_required = (\n f\"{ReaderStudy._meta.app_label}.change_{ReaderStudy._meta.model_name}\"\n )\n raise_exception = True\n\n def get_permission_object(self):\n return self.reader_study\n\n @property\n def reader_study(self):\n return ReaderStudy.objects.get(slug=self.kwargs[\"slug\"])\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context.update(\n {\"object\": self.reader_study, \"role\": self.get_form().role}\n )\n return context\n\n def get_success_url(self):\n return self.reader_study.get_absolute_url()\n\n def form_valid(self, form):\n form.add_or_remove_user(reader_study=self.reader_study)\n return super().form_valid(form)\n\n\nclass EditorsUpdate(ReaderStudyUserGroupUpdateMixin):\n form_class = EditorsForm\n success_message = \"Editors successfully updated\"\n\n\nclass ReadersUpdate(ReaderStudyUserGroupUpdateMixin):\n form_class = ReadersForm\n success_message = \"Readers successfully updated\"\n\n\nclass ExportCSVMixin(object):\n def _create_dicts(self, headers, data):\n return map(lambda x: dict(zip(headers, x)), data)\n\n def _preprocess_data(self, data):\n processed = []\n for entry in data:\n processed.append(\n map(lambda x: re.sub(r\"[\\n\\r\\t]\", \" \", str(x)), entry)\n )\n 
return processed\n\n def _create_csv_response(self, data, headers, filename=\"export.csv\"):\n response = HttpResponse(content_type=\"text/csv\")\n response[\"Content-Disposition\"] = f'attachment; filename=\"{filename}\"'\n writer = csv.DictWriter(\n response,\n quoting=csv.QUOTE_ALL,\n escapechar=\"\\\\\",\n fieldnames=headers,\n )\n writer.writeheader()\n csv_dict = self._create_dicts(headers, self._preprocess_data(data))\n writer.writerows(csv_dict)\n\n return response\n\n\nclass ReaderStudyViewSet(ExportCSVMixin, ReadOnlyModelViewSet):\n serializer_class = ReaderStudySerializer\n queryset = ReaderStudy.objects.all().prefetch_related(\n \"images\", \"questions\"\n )\n permission_classes = [DjangoObjectOnlyPermissions]\n filter_backends = [ObjectPermissionsFilter]\n change_permission = (\n f\"{ReaderStudy._meta.app_label}.change_{ReaderStudy._meta.model_name}\"\n )\n\n def _check_change_perms(self, user, obj):\n if not (user and user.has_perm(self.change_permission, obj)):\n raise Http404()\n\n @action(detail=True)\n def export_answers(self, request, pk=None):\n reader_study = self.get_object()\n self._check_change_perms(request.user, reader_study)\n\n data = [\n answer.csv_values\n for answer in Answer.objects.select_related(\n \"question__reader_study\"\n )\n .select_related(\"creator\")\n .prefetch_related(\"images\")\n .filter(question__reader_study=reader_study)\n ]\n\n return self._create_csv_response(\n data,\n Answer.csv_headers,\n filename=f\"{reader_study.slug}-answers.csv\",\n )\n\n @action(detail=True, methods=[\"patch\"])\n def generate_hanging_list(self, request, pk=None):\n reader_study = self.get_object()\n reader_study.generate_hanging_list()\n messages.add_message(\n request, messages.SUCCESS, \"Hanging list re-generated.\"\n )\n return Response({\"status\": \"Hanging list generated.\"},)\n\n\nclass QuestionViewSet(ReadOnlyModelViewSet):\n serializer_class = QuestionSerializer\n queryset = Question.objects.all().select_related(\"reader_study\")\n permission_classes = [DjangoObjectPermissions]\n filter_backends = [ObjectPermissionsFilter]\n\n\nclass AnswerViewSet(\n CreateModelMixin, RetrieveModelMixin, ListModelMixin, GenericViewSet\n):\n serializer_class = AnswerSerializer\n queryset = (\n Answer.objects.all()\n .select_related(\"creator\")\n .prefetch_related(\"images\")\n )\n permission_classes = [DjangoObjectPermissions]\n filter_backends = [ObjectPermissionsFilter]\n\n def perform_create(self, serializer):\n serializer.save(creator=self.request.user)\n\n @action(detail=False)\n def mine(self, request):\n \"\"\"\n An endpoint that returns the questions that have been answered by\n the current user.\n \"\"\"\n queryset = self.filter_queryset(\n self.get_queryset().filter(creator=request.user)\n )\n\n page = self.paginate_queryset(queryset)\n if page is not None:\n serializer = self.get_serializer(page, many=True)\n return self.get_paginated_response(serializer.data)\n\n serializer = self.get_serializer(queryset, many=True)\n return Response(serializer.data)\n", "path": "app/grandchallenge/reader_studies/views.py"}], "after_files": [{"content": "import csv\nimport re\n\nfrom dal import autocomplete\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.mixins import (\n PermissionRequiredMixin,\n UserPassesTestMixin,\n)\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.http import Http404, HttpResponse\nfrom django.views.generic import (\n CreateView,\n 
DetailView,\n FormView,\n ListView,\n UpdateView,\n)\nfrom guardian.mixins import (\n LoginRequiredMixin,\n PermissionListMixin,\n PermissionRequiredMixin as ObjectPermissionRequiredMixin,\n)\nfrom guardian.shortcuts import get_perms\nfrom rest_framework.decorators import action\nfrom rest_framework.mixins import (\n CreateModelMixin,\n ListModelMixin,\n RetrieveModelMixin,\n)\nfrom rest_framework.permissions import DjangoObjectPermissions\nfrom rest_framework.response import Response\nfrom rest_framework.viewsets import (\n GenericViewSet,\n ReadOnlyModelViewSet,\n)\nfrom rest_framework_guardian.filters import ObjectPermissionsFilter\n\nfrom grandchallenge.cases.forms import UploadRawImagesForm\nfrom grandchallenge.cases.models import RawImageUploadSession\nfrom grandchallenge.core.permissions.rest_framework import (\n DjangoObjectOnlyPermissions,\n)\nfrom grandchallenge.reader_studies.forms import (\n EditorsForm,\n QuestionForm,\n ReaderStudyCreateForm,\n ReaderStudyUpdateForm,\n ReadersForm,\n)\nfrom grandchallenge.reader_studies.models import Answer, Question, ReaderStudy\nfrom grandchallenge.reader_studies.serializers import (\n AnswerSerializer,\n QuestionSerializer,\n ReaderStudySerializer,\n)\n\n\nclass ReaderStudyList(LoginRequiredMixin, PermissionListMixin, ListView):\n model = ReaderStudy\n permission_required = (\n f\"{ReaderStudy._meta.app_label}.view_{ReaderStudy._meta.model_name}\"\n )\n\n\nclass ReaderStudyCreate(\n LoginRequiredMixin, PermissionRequiredMixin, CreateView\n):\n model = ReaderStudy\n form_class = ReaderStudyCreateForm\n permission_required = (\n f\"{ReaderStudy._meta.app_label}.add_{ReaderStudy._meta.model_name}\"\n )\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs.update({\"user\": self.request.user})\n return kwargs\n\n def form_valid(self, form):\n response = super().form_valid(form)\n self.object.add_editor(self.request.user)\n return response\n\n\nclass ReaderStudyDetail(\n LoginRequiredMixin, ObjectPermissionRequiredMixin, DetailView\n):\n model = ReaderStudy\n permission_required = (\n f\"{ReaderStudy._meta.app_label}.view_{ReaderStudy._meta.model_name}\"\n )\n raise_exception = True\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n change_perm = f\"change_{ReaderStudy._meta.model_name}\"\n if change_perm in get_perms(self.request.user, self.object):\n readers = [\n {\n \"obj\": reader,\n \"progress\": self.object.get_progress_for_user(reader),\n }\n for reader in self.object.readers_group.user_set.all()\n ]\n context.update({\"readers\": readers})\n else:\n user_progress = self.object.get_progress_for_user(\n self.request.user\n )\n context.update({\"progress\": user_progress})\n context.update(\n {\"user_is_reader\": self.object.is_reader(user=self.request.user)}\n )\n return context\n\n\nclass ReaderStudyUpdate(\n LoginRequiredMixin, ObjectPermissionRequiredMixin, UpdateView\n):\n model = ReaderStudy\n form_class = ReaderStudyUpdateForm\n permission_required = (\n f\"{ReaderStudy._meta.app_label}.change_{ReaderStudy._meta.model_name}\"\n )\n raise_exception = True\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs.update({\"user\": self.request.user})\n return kwargs\n\n\nclass QuestionUpdate(\n LoginRequiredMixin, ObjectPermissionRequiredMixin, UpdateView\n):\n model = Question\n form_class = QuestionForm\n template_name = \"reader_studies/readerstudy_update_object.html\"\n permission_required = (\n 
f\"{ReaderStudy._meta.app_label}.change_{ReaderStudy._meta.model_name}\"\n )\n raise_exception = True\n\n def get_permission_object(self):\n return self.reader_study\n\n @property\n def reader_study(self):\n return ReaderStudy.objects.get(slug=self.kwargs[\"slug\"])\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n form_fields = context[\"form\"].fields\n for field_name in self.object.read_only_fields:\n form_fields[field_name].required = False\n form_fields[field_name].disabled = True\n context.update({\"reader_study\": self.reader_study})\n return context\n\n def get_success_url(self):\n return self.object.reader_study.get_absolute_url()\n\n\nclass AddObjectToReaderStudyMixin(\n LoginRequiredMixin, ObjectPermissionRequiredMixin, CreateView\n):\n \"\"\"\n Mixin that adds an object that has a foreign key to a reader study and a\n creator. The url to this view must include a slug that points to the slug\n of the reader study.\n\n Must be placed to the left of ObjectPermissionRequiredMixin.\n \"\"\"\n\n permission_required = (\n f\"{ReaderStudy._meta.app_label}.change_{ReaderStudy._meta.model_name}\"\n )\n raise_exception = True\n\n def get_permission_object(self):\n return self.reader_study\n\n @property\n def reader_study(self):\n return ReaderStudy.objects.get(slug=self.kwargs[\"slug\"])\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context.update(\n {\"object\": self.reader_study, \"type_to_add\": self.type_to_add}\n )\n return context\n\n def form_valid(self, form):\n form.instance.creator = self.request.user\n form.instance.reader_study = self.reader_study\n return super().form_valid(form)\n\n def get_success_url(self):\n return self.object.reader_study.get_absolute_url()\n\n\nclass AddImagesToReaderStudy(AddObjectToReaderStudyMixin):\n model = RawImageUploadSession\n form_class = UploadRawImagesForm\n template_name = \"reader_studies/readerstudy_add_object.html\"\n type_to_add = \"images\"\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs.update({\"user\": self.request.user})\n return kwargs\n\n\nclass AddQuestionToReaderStudy(AddObjectToReaderStudyMixin):\n model = Question\n form_class = QuestionForm\n template_name = \"reader_studies/readerstudy_add_object.html\"\n type_to_add = \"question\"\n\n\nclass ReaderStudyUserAutocomplete(\n LoginRequiredMixin, UserPassesTestMixin, autocomplete.Select2QuerySetView\n):\n def test_func(self):\n group_pks = (\n ReaderStudy.objects.all()\n .select_related(\"editors_group\")\n .values_list(\"editors_group__pk\", flat=True)\n )\n return (\n self.request.user.is_superuser\n or self.request.user.groups.filter(pk__in=group_pks).exists()\n )\n\n def get_queryset(self):\n qs = (\n get_user_model()\n .objects.all()\n .order_by(\"username\")\n .exclude(username=settings.ANONYMOUS_USER_NAME)\n )\n\n if self.q:\n qs = qs.filter(username__istartswith=self.q)\n\n return qs\n\n\nclass ReaderStudyUserGroupUpdateMixin(\n LoginRequiredMixin,\n ObjectPermissionRequiredMixin,\n SuccessMessageMixin,\n FormView,\n):\n template_name = \"reader_studies/readerstudy_user_groups_form.html\"\n permission_required = (\n f\"{ReaderStudy._meta.app_label}.change_{ReaderStudy._meta.model_name}\"\n )\n raise_exception = True\n\n def get_permission_object(self):\n return self.reader_study\n\n @property\n def reader_study(self):\n return ReaderStudy.objects.get(slug=self.kwargs[\"slug\"])\n\n def get_context_data(self, **kwargs):\n context = 
super().get_context_data(**kwargs)\n context.update(\n {\"object\": self.reader_study, \"role\": self.get_form().role}\n )\n return context\n\n def get_success_url(self):\n return self.reader_study.get_absolute_url()\n\n def form_valid(self, form):\n form.add_or_remove_user(reader_study=self.reader_study)\n return super().form_valid(form)\n\n\nclass EditorsUpdate(ReaderStudyUserGroupUpdateMixin):\n form_class = EditorsForm\n success_message = \"Editors successfully updated\"\n\n\nclass ReadersUpdate(ReaderStudyUserGroupUpdateMixin):\n form_class = ReadersForm\n success_message = \"Readers successfully updated\"\n\n\nclass ExportCSVMixin(object):\n def _create_dicts(self, headers, data):\n return map(lambda x: dict(zip(headers, x)), data)\n\n def _preprocess_data(self, data):\n processed = []\n for entry in data:\n processed.append(\n map(lambda x: re.sub(r\"[\\n\\r\\t]\", \" \", str(x)), entry)\n )\n return processed\n\n def _create_csv_response(self, data, headers, filename=\"export.csv\"):\n response = HttpResponse(content_type=\"text/csv\")\n response[\"Content-Disposition\"] = f'attachment; filename=\"{filename}\"'\n writer = csv.DictWriter(\n response,\n quoting=csv.QUOTE_ALL,\n escapechar=\"\\\\\",\n fieldnames=headers,\n )\n writer.writeheader()\n csv_dict = self._create_dicts(headers, self._preprocess_data(data))\n writer.writerows(csv_dict)\n\n return response\n\n\nclass ReaderStudyViewSet(ExportCSVMixin, ReadOnlyModelViewSet):\n serializer_class = ReaderStudySerializer\n queryset = ReaderStudy.objects.all().prefetch_related(\n \"images\", \"questions\"\n )\n permission_classes = [DjangoObjectOnlyPermissions]\n filter_backends = [ObjectPermissionsFilter]\n change_permission = (\n f\"{ReaderStudy._meta.app_label}.change_{ReaderStudy._meta.model_name}\"\n )\n\n def _check_change_perms(self, user, obj):\n if not (user and user.has_perm(self.change_permission, obj)):\n raise Http404()\n\n @action(detail=True)\n def export_answers(self, request, pk=None):\n reader_study = self.get_object()\n self._check_change_perms(request.user, reader_study)\n\n data = [\n answer.csv_values\n for answer in Answer.objects.select_related(\n \"question__reader_study\"\n )\n .select_related(\"creator\")\n .prefetch_related(\"images\")\n .filter(question__reader_study=reader_study)\n ]\n\n return self._create_csv_response(\n data,\n Answer.csv_headers,\n filename=f\"{reader_study.slug}-answers.csv\",\n )\n\n @action(detail=True, methods=[\"patch\"])\n def generate_hanging_list(self, request, pk=None):\n reader_study = self.get_object()\n reader_study.generate_hanging_list()\n messages.add_message(\n request, messages.SUCCESS, \"Hanging list re-generated.\"\n )\n return Response({\"status\": \"Hanging list generated.\"},)\n\n\nclass QuestionViewSet(ReadOnlyModelViewSet):\n serializer_class = QuestionSerializer\n queryset = Question.objects.all().select_related(\"reader_study\")\n permission_classes = [DjangoObjectPermissions]\n filter_backends = [ObjectPermissionsFilter]\n\n\nclass AnswerViewSet(\n CreateModelMixin, RetrieveModelMixin, ListModelMixin, GenericViewSet\n):\n serializer_class = AnswerSerializer\n queryset = (\n Answer.objects.all()\n .select_related(\"creator\")\n .prefetch_related(\"images\")\n )\n permission_classes = [DjangoObjectPermissions]\n filter_backends = [ObjectPermissionsFilter]\n\n def perform_create(self, serializer):\n serializer.save(creator=self.request.user)\n\n @action(detail=False)\n def mine(self, request):\n \"\"\"\n An endpoint that returns the questions that have been 
answered by\n the current user.\n \"\"\"\n queryset = self.filter_queryset(\n self.get_queryset().filter(creator=request.user)\n )\n\n page = self.paginate_queryset(queryset)\n if page is not None:\n serializer = self.get_serializer(page, many=True)\n return self.get_paginated_response(serializer.data)\n\n serializer = self.get_serializer(queryset, many=True)\n return Response(serializer.data)\n", "path": "app/grandchallenge/reader_studies/views.py"}]}
4,083
121
gh_patches_debug_17421
rasdani/github-patches
git_diff
Lightning-Universe__lightning-flash-1094
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Is there a way to seed experiments? ## ❓ Questions and Help #### What is your question? Is there a way to seed experiments? Attempts at using `seed_everything` from pytorch lightning do not appear to work (also with the the workers argument set to True). #### What's your environment? - OS: Linux - Packaging: pip - Version: 0.5.2 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `flash/image/segmentation/input.py` Content: ``` 1 # Copyright The PyTorch Lightning team. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 import os 15 from typing import Any, Dict, List, Optional, Tuple, TYPE_CHECKING, Union 16 17 import torch 18 from pytorch_lightning.utilities import rank_zero_warn 19 20 from flash.core.data.io.input import DataKeys, ImageLabelsMap, Input 21 from flash.core.data.utilities.paths import filter_valid_files, PATH_TYPE 22 from flash.core.data.utilities.samples import to_samples 23 from flash.core.data.utils import image_default_loader 24 from flash.core.integrations.fiftyone.utils import FiftyOneLabelUtilities 25 from flash.core.utilities.imports import _FIFTYONE_AVAILABLE, _TORCHVISION_AVAILABLE, lazy_import 26 from flash.image.data import ImageDeserializer, IMG_EXTENSIONS 27 from flash.image.segmentation.output import SegmentationLabelsOutput 28 29 SampleCollection = None 30 if _FIFTYONE_AVAILABLE: 31 fo = lazy_import("fiftyone") 32 if TYPE_CHECKING: 33 from fiftyone.core.collections import SampleCollection 34 else: 35 fo = None 36 37 if _TORCHVISION_AVAILABLE: 38 import torchvision 39 import torchvision.transforms.functional as FT 40 41 42 class SemanticSegmentationInput(Input): 43 def load_labels_map( 44 self, num_classes: Optional[int] = None, labels_map: Optional[Dict[int, Tuple[int, int, int]]] = None 45 ) -> None: 46 if num_classes is not None: 47 self.num_classes = num_classes 48 labels_map = labels_map or SegmentationLabelsOutput.create_random_labels_map(num_classes) 49 50 if labels_map is not None: 51 self.set_state(ImageLabelsMap(labels_map)) 52 self.labels_map = labels_map 53 54 def load_sample(self, sample: Dict[str, Any]) -> Dict[str, Any]: 55 sample[DataKeys.INPUT] = sample[DataKeys.INPUT].float() 56 if DataKeys.TARGET in sample: 57 sample[DataKeys.TARGET] = sample[DataKeys.TARGET].float() 58 sample[DataKeys.METADATA] = {"size": sample[DataKeys.INPUT].shape[-2:]} 59 return sample 60 61 62 class SemanticSegmentationTensorInput(SemanticSegmentationInput): 63 def load_data( 64 self, 65 tensor: Any, 66 masks: Any = None, 67 num_classes: Optional[int] = None, 68 labels_map: Optional[Dict[int, Tuple[int, int, int]]] = None, 69 ) -> List[Dict[str, Any]]: 70 self.load_labels_map(num_classes, labels_map) 71 return to_samples(tensor, masks) 72 73 74 class SemanticSegmentationNumpyInput(SemanticSegmentationInput): 75 def load_data( 76 self, 77 array: Any, 78 
masks: Any = None, 79 num_classes: Optional[int] = None, 80 labels_map: Optional[Dict[int, Tuple[int, int, int]]] = None, 81 ) -> List[Dict[str, Any]]: 82 self.load_labels_map(num_classes, labels_map) 83 return to_samples(array, masks) 84 85 def load_sample(self, sample: Dict[str, Any]) -> Dict[str, Any]: 86 sample[DataKeys.INPUT] = torch.from_numpy(sample[DataKeys.INPUT]) 87 if DataKeys.TARGET in sample: 88 sample[DataKeys.TARGET] = torch.from_numpy(sample[DataKeys.TARGET]) 89 return super().load_sample(sample) 90 91 92 class SemanticSegmentationFilesInput(SemanticSegmentationInput): 93 def load_data( 94 self, 95 files: Union[PATH_TYPE, List[PATH_TYPE]], 96 mask_files: Optional[Union[PATH_TYPE, List[PATH_TYPE]]] = None, 97 num_classes: Optional[int] = None, 98 labels_map: Optional[Dict[int, Tuple[int, int, int]]] = None, 99 ) -> List[Dict[str, Any]]: 100 self.load_labels_map(num_classes, labels_map) 101 if mask_files is None: 102 files = filter_valid_files(files, valid_extensions=IMG_EXTENSIONS) 103 else: 104 files, masks = filter_valid_files(files, mask_files, valid_extensions=IMG_EXTENSIONS) 105 return to_samples(files, mask_files) 106 107 def load_sample(self, sample: Dict[str, Any]) -> Dict[str, Any]: 108 filepath = sample[DataKeys.INPUT] 109 sample[DataKeys.INPUT] = FT.to_tensor(image_default_loader(filepath)) 110 if DataKeys.TARGET in sample: 111 sample[DataKeys.TARGET] = torchvision.io.read_image(sample[DataKeys.TARGET])[0] 112 sample = super().load_sample(sample) 113 sample[DataKeys.METADATA]["filepath"] = filepath 114 return sample 115 116 117 class SemanticSegmentationFolderInput(SemanticSegmentationFilesInput): 118 def load_data( 119 self, 120 folder: PATH_TYPE, 121 mask_folder: Optional[PATH_TYPE] = None, 122 num_classes: Optional[int] = None, 123 labels_map: Optional[Dict[int, Tuple[int, int, int]]] = None, 124 ) -> List[Dict[str, Any]]: 125 self.load_labels_map(num_classes, labels_map) 126 files = os.listdir(folder) 127 if mask_folder is not None: 128 mask_files = os.listdir(mask_folder) 129 130 all_files = set(files).intersection(set(mask_files)) 131 if len(all_files) != len(files) or len(all_files) != len(mask_files): 132 rank_zero_warn( 133 f"Found inconsistent files in input folder: {folder} and mask folder: {mask_folder}. 
Some files" 134 " have been dropped.", 135 UserWarning, 136 ) 137 138 files = [os.path.join(folder, file) for file in all_files] 139 mask_files = [os.path.join(mask_folder, file) for file in all_files] 140 return super().load_data(files, mask_files) 141 return super().load_data(files) 142 143 144 class SemanticSegmentationFiftyOneInput(SemanticSegmentationFilesInput): 145 def load_data( 146 self, 147 sample_collection: SampleCollection, 148 label_field: str = "ground_truth", 149 num_classes: Optional[int] = None, 150 labels_map: Optional[Dict[int, Tuple[int, int, int]]] = None, 151 ) -> List[Dict[str, Any]]: 152 self.load_labels_map(num_classes, labels_map) 153 154 self.label_field = label_field 155 label_utilities = FiftyOneLabelUtilities(label_field, fo.Segmentation) 156 label_utilities.validate(sample_collection) 157 158 self._fo_dataset_name = sample_collection.name 159 return to_samples(sample_collection.values("filepath")) 160 161 def load_sample(self, sample: Dict[str, Any]) -> Dict[str, Any]: 162 filepath = sample[DataKeys.INPUT] 163 sample = super().load_sample(sample) 164 if not self.predicting: 165 fo_dataset = fo.load_dataset(self._fo_dataset_name) 166 fo_sample = fo_dataset[filepath] 167 sample[DataKeys.TARGET] = torch.from_numpy(fo_sample[self.label_field].mask).float() # H x W 168 return sample 169 170 171 class SemanticSegmentationDeserializer(ImageDeserializer): 172 def serve_load_sample(self, data: str) -> Dict[str, Any]: 173 result = super().serve_load_sample(data) 174 result[DataKeys.INPUT] = FT.to_tensor(result[DataKeys.INPUT]) 175 result[DataKeys.METADATA] = {"size": result[DataKeys.INPUT].shape[-2:]} 176 return result 177 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/flash/image/segmentation/input.py b/flash/image/segmentation/input.py --- a/flash/image/segmentation/input.py +++ b/flash/image/segmentation/input.py @@ -124,6 +124,7 @@ ) -> List[Dict[str, Any]]: self.load_labels_map(num_classes, labels_map) files = os.listdir(folder) + files.sort() if mask_folder is not None: mask_files = os.listdir(mask_folder) @@ -137,6 +138,8 @@ files = [os.path.join(folder, file) for file in all_files] mask_files = [os.path.join(mask_folder, file) for file in all_files] + files.sort() + mask_files.sort() return super().load_data(files, mask_files) return super().load_data(files)
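The fix above works because `os.listdir` gives no ordering guarantee, so the sample order, and with it the pairing of images to masks, could change from run to run no matter what `seed_everything` set; sorting both listings removes that source of non-determinism. A minimal sketch of the same idea, with hypothetical folder arguments:

```python
# Minimal sketch of the idea behind the fix (the folder arguments here are
# hypothetical): os.listdir gives no ordering guarantee, so sorting is what
# makes the image/mask pairing reproducible across seeded runs.
import os

def list_image_mask_pairs(image_folder, mask_folder):
    images = set(os.listdir(image_folder))
    masks = set(os.listdir(mask_folder))
    common = sorted(images & masks)          # deterministic, paired order
    image_paths = [os.path.join(image_folder, name) for name in common]
    mask_paths = [os.path.join(mask_folder, name) for name in common]
    return image_paths, mask_paths
```

Sorting the shared basenames once and building both path lists from them is equivalent to the separate `sort()` calls in the diff, because the folder prefix is constant within each list, so images and masks stay aligned index by index.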
{"golden_diff": "diff --git a/flash/image/segmentation/input.py b/flash/image/segmentation/input.py\n--- a/flash/image/segmentation/input.py\n+++ b/flash/image/segmentation/input.py\n@@ -124,6 +124,7 @@\n ) -> List[Dict[str, Any]]:\n self.load_labels_map(num_classes, labels_map)\n files = os.listdir(folder)\n+ files.sort()\n if mask_folder is not None:\n mask_files = os.listdir(mask_folder)\n \n@@ -137,6 +138,8 @@\n \n files = [os.path.join(folder, file) for file in all_files]\n mask_files = [os.path.join(mask_folder, file) for file in all_files]\n+ files.sort()\n+ mask_files.sort()\n return super().load_data(files, mask_files)\n return super().load_data(files)\n", "issue": "Is there a way to seed experiments?\n## \u2753 Questions and Help\r\n\r\n#### What is your question?\r\nIs there a way to seed experiments? Attempts at using `seed_everything` from pytorch lightning do not appear to work (also with the the workers argument set to True).\r\n\r\n#### What's your environment?\r\n\r\n - OS: Linux\r\n - Packaging: pip\r\n - Version: 0.5.2\r\n\n", "before_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\nfrom typing import Any, Dict, List, Optional, Tuple, TYPE_CHECKING, Union\n\nimport torch\nfrom pytorch_lightning.utilities import rank_zero_warn\n\nfrom flash.core.data.io.input import DataKeys, ImageLabelsMap, Input\nfrom flash.core.data.utilities.paths import filter_valid_files, PATH_TYPE\nfrom flash.core.data.utilities.samples import to_samples\nfrom flash.core.data.utils import image_default_loader\nfrom flash.core.integrations.fiftyone.utils import FiftyOneLabelUtilities\nfrom flash.core.utilities.imports import _FIFTYONE_AVAILABLE, _TORCHVISION_AVAILABLE, lazy_import\nfrom flash.image.data import ImageDeserializer, IMG_EXTENSIONS\nfrom flash.image.segmentation.output import SegmentationLabelsOutput\n\nSampleCollection = None\nif _FIFTYONE_AVAILABLE:\n fo = lazy_import(\"fiftyone\")\n if TYPE_CHECKING:\n from fiftyone.core.collections import SampleCollection\nelse:\n fo = None\n\nif _TORCHVISION_AVAILABLE:\n import torchvision\n import torchvision.transforms.functional as FT\n\n\nclass SemanticSegmentationInput(Input):\n def load_labels_map(\n self, num_classes: Optional[int] = None, labels_map: Optional[Dict[int, Tuple[int, int, int]]] = None\n ) -> None:\n if num_classes is not None:\n self.num_classes = num_classes\n labels_map = labels_map or SegmentationLabelsOutput.create_random_labels_map(num_classes)\n\n if labels_map is not None:\n self.set_state(ImageLabelsMap(labels_map))\n self.labels_map = labels_map\n\n def load_sample(self, sample: Dict[str, Any]) -> Dict[str, Any]:\n sample[DataKeys.INPUT] = sample[DataKeys.INPUT].float()\n if DataKeys.TARGET in sample:\n sample[DataKeys.TARGET] = sample[DataKeys.TARGET].float()\n sample[DataKeys.METADATA] = {\"size\": sample[DataKeys.INPUT].shape[-2:]}\n return sample\n\n\nclass SemanticSegmentationTensorInput(SemanticSegmentationInput):\n def load_data(\n self,\n 
tensor: Any,\n masks: Any = None,\n num_classes: Optional[int] = None,\n labels_map: Optional[Dict[int, Tuple[int, int, int]]] = None,\n ) -> List[Dict[str, Any]]:\n self.load_labels_map(num_classes, labels_map)\n return to_samples(tensor, masks)\n\n\nclass SemanticSegmentationNumpyInput(SemanticSegmentationInput):\n def load_data(\n self,\n array: Any,\n masks: Any = None,\n num_classes: Optional[int] = None,\n labels_map: Optional[Dict[int, Tuple[int, int, int]]] = None,\n ) -> List[Dict[str, Any]]:\n self.load_labels_map(num_classes, labels_map)\n return to_samples(array, masks)\n\n def load_sample(self, sample: Dict[str, Any]) -> Dict[str, Any]:\n sample[DataKeys.INPUT] = torch.from_numpy(sample[DataKeys.INPUT])\n if DataKeys.TARGET in sample:\n sample[DataKeys.TARGET] = torch.from_numpy(sample[DataKeys.TARGET])\n return super().load_sample(sample)\n\n\nclass SemanticSegmentationFilesInput(SemanticSegmentationInput):\n def load_data(\n self,\n files: Union[PATH_TYPE, List[PATH_TYPE]],\n mask_files: Optional[Union[PATH_TYPE, List[PATH_TYPE]]] = None,\n num_classes: Optional[int] = None,\n labels_map: Optional[Dict[int, Tuple[int, int, int]]] = None,\n ) -> List[Dict[str, Any]]:\n self.load_labels_map(num_classes, labels_map)\n if mask_files is None:\n files = filter_valid_files(files, valid_extensions=IMG_EXTENSIONS)\n else:\n files, masks = filter_valid_files(files, mask_files, valid_extensions=IMG_EXTENSIONS)\n return to_samples(files, mask_files)\n\n def load_sample(self, sample: Dict[str, Any]) -> Dict[str, Any]:\n filepath = sample[DataKeys.INPUT]\n sample[DataKeys.INPUT] = FT.to_tensor(image_default_loader(filepath))\n if DataKeys.TARGET in sample:\n sample[DataKeys.TARGET] = torchvision.io.read_image(sample[DataKeys.TARGET])[0]\n sample = super().load_sample(sample)\n sample[DataKeys.METADATA][\"filepath\"] = filepath\n return sample\n\n\nclass SemanticSegmentationFolderInput(SemanticSegmentationFilesInput):\n def load_data(\n self,\n folder: PATH_TYPE,\n mask_folder: Optional[PATH_TYPE] = None,\n num_classes: Optional[int] = None,\n labels_map: Optional[Dict[int, Tuple[int, int, int]]] = None,\n ) -> List[Dict[str, Any]]:\n self.load_labels_map(num_classes, labels_map)\n files = os.listdir(folder)\n if mask_folder is not None:\n mask_files = os.listdir(mask_folder)\n\n all_files = set(files).intersection(set(mask_files))\n if len(all_files) != len(files) or len(all_files) != len(mask_files):\n rank_zero_warn(\n f\"Found inconsistent files in input folder: {folder} and mask folder: {mask_folder}. 
Some files\"\n \" have been dropped.\",\n UserWarning,\n )\n\n files = [os.path.join(folder, file) for file in all_files]\n mask_files = [os.path.join(mask_folder, file) for file in all_files]\n return super().load_data(files, mask_files)\n return super().load_data(files)\n\n\nclass SemanticSegmentationFiftyOneInput(SemanticSegmentationFilesInput):\n def load_data(\n self,\n sample_collection: SampleCollection,\n label_field: str = \"ground_truth\",\n num_classes: Optional[int] = None,\n labels_map: Optional[Dict[int, Tuple[int, int, int]]] = None,\n ) -> List[Dict[str, Any]]:\n self.load_labels_map(num_classes, labels_map)\n\n self.label_field = label_field\n label_utilities = FiftyOneLabelUtilities(label_field, fo.Segmentation)\n label_utilities.validate(sample_collection)\n\n self._fo_dataset_name = sample_collection.name\n return to_samples(sample_collection.values(\"filepath\"))\n\n def load_sample(self, sample: Dict[str, Any]) -> Dict[str, Any]:\n filepath = sample[DataKeys.INPUT]\n sample = super().load_sample(sample)\n if not self.predicting:\n fo_dataset = fo.load_dataset(self._fo_dataset_name)\n fo_sample = fo_dataset[filepath]\n sample[DataKeys.TARGET] = torch.from_numpy(fo_sample[self.label_field].mask).float() # H x W\n return sample\n\n\nclass SemanticSegmentationDeserializer(ImageDeserializer):\n def serve_load_sample(self, data: str) -> Dict[str, Any]:\n result = super().serve_load_sample(data)\n result[DataKeys.INPUT] = FT.to_tensor(result[DataKeys.INPUT])\n result[DataKeys.METADATA] = {\"size\": result[DataKeys.INPUT].shape[-2:]}\n return result\n", "path": "flash/image/segmentation/input.py"}], "after_files": [{"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\nfrom typing import Any, Dict, List, Optional, Tuple, TYPE_CHECKING, Union\n\nimport torch\nfrom pytorch_lightning.utilities import rank_zero_warn\n\nfrom flash.core.data.io.input import DataKeys, ImageLabelsMap, Input\nfrom flash.core.data.utilities.paths import filter_valid_files, PATH_TYPE\nfrom flash.core.data.utilities.samples import to_samples\nfrom flash.core.data.utils import image_default_loader\nfrom flash.core.integrations.fiftyone.utils import FiftyOneLabelUtilities\nfrom flash.core.utilities.imports import _FIFTYONE_AVAILABLE, _TORCHVISION_AVAILABLE, lazy_import\nfrom flash.image.data import ImageDeserializer, IMG_EXTENSIONS\nfrom flash.image.segmentation.output import SegmentationLabelsOutput\n\nSampleCollection = None\nif _FIFTYONE_AVAILABLE:\n fo = lazy_import(\"fiftyone\")\n if TYPE_CHECKING:\n from fiftyone.core.collections import SampleCollection\nelse:\n fo = None\n\nif _TORCHVISION_AVAILABLE:\n import torchvision\n import torchvision.transforms.functional as FT\n\n\nclass SemanticSegmentationInput(Input):\n def load_labels_map(\n self, num_classes: Optional[int] = None, labels_map: Optional[Dict[int, Tuple[int, int, int]]] = None\n ) -> None:\n if num_classes is not None:\n self.num_classes = num_classes\n labels_map = labels_map or 
SegmentationLabelsOutput.create_random_labels_map(num_classes)\n\n if labels_map is not None:\n self.set_state(ImageLabelsMap(labels_map))\n self.labels_map = labels_map\n\n def load_sample(self, sample: Dict[str, Any]) -> Dict[str, Any]:\n sample[DataKeys.INPUT] = sample[DataKeys.INPUT].float()\n if DataKeys.TARGET in sample:\n sample[DataKeys.TARGET] = sample[DataKeys.TARGET].float()\n sample[DataKeys.METADATA] = {\"size\": sample[DataKeys.INPUT].shape[-2:]}\n return sample\n\n\nclass SemanticSegmentationTensorInput(SemanticSegmentationInput):\n def load_data(\n self,\n tensor: Any,\n masks: Any = None,\n num_classes: Optional[int] = None,\n labels_map: Optional[Dict[int, Tuple[int, int, int]]] = None,\n ) -> List[Dict[str, Any]]:\n self.load_labels_map(num_classes, labels_map)\n return to_samples(tensor, masks)\n\n\nclass SemanticSegmentationNumpyInput(SemanticSegmentationInput):\n def load_data(\n self,\n array: Any,\n masks: Any = None,\n num_classes: Optional[int] = None,\n labels_map: Optional[Dict[int, Tuple[int, int, int]]] = None,\n ) -> List[Dict[str, Any]]:\n self.load_labels_map(num_classes, labels_map)\n return to_samples(array, masks)\n\n def load_sample(self, sample: Dict[str, Any]) -> Dict[str, Any]:\n sample[DataKeys.INPUT] = torch.from_numpy(sample[DataKeys.INPUT])\n if DataKeys.TARGET in sample:\n sample[DataKeys.TARGET] = torch.from_numpy(sample[DataKeys.TARGET])\n return super().load_sample(sample)\n\n\nclass SemanticSegmentationFilesInput(SemanticSegmentationInput):\n def load_data(\n self,\n files: Union[PATH_TYPE, List[PATH_TYPE]],\n mask_files: Optional[Union[PATH_TYPE, List[PATH_TYPE]]] = None,\n num_classes: Optional[int] = None,\n labels_map: Optional[Dict[int, Tuple[int, int, int]]] = None,\n ) -> List[Dict[str, Any]]:\n self.load_labels_map(num_classes, labels_map)\n if mask_files is None:\n files = filter_valid_files(files, valid_extensions=IMG_EXTENSIONS)\n else:\n files, masks = filter_valid_files(files, mask_files, valid_extensions=IMG_EXTENSIONS)\n return to_samples(files, mask_files)\n\n def load_sample(self, sample: Dict[str, Any]) -> Dict[str, Any]:\n filepath = sample[DataKeys.INPUT]\n sample[DataKeys.INPUT] = FT.to_tensor(image_default_loader(filepath))\n if DataKeys.TARGET in sample:\n sample[DataKeys.TARGET] = torchvision.io.read_image(sample[DataKeys.TARGET])[0]\n sample = super().load_sample(sample)\n sample[DataKeys.METADATA][\"filepath\"] = filepath\n return sample\n\n\nclass SemanticSegmentationFolderInput(SemanticSegmentationFilesInput):\n def load_data(\n self,\n folder: PATH_TYPE,\n mask_folder: Optional[PATH_TYPE] = None,\n num_classes: Optional[int] = None,\n labels_map: Optional[Dict[int, Tuple[int, int, int]]] = None,\n ) -> List[Dict[str, Any]]:\n self.load_labels_map(num_classes, labels_map)\n files = os.listdir(folder)\n files.sort()\n if mask_folder is not None:\n mask_files = os.listdir(mask_folder)\n\n all_files = set(files).intersection(set(mask_files))\n if len(all_files) != len(files) or len(all_files) != len(mask_files):\n rank_zero_warn(\n f\"Found inconsistent files in input folder: {folder} and mask folder: {mask_folder}. 
Some files\"\n \" have been dropped.\",\n UserWarning,\n )\n\n files = [os.path.join(folder, file) for file in all_files]\n mask_files = [os.path.join(mask_folder, file) for file in all_files]\n files.sort()\n mask_files.sort()\n return super().load_data(files, mask_files)\n return super().load_data(files)\n\n\nclass SemanticSegmentationFiftyOneInput(SemanticSegmentationFilesInput):\n def load_data(\n self,\n sample_collection: SampleCollection,\n label_field: str = \"ground_truth\",\n num_classes: Optional[int] = None,\n labels_map: Optional[Dict[int, Tuple[int, int, int]]] = None,\n ) -> List[Dict[str, Any]]:\n self.load_labels_map(num_classes, labels_map)\n\n self.label_field = label_field\n label_utilities = FiftyOneLabelUtilities(label_field, fo.Segmentation)\n label_utilities.validate(sample_collection)\n\n self._fo_dataset_name = sample_collection.name\n return to_samples(sample_collection.values(\"filepath\"))\n\n def load_sample(self, sample: Dict[str, Any]) -> Dict[str, Any]:\n filepath = sample[DataKeys.INPUT]\n sample = super().load_sample(sample)\n if not self.predicting:\n fo_dataset = fo.load_dataset(self._fo_dataset_name)\n fo_sample = fo_dataset[filepath]\n sample[DataKeys.TARGET] = torch.from_numpy(fo_sample[self.label_field].mask).float() # H x W\n return sample\n\n\nclass SemanticSegmentationDeserializer(ImageDeserializer):\n def serve_load_sample(self, data: str) -> Dict[str, Any]:\n result = super().serve_load_sample(data)\n result[DataKeys.INPUT] = FT.to_tensor(result[DataKeys.INPUT])\n result[DataKeys.METADATA] = {\"size\": result[DataKeys.INPUT].shape[-2:]}\n return result\n", "path": "flash/image/segmentation/input.py"}]}
2,420
190
gh_patches_debug_52175
rasdani/github-patches
git_diff
microsoft__ptvsd-167
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Error reading integer From VS (might not be a ptvsd bug, not sure at this point): Create new python application Add new item, python unit test Set the unit test as startup file F5 Result: ``` --------------------------- Microsoft Visual Studio --------------------------- Error reading integer. Unexpected token: Boolean. Path 'exitCode'. --------------------------- OK --------------------------- ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ptvsd/debugger.py` Content: ``` 1 # Copyright (c) Microsoft Corporation. All rights reserved. 2 # Licensed under the MIT License. See LICENSE in the project root 3 # for license information. 4 5 import sys 6 7 8 __author__ = "Microsoft Corporation <[email protected]>" 9 __version__ = "4.0.0a1" 10 11 DONT_DEBUG = [] 12 13 14 def debug(filename, port_num, debug_id, debug_options, run_as): 15 # TODO: docstring 16 17 # import the wrapper first, so that it gets a chance 18 # to detour pydevd socket functionality. 19 import ptvsd.wrapper 20 import pydevd 21 22 args = [ 23 '--port', str(port_num), 24 '--client', '127.0.0.1', 25 ] 26 if run_as == 'module': 27 args.append('--module') 28 args.extend(('--file', filename + ":")) 29 else: 30 args.extend(('--file', filename)) 31 sys.argv[1:0] = args 32 try: 33 pydevd.main() 34 except SystemExit as ex: 35 ptvsd.wrapper.ptvsd_sys_exit_code = ex.code 36 raise 37 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ptvsd/debugger.py b/ptvsd/debugger.py --- a/ptvsd/debugger.py +++ b/ptvsd/debugger.py @@ -32,5 +32,5 @@ try: pydevd.main() except SystemExit as ex: - ptvsd.wrapper.ptvsd_sys_exit_code = ex.code + ptvsd.wrapper.ptvsd_sys_exit_code = int(ex.code) raise
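The cast matters because `SystemExit.code` is not always an integer: a unit-test startup file typically ends in `unittest.main()`, which calls `sys.exit(not result.wasSuccessful())`, so the exit code can be a bool, and serialising that bool into the integer-typed `exitCode` field is what produces the "Unexpected token: Boolean" error in Visual Studio. A small sketch of the case the patch normalises:

```python
# Sketch of the case the patch normalises: a failing unittest run exits via
# sys.exit(not result.wasSuccessful()), so the SystemExit code can be a bool
# rather than an int. int() maps True/False to 1/0 and leaves ordinary
# integer exit codes unchanged.
try:
    raise SystemExit(True)        # stands in for a failing test run
except SystemExit as ex:
    exit_code = int(ex.code)      # True -> 1, False -> 0
assert exit_code == 1
```

`ex.code` can also be `None` or a string for other programs, which `int()` would not handle, but the bool case is the one behind the error message reported in this issue.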
{"golden_diff": "diff --git a/ptvsd/debugger.py b/ptvsd/debugger.py\n--- a/ptvsd/debugger.py\n+++ b/ptvsd/debugger.py\n@@ -32,5 +32,5 @@\n try:\n pydevd.main()\n except SystemExit as ex:\n- ptvsd.wrapper.ptvsd_sys_exit_code = ex.code\n+ ptvsd.wrapper.ptvsd_sys_exit_code = int(ex.code)\n raise\n", "issue": "Error reading integer\nFrom VS (might not be a ptvsd bug, not sure at this point):\r\nCreate new python application\r\nAdd new item, python unit test\r\nSet the unit test as startup file\r\nF5\r\n\r\nResult:\r\n```\r\n---------------------------\r\nMicrosoft Visual Studio\r\n---------------------------\r\nError reading integer. Unexpected token: Boolean. Path 'exitCode'.\r\n---------------------------\r\nOK \r\n---------------------------\r\n```\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See LICENSE in the project root\n# for license information.\n\nimport sys\n\n\n__author__ = \"Microsoft Corporation <[email protected]>\"\n__version__ = \"4.0.0a1\"\n\nDONT_DEBUG = []\n\n\ndef debug(filename, port_num, debug_id, debug_options, run_as):\n # TODO: docstring\n\n # import the wrapper first, so that it gets a chance\n # to detour pydevd socket functionality.\n import ptvsd.wrapper\n import pydevd\n\n args = [\n '--port', str(port_num),\n '--client', '127.0.0.1',\n ]\n if run_as == 'module':\n args.append('--module')\n args.extend(('--file', filename + \":\"))\n else:\n args.extend(('--file', filename))\n sys.argv[1:0] = args\n try:\n pydevd.main()\n except SystemExit as ex:\n ptvsd.wrapper.ptvsd_sys_exit_code = ex.code\n raise\n", "path": "ptvsd/debugger.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See LICENSE in the project root\n# for license information.\n\nimport sys\n\n\n__author__ = \"Microsoft Corporation <[email protected]>\"\n__version__ = \"4.0.0a1\"\n\nDONT_DEBUG = []\n\n\ndef debug(filename, port_num, debug_id, debug_options, run_as):\n # TODO: docstring\n\n # import the wrapper first, so that it gets a chance\n # to detour pydevd socket functionality.\n import ptvsd.wrapper\n import pydevd\n\n args = [\n '--port', str(port_num),\n '--client', '127.0.0.1',\n ]\n if run_as == 'module':\n args.append('--module')\n args.extend(('--file', filename + \":\"))\n else:\n args.extend(('--file', filename))\n sys.argv[1:0] = args\n try:\n pydevd.main()\n except SystemExit as ex:\n ptvsd.wrapper.ptvsd_sys_exit_code = int(ex.code)\n raise\n", "path": "ptvsd/debugger.py"}]}
654
104
gh_patches_debug_57163
rasdani/github-patches
git_diff
Bitmessage__PyBitmessage-2004
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Adding Protocol specification to docs (WIP) I am slowly formatting Protocol Specification doc. I see some typos and mistakes in the wiki, which I also hope to fix. [A quick preview](https://pybitmessage-test.readthedocs.io/en/doc/protocol.html) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `docs/conf.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 """ 3 Configuration file for the Sphinx documentation builder. 4 5 For a full list of options see the documentation: 6 http://www.sphinx-doc.org/en/master/config 7 """ 8 9 import os 10 import sys 11 12 sys.path.insert(0, os.path.abspath('../src')) 13 14 from importlib import import_module 15 16 import version # noqa:E402 17 18 19 # -- Project information ----------------------------------------------------- 20 21 project = u'PyBitmessage' 22 copyright = u'2019, The Bitmessage Team' # pylint: disable=redefined-builtin 23 author = u'The Bitmessage Team' 24 25 # The short X.Y version 26 version = unicode(version.softwareVersion) 27 28 # The full version, including alpha/beta/rc tags 29 release = version 30 31 # -- General configuration --------------------------------------------------- 32 33 # If your documentation needs a minimal Sphinx version, state it here. 34 # 35 # needs_sphinx = '1.0' 36 37 # Add any Sphinx extension module names here, as strings. They can be 38 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 39 # ones. 40 extensions = [ 41 'sphinx.ext.autodoc', 42 'sphinx.ext.coverage', # FIXME: unused 43 'sphinx.ext.imgmath', # legacy unused 44 'sphinx.ext.intersphinx', 45 'sphinx.ext.linkcode', 46 'sphinx.ext.napoleon', 47 'sphinx.ext.todo', 48 'sphinxcontrib.apidoc', 49 'm2r', 50 ] 51 52 default_role = 'obj' 53 54 # Add any paths that contain templates here, relative to this directory. 55 templates_path = ['_templates'] 56 57 # The suffix(es) of source filenames. 58 # You can specify multiple suffix as a list of string: 59 # 60 source_suffix = ['.rst', '.md'] 61 62 # The master toctree document. 63 master_doc = 'index' 64 65 # The language for content autogenerated by Sphinx. Refer to documentation 66 # for a list of supported languages. 67 # 68 # This is also used if you do content translation via gettext catalogs. 69 # Usually you set "language" from the command line for these cases. 70 # language = None 71 72 # List of patterns, relative to source directory, that match files and 73 # directories to ignore when looking for source files. 74 # This pattern also affects html_static_path and html_extra_path . 75 exclude_patterns = ['_build'] 76 77 # The name of the Pygments (syntax highlighting) style to use. 78 pygments_style = 'sphinx' 79 80 # Don't prepend every class or function name with full module path 81 add_module_names = False 82 83 # A list of ignored prefixes for module index sorting. 84 modindex_common_prefix = ['pybitmessage.'] 85 86 87 # -- Options for HTML output ------------------------------------------------- 88 89 # The theme to use for HTML and HTML Help pages. See the documentation for 90 # a list of builtin themes. 91 # 92 html_theme = 'sphinx_rtd_theme' 93 94 # Theme options are theme-specific and customize the look and feel of a theme 95 # further. For a list of options available for each theme, see the 96 # documentation. 
97 # 98 # html_theme_options = {} 99 100 # Add any paths that contain custom static files (such as style sheets) here, 101 # relative to this directory. They are copied after the builtin static files, 102 # so a file named "default.css" will overwrite the builtin "default.css". 103 html_static_path = ['_static'] 104 105 html_css_files = [ 106 'custom.css', 107 ] 108 109 # Custom sidebar templates, must be a dictionary that maps document names 110 # to template names. 111 # 112 # The default sidebars (for documents that don't match any pattern) are 113 # defined by theme itself. Builtin themes are using these templates by 114 # default: ``['localtoc.html', 'relations.html', 'sourcelink.html', 115 # 'searchbox.html']``. 116 # 117 # html_sidebars = {} 118 119 html_show_sourcelink = False 120 121 # -- Options for HTMLHelp output --------------------------------------------- 122 123 # Output file base name for HTML help builder. 124 htmlhelp_basename = 'PyBitmessagedoc' 125 126 127 # -- Options for LaTeX output ------------------------------------------------ 128 129 latex_elements = { 130 # The paper size ('letterpaper' or 'a4paper'). 131 # 132 # 'papersize': 'letterpaper', 133 134 # The font size ('10pt', '11pt' or '12pt'). 135 # 136 # 'pointsize': '10pt', 137 138 # Additional stuff for the LaTeX preamble. 139 # 140 # 'preamble': '', 141 142 # Latex figure (float) alignment 143 # 144 # 'figure_align': 'htbp', 145 } 146 147 # Grouping the document tree into LaTeX files. List of tuples 148 # (source start file, target name, title, 149 # author, documentclass [howto, manual, or own class]). 150 latex_documents = [ 151 (master_doc, 'PyBitmessage.tex', u'PyBitmessage Documentation', 152 u'The Bitmessage Team', 'manual'), 153 ] 154 155 156 # -- Options for manual page output ------------------------------------------ 157 158 # One entry per manual page. List of tuples 159 # (source start file, name, description, authors, manual section). 160 man_pages = [ 161 (master_doc, 'pybitmessage', u'PyBitmessage Documentation', 162 [author], 1) 163 ] 164 165 166 # -- Options for Texinfo output ---------------------------------------------- 167 168 # Grouping the document tree into Texinfo files. List of tuples 169 # (source start file, target name, title, author, 170 # dir menu entry, description, category) 171 texinfo_documents = [ 172 (master_doc, 'PyBitmessage', u'PyBitmessage Documentation', 173 author, 'PyBitmessage', 'One line description of project.', 174 'Miscellaneous'), 175 ] 176 177 178 # -- Options for Epub output ------------------------------------------------- 179 180 # Bibliographic Dublin Core info. 181 epub_title = project 182 epub_author = author 183 epub_publisher = author 184 epub_copyright = copyright 185 186 # The unique identifier of the text. This can be a ISBN number 187 # or the project homepage. 188 # 189 # epub_identifier = '' 190 191 # A unique identification for the text. 192 # 193 # epub_uid = '' 194 195 # A list of files that should not be packed into the epub file. 
196 epub_exclude_files = ['search.html'] 197 198 199 # -- Extension configuration ------------------------------------------------- 200 201 autodoc_mock_imports = [ 202 'debug', 203 'pybitmessage.bitmessagekivy', 204 'pybitmessage.bitmessageqt.foldertree', 205 'pybitmessage.helper_startup', 206 'pybitmessage.mock', 207 'pybitmessage.network.httpd', 208 'pybitmessage.network.https', 209 'ctypes', 210 'dialog', 211 'gi', 212 'kivy', 213 'logging', 214 'msgpack', 215 'numpy', 216 'pkg_resources', 217 'pycanberra', 218 'pyopencl', 219 'PyQt4', 220 'PyQt5', 221 'qrcode', 222 'stem', 223 'xdg', 224 ] 225 autodoc_member_order = 'bysource' 226 227 # Apidoc settings 228 apidoc_module_dir = '../pybitmessage' 229 apidoc_output_dir = 'autodoc' 230 apidoc_excluded_paths = [ 231 'bitmessagekivy', 'build_osx.py', 232 'bitmessageqt/addressvalidator.py', 'bitmessageqt/foldertree.py', 233 'bitmessageqt/migrationwizard.py', 'bitmessageqt/newaddresswizard.py', 234 'helper_startup.py', 235 'kivymd', 'mock', 'main.py', 'navigationdrawer', 'network/http*', 236 'src', 'tests', 'version.py' 237 ] 238 apidoc_module_first = True 239 apidoc_separate_modules = True 240 apidoc_toc_file = False 241 apidoc_extra_args = ['-a'] 242 243 # Napoleon settings 244 napoleon_google_docstring = True 245 246 247 # linkcode function 248 def linkcode_resolve(domain, info): 249 """This generates source URL's for sphinx.ext.linkcode""" 250 if domain != 'py' or not info['module']: 251 return 252 try: 253 home = os.path.abspath(import_module('pybitmessage').__path__[0]) 254 mod = import_module(info['module']).__file__ 255 except ImportError: 256 return 257 repo = 'https://github.com/Bitmessage/PyBitmessage/blob/v0.6/src%s' 258 path = mod.replace(home, '') 259 if path != mod: 260 # put the link only for top level definitions 261 if len(info['fullname'].split('.')) > 1: 262 return 263 if path.endswith('.pyc'): 264 path = path[:-1] 265 return repo % path 266 267 268 # -- Options for intersphinx extension --------------------------------------- 269 270 # Example configuration for intersphinx: refer to the Python standard library. 271 intersphinx_mapping = {'https://docs.python.org/2.7/': None} 272 273 # -- Options for todo extension ---------------------------------------------- 274 275 # If true, `todo` and `todoList` produce output, else they produce nothing. 276 todo_include_todos = True 277 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/docs/conf.py b/docs/conf.py --- a/docs/conf.py +++ b/docs/conf.py @@ -19,7 +19,7 @@ # -- Project information ----------------------------------------------------- project = u'PyBitmessage' -copyright = u'2019, The Bitmessage Team' # pylint: disable=redefined-builtin +copyright = u'2019-2022, The Bitmessage Team' # pylint: disable=redefined-builtin author = u'The Bitmessage Team' # The short X.Y version
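The diff only widens the copyright range in `docs/conf.py` (the module deliberately shadows the builtin `copyright`, hence the pylint disable in the original line). A hedged sketch of an alternative some Sphinx projects use so the string never goes stale, computing the end year at build time instead of editing it by hand; this is not what the diff above does:

```python
# Hypothetical alternative for docs/conf.py, not what the diff above does:
# derive the end of the copyright range at build time so it never goes stale.
import datetime

_start_year = 2019
_this_year = datetime.date.today().year
copyright = (
    u'%d, The Bitmessage Team' % _start_year
    if _this_year == _start_year
    else u'%d-%d, The Bitmessage Team' % (_start_year, _this_year)
)
```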
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -19,7 +19,7 @@\n # -- Project information -----------------------------------------------------\n \n project = u'PyBitmessage'\n-copyright = u'2019, The Bitmessage Team' # pylint: disable=redefined-builtin\n+copyright = u'2019-2022, The Bitmessage Team' # pylint: disable=redefined-builtin\n author = u'The Bitmessage Team'\n \n # The short X.Y version\n", "issue": "Adding Protocol specification to docs (WIP)\nI am slowly formatting Protocol Specification doc. I see some typos and mistakes in the wiki, which I also hope to fix.\r\n\r\n[A quick preview](https://pybitmessage-test.readthedocs.io/en/doc/protocol.html)\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nConfiguration file for the Sphinx documentation builder.\n\nFor a full list of options see the documentation:\nhttp://www.sphinx-doc.org/en/master/config\n\"\"\"\n\nimport os\nimport sys\n\nsys.path.insert(0, os.path.abspath('../src'))\n\nfrom importlib import import_module\n\nimport version # noqa:E402\n\n\n# -- Project information -----------------------------------------------------\n\nproject = u'PyBitmessage'\ncopyright = u'2019, The Bitmessage Team' # pylint: disable=redefined-builtin\nauthor = u'The Bitmessage Team'\n\n# The short X.Y version\nversion = unicode(version.softwareVersion)\n\n# The full version, including alpha/beta/rc tags\nrelease = version\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.coverage', # FIXME: unused\n 'sphinx.ext.imgmath', # legacy unused\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.linkcode',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.todo',\n 'sphinxcontrib.apidoc',\n 'm2r',\n]\n\ndefault_role = 'obj'\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\nsource_suffix = ['.rst', '.md']\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\n# language = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path .\nexclude_patterns = ['_build']\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# Don't prepend every class or function name with full module path\nadd_module_names = False\n\n# A list of ignored prefixes for module index sorting.\nmodindex_common_prefix = ['pybitmessage.']\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'sphinx_rtd_theme'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. 
For a list of options available for each theme, see the\n# documentation.\n#\n# html_theme_options = {}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\nhtml_css_files = [\n 'custom.css',\n]\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# The default sidebars (for documents that don't match any pattern) are\n# defined by theme itself. Builtin themes are using these templates by\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n# 'searchbox.html']``.\n#\n# html_sidebars = {}\n\nhtml_show_sourcelink = False\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'PyBitmessagedoc'\n\n\n# -- Options for LaTeX output ------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'PyBitmessage.tex', u'PyBitmessage Documentation',\n u'The Bitmessage Team', 'manual'),\n]\n\n\n# -- Options for manual page output ------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'pybitmessage', u'PyBitmessage Documentation',\n [author], 1)\n]\n\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'PyBitmessage', u'PyBitmessage Documentation',\n author, 'PyBitmessage', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n\n# -- Options for Epub output -------------------------------------------------\n\n# Bibliographic Dublin Core info.\nepub_title = project\nepub_author = author\nepub_publisher = author\nepub_copyright = copyright\n\n# The unique identifier of the text. 
This can be a ISBN number\n# or the project homepage.\n#\n# epub_identifier = ''\n\n# A unique identification for the text.\n#\n# epub_uid = ''\n\n# A list of files that should not be packed into the epub file.\nepub_exclude_files = ['search.html']\n\n\n# -- Extension configuration -------------------------------------------------\n\nautodoc_mock_imports = [\n 'debug',\n 'pybitmessage.bitmessagekivy',\n 'pybitmessage.bitmessageqt.foldertree',\n 'pybitmessage.helper_startup',\n 'pybitmessage.mock',\n 'pybitmessage.network.httpd',\n 'pybitmessage.network.https',\n 'ctypes',\n 'dialog',\n 'gi',\n 'kivy',\n 'logging',\n 'msgpack',\n 'numpy',\n 'pkg_resources',\n 'pycanberra',\n 'pyopencl',\n 'PyQt4',\n 'PyQt5',\n 'qrcode',\n 'stem',\n 'xdg',\n]\nautodoc_member_order = 'bysource'\n\n# Apidoc settings\napidoc_module_dir = '../pybitmessage'\napidoc_output_dir = 'autodoc'\napidoc_excluded_paths = [\n 'bitmessagekivy', 'build_osx.py',\n 'bitmessageqt/addressvalidator.py', 'bitmessageqt/foldertree.py',\n 'bitmessageqt/migrationwizard.py', 'bitmessageqt/newaddresswizard.py',\n 'helper_startup.py',\n 'kivymd', 'mock', 'main.py', 'navigationdrawer', 'network/http*',\n 'src', 'tests', 'version.py'\n]\napidoc_module_first = True\napidoc_separate_modules = True\napidoc_toc_file = False\napidoc_extra_args = ['-a']\n\n# Napoleon settings\nnapoleon_google_docstring = True\n\n\n# linkcode function\ndef linkcode_resolve(domain, info):\n \"\"\"This generates source URL's for sphinx.ext.linkcode\"\"\"\n if domain != 'py' or not info['module']:\n return\n try:\n home = os.path.abspath(import_module('pybitmessage').__path__[0])\n mod = import_module(info['module']).__file__\n except ImportError:\n return\n repo = 'https://github.com/Bitmessage/PyBitmessage/blob/v0.6/src%s'\n path = mod.replace(home, '')\n if path != mod:\n # put the link only for top level definitions\n if len(info['fullname'].split('.')) > 1:\n return\n if path.endswith('.pyc'):\n path = path[:-1]\n return repo % path\n\n\n# -- Options for intersphinx extension ---------------------------------------\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {'https://docs.python.org/2.7/': None}\n\n# -- Options for todo extension ----------------------------------------------\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n", "path": "docs/conf.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nConfiguration file for the Sphinx documentation builder.\n\nFor a full list of options see the documentation:\nhttp://www.sphinx-doc.org/en/master/config\n\"\"\"\n\nimport os\nimport sys\n\nsys.path.insert(0, os.path.abspath('../src'))\n\nfrom importlib import import_module\n\nimport version # noqa:E402\n\n\n# -- Project information -----------------------------------------------------\n\nproject = u'PyBitmessage'\ncopyright = u'2019-2022, The Bitmessage Team' # pylint: disable=redefined-builtin\nauthor = u'The Bitmessage Team'\n\n# The short X.Y version\nversion = unicode(version.softwareVersion)\n\n# The full version, including alpha/beta/rc tags\nrelease = version\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.coverage', # FIXME: unused\n 'sphinx.ext.imgmath', # legacy unused\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.linkcode',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.todo',\n 'sphinxcontrib.apidoc',\n 'm2r',\n]\n\ndefault_role = 'obj'\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\nsource_suffix = ['.rst', '.md']\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\n# language = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path .\nexclude_patterns = ['_build']\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# Don't prepend every class or function name with full module path\nadd_module_names = False\n\n# A list of ignored prefixes for module index sorting.\nmodindex_common_prefix = ['pybitmessage.']\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'sphinx_rtd_theme'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\n# html_theme_options = {}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\nhtml_css_files = [\n 'custom.css',\n]\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# The default sidebars (for documents that don't match any pattern) are\n# defined by theme itself. Builtin themes are using these templates by\n# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',\n# 'searchbox.html']``.\n#\n# html_sidebars = {}\n\nhtml_show_sourcelink = False\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'PyBitmessagedoc'\n\n\n# -- Options for LaTeX output ------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. 
List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'PyBitmessage.tex', u'PyBitmessage Documentation',\n u'The Bitmessage Team', 'manual'),\n]\n\n\n# -- Options for manual page output ------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'pybitmessage', u'PyBitmessage Documentation',\n [author], 1)\n]\n\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'PyBitmessage', u'PyBitmessage Documentation',\n author, 'PyBitmessage', 'One line description of project.',\n 'Miscellaneous'),\n]\n\n\n# -- Options for Epub output -------------------------------------------------\n\n# Bibliographic Dublin Core info.\nepub_title = project\nepub_author = author\nepub_publisher = author\nepub_copyright = copyright\n\n# The unique identifier of the text. This can be a ISBN number\n# or the project homepage.\n#\n# epub_identifier = ''\n\n# A unique identification for the text.\n#\n# epub_uid = ''\n\n# A list of files that should not be packed into the epub file.\nepub_exclude_files = ['search.html']\n\n\n# -- Extension configuration -------------------------------------------------\n\nautodoc_mock_imports = [\n 'debug',\n 'pybitmessage.bitmessagekivy',\n 'pybitmessage.bitmessageqt.foldertree',\n 'pybitmessage.helper_startup',\n 'pybitmessage.mock',\n 'pybitmessage.network.httpd',\n 'pybitmessage.network.https',\n 'ctypes',\n 'dialog',\n 'gi',\n 'kivy',\n 'logging',\n 'msgpack',\n 'numpy',\n 'pkg_resources',\n 'pycanberra',\n 'pyopencl',\n 'PyQt4',\n 'PyQt5',\n 'qrcode',\n 'stem',\n 'xdg',\n]\nautodoc_member_order = 'bysource'\n\n# Apidoc settings\napidoc_module_dir = '../pybitmessage'\napidoc_output_dir = 'autodoc'\napidoc_excluded_paths = [\n 'bitmessagekivy', 'build_osx.py',\n 'bitmessageqt/addressvalidator.py', 'bitmessageqt/foldertree.py',\n 'bitmessageqt/migrationwizard.py', 'bitmessageqt/newaddresswizard.py',\n 'helper_startup.py',\n 'kivymd', 'mock', 'main.py', 'navigationdrawer', 'network/http*',\n 'src', 'tests', 'version.py'\n]\napidoc_module_first = True\napidoc_separate_modules = True\napidoc_toc_file = False\napidoc_extra_args = ['-a']\n\n# Napoleon settings\nnapoleon_google_docstring = True\n\n\n# linkcode function\ndef linkcode_resolve(domain, info):\n \"\"\"This generates source URL's for sphinx.ext.linkcode\"\"\"\n if domain != 'py' or not info['module']:\n return\n try:\n home = os.path.abspath(import_module('pybitmessage').__path__[0])\n mod = import_module(info['module']).__file__\n except ImportError:\n return\n repo = 'https://github.com/Bitmessage/PyBitmessage/blob/v0.6/src%s'\n path = mod.replace(home, '')\n if path != mod:\n # put the link only for top level definitions\n if len(info['fullname'].split('.')) > 1:\n return\n if path.endswith('.pyc'):\n path = path[:-1]\n return repo % path\n\n\n# -- Options for intersphinx extension ---------------------------------------\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {'https://docs.python.org/2.7/': None}\n\n# -- Options for todo extension ----------------------------------------------\n\n# If true, `todo` and `todoList` 
produce output, else they produce nothing.\ntodo_include_todos = True\n", "path": "docs/conf.py"}]}
2,948
123
gh_patches_debug_3811
rasdani/github-patches
git_diff
openmc-dev__openmc-926
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Unable to run examples/python/pincell Hi, After generating the xml files and trying to `openmc` I get the following error: ``` Reading tallies XML file... ERROR: Two or more meshes use the same unique ID: 1 ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `examples/python/pincell/build-xml.py` Content: ``` 1 import openmc 2 3 ############################################################################### 4 # Simulation Input File Parameters 5 ############################################################################### 6 7 # OpenMC simulation parameters 8 batches = 100 9 inactive = 10 10 particles = 1000 11 12 13 ############################################################################### 14 # Exporting to OpenMC materials.xml file 15 ############################################################################### 16 17 18 # Instantiate some Materials and register the appropriate Nuclides 19 uo2 = openmc.Material(material_id=1, name='UO2 fuel at 2.4% wt enrichment') 20 uo2.set_density('g/cm3', 10.29769) 21 uo2.add_element('U', 1., enrichment=2.4) 22 uo2.add_element('O', 2.) 23 24 helium = openmc.Material(material_id=2, name='Helium for gap') 25 helium.set_density('g/cm3', 0.001598) 26 helium.add_element('He', 2.4044e-4) 27 28 zircaloy = openmc.Material(material_id=3, name='Zircaloy 4') 29 zircaloy.set_density('g/cm3', 6.55) 30 zircaloy.add_element('Sn', 0.014 , 'wo') 31 zircaloy.add_element('Fe', 0.00165, 'wo') 32 zircaloy.add_element('Cr', 0.001 , 'wo') 33 zircaloy.add_element('Zr', 0.98335, 'wo') 34 35 borated_water = openmc.Material(material_id=4, name='Borated water') 36 borated_water.set_density('g/cm3', 0.740582) 37 borated_water.add_element('B', 4.0e-5) 38 borated_water.add_element('H', 5.0e-2) 39 borated_water.add_element('O', 2.4e-2) 40 borated_water.add_s_alpha_beta('c_H_in_H2O') 41 42 # Instantiate a Materials collection and export to XML 43 materials_file = openmc.Materials([uo2, helium, zircaloy, borated_water]) 44 materials_file.export_to_xml() 45 46 47 ############################################################################### 48 # Exporting to OpenMC geometry.xml file 49 ############################################################################### 50 51 # Instantiate ZCylinder surfaces 52 fuel_or = openmc.ZCylinder(surface_id=1, x0=0, y0=0, R=0.39218, name='Fuel OR') 53 clad_ir = openmc.ZCylinder(surface_id=2, x0=0, y0=0, R=0.40005, name='Clad IR') 54 clad_or = openmc.ZCylinder(surface_id=3, x0=0, y0=0, R=0.45720, name='Clad OR') 55 left = openmc.XPlane(surface_id=4, x0=-0.62992, name='left') 56 right = openmc.XPlane(surface_id=5, x0=0.62992, name='right') 57 bottom = openmc.YPlane(surface_id=6, y0=-0.62992, name='bottom') 58 top = openmc.YPlane(surface_id=7, y0=0.62992, name='top') 59 60 left.boundary_type = 'reflective' 61 right.boundary_type = 'reflective' 62 top.boundary_type = 'reflective' 63 bottom.boundary_type = 'reflective' 64 65 # Instantiate Cells 66 fuel = openmc.Cell(cell_id=1, name='cell 1') 67 gap = openmc.Cell(cell_id=2, name='cell 2') 68 clad = openmc.Cell(cell_id=3, name='cell 3') 69 water = openmc.Cell(cell_id=4, name='cell 4') 70 71 # Use surface half-spaces to define regions 72 fuel.region = -fuel_or 73 gap.region = +fuel_or & -clad_ir 74 clad.region = +clad_ir & -clad_or 75 water.region = +clad_or & +left & -right & +bottom & -top 76 
77 # Register Materials with Cells 78 fuel.fill = uo2 79 gap.fill = helium 80 clad.fill = zircaloy 81 water.fill = borated_water 82 83 # Instantiate Universe 84 root = openmc.Universe(universe_id=0, name='root universe') 85 86 # Register Cells with Universe 87 root.add_cells([fuel, gap, clad, water]) 88 89 # Instantiate a Geometry, register the root Universe, and export to XML 90 geometry = openmc.Geometry(root) 91 geometry.export_to_xml() 92 93 94 ############################################################################### 95 # Exporting to OpenMC settings.xml file 96 ############################################################################### 97 98 # Instantiate a Settings object, set all runtime parameters, and export to XML 99 settings_file = openmc.Settings() 100 settings_file.batches = batches 101 settings_file.inactive = inactive 102 settings_file.particles = particles 103 104 # Create an initial uniform spatial source distribution over fissionable zones 105 bounds = [-0.62992, -0.62992, -1, 0.62992, 0.62992, 1] 106 uniform_dist = openmc.stats.Box(bounds[:3], bounds[3:], only_fissionable=True) 107 settings_file.source = openmc.source.Source(space=uniform_dist) 108 109 entropy_mesh = openmc.Mesh() 110 entropy_mesh.lower_left = [-0.39218, -0.39218, -1.e50] 111 entropy_mesh.upper_right = [0.39218, 0.39218, 1.e50] 112 entropy_mesh.dimension = [10, 10, 1] 113 settings_file.entropy_mesh = entropy_mesh 114 settings_file.export_to_xml() 115 116 117 ############################################################################### 118 # Exporting to OpenMC tallies.xml file 119 ############################################################################### 120 121 # Instantiate a tally mesh 122 mesh = openmc.Mesh(mesh_id=1) 123 mesh.type = 'regular' 124 mesh.dimension = [100, 100, 1] 125 mesh.lower_left = [-0.62992, -0.62992, -1.e50] 126 mesh.upper_right = [0.62992, 0.62992, 1.e50] 127 128 # Instantiate some tally Filters 129 energy_filter = openmc.EnergyFilter([0., 4., 20.e6]) 130 mesh_filter = openmc.MeshFilter(mesh) 131 132 # Instantiate the Tally 133 tally = openmc.Tally(tally_id=1, name='tally 1') 134 tally.filters = [energy_filter, mesh_filter] 135 tally.scores = ['flux', 'fission', 'nu-fission'] 136 137 # Instantiate a Tallies collection and export to XML 138 tallies_file = openmc.Tallies([tally]) 139 tallies_file.export_to_xml() 140 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/examples/python/pincell/build-xml.py b/examples/python/pincell/build-xml.py --- a/examples/python/pincell/build-xml.py +++ b/examples/python/pincell/build-xml.py @@ -119,7 +119,7 @@ ############################################################################### # Instantiate a tally mesh -mesh = openmc.Mesh(mesh_id=1) +mesh = openmc.Mesh() mesh.type = 'regular' mesh.dimension = [100, 100, 1] mesh.lower_left = [-0.62992, -0.62992, -1.e50]
{"golden_diff": "diff --git a/examples/python/pincell/build-xml.py b/examples/python/pincell/build-xml.py\n--- a/examples/python/pincell/build-xml.py\n+++ b/examples/python/pincell/build-xml.py\n@@ -119,7 +119,7 @@\n ###############################################################################\n \n # Instantiate a tally mesh\n-mesh = openmc.Mesh(mesh_id=1)\n+mesh = openmc.Mesh()\n mesh.type = 'regular'\n mesh.dimension = [100, 100, 1]\n mesh.lower_left = [-0.62992, -0.62992, -1.e50]\n", "issue": "Unable to run examples/python/pincell\nHi,\r\n\r\nAfter generating the xml files and trying to `openmc` I get the following error:\r\n```\r\nReading tallies XML file...\r\nERROR: Two or more meshes use the same unique ID: 1\r\n```\n", "before_files": [{"content": "import openmc\n\n###############################################################################\n# Simulation Input File Parameters\n###############################################################################\n\n# OpenMC simulation parameters\nbatches = 100\ninactive = 10\nparticles = 1000\n\n\n###############################################################################\n# Exporting to OpenMC materials.xml file\n###############################################################################\n\n\n# Instantiate some Materials and register the appropriate Nuclides\nuo2 = openmc.Material(material_id=1, name='UO2 fuel at 2.4% wt enrichment')\nuo2.set_density('g/cm3', 10.29769)\nuo2.add_element('U', 1., enrichment=2.4)\nuo2.add_element('O', 2.)\n\nhelium = openmc.Material(material_id=2, name='Helium for gap')\nhelium.set_density('g/cm3', 0.001598)\nhelium.add_element('He', 2.4044e-4)\n\nzircaloy = openmc.Material(material_id=3, name='Zircaloy 4')\nzircaloy.set_density('g/cm3', 6.55)\nzircaloy.add_element('Sn', 0.014 , 'wo')\nzircaloy.add_element('Fe', 0.00165, 'wo')\nzircaloy.add_element('Cr', 0.001 , 'wo')\nzircaloy.add_element('Zr', 0.98335, 'wo')\n\nborated_water = openmc.Material(material_id=4, name='Borated water')\nborated_water.set_density('g/cm3', 0.740582)\nborated_water.add_element('B', 4.0e-5)\nborated_water.add_element('H', 5.0e-2)\nborated_water.add_element('O', 2.4e-2)\nborated_water.add_s_alpha_beta('c_H_in_H2O')\n\n# Instantiate a Materials collection and export to XML\nmaterials_file = openmc.Materials([uo2, helium, zircaloy, borated_water])\nmaterials_file.export_to_xml()\n\n\n###############################################################################\n# Exporting to OpenMC geometry.xml file\n###############################################################################\n\n# Instantiate ZCylinder surfaces\nfuel_or = openmc.ZCylinder(surface_id=1, x0=0, y0=0, R=0.39218, name='Fuel OR')\nclad_ir = openmc.ZCylinder(surface_id=2, x0=0, y0=0, R=0.40005, name='Clad IR')\nclad_or = openmc.ZCylinder(surface_id=3, x0=0, y0=0, R=0.45720, name='Clad OR')\nleft = openmc.XPlane(surface_id=4, x0=-0.62992, name='left')\nright = openmc.XPlane(surface_id=5, x0=0.62992, name='right')\nbottom = openmc.YPlane(surface_id=6, y0=-0.62992, name='bottom')\ntop = openmc.YPlane(surface_id=7, y0=0.62992, name='top')\n\nleft.boundary_type = 'reflective'\nright.boundary_type = 'reflective'\ntop.boundary_type = 'reflective'\nbottom.boundary_type = 'reflective'\n\n# Instantiate Cells\nfuel = openmc.Cell(cell_id=1, name='cell 1')\ngap = openmc.Cell(cell_id=2, name='cell 2')\nclad = openmc.Cell(cell_id=3, name='cell 3')\nwater = openmc.Cell(cell_id=4, name='cell 4')\n\n# Use surface half-spaces to define regions\nfuel.region = 
-fuel_or\ngap.region = +fuel_or & -clad_ir\nclad.region = +clad_ir & -clad_or\nwater.region = +clad_or & +left & -right & +bottom & -top\n\n# Register Materials with Cells\nfuel.fill = uo2\ngap.fill = helium\nclad.fill = zircaloy\nwater.fill = borated_water\n\n# Instantiate Universe\nroot = openmc.Universe(universe_id=0, name='root universe')\n\n# Register Cells with Universe\nroot.add_cells([fuel, gap, clad, water])\n\n# Instantiate a Geometry, register the root Universe, and export to XML\ngeometry = openmc.Geometry(root)\ngeometry.export_to_xml()\n\n\n###############################################################################\n# Exporting to OpenMC settings.xml file\n###############################################################################\n\n# Instantiate a Settings object, set all runtime parameters, and export to XML\nsettings_file = openmc.Settings()\nsettings_file.batches = batches\nsettings_file.inactive = inactive\nsettings_file.particles = particles\n\n# Create an initial uniform spatial source distribution over fissionable zones\nbounds = [-0.62992, -0.62992, -1, 0.62992, 0.62992, 1]\nuniform_dist = openmc.stats.Box(bounds[:3], bounds[3:], only_fissionable=True)\nsettings_file.source = openmc.source.Source(space=uniform_dist)\n\nentropy_mesh = openmc.Mesh()\nentropy_mesh.lower_left = [-0.39218, -0.39218, -1.e50]\nentropy_mesh.upper_right = [0.39218, 0.39218, 1.e50]\nentropy_mesh.dimension = [10, 10, 1]\nsettings_file.entropy_mesh = entropy_mesh\nsettings_file.export_to_xml()\n\n\n###############################################################################\n# Exporting to OpenMC tallies.xml file\n###############################################################################\n\n# Instantiate a tally mesh\nmesh = openmc.Mesh(mesh_id=1)\nmesh.type = 'regular'\nmesh.dimension = [100, 100, 1]\nmesh.lower_left = [-0.62992, -0.62992, -1.e50]\nmesh.upper_right = [0.62992, 0.62992, 1.e50]\n\n# Instantiate some tally Filters\nenergy_filter = openmc.EnergyFilter([0., 4., 20.e6])\nmesh_filter = openmc.MeshFilter(mesh)\n\n# Instantiate the Tally\ntally = openmc.Tally(tally_id=1, name='tally 1')\ntally.filters = [energy_filter, mesh_filter]\ntally.scores = ['flux', 'fission', 'nu-fission']\n\n# Instantiate a Tallies collection and export to XML\ntallies_file = openmc.Tallies([tally])\ntallies_file.export_to_xml()\n", "path": "examples/python/pincell/build-xml.py"}], "after_files": [{"content": "import openmc\n\n###############################################################################\n# Simulation Input File Parameters\n###############################################################################\n\n# OpenMC simulation parameters\nbatches = 100\ninactive = 10\nparticles = 1000\n\n\n###############################################################################\n# Exporting to OpenMC materials.xml file\n###############################################################################\n\n\n# Instantiate some Materials and register the appropriate Nuclides\nuo2 = openmc.Material(material_id=1, name='UO2 fuel at 2.4% wt enrichment')\nuo2.set_density('g/cm3', 10.29769)\nuo2.add_element('U', 1., enrichment=2.4)\nuo2.add_element('O', 2.)\n\nhelium = openmc.Material(material_id=2, name='Helium for gap')\nhelium.set_density('g/cm3', 0.001598)\nhelium.add_element('He', 2.4044e-4)\n\nzircaloy = openmc.Material(material_id=3, name='Zircaloy 4')\nzircaloy.set_density('g/cm3', 6.55)\nzircaloy.add_element('Sn', 0.014 , 'wo')\nzircaloy.add_element('Fe', 0.00165, 
'wo')\nzircaloy.add_element('Cr', 0.001 , 'wo')\nzircaloy.add_element('Zr', 0.98335, 'wo')\n\nborated_water = openmc.Material(material_id=4, name='Borated water')\nborated_water.set_density('g/cm3', 0.740582)\nborated_water.add_element('B', 4.0e-5)\nborated_water.add_element('H', 5.0e-2)\nborated_water.add_element('O', 2.4e-2)\nborated_water.add_s_alpha_beta('c_H_in_H2O')\n\n# Instantiate a Materials collection and export to XML\nmaterials_file = openmc.Materials([uo2, helium, zircaloy, borated_water])\nmaterials_file.export_to_xml()\n\n\n###############################################################################\n# Exporting to OpenMC geometry.xml file\n###############################################################################\n\n# Instantiate ZCylinder surfaces\nfuel_or = openmc.ZCylinder(surface_id=1, x0=0, y0=0, R=0.39218, name='Fuel OR')\nclad_ir = openmc.ZCylinder(surface_id=2, x0=0, y0=0, R=0.40005, name='Clad IR')\nclad_or = openmc.ZCylinder(surface_id=3, x0=0, y0=0, R=0.45720, name='Clad OR')\nleft = openmc.XPlane(surface_id=4, x0=-0.62992, name='left')\nright = openmc.XPlane(surface_id=5, x0=0.62992, name='right')\nbottom = openmc.YPlane(surface_id=6, y0=-0.62992, name='bottom')\ntop = openmc.YPlane(surface_id=7, y0=0.62992, name='top')\n\nleft.boundary_type = 'reflective'\nright.boundary_type = 'reflective'\ntop.boundary_type = 'reflective'\nbottom.boundary_type = 'reflective'\n\n# Instantiate Cells\nfuel = openmc.Cell(cell_id=1, name='cell 1')\ngap = openmc.Cell(cell_id=2, name='cell 2')\nclad = openmc.Cell(cell_id=3, name='cell 3')\nwater = openmc.Cell(cell_id=4, name='cell 4')\n\n# Use surface half-spaces to define regions\nfuel.region = -fuel_or\ngap.region = +fuel_or & -clad_ir\nclad.region = +clad_ir & -clad_or\nwater.region = +clad_or & +left & -right & +bottom & -top\n\n# Register Materials with Cells\nfuel.fill = uo2\ngap.fill = helium\nclad.fill = zircaloy\nwater.fill = borated_water\n\n# Instantiate Universe\nroot = openmc.Universe(universe_id=0, name='root universe')\n\n# Register Cells with Universe\nroot.add_cells([fuel, gap, clad, water])\n\n# Instantiate a Geometry, register the root Universe, and export to XML\ngeometry = openmc.Geometry(root)\ngeometry.export_to_xml()\n\n\n###############################################################################\n# Exporting to OpenMC settings.xml file\n###############################################################################\n\n# Instantiate a Settings object, set all runtime parameters, and export to XML\nsettings_file = openmc.Settings()\nsettings_file.batches = batches\nsettings_file.inactive = inactive\nsettings_file.particles = particles\n\n# Create an initial uniform spatial source distribution over fissionable zones\nbounds = [-0.62992, -0.62992, -1, 0.62992, 0.62992, 1]\nuniform_dist = openmc.stats.Box(bounds[:3], bounds[3:], only_fissionable=True)\nsettings_file.source = openmc.source.Source(space=uniform_dist)\n\nentropy_mesh = openmc.Mesh()\nentropy_mesh.lower_left = [-0.39218, -0.39218, -1.e50]\nentropy_mesh.upper_right = [0.39218, 0.39218, 1.e50]\nentropy_mesh.dimension = [10, 10, 1]\nsettings_file.entropy_mesh = entropy_mesh\nsettings_file.export_to_xml()\n\n\n###############################################################################\n# Exporting to OpenMC tallies.xml file\n###############################################################################\n\n# Instantiate a tally mesh\nmesh = openmc.Mesh()\nmesh.type = 'regular'\nmesh.dimension = [100, 100, 1]\nmesh.lower_left = [-0.62992, 
-0.62992, -1.e50]\nmesh.upper_right = [0.62992, 0.62992, 1.e50]\n\n# Instantiate some tally Filters\nenergy_filter = openmc.EnergyFilter([0., 4., 20.e6])\nmesh_filter = openmc.MeshFilter(mesh)\n\n# Instantiate the Tally\ntally = openmc.Tally(tally_id=1, name='tally 1')\ntally.filters = [energy_filter, mesh_filter]\ntally.scores = ['flux', 'fission', 'nu-fission']\n\n# Instantiate a Tallies collection and export to XML\ntallies_file = openmc.Tallies([tally])\ntallies_file.export_to_xml()\n", "path": "examples/python/pincell/build-xml.py"}]}
2,098
139
gh_patches_debug_7240
rasdani/github-patches
git_diff
ansible__awx-14489
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Tower Settings of type on/off idempotency not working ### Please confirm the following - [X] I agree to follow this project's [code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html). - [X] I have checked the [current issues](https://github.com/ansible/awx/issues) for duplicates. - [X] I understand that AWX is open source software provided for free and that I might not receive a timely response. - [X] I am **NOT** reporting a (potential) security vulnerability. (These should be emailed to `[email protected]` instead.) ### Bug Summary When trying to set the setting named AWX_MOUNT_ISOLATED_PATHS_ON_K8S to true, no matter what I put in the playbook it is always marked as "changed: true", even if the setting is already true.. Sample Task: ``` - name: SET AWX EXPOSE HOST PATHS awx.awx.settings: name: "AWX_MOUNT_ISOLATED_PATHS_ON_K8S" value: true ``` When I change the playbook value and register the results of the task to review the results I get this: | Value In Playbook | Resulting Debug| | ------- | ------- | | true | ok: [awxlab] => {<br> "this_setting": {<br> "changed": true,<br> "failed": false,<br> "new_values": {<br> "AWX_MOUNT_ISOLATED_PATHS_ON_K8S": "True"<br> },<br> "old_values": {<br> "AWX_MOUNT_ISOLATED_PATHS_ON_K8S": true<br> },<br> "value": true<br> }<br>}| | True | ok: [awxlab] => {<br> "this_setting": {<br> "changed": true,<br> "failed": false,<br> "new_values": {<br> "AWX_MOUNT_ISOLATED_PATHS_ON_K8S": "True"<br> },<br> "old_values": {<br> "AWX_MOUNT_ISOLATED_PATHS_ON_K8S": true<br> },<br> "value": true<br> }<br>}| | "true" | ok: [awxlab] => {<br> "this_setting": {<br> "changed": true,<br> "failed": false,<br> "new_values": {<br> "AWX_MOUNT_ISOLATED_PATHS_ON_K8S": "true"<br> },<br> "old_values": {<br> "AWX_MOUNT_ISOLATED_PATHS_ON_K8S": true<br> },<br> "value": true<br> }<br>}| | "True" | ok: [awxlab] => {<br> "this_setting": {<br> "changed": true,<br> "failed": false,<br> "new_values": {<br> "AWX_MOUNT_ISOLATED_PATHS_ON_K8S": "True"<br> },<br> "old_values": {<br> "AWX_MOUNT_ISOLATED_PATHS_ON_K8S": true<br> },<br> "value": true<br> }<br>} | yes | ok: [awxlab] => {<br> "this_setting": {<br> "changed": true,<br> "failed": false,<br> "new_values": {<br> "AWX_MOUNT_ISOLATED_PATHS_ON_K8S": "True"<br> },<br> "old_values": {<br> "AWX_MOUNT_ISOLATED_PATHS_ON_K8S": true<br> },<br> "value": true<br> }<br>} The documentation says this: ```> AWX.AWX.SETTINGS ([...]collections/ansible_collections/awx/awx/plugins/modules/settings.py) Modify Automation Platform Controller settings. See https://www.ansible.com/tower for an overview. OPTIONS (= is mandatory): - value Value to be modified for given setting. **If given a non-string type, will make best effort to cast it to type API expects.** For better control over types, use the `settings' param instead. default: null type: str ``` This leads me to believe that the logic used to sanitize the input might be doing a little extra or the conversion of 'settings' parameters to/from name/value parameters. The documentation does show an example of how to use both the settings parameter and the name/value parameter. So, this may be lower priority but I wanted to get this somewhere that could be search by others who may be running into this issue. 
### AWX version 23.1.0 ### Select the relevant components - [ ] UI - [ ] UI (tech preview) - [ ] API - [ ] Docs - [X] Collection - [ ] CLI - [ ] Other ### Installation method kubernetes ### Modifications no ### Ansible version 2.15.4 ### Operating system CentOS Stream release 9 ### Web browser Firefox ### Steps to reproduce Use a playbook that modifies a boolean setting. Use the awx.awx.setting module. Use the name/value parameters instead of the settings paremeter. Sample: ``` - name: SET AWX EXPOSE HOST PATHS awx.awx.settings: name: "AWX_MOUNT_ISOLATED_PATHS_ON_K8S" value: true register: this_setting - name: Debug this setting debug: var=this_setting ``` ### Expected results When the setting is already true, idempotency keeps the playbook from trying to update the setting again. ### Actual results Regardless if the setting is true or not, the playbook always updates the setting. ### Additional information _No response_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `awx_collection/plugins/modules/settings.py` Content: ``` 1 #!/usr/bin/python 2 # coding: utf-8 -*- 3 4 # (c) 2018, Nikhil Jain <[email protected]> 5 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) 6 7 from __future__ import absolute_import, division, print_function 8 9 __metaclass__ = type 10 11 12 ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'} 13 14 15 DOCUMENTATION = ''' 16 --- 17 module: settings 18 author: "Nikhil Jain (@jainnikhil30)" 19 short_description: Modify Automation Platform Controller settings. 20 description: 21 - Modify Automation Platform Controller settings. See 22 U(https://www.ansible.com/tower) for an overview. 23 options: 24 name: 25 description: 26 - Name of setting to modify 27 type: str 28 value: 29 description: 30 - Value to be modified for given setting. 31 - If given a non-string type, will make best effort to cast it to type API expects. 32 - For better control over types, use the C(settings) param instead. 
33 type: str 34 settings: 35 description: 36 - A data structure to be sent into the settings endpoint 37 type: dict 38 requirements: 39 - pyyaml 40 extends_documentation_fragment: awx.awx.auth 41 ''' 42 43 EXAMPLES = ''' 44 - name: Set the value of AWX_ISOLATION_BASE_PATH 45 settings: 46 name: AWX_ISOLATION_BASE_PATH 47 value: "/tmp" 48 register: testing_settings 49 50 - name: Set the value of AWX_ISOLATION_SHOW_PATHS 51 settings: 52 name: "AWX_ISOLATION_SHOW_PATHS" 53 value: "'/var/lib/awx/projects/', '/tmp'" 54 register: testing_settings 55 56 - name: Set the LDAP Auth Bind Password 57 settings: 58 name: "AUTH_LDAP_BIND_PASSWORD" 59 value: "Password" 60 no_log: true 61 62 - name: Set all the LDAP Auth Bind Params 63 settings: 64 settings: 65 AUTH_LDAP_BIND_PASSWORD: "password" 66 AUTH_LDAP_USER_ATTR_MAP: 67 email: "mail" 68 first_name: "givenName" 69 last_name: "surname" 70 ''' 71 72 from ..module_utils.controller_api import ControllerAPIModule 73 74 try: 75 import yaml 76 77 HAS_YAML = True 78 except ImportError: 79 HAS_YAML = False 80 81 82 def coerce_type(module, value): 83 # If our value is already None we can just return directly 84 if value is None: 85 return value 86 87 yaml_ish = bool((value.startswith('{') and value.endswith('}')) or (value.startswith('[') and value.endswith(']'))) 88 if yaml_ish: 89 if not HAS_YAML: 90 module.fail_json(msg="yaml is not installed, try 'pip install pyyaml'") 91 return yaml.safe_load(value) 92 elif value.lower in ('true', 'false', 't', 'f'): 93 return {'t': True, 'f': False}[value[0].lower()] 94 try: 95 return int(value) 96 except ValueError: 97 pass 98 return value 99 100 101 def main(): 102 # Any additional arguments that are not fields of the item can be added here 103 argument_spec = dict( 104 name=dict(), 105 value=dict(), 106 settings=dict(type='dict'), 107 ) 108 109 # Create a module for ourselves 110 module = ControllerAPIModule( 111 argument_spec=argument_spec, 112 required_one_of=[['name', 'settings']], 113 mutually_exclusive=[['name', 'settings']], 114 required_if=[['name', 'present', ['value']]], 115 ) 116 117 # Extract our parameters 118 name = module.params.get('name') 119 value = module.params.get('value') 120 new_settings = module.params.get('settings') 121 122 # If we were given a name/value pair we will just make settings out of that and proceed normally 123 if new_settings is None: 124 new_value = coerce_type(module, value) 125 126 new_settings = {name: new_value} 127 128 # Load the existing settings 129 existing_settings = module.get_endpoint('settings/all')['json'] 130 131 # Begin a json response 132 json_output = {'changed': False, 'old_values': {}, 'new_values': {}} 133 134 # Check any of the settings to see if anything needs to be updated 135 needs_update = False 136 for a_setting in new_settings: 137 if a_setting not in existing_settings or existing_settings[a_setting] != new_settings[a_setting]: 138 # At least one thing is different so we need to patch 139 needs_update = True 140 json_output['old_values'][a_setting] = existing_settings[a_setting] 141 json_output['new_values'][a_setting] = new_settings[a_setting] 142 143 if module._diff: 144 json_output['diff'] = {'before': json_output['old_values'], 'after': json_output['new_values']} 145 146 # If nothing needs an update we can simply exit with the response (as not changed) 147 if not needs_update: 148 module.exit_json(**json_output) 149 150 if module.check_mode and module._diff: 151 json_output['changed'] = True 152 module.exit_json(**json_output) 153 154 # Make the call 
to update the settings 155 response = module.patch_endpoint('settings/all', **{'data': new_settings}) 156 157 if response['status_code'] == 200: 158 # Set the changed response to True 159 json_output['changed'] = True 160 161 # To deal with the old style values we need to return 'value' in the response 162 new_values = {} 163 for a_setting in new_settings: 164 new_values[a_setting] = response['json'][a_setting] 165 166 # If we were using a name we will just add a value of a string, otherwise we will return an array in values 167 if name is not None: 168 json_output['value'] = new_values[name] 169 else: 170 json_output['values'] = new_values 171 172 module.exit_json(**json_output) 173 elif 'json' in response and '__all__' in response['json']: 174 module.fail_json(msg=response['json']['__all__']) 175 else: 176 module.fail_json(**{'msg': "Unable to update settings, see response", 'response': response}) 177 178 179 if __name__ == '__main__': 180 main() 181 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/awx_collection/plugins/modules/settings.py b/awx_collection/plugins/modules/settings.py --- a/awx_collection/plugins/modules/settings.py +++ b/awx_collection/plugins/modules/settings.py @@ -89,7 +89,7 @@ if not HAS_YAML: module.fail_json(msg="yaml is not installed, try 'pip install pyyaml'") return yaml.safe_load(value) - elif value.lower in ('true', 'false', 't', 'f'): + elif value.lower() in ('true', 'false', 't', 'f'): return {'t': True, 'f': False}[value[0].lower()] try: return int(value)
{"golden_diff": "diff --git a/awx_collection/plugins/modules/settings.py b/awx_collection/plugins/modules/settings.py\n--- a/awx_collection/plugins/modules/settings.py\n+++ b/awx_collection/plugins/modules/settings.py\n@@ -89,7 +89,7 @@\n if not HAS_YAML:\n module.fail_json(msg=\"yaml is not installed, try 'pip install pyyaml'\")\n return yaml.safe_load(value)\n- elif value.lower in ('true', 'false', 't', 'f'):\n+ elif value.lower() in ('true', 'false', 't', 'f'):\n return {'t': True, 'f': False}[value[0].lower()]\n try:\n return int(value)\n", "issue": "Tower Settings of type on/off idempotency not working\n### Please confirm the following\n\n- [X] I agree to follow this project's [code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html).\n- [X] I have checked the [current issues](https://github.com/ansible/awx/issues) for duplicates.\n- [X] I understand that AWX is open source software provided for free and that I might not receive a timely response.\n- [X] I am **NOT** reporting a (potential) security vulnerability. (These should be emailed to `[email protected]` instead.)\n\n### Bug Summary\n\nWhen trying to set the setting named AWX_MOUNT_ISOLATED_PATHS_ON_K8S to true, no matter what I put in the playbook it is always marked as \"changed: true\", even if the setting is already true..\r\n\r\nSample Task:\r\n```\r\n - name: SET AWX EXPOSE HOST PATHS\r\n awx.awx.settings:\r\n name: \"AWX_MOUNT_ISOLATED_PATHS_ON_K8S\"\r\n value: true\r\n\r\n```\r\n\r\nWhen I change the playbook value and register the results of the task to review the results I get this:\r\n\r\n| Value In Playbook | Resulting Debug|\r\n| ------- | ------- |\r\n| true | ok: [awxlab] => {<br> \"this_setting\": {<br> \"changed\": true,<br> \"failed\": false,<br> \"new_values\": {<br> \"AWX_MOUNT_ISOLATED_PATHS_ON_K8S\": \"True\"<br> },<br> \"old_values\": {<br> \"AWX_MOUNT_ISOLATED_PATHS_ON_K8S\": true<br> },<br> \"value\": true<br> }<br>}|\r\n| True | ok: [awxlab] => {<br> \"this_setting\": {<br> \"changed\": true,<br> \"failed\": false,<br> \"new_values\": {<br> \"AWX_MOUNT_ISOLATED_PATHS_ON_K8S\": \"True\"<br> },<br> \"old_values\": {<br> \"AWX_MOUNT_ISOLATED_PATHS_ON_K8S\": true<br> },<br> \"value\": true<br> }<br>}|\r\n| \"true\" | ok: [awxlab] => {<br> \"this_setting\": {<br> \"changed\": true,<br> \"failed\": false,<br> \"new_values\": {<br> \"AWX_MOUNT_ISOLATED_PATHS_ON_K8S\": \"true\"<br> },<br> \"old_values\": {<br> \"AWX_MOUNT_ISOLATED_PATHS_ON_K8S\": true<br> },<br> \"value\": true<br> }<br>}|\r\n| \"True\" | ok: [awxlab] => {<br> \"this_setting\": {<br> \"changed\": true,<br> \"failed\": false,<br> \"new_values\": {<br> \"AWX_MOUNT_ISOLATED_PATHS_ON_K8S\": \"True\"<br> },<br> \"old_values\": {<br> \"AWX_MOUNT_ISOLATED_PATHS_ON_K8S\": true<br> },<br> \"value\": true<br> }<br>}\r\n| yes | ok: [awxlab] => {<br> \"this_setting\": {<br> \"changed\": true,<br> \"failed\": false,<br> \"new_values\": {<br> \"AWX_MOUNT_ISOLATED_PATHS_ON_K8S\": \"True\"<br> },<br> \"old_values\": {<br> \"AWX_MOUNT_ISOLATED_PATHS_ON_K8S\": true<br> },<br> \"value\": true<br> }<br>}\r\n\r\n\r\nThe documentation says this:\r\n```> AWX.AWX.SETTINGS ([...]collections/ansible_collections/awx/awx/plugins/modules/settings.py)\r\n\r\n Modify Automation Platform Controller settings. 
See https://www.ansible.com/tower for an overview.\r\n\r\nOPTIONS (= is mandatory):\r\n- value\r\n Value to be modified for given setting.\r\n **If given a non-string type, will make best effort to cast it to type API expects.**\r\n For better control over types, use the `settings' param instead.\r\n default: null\r\n type: str\r\n```\r\n\r\nThis leads me to believe that the logic used to sanitize the input might be doing a little extra or the conversion of 'settings' parameters to/from name/value parameters.\r\n\r\nThe documentation does show an example of how to use both the settings parameter and the name/value parameter. So, this may be lower priority but I wanted to get this somewhere that could be search by others who may be running into this issue.\n\n### AWX version\n\n23.1.0\n\n### Select the relevant components\n\n- [ ] UI\n- [ ] UI (tech preview)\n- [ ] API\n- [ ] Docs\n- [X] Collection\n- [ ] CLI\n- [ ] Other\n\n### Installation method\n\nkubernetes\n\n### Modifications\n\nno\n\n### Ansible version\n\n2.15.4\n\n### Operating system\n\nCentOS Stream release 9\n\n### Web browser\n\nFirefox\n\n### Steps to reproduce\n\nUse a playbook that modifies a boolean setting.\r\nUse the awx.awx.setting module.\r\nUse the name/value parameters instead of the settings paremeter.\r\nSample:\r\n```\r\n - name: SET AWX EXPOSE HOST PATHS\r\n awx.awx.settings:\r\n name: \"AWX_MOUNT_ISOLATED_PATHS_ON_K8S\"\r\n value: true\r\n register: this_setting\r\n \r\n - name: Debug this setting\r\n debug: var=this_setting\r\n```\n\n### Expected results\n\nWhen the setting is already true, idempotency keeps the playbook from trying to update the setting again.\n\n### Actual results\n\nRegardless if the setting is true or not, the playbook always updates the setting.\n\n### Additional information\n\n_No response_\n", "before_files": [{"content": "#!/usr/bin/python\n# coding: utf-8 -*-\n\n# (c) 2018, Nikhil Jain <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n\n__metaclass__ = type\n\n\nANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'}\n\n\nDOCUMENTATION = '''\n---\nmodule: settings\nauthor: \"Nikhil Jain (@jainnikhil30)\"\nshort_description: Modify Automation Platform Controller settings.\ndescription:\n - Modify Automation Platform Controller settings. 
See\n U(https://www.ansible.com/tower) for an overview.\noptions:\n name:\n description:\n - Name of setting to modify\n type: str\n value:\n description:\n - Value to be modified for given setting.\n - If given a non-string type, will make best effort to cast it to type API expects.\n - For better control over types, use the C(settings) param instead.\n type: str\n settings:\n description:\n - A data structure to be sent into the settings endpoint\n type: dict\nrequirements:\n - pyyaml\nextends_documentation_fragment: awx.awx.auth\n'''\n\nEXAMPLES = '''\n- name: Set the value of AWX_ISOLATION_BASE_PATH\n settings:\n name: AWX_ISOLATION_BASE_PATH\n value: \"/tmp\"\n register: testing_settings\n\n- name: Set the value of AWX_ISOLATION_SHOW_PATHS\n settings:\n name: \"AWX_ISOLATION_SHOW_PATHS\"\n value: \"'/var/lib/awx/projects/', '/tmp'\"\n register: testing_settings\n\n- name: Set the LDAP Auth Bind Password\n settings:\n name: \"AUTH_LDAP_BIND_PASSWORD\"\n value: \"Password\"\n no_log: true\n\n- name: Set all the LDAP Auth Bind Params\n settings:\n settings:\n AUTH_LDAP_BIND_PASSWORD: \"password\"\n AUTH_LDAP_USER_ATTR_MAP:\n email: \"mail\"\n first_name: \"givenName\"\n last_name: \"surname\"\n'''\n\nfrom ..module_utils.controller_api import ControllerAPIModule\n\ntry:\n import yaml\n\n HAS_YAML = True\nexcept ImportError:\n HAS_YAML = False\n\n\ndef coerce_type(module, value):\n # If our value is already None we can just return directly\n if value is None:\n return value\n\n yaml_ish = bool((value.startswith('{') and value.endswith('}')) or (value.startswith('[') and value.endswith(']')))\n if yaml_ish:\n if not HAS_YAML:\n module.fail_json(msg=\"yaml is not installed, try 'pip install pyyaml'\")\n return yaml.safe_load(value)\n elif value.lower in ('true', 'false', 't', 'f'):\n return {'t': True, 'f': False}[value[0].lower()]\n try:\n return int(value)\n except ValueError:\n pass\n return value\n\n\ndef main():\n # Any additional arguments that are not fields of the item can be added here\n argument_spec = dict(\n name=dict(),\n value=dict(),\n settings=dict(type='dict'),\n )\n\n # Create a module for ourselves\n module = ControllerAPIModule(\n argument_spec=argument_spec,\n required_one_of=[['name', 'settings']],\n mutually_exclusive=[['name', 'settings']],\n required_if=[['name', 'present', ['value']]],\n )\n\n # Extract our parameters\n name = module.params.get('name')\n value = module.params.get('value')\n new_settings = module.params.get('settings')\n\n # If we were given a name/value pair we will just make settings out of that and proceed normally\n if new_settings is None:\n new_value = coerce_type(module, value)\n\n new_settings = {name: new_value}\n\n # Load the existing settings\n existing_settings = module.get_endpoint('settings/all')['json']\n\n # Begin a json response\n json_output = {'changed': False, 'old_values': {}, 'new_values': {}}\n\n # Check any of the settings to see if anything needs to be updated\n needs_update = False\n for a_setting in new_settings:\n if a_setting not in existing_settings or existing_settings[a_setting] != new_settings[a_setting]:\n # At least one thing is different so we need to patch\n needs_update = True\n json_output['old_values'][a_setting] = existing_settings[a_setting]\n json_output['new_values'][a_setting] = new_settings[a_setting]\n\n if module._diff:\n json_output['diff'] = {'before': json_output['old_values'], 'after': json_output['new_values']}\n\n # If nothing needs an update we can simply exit with the response (as not changed)\n 
if not needs_update:\n module.exit_json(**json_output)\n\n if module.check_mode and module._diff:\n json_output['changed'] = True\n module.exit_json(**json_output)\n\n # Make the call to update the settings\n response = module.patch_endpoint('settings/all', **{'data': new_settings})\n\n if response['status_code'] == 200:\n # Set the changed response to True\n json_output['changed'] = True\n\n # To deal with the old style values we need to return 'value' in the response\n new_values = {}\n for a_setting in new_settings:\n new_values[a_setting] = response['json'][a_setting]\n\n # If we were using a name we will just add a value of a string, otherwise we will return an array in values\n if name is not None:\n json_output['value'] = new_values[name]\n else:\n json_output['values'] = new_values\n\n module.exit_json(**json_output)\n elif 'json' in response and '__all__' in response['json']:\n module.fail_json(msg=response['json']['__all__'])\n else:\n module.fail_json(**{'msg': \"Unable to update settings, see response\", 'response': response})\n\n\nif __name__ == '__main__':\n main()\n", "path": "awx_collection/plugins/modules/settings.py"}], "after_files": [{"content": "#!/usr/bin/python\n# coding: utf-8 -*-\n\n# (c) 2018, Nikhil Jain <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n\n__metaclass__ = type\n\n\nANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community'}\n\n\nDOCUMENTATION = '''\n---\nmodule: settings\nauthor: \"Nikhil Jain (@jainnikhil30)\"\nshort_description: Modify Automation Platform Controller settings.\ndescription:\n - Modify Automation Platform Controller settings. 
See\n U(https://www.ansible.com/tower) for an overview.\noptions:\n name:\n description:\n - Name of setting to modify\n type: str\n value:\n description:\n - Value to be modified for given setting.\n - If given a non-string type, will make best effort to cast it to type API expects.\n - For better control over types, use the C(settings) param instead.\n type: str\n settings:\n description:\n - A data structure to be sent into the settings endpoint\n type: dict\nrequirements:\n - pyyaml\nextends_documentation_fragment: awx.awx.auth\n'''\n\nEXAMPLES = '''\n- name: Set the value of AWX_ISOLATION_BASE_PATH\n settings:\n name: AWX_ISOLATION_BASE_PATH\n value: \"/tmp\"\n register: testing_settings\n\n- name: Set the value of AWX_ISOLATION_SHOW_PATHS\n settings:\n name: \"AWX_ISOLATION_SHOW_PATHS\"\n value: \"'/var/lib/awx/projects/', '/tmp'\"\n register: testing_settings\n\n- name: Set the LDAP Auth Bind Password\n settings:\n name: \"AUTH_LDAP_BIND_PASSWORD\"\n value: \"Password\"\n no_log: true\n\n- name: Set all the LDAP Auth Bind Params\n settings:\n settings:\n AUTH_LDAP_BIND_PASSWORD: \"password\"\n AUTH_LDAP_USER_ATTR_MAP:\n email: \"mail\"\n first_name: \"givenName\"\n last_name: \"surname\"\n'''\n\nfrom ..module_utils.controller_api import ControllerAPIModule\n\ntry:\n import yaml\n\n HAS_YAML = True\nexcept ImportError:\n HAS_YAML = False\n\n\ndef coerce_type(module, value):\n # If our value is already None we can just return directly\n if value is None:\n return value\n\n yaml_ish = bool((value.startswith('{') and value.endswith('}')) or (value.startswith('[') and value.endswith(']')))\n if yaml_ish:\n if not HAS_YAML:\n module.fail_json(msg=\"yaml is not installed, try 'pip install pyyaml'\")\n return yaml.safe_load(value)\n elif value.lower() in ('true', 'false', 't', 'f'):\n return {'t': True, 'f': False}[value[0].lower()]\n try:\n return int(value)\n except ValueError:\n pass\n return value\n\n\ndef main():\n # Any additional arguments that are not fields of the item can be added here\n argument_spec = dict(\n name=dict(),\n value=dict(),\n settings=dict(type='dict'),\n )\n\n # Create a module for ourselves\n module = ControllerAPIModule(\n argument_spec=argument_spec,\n required_one_of=[['name', 'settings']],\n mutually_exclusive=[['name', 'settings']],\n required_if=[['name', 'present', ['value']]],\n )\n\n # Extract our parameters\n name = module.params.get('name')\n value = module.params.get('value')\n new_settings = module.params.get('settings')\n\n # If we were given a name/value pair we will just make settings out of that and proceed normally\n if new_settings is None:\n new_value = coerce_type(module, value)\n\n new_settings = {name: new_value}\n\n # Load the existing settings\n existing_settings = module.get_endpoint('settings/all')['json']\n\n # Begin a json response\n json_output = {'changed': False, 'old_values': {}, 'new_values': {}}\n\n # Check any of the settings to see if anything needs to be updated\n needs_update = False\n for a_setting in new_settings:\n if a_setting not in existing_settings or existing_settings[a_setting] != new_settings[a_setting]:\n # At least one thing is different so we need to patch\n needs_update = True\n json_output['old_values'][a_setting] = existing_settings[a_setting]\n json_output['new_values'][a_setting] = new_settings[a_setting]\n\n if module._diff:\n json_output['diff'] = {'before': json_output['old_values'], 'after': json_output['new_values']}\n\n # If nothing needs an update we can simply exit with the response (as not 
changed)\n if not needs_update:\n module.exit_json(**json_output)\n\n if module.check_mode and module._diff:\n json_output['changed'] = True\n module.exit_json(**json_output)\n\n # Make the call to update the settings\n response = module.patch_endpoint('settings/all', **{'data': new_settings})\n\n if response['status_code'] == 200:\n # Set the changed response to True\n json_output['changed'] = True\n\n # To deal with the old style values we need to return 'value' in the response\n new_values = {}\n for a_setting in new_settings:\n new_values[a_setting] = response['json'][a_setting]\n\n # If we were using a name we will just add a value of a string, otherwise we will return an array in values\n if name is not None:\n json_output['value'] = new_values[name]\n else:\n json_output['values'] = new_values\n\n module.exit_json(**json_output)\n elif 'json' in response and '__all__' in response['json']:\n module.fail_json(msg=response['json']['__all__'])\n else:\n module.fail_json(**{'msg': \"Unable to update settings, see response\", 'response': response})\n\n\nif __name__ == '__main__':\n main()\n", "path": "awx_collection/plugins/modules/settings.py"}]}
3,395
151
gh_patches_debug_29809
rasdani/github-patches
git_diff
DataBiosphere__toil-3691
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add type hints to humanize.py Add type hints to `src/toil/lib/humanize.py` so it can be checked under mypy during linting. Refers to #3568. ┆Issue is synchronized with this [Jira Task](https://ucsc-cgl.atlassian.net/browse/TOIL-946) ┆Issue Number: TOIL-946 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/toil/lib/humanize.py` Content: ``` 1 # Used by cactus; now a wrapper and not used in Toil. 2 # TODO: Remove from cactus and then remove from Toil. 3 # See https://github.com/DataBiosphere/toil/pull/3529#discussion_r611735988 4 5 # http://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/ 6 import logging 7 from typing import Optional, SupportsInt 8 from toil.lib.conversions import bytes2human as b2h, human2bytes as h2b 9 10 """ 11 Bytes-to-human / human-to-bytes converter. 12 Based on: http://goo.gl/kTQMs 13 Working with Python 2.x and 3.x. 14 15 Author: Giampaolo Rodola' <g.rodola [AT] gmail [DOT] com> 16 License: MIT 17 """ 18 19 logger = logging.getLogger(__name__) 20 21 22 def bytes2human(n: SupportsInt, fmt: Optional[str] = None, symbols: Optional[str] = None) -> str: 23 """ 24 Convert n bytes into a human readable string based on format. 25 symbols can be either "customary", "customary_ext", "iec" or "iec_ext", 26 see: http://goo.gl/kTQMs 27 """ 28 logger.warning('Deprecated toil method. Please use "toil.lib.conversions.bytes2human()" instead."') 29 return b2h(n) 30 31 32 def human2bytes(s): 33 """ 34 Attempts to guess the string format based on default symbols 35 set and return the corresponding bytes as an integer. 36 37 When unable to recognize the format ValueError is raised. 38 """ 39 logger.warning('Deprecated toil method. Please use "toil.lib.conversions.human2bytes()" instead."') 40 return h2b(s) 41 ``` Path: `contrib/admin/mypy-with-ignore.py` Content: ``` 1 #!/usr/bin/env python3 2 """ 3 Runs mypy and ignores files that do not yet have passing type hints. 4 5 Does not type check test files (any path including "src/toil/test"). 
6 """ 7 import os 8 import subprocess 9 import sys 10 11 os.environ['MYPYPATH'] = 'contrib/typeshed' 12 pkg_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')) # noqa 13 sys.path.insert(0, pkg_root) # noqa 14 15 from src.toil.lib.resources import glob # type: ignore 16 17 18 def main(): 19 all_files_to_check = [] 20 for d in ['dashboard', 'docker', 'docs', 'src']: 21 all_files_to_check += glob(glob_pattern='*.py', directoryname=os.path.join(pkg_root, d)) 22 23 # TODO: Remove these paths as typing is added and mypy conflicts are addressed 24 ignore_paths = [os.path.abspath(f) for f in [ 25 'docker/Dockerfile.py', 26 'docs/conf.py', 27 'docs/vendor/sphinxcontrib/fulltoc.py', 28 'docs/vendor/sphinxcontrib/__init__.py', 29 'src/toil/job.py', 30 'src/toil/leader.py', 31 'src/toil/common.py', 32 'src/toil/worker.py', 33 'src/toil/toilState.py', 34 'src/toil/__init__.py', 35 'src/toil/resource.py', 36 'src/toil/deferred.py', 37 'src/toil/version.py', 38 'src/toil/wdl/utils.py', 39 'src/toil/wdl/wdl_types.py', 40 'src/toil/wdl/wdl_synthesis.py', 41 'src/toil/wdl/wdl_analysis.py', 42 'src/toil/wdl/wdl_functions.py', 43 'src/toil/wdl/toilwdl.py', 44 'src/toil/wdl/versions/draft2.py', 45 'src/toil/wdl/versions/v1.py', 46 'src/toil/wdl/versions/dev.py', 47 'src/toil/provisioners/clusterScaler.py', 48 'src/toil/provisioners/abstractProvisioner.py', 49 'src/toil/provisioners/gceProvisioner.py', 50 'src/toil/provisioners/__init__.py', 51 'src/toil/provisioners/node.py', 52 'src/toil/provisioners/aws/boto2Context.py', 53 'src/toil/provisioners/aws/awsProvisioner.py', 54 'src/toil/provisioners/aws/__init__.py', 55 'src/toil/batchSystems/slurm.py', 56 'src/toil/batchSystems/gridengine.py', 57 'src/toil/batchSystems/singleMachine.py', 58 'src/toil/batchSystems/abstractBatchSystem.py', 59 'src/toil/batchSystems/parasol.py', 60 'src/toil/batchSystems/kubernetes.py', 61 'src/toil/batchSystems/torque.py', 62 'src/toil/batchSystems/options.py', 63 'src/toil/batchSystems/registry.py', 64 'src/toil/batchSystems/lsf.py', 65 'src/toil/batchSystems/__init__.py', 66 'src/toil/batchSystems/abstractGridEngineBatchSystem.py', 67 'src/toil/batchSystems/lsfHelper.py', 68 'src/toil/batchSystems/htcondor.py', 69 'src/toil/batchSystems/mesos/batchSystem.py', 70 'src/toil/batchSystems/mesos/executor.py', 71 'src/toil/batchSystems/mesos/conftest.py', 72 'src/toil/batchSystems/mesos/__init__.py', 73 'src/toil/batchSystems/mesos/test/__init__.py', 74 'src/toil/cwl/conftest.py', 75 'src/toil/cwl/__init__.py', 76 'src/toil/cwl/cwltoil.py', 77 'src/toil/fileStores/cachingFileStore.py', 78 'src/toil/fileStores/abstractFileStore.py', 79 'src/toil/fileStores/nonCachingFileStore.py', 80 'src/toil/fileStores/__init__.py', 81 'src/toil/jobStores/utils.py', 82 'src/toil/jobStores/conftest.py', 83 'src/toil/jobStores/fileJobStore.py', 84 'src/toil/jobStores/__init__.py', 85 'src/toil/jobStores/googleJobStore.py', 86 'src/toil/jobStores/aws/utils.py', 87 'src/toil/jobStores/aws/jobStore.py', 88 'src/toil/jobStores/aws/__init__.py', 89 'src/toil/utils/__init__.py', 90 'src/toil/lib/memoize.py', 91 'src/toil/lib/throttle.py', 92 'src/toil/lib/humanize.py', 93 'src/toil/lib/iterables.py', 94 'src/toil/lib/bioio.py', 95 'src/toil/lib/ec2.py', 96 'src/toil/lib/expando.py', 97 'src/toil/lib/exceptions.py', 98 'src/toil/lib/__init__.py', 99 'src/toil/lib/generatedEC2Lists.py', 100 'src/toil/lib/retry.py', 101 'src/toil/lib/objects.py', 102 'src/toil/lib/io.py', 103 'src/toil/lib/docker.py', 104 
'src/toil/lib/encryption/_dummy.py', 105 'src/toil/lib/encryption/conftest.py', 106 'src/toil/lib/encryption/__init__.py', 107 'src/toil/lib/aws/utils.py', 108 'src/toil/lib/aws/__init__.py' 109 ]] 110 111 filtered_files_to_check = [] 112 for file_path in all_files_to_check: 113 if file_path not in ignore_paths and 'src/toil/test' not in file_path: 114 filtered_files_to_check.append(file_path) 115 # follow-imports type checks pypi projects we don't control, so we skip it; why is this their default? 116 args = ['mypy', '--follow-imports=skip'] + filtered_files_to_check 117 p = subprocess.run(args=args, stdout=subprocess.PIPE) 118 result = p.stdout.decode() 119 print(result) 120 if 'Success: no issues found' not in result: 121 exit(1) 122 123 124 if __name__ == '__main__': 125 main() 126 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/contrib/admin/mypy-with-ignore.py b/contrib/admin/mypy-with-ignore.py --- a/contrib/admin/mypy-with-ignore.py +++ b/contrib/admin/mypy-with-ignore.py @@ -89,7 +89,6 @@ 'src/toil/utils/__init__.py', 'src/toil/lib/memoize.py', 'src/toil/lib/throttle.py', - 'src/toil/lib/humanize.py', 'src/toil/lib/iterables.py', 'src/toil/lib/bioio.py', 'src/toil/lib/ec2.py', diff --git a/src/toil/lib/humanize.py b/src/toil/lib/humanize.py --- a/src/toil/lib/humanize.py +++ b/src/toil/lib/humanize.py @@ -4,7 +4,7 @@ # http://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/ import logging -from typing import Optional, SupportsInt +from typing import SupportsInt from toil.lib.conversions import bytes2human as b2h, human2bytes as h2b """ @@ -19,17 +19,15 @@ logger = logging.getLogger(__name__) -def bytes2human(n: SupportsInt, fmt: Optional[str] = None, symbols: Optional[str] = None) -> str: +def bytes2human(n: SupportsInt) -> str: """ - Convert n bytes into a human readable string based on format. - symbols can be either "customary", "customary_ext", "iec" or "iec_ext", - see: http://goo.gl/kTQMs + Convert n bytes into a human readable string. """ logger.warning('Deprecated toil method. Please use "toil.lib.conversions.bytes2human()" instead."') return b2h(n) -def human2bytes(s): +def human2bytes(s: str) -> int: """ Attempts to guess the string format based on default symbols set and return the corresponding bytes as an integer.
{"golden_diff": "diff --git a/contrib/admin/mypy-with-ignore.py b/contrib/admin/mypy-with-ignore.py\n--- a/contrib/admin/mypy-with-ignore.py\n+++ b/contrib/admin/mypy-with-ignore.py\n@@ -89,7 +89,6 @@\n 'src/toil/utils/__init__.py',\n 'src/toil/lib/memoize.py',\n 'src/toil/lib/throttle.py',\n- 'src/toil/lib/humanize.py',\n 'src/toil/lib/iterables.py',\n 'src/toil/lib/bioio.py',\n 'src/toil/lib/ec2.py',\ndiff --git a/src/toil/lib/humanize.py b/src/toil/lib/humanize.py\n--- a/src/toil/lib/humanize.py\n+++ b/src/toil/lib/humanize.py\n@@ -4,7 +4,7 @@\n \n # http://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/\n import logging\n-from typing import Optional, SupportsInt\n+from typing import SupportsInt\n from toil.lib.conversions import bytes2human as b2h, human2bytes as h2b\n \n \"\"\"\n@@ -19,17 +19,15 @@\n logger = logging.getLogger(__name__)\n \n \n-def bytes2human(n: SupportsInt, fmt: Optional[str] = None, symbols: Optional[str] = None) -> str:\n+def bytes2human(n: SupportsInt) -> str:\n \"\"\"\n- Convert n bytes into a human readable string based on format.\n- symbols can be either \"customary\", \"customary_ext\", \"iec\" or \"iec_ext\",\n- see: http://goo.gl/kTQMs\n+ Convert n bytes into a human readable string.\n \"\"\"\n logger.warning('Deprecated toil method. Please use \"toil.lib.conversions.bytes2human()\" instead.\"')\n return b2h(n)\n \n \n-def human2bytes(s):\n+def human2bytes(s: str) -> int:\n \"\"\"\n Attempts to guess the string format based on default symbols\n set and return the corresponding bytes as an integer.\n", "issue": "Add type hints to humanize.py\nAdd type hints to `src/toil/lib/humanize.py` so it can be checked under mypy during linting.\n\nRefers to #3568.\n\n\u2506Issue is synchronized with this [Jira Task](https://ucsc-cgl.atlassian.net/browse/TOIL-946)\n\u2506Issue Number: TOIL-946\n\n", "before_files": [{"content": "# Used by cactus; now a wrapper and not used in Toil.\n# TODO: Remove from cactus and then remove from Toil.\n# See https://github.com/DataBiosphere/toil/pull/3529#discussion_r611735988\n\n# http://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/\nimport logging\nfrom typing import Optional, SupportsInt\nfrom toil.lib.conversions import bytes2human as b2h, human2bytes as h2b\n\n\"\"\"\nBytes-to-human / human-to-bytes converter.\nBased on: http://goo.gl/kTQMs\nWorking with Python 2.x and 3.x.\n\nAuthor: Giampaolo Rodola' <g.rodola [AT] gmail [DOT] com>\nLicense: MIT\n\"\"\"\n\nlogger = logging.getLogger(__name__)\n\n\ndef bytes2human(n: SupportsInt, fmt: Optional[str] = None, symbols: Optional[str] = None) -> str:\n \"\"\"\n Convert n bytes into a human readable string based on format.\n symbols can be either \"customary\", \"customary_ext\", \"iec\" or \"iec_ext\",\n see: http://goo.gl/kTQMs\n \"\"\"\n logger.warning('Deprecated toil method. Please use \"toil.lib.conversions.bytes2human()\" instead.\"')\n return b2h(n)\n\n\ndef human2bytes(s):\n \"\"\"\n Attempts to guess the string format based on default symbols\n set and return the corresponding bytes as an integer.\n\n When unable to recognize the format ValueError is raised.\n \"\"\"\n logger.warning('Deprecated toil method. 
Please use \"toil.lib.conversions.human2bytes()\" instead.\"')\n return h2b(s)\n", "path": "src/toil/lib/humanize.py"}, {"content": "#!/usr/bin/env python3\n\"\"\"\nRuns mypy and ignores files that do not yet have passing type hints.\n\nDoes not type check test files (any path including \"src/toil/test\").\n\"\"\"\nimport os\nimport subprocess\nimport sys\n\nos.environ['MYPYPATH'] = 'contrib/typeshed'\npkg_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')) # noqa\nsys.path.insert(0, pkg_root) # noqa\n\nfrom src.toil.lib.resources import glob # type: ignore\n\n\ndef main():\n all_files_to_check = []\n for d in ['dashboard', 'docker', 'docs', 'src']:\n all_files_to_check += glob(glob_pattern='*.py', directoryname=os.path.join(pkg_root, d))\n\n # TODO: Remove these paths as typing is added and mypy conflicts are addressed\n ignore_paths = [os.path.abspath(f) for f in [\n 'docker/Dockerfile.py',\n 'docs/conf.py',\n 'docs/vendor/sphinxcontrib/fulltoc.py',\n 'docs/vendor/sphinxcontrib/__init__.py',\n 'src/toil/job.py',\n 'src/toil/leader.py',\n 'src/toil/common.py',\n 'src/toil/worker.py',\n 'src/toil/toilState.py',\n 'src/toil/__init__.py',\n 'src/toil/resource.py',\n 'src/toil/deferred.py',\n 'src/toil/version.py',\n 'src/toil/wdl/utils.py',\n 'src/toil/wdl/wdl_types.py',\n 'src/toil/wdl/wdl_synthesis.py',\n 'src/toil/wdl/wdl_analysis.py',\n 'src/toil/wdl/wdl_functions.py',\n 'src/toil/wdl/toilwdl.py',\n 'src/toil/wdl/versions/draft2.py',\n 'src/toil/wdl/versions/v1.py',\n 'src/toil/wdl/versions/dev.py',\n 'src/toil/provisioners/clusterScaler.py',\n 'src/toil/provisioners/abstractProvisioner.py',\n 'src/toil/provisioners/gceProvisioner.py',\n 'src/toil/provisioners/__init__.py',\n 'src/toil/provisioners/node.py',\n 'src/toil/provisioners/aws/boto2Context.py',\n 'src/toil/provisioners/aws/awsProvisioner.py',\n 'src/toil/provisioners/aws/__init__.py',\n 'src/toil/batchSystems/slurm.py',\n 'src/toil/batchSystems/gridengine.py',\n 'src/toil/batchSystems/singleMachine.py',\n 'src/toil/batchSystems/abstractBatchSystem.py',\n 'src/toil/batchSystems/parasol.py',\n 'src/toil/batchSystems/kubernetes.py',\n 'src/toil/batchSystems/torque.py',\n 'src/toil/batchSystems/options.py',\n 'src/toil/batchSystems/registry.py',\n 'src/toil/batchSystems/lsf.py',\n 'src/toil/batchSystems/__init__.py',\n 'src/toil/batchSystems/abstractGridEngineBatchSystem.py',\n 'src/toil/batchSystems/lsfHelper.py',\n 'src/toil/batchSystems/htcondor.py',\n 'src/toil/batchSystems/mesos/batchSystem.py',\n 'src/toil/batchSystems/mesos/executor.py',\n 'src/toil/batchSystems/mesos/conftest.py',\n 'src/toil/batchSystems/mesos/__init__.py',\n 'src/toil/batchSystems/mesos/test/__init__.py',\n 'src/toil/cwl/conftest.py',\n 'src/toil/cwl/__init__.py',\n 'src/toil/cwl/cwltoil.py',\n 'src/toil/fileStores/cachingFileStore.py',\n 'src/toil/fileStores/abstractFileStore.py',\n 'src/toil/fileStores/nonCachingFileStore.py',\n 'src/toil/fileStores/__init__.py',\n 'src/toil/jobStores/utils.py',\n 'src/toil/jobStores/conftest.py',\n 'src/toil/jobStores/fileJobStore.py',\n 'src/toil/jobStores/__init__.py',\n 'src/toil/jobStores/googleJobStore.py',\n 'src/toil/jobStores/aws/utils.py',\n 'src/toil/jobStores/aws/jobStore.py',\n 'src/toil/jobStores/aws/__init__.py',\n 'src/toil/utils/__init__.py',\n 'src/toil/lib/memoize.py',\n 'src/toil/lib/throttle.py',\n 'src/toil/lib/humanize.py',\n 'src/toil/lib/iterables.py',\n 'src/toil/lib/bioio.py',\n 'src/toil/lib/ec2.py',\n 'src/toil/lib/expando.py',\n 'src/toil/lib/exceptions.py',\n 
'src/toil/lib/__init__.py',\n 'src/toil/lib/generatedEC2Lists.py',\n 'src/toil/lib/retry.py',\n 'src/toil/lib/objects.py',\n 'src/toil/lib/io.py',\n 'src/toil/lib/docker.py',\n 'src/toil/lib/encryption/_dummy.py',\n 'src/toil/lib/encryption/conftest.py',\n 'src/toil/lib/encryption/__init__.py',\n 'src/toil/lib/aws/utils.py',\n 'src/toil/lib/aws/__init__.py'\n ]]\n\n filtered_files_to_check = []\n for file_path in all_files_to_check:\n if file_path not in ignore_paths and 'src/toil/test' not in file_path:\n filtered_files_to_check.append(file_path)\n # follow-imports type checks pypi projects we don't control, so we skip it; why is this their default?\n args = ['mypy', '--follow-imports=skip'] + filtered_files_to_check\n p = subprocess.run(args=args, stdout=subprocess.PIPE)\n result = p.stdout.decode()\n print(result)\n if 'Success: no issues found' not in result:\n exit(1)\n\n\nif __name__ == '__main__':\n main()\n", "path": "contrib/admin/mypy-with-ignore.py"}], "after_files": [{"content": "# Used by cactus; now a wrapper and not used in Toil.\n# TODO: Remove from cactus and then remove from Toil.\n# See https://github.com/DataBiosphere/toil/pull/3529#discussion_r611735988\n\n# http://code.activestate.com/recipes/578019-bytes-to-human-human-to-bytes-converter/\nimport logging\nfrom typing import SupportsInt\nfrom toil.lib.conversions import bytes2human as b2h, human2bytes as h2b\n\n\"\"\"\nBytes-to-human / human-to-bytes converter.\nBased on: http://goo.gl/kTQMs\nWorking with Python 2.x and 3.x.\n\nAuthor: Giampaolo Rodola' <g.rodola [AT] gmail [DOT] com>\nLicense: MIT\n\"\"\"\n\nlogger = logging.getLogger(__name__)\n\n\ndef bytes2human(n: SupportsInt) -> str:\n \"\"\"\n Convert n bytes into a human readable string.\n \"\"\"\n logger.warning('Deprecated toil method. Please use \"toil.lib.conversions.bytes2human()\" instead.\"')\n return b2h(n)\n\n\ndef human2bytes(s: str) -> int:\n \"\"\"\n Attempts to guess the string format based on default symbols\n set and return the corresponding bytes as an integer.\n\n When unable to recognize the format ValueError is raised.\n \"\"\"\n logger.warning('Deprecated toil method. 
Please use \"toil.lib.conversions.human2bytes()\" instead.\"')\n return h2b(s)\n", "path": "src/toil/lib/humanize.py"}, {"content": "#!/usr/bin/env python3\n\"\"\"\nRuns mypy and ignores files that do not yet have passing type hints.\n\nDoes not type check test files (any path including \"src/toil/test\").\n\"\"\"\nimport os\nimport subprocess\nimport sys\n\nos.environ['MYPYPATH'] = 'contrib/typeshed'\npkg_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')) # noqa\nsys.path.insert(0, pkg_root) # noqa\n\nfrom src.toil.lib.resources import glob # type: ignore\n\n\ndef main():\n all_files_to_check = []\n for d in ['dashboard', 'docker', 'docs', 'src']:\n all_files_to_check += glob(glob_pattern='*.py', directoryname=os.path.join(pkg_root, d))\n\n # TODO: Remove these paths as typing is added and mypy conflicts are addressed\n ignore_paths = [os.path.abspath(f) for f in [\n 'docker/Dockerfile.py',\n 'docs/conf.py',\n 'docs/vendor/sphinxcontrib/fulltoc.py',\n 'docs/vendor/sphinxcontrib/__init__.py',\n 'src/toil/job.py',\n 'src/toil/leader.py',\n 'src/toil/common.py',\n 'src/toil/worker.py',\n 'src/toil/toilState.py',\n 'src/toil/__init__.py',\n 'src/toil/resource.py',\n 'src/toil/deferred.py',\n 'src/toil/version.py',\n 'src/toil/wdl/utils.py',\n 'src/toil/wdl/wdl_types.py',\n 'src/toil/wdl/wdl_synthesis.py',\n 'src/toil/wdl/wdl_analysis.py',\n 'src/toil/wdl/wdl_functions.py',\n 'src/toil/wdl/toilwdl.py',\n 'src/toil/wdl/versions/draft2.py',\n 'src/toil/wdl/versions/v1.py',\n 'src/toil/wdl/versions/dev.py',\n 'src/toil/provisioners/clusterScaler.py',\n 'src/toil/provisioners/abstractProvisioner.py',\n 'src/toil/provisioners/gceProvisioner.py',\n 'src/toil/provisioners/__init__.py',\n 'src/toil/provisioners/node.py',\n 'src/toil/provisioners/aws/boto2Context.py',\n 'src/toil/provisioners/aws/awsProvisioner.py',\n 'src/toil/provisioners/aws/__init__.py',\n 'src/toil/batchSystems/slurm.py',\n 'src/toil/batchSystems/gridengine.py',\n 'src/toil/batchSystems/singleMachine.py',\n 'src/toil/batchSystems/abstractBatchSystem.py',\n 'src/toil/batchSystems/parasol.py',\n 'src/toil/batchSystems/kubernetes.py',\n 'src/toil/batchSystems/torque.py',\n 'src/toil/batchSystems/options.py',\n 'src/toil/batchSystems/registry.py',\n 'src/toil/batchSystems/lsf.py',\n 'src/toil/batchSystems/__init__.py',\n 'src/toil/batchSystems/abstractGridEngineBatchSystem.py',\n 'src/toil/batchSystems/lsfHelper.py',\n 'src/toil/batchSystems/htcondor.py',\n 'src/toil/batchSystems/mesos/batchSystem.py',\n 'src/toil/batchSystems/mesos/executor.py',\n 'src/toil/batchSystems/mesos/conftest.py',\n 'src/toil/batchSystems/mesos/__init__.py',\n 'src/toil/batchSystems/mesos/test/__init__.py',\n 'src/toil/cwl/conftest.py',\n 'src/toil/cwl/__init__.py',\n 'src/toil/cwl/cwltoil.py',\n 'src/toil/fileStores/cachingFileStore.py',\n 'src/toil/fileStores/abstractFileStore.py',\n 'src/toil/fileStores/nonCachingFileStore.py',\n 'src/toil/fileStores/__init__.py',\n 'src/toil/jobStores/utils.py',\n 'src/toil/jobStores/conftest.py',\n 'src/toil/jobStores/fileJobStore.py',\n 'src/toil/jobStores/__init__.py',\n 'src/toil/jobStores/googleJobStore.py',\n 'src/toil/jobStores/aws/utils.py',\n 'src/toil/jobStores/aws/jobStore.py',\n 'src/toil/jobStores/aws/__init__.py',\n 'src/toil/utils/__init__.py',\n 'src/toil/lib/memoize.py',\n 'src/toil/lib/throttle.py',\n 'src/toil/lib/iterables.py',\n 'src/toil/lib/bioio.py',\n 'src/toil/lib/ec2.py',\n 'src/toil/lib/expando.py',\n 'src/toil/lib/exceptions.py',\n 'src/toil/lib/__init__.py',\n 
'src/toil/lib/generatedEC2Lists.py',\n 'src/toil/lib/retry.py',\n 'src/toil/lib/objects.py',\n 'src/toil/lib/io.py',\n 'src/toil/lib/docker.py',\n 'src/toil/lib/encryption/_dummy.py',\n 'src/toil/lib/encryption/conftest.py',\n 'src/toil/lib/encryption/__init__.py',\n 'src/toil/lib/aws/utils.py',\n 'src/toil/lib/aws/__init__.py'\n ]]\n\n filtered_files_to_check = []\n for file_path in all_files_to_check:\n if file_path not in ignore_paths and 'src/toil/test' not in file_path:\n filtered_files_to_check.append(file_path)\n # follow-imports type checks pypi projects we don't control, so we skip it; why is this their default?\n args = ['mypy', '--follow-imports=skip'] + filtered_files_to_check\n p = subprocess.run(args=args, stdout=subprocess.PIPE)\n result = p.stdout.decode()\n print(result)\n if 'Success: no issues found' not in result:\n exit(1)\n\n\nif __name__ == '__main__':\n main()\n", "path": "contrib/admin/mypy-with-ignore.py"}]}
num_tokens: 2,445
num_tokens_diff: 460

problem_id: gh_patches_debug_17990
source: rasdani/github-patches
task_type: git_diff
in_source_id: rotki__rotki-3143
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Upgrading DB from v26->v27 can fail if user balancer LP events stored in their DB ## Problem Definition A user who upgraded from v1.16.2 to v1.18.1 notified us that they saw a DB upgrade failure from v26->v27. Which means the app versions v1.17.2 to v1.18.0. Turns out that for specific user DBs who have had some Balancer LP events detected and had both the balancer events and the balancer pools DB table populated the DB upgrade would fail, since the upgrade deletes the balancer pools table first, hence possibly hitting a constraint. ## Workaround Workaround is rather easy. Download v1.17.0-v1.17.2, since that can open v26 DB, purge all uniswap and balancer data, and then open with v1.18.XX. ## Task Fix the upgrade so that this does not occur even for this special case of users. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `rotkehlchen/db/upgrades/v26_v27.py` Content: ``` 1 from typing import TYPE_CHECKING 2 3 if TYPE_CHECKING: 4 from rotkehlchen.db.dbhandler import DBHandler 5 6 7 def upgrade_v26_to_v27(db: 'DBHandler') -> None: 8 """Upgrades the DB from v26 to v27 9 10 - Deletes and recreates the tables that were changed after removing UnknownEthereumToken 11 """ 12 cursor = db.conn.cursor() 13 cursor.execute('DROP TABLE IF EXISTS balancer_pools;') 14 15 cursor.execute('DROP TABLE IF EXISTS balancer_events;') 16 cursor.execute(""" 17 CREATE TABLE IF NOT EXISTS balancer_events ( 18 tx_hash VARCHAR[42] NOT NULL, 19 log_index INTEGER NOT NULL, 20 address VARCHAR[42] NOT NULL, 21 timestamp INTEGER NOT NULL, 22 type TEXT NOT NULL, 23 pool_address_token TEXT NOT NULL, 24 lp_amount TEXT NOT NULL, 25 usd_value TEXT NOT NULL, 26 amount0 TEXT NOT NULL, 27 amount1 TEXT NOT NULL, 28 amount2 TEXT, 29 amount3 TEXT, 30 amount4 TEXT, 31 amount5 TEXT, 32 amount6 TEXT, 33 amount7 TEXT, 34 FOREIGN KEY (pool_address_token) REFERENCES assets(identifier) ON UPDATE CASCADE, 35 PRIMARY KEY (tx_hash, log_index) 36 ); 37 """) 38 cursor.execute('DELETE FROM used_query_ranges WHERE name LIKE "balancer_events%";') 39 40 cursor.execute('DROP TABLE IF EXISTS amm_swaps;') 41 cursor.execute(""" 42 CREATE TABLE IF NOT EXISTS amm_swaps ( 43 tx_hash VARCHAR[42] NOT NULL, 44 log_index INTEGER NOT NULL, 45 address VARCHAR[42] NOT NULL, 46 from_address VARCHAR[42] NOT NULL, 47 to_address VARCHAR[42] NOT NULL, 48 timestamp INTEGER NOT NULL, 49 location CHAR(1) NOT NULL DEFAULT('A') REFERENCES location(location), 50 token0_identifier TEXT NOT NULL, 51 token1_identifier TEXT NOT NULL, 52 amount0_in TEXT, 53 amount1_in TEXT, 54 amount0_out TEXT, 55 amount1_out TEXT, 56 FOREIGN KEY(token0_identifier) REFERENCES assets(identifier) ON UPDATE CASCADE, 57 FOREIGN KEY(token1_identifier) REFERENCES assets(identifier) ON UPDATE CASCADE, 58 PRIMARY KEY (tx_hash, log_index) 59 );""") 60 cursor.execute('DELETE FROM used_query_ranges WHERE name LIKE "balancer_trades%";') 61 cursor.execute('DELETE FROM used_query_ranges WHERE name LIKE "uniswap_trades%";') 62 63 cursor.execute('DROP TABLE IF EXISTS uniswap_events;') 64 cursor.execute(""" 65 CREATE TABLE IF NOT EXISTS uniswap_events ( 66 tx_hash VARCHAR[42] NOT NULL, 67 log_index INTEGER NOT NULL, 68 address VARCHAR[42] NOT NULL, 69 timestamp INTEGER NOT NULL, 70 type TEXT NOT NULL, 71 pool_address VARCHAR[42] NOT NULL, 72 token0_identifier TEXT NOT NULL, 73 token1_identifier TEXT NOT 
NULL, 74 amount0 TEXT, 75 amount1 TEXT, 76 usd_price TEXT, 77 lp_amount TEXT, 78 FOREIGN KEY(token0_identifier) REFERENCES assets(identifier) ON UPDATE CASCADE, 79 FOREIGN KEY(token1_identifier) REFERENCES assets(identifier) ON UPDATE CASCADE, 80 PRIMARY KEY (tx_hash, log_index) 81 );""") 82 cursor.execute('DELETE FROM used_query_ranges WHERE name LIKE "uniswap_events%";') 83 84 db.conn.commit() 85 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/rotkehlchen/db/upgrades/v26_v27.py b/rotkehlchen/db/upgrades/v26_v27.py --- a/rotkehlchen/db/upgrades/v26_v27.py +++ b/rotkehlchen/db/upgrades/v26_v27.py @@ -10,8 +10,6 @@ - Deletes and recreates the tables that were changed after removing UnknownEthereumToken """ cursor = db.conn.cursor() - cursor.execute('DROP TABLE IF EXISTS balancer_pools;') - cursor.execute('DROP TABLE IF EXISTS balancer_events;') cursor.execute(""" CREATE TABLE IF NOT EXISTS balancer_events ( @@ -35,6 +33,7 @@ PRIMARY KEY (tx_hash, log_index) ); """) + cursor.execute('DROP TABLE IF EXISTS balancer_pools;') cursor.execute('DELETE FROM used_query_ranges WHERE name LIKE "balancer_events%";') cursor.execute('DROP TABLE IF EXISTS amm_swaps;')
{"golden_diff": "diff --git a/rotkehlchen/db/upgrades/v26_v27.py b/rotkehlchen/db/upgrades/v26_v27.py\n--- a/rotkehlchen/db/upgrades/v26_v27.py\n+++ b/rotkehlchen/db/upgrades/v26_v27.py\n@@ -10,8 +10,6 @@\n - Deletes and recreates the tables that were changed after removing UnknownEthereumToken\n \"\"\"\n cursor = db.conn.cursor()\n- cursor.execute('DROP TABLE IF EXISTS balancer_pools;')\n-\n cursor.execute('DROP TABLE IF EXISTS balancer_events;')\n cursor.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS balancer_events (\n@@ -35,6 +33,7 @@\n PRIMARY KEY (tx_hash, log_index)\n );\n \"\"\")\n+ cursor.execute('DROP TABLE IF EXISTS balancer_pools;')\n cursor.execute('DELETE FROM used_query_ranges WHERE name LIKE \"balancer_events%\";')\n \n cursor.execute('DROP TABLE IF EXISTS amm_swaps;')\n", "issue": "Upgrading DB from v26->v27 can fail if user balancer LP events stored in their DB\n## Problem Definition\r\n\r\nA user who upgraded from v1.16.2 to v1.18.1 notified us that they saw a DB upgrade failure from v26->v27. Which means the app versions v1.17.2 to v1.18.0.\r\n\r\nTurns out that for specific user DBs who have had some Balancer LP events detected and had both the balancer events and the balancer pools DB table populated the DB upgrade would fail, since the upgrade deletes the balancer pools table first, hence possibly hitting a constraint.\r\n\r\n## Workaround\r\n\r\nWorkaround is rather easy. Download v1.17.0-v1.17.2, since that can open v26 DB, purge all uniswap and balancer data, and then open with v1.18.XX.\r\n\r\n## Task\r\n\r\nFix the upgrade so that this does not occur even for this special case of users.\n", "before_files": [{"content": "from typing import TYPE_CHECKING\n\nif TYPE_CHECKING:\n from rotkehlchen.db.dbhandler import DBHandler\n\n\ndef upgrade_v26_to_v27(db: 'DBHandler') -> None:\n \"\"\"Upgrades the DB from v26 to v27\n\n - Deletes and recreates the tables that were changed after removing UnknownEthereumToken\n \"\"\"\n cursor = db.conn.cursor()\n cursor.execute('DROP TABLE IF EXISTS balancer_pools;')\n\n cursor.execute('DROP TABLE IF EXISTS balancer_events;')\n cursor.execute(\"\"\"\nCREATE TABLE IF NOT EXISTS balancer_events (\n tx_hash VARCHAR[42] NOT NULL,\n log_index INTEGER NOT NULL,\n address VARCHAR[42] NOT NULL,\n timestamp INTEGER NOT NULL,\n type TEXT NOT NULL,\n pool_address_token TEXT NOT NULL,\n lp_amount TEXT NOT NULL,\n usd_value TEXT NOT NULL,\n amount0 TEXT NOT NULL,\n amount1 TEXT NOT NULL,\n amount2 TEXT,\n amount3 TEXT,\n amount4 TEXT,\n amount5 TEXT,\n amount6 TEXT,\n amount7 TEXT,\n FOREIGN KEY (pool_address_token) REFERENCES assets(identifier) ON UPDATE CASCADE,\n PRIMARY KEY (tx_hash, log_index)\n);\n\"\"\")\n cursor.execute('DELETE FROM used_query_ranges WHERE name LIKE \"balancer_events%\";')\n\n cursor.execute('DROP TABLE IF EXISTS amm_swaps;')\n cursor.execute(\"\"\"\nCREATE TABLE IF NOT EXISTS amm_swaps (\n tx_hash VARCHAR[42] NOT NULL,\n log_index INTEGER NOT NULL,\n address VARCHAR[42] NOT NULL,\n from_address VARCHAR[42] NOT NULL,\n to_address VARCHAR[42] NOT NULL,\n timestamp INTEGER NOT NULL,\n location CHAR(1) NOT NULL DEFAULT('A') REFERENCES location(location),\n token0_identifier TEXT NOT NULL,\n token1_identifier TEXT NOT NULL,\n amount0_in TEXT,\n amount1_in TEXT,\n amount0_out TEXT,\n amount1_out TEXT,\n FOREIGN KEY(token0_identifier) REFERENCES assets(identifier) ON UPDATE CASCADE,\n FOREIGN KEY(token1_identifier) REFERENCES assets(identifier) ON UPDATE CASCADE,\n PRIMARY KEY (tx_hash, log_index)\n);\"\"\")\n 
cursor.execute('DELETE FROM used_query_ranges WHERE name LIKE \"balancer_trades%\";')\n cursor.execute('DELETE FROM used_query_ranges WHERE name LIKE \"uniswap_trades%\";')\n\n cursor.execute('DROP TABLE IF EXISTS uniswap_events;')\n cursor.execute(\"\"\"\nCREATE TABLE IF NOT EXISTS uniswap_events (\n tx_hash VARCHAR[42] NOT NULL,\n log_index INTEGER NOT NULL,\n address VARCHAR[42] NOT NULL,\n timestamp INTEGER NOT NULL,\n type TEXT NOT NULL,\n pool_address VARCHAR[42] NOT NULL,\n token0_identifier TEXT NOT NULL,\n token1_identifier TEXT NOT NULL,\n amount0 TEXT,\n amount1 TEXT,\n usd_price TEXT,\n lp_amount TEXT,\n FOREIGN KEY(token0_identifier) REFERENCES assets(identifier) ON UPDATE CASCADE,\n FOREIGN KEY(token1_identifier) REFERENCES assets(identifier) ON UPDATE CASCADE,\n PRIMARY KEY (tx_hash, log_index)\n);\"\"\")\n cursor.execute('DELETE FROM used_query_ranges WHERE name LIKE \"uniswap_events%\";')\n\n db.conn.commit()\n", "path": "rotkehlchen/db/upgrades/v26_v27.py"}], "after_files": [{"content": "from typing import TYPE_CHECKING\n\nif TYPE_CHECKING:\n from rotkehlchen.db.dbhandler import DBHandler\n\n\ndef upgrade_v26_to_v27(db: 'DBHandler') -> None:\n \"\"\"Upgrades the DB from v26 to v27\n\n - Deletes and recreates the tables that were changed after removing UnknownEthereumToken\n \"\"\"\n cursor = db.conn.cursor()\n cursor.execute('DROP TABLE IF EXISTS balancer_events;')\n cursor.execute(\"\"\"\nCREATE TABLE IF NOT EXISTS balancer_events (\n tx_hash VARCHAR[42] NOT NULL,\n log_index INTEGER NOT NULL,\n address VARCHAR[42] NOT NULL,\n timestamp INTEGER NOT NULL,\n type TEXT NOT NULL,\n pool_address_token TEXT NOT NULL,\n lp_amount TEXT NOT NULL,\n usd_value TEXT NOT NULL,\n amount0 TEXT NOT NULL,\n amount1 TEXT NOT NULL,\n amount2 TEXT,\n amount3 TEXT,\n amount4 TEXT,\n amount5 TEXT,\n amount6 TEXT,\n amount7 TEXT,\n FOREIGN KEY (pool_address_token) REFERENCES assets(identifier) ON UPDATE CASCADE,\n PRIMARY KEY (tx_hash, log_index)\n);\n\"\"\")\n cursor.execute('DROP TABLE IF EXISTS balancer_pools;')\n cursor.execute('DELETE FROM used_query_ranges WHERE name LIKE \"balancer_events%\";')\n\n cursor.execute('DROP TABLE IF EXISTS amm_swaps;')\n cursor.execute(\"\"\"\nCREATE TABLE IF NOT EXISTS amm_swaps (\n tx_hash VARCHAR[42] NOT NULL,\n log_index INTEGER NOT NULL,\n address VARCHAR[42] NOT NULL,\n from_address VARCHAR[42] NOT NULL,\n to_address VARCHAR[42] NOT NULL,\n timestamp INTEGER NOT NULL,\n location CHAR(1) NOT NULL DEFAULT('A') REFERENCES location(location),\n token0_identifier TEXT NOT NULL,\n token1_identifier TEXT NOT NULL,\n amount0_in TEXT,\n amount1_in TEXT,\n amount0_out TEXT,\n amount1_out TEXT,\n FOREIGN KEY(token0_identifier) REFERENCES assets(identifier) ON UPDATE CASCADE,\n FOREIGN KEY(token1_identifier) REFERENCES assets(identifier) ON UPDATE CASCADE,\n PRIMARY KEY (tx_hash, log_index)\n);\"\"\")\n cursor.execute('DELETE FROM used_query_ranges WHERE name LIKE \"balancer_trades%\";')\n cursor.execute('DELETE FROM used_query_ranges WHERE name LIKE \"uniswap_trades%\";')\n\n cursor.execute('DROP TABLE IF EXISTS uniswap_events;')\n cursor.execute(\"\"\"\nCREATE TABLE IF NOT EXISTS uniswap_events (\n tx_hash VARCHAR[42] NOT NULL,\n log_index INTEGER NOT NULL,\n address VARCHAR[42] NOT NULL,\n timestamp INTEGER NOT NULL,\n type TEXT NOT NULL,\n pool_address VARCHAR[42] NOT NULL,\n token0_identifier TEXT NOT NULL,\n token1_identifier TEXT NOT NULL,\n amount0 TEXT,\n amount1 TEXT,\n usd_price TEXT,\n lp_amount TEXT,\n FOREIGN KEY(token0_identifier) REFERENCES 
assets(identifier) ON UPDATE CASCADE,\n FOREIGN KEY(token1_identifier) REFERENCES assets(identifier) ON UPDATE CASCADE,\n PRIMARY KEY (tx_hash, log_index)\n);\"\"\")\n cursor.execute('DELETE FROM used_query_ranges WHERE name LIKE \"uniswap_events%\";')\n\n db.conn.commit()\n", "path": "rotkehlchen/db/upgrades/v26_v27.py"}]}
num_tokens: 1,344
num_tokens_diff: 228

problem_id: gh_patches_debug_27803
source: rasdani/github-patches
task_type: git_diff
in_source_id: facebookresearch__hydra-614
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Option to access hydra configs from interpolation The hydra config node is removed from the tree before the program stats, which makes it impossible to rely on it at runtime. The reason it's removed is that otherwise the user config would be very large even for a simple program. That config however is still available at runtime through the HydraConfig singleston. The idea here is to simply register a resolver function by the name of hydra, which would provide access to hydra config. User usage would look like: ```yaml gpu_id: ${hydra:job.num} ``` This can be useful for accessing everything inside the hydra config, including parameters only available at runtime. In addition, provide an option to call to_absolute_dir from interpolations. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `hydra/core/utils.py` Content: ``` 1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved 2 import copy 3 import logging 4 import os 5 import re 6 import sys 7 import warnings 8 from contextlib import contextmanager 9 from dataclasses import dataclass 10 from os.path import basename, dirname, splitext 11 from pathlib import Path 12 from time import localtime, strftime 13 from typing import Any, Dict, Optional, Sequence, Tuple, Union 14 15 from omegaconf import DictConfig, OmegaConf, open_dict 16 17 from hydra.core.hydra_config import HydraConfig 18 from hydra.core.singleton import Singleton 19 from hydra.types import TaskFunction 20 21 log = logging.getLogger(__name__) 22 23 24 def configure_log( 25 log_config: DictConfig, verbose_config: Union[bool, str, Sequence[str]] 26 ) -> None: 27 assert isinstance(verbose_config, (bool, str)) or OmegaConf.is_list(verbose_config) 28 if log_config is not None: 29 conf: Dict[str, Any] = OmegaConf.to_container( # type: ignore 30 log_config, resolve=True 31 ) 32 logging.config.dictConfig(conf) 33 else: 34 # default logging to stdout 35 root = logging.getLogger() 36 root.setLevel(logging.INFO) 37 handler = logging.StreamHandler(sys.stdout) 38 formatter = logging.Formatter( 39 "[%(asctime)s][%(name)s][%(levelname)s] - %(message)s" 40 ) 41 handler.setFormatter(formatter) 42 root.addHandler(handler) 43 if isinstance(verbose_config, bool): 44 if verbose_config: 45 logging.getLogger().setLevel(logging.DEBUG) 46 else: 47 if isinstance(verbose_config, str): 48 verbose_list = OmegaConf.create([verbose_config]) 49 elif OmegaConf.is_list(verbose_config): 50 verbose_list = verbose_config # type: ignore 51 else: 52 assert False 53 54 for logger in verbose_list: 55 logging.getLogger(logger).setLevel(logging.DEBUG) 56 57 58 def _save_config(cfg: DictConfig, filename: str, output_dir: Path) -> None: 59 output_dir.mkdir(parents=True, exist_ok=True) 60 with open(str(output_dir / filename), "w") as file: 61 file.write(cfg.pretty()) 62 63 64 def filter_overrides(overrides: Sequence[str]) -> Sequence[str]: 65 """ 66 :param overrides: overrides list 67 :return: returning a new overrides list with all the keys starting with hydra. filtered. 
68 """ 69 return [x for x in overrides if not x.startswith("hydra.")] 70 71 72 def run_job( 73 config: DictConfig, 74 task_function: TaskFunction, 75 job_dir_key: str, 76 job_subdir_key: Optional[str], 77 ) -> "JobReturn": 78 old_cwd = os.getcwd() 79 working_dir = str(OmegaConf.select(config, job_dir_key)) 80 if job_subdir_key is not None: 81 # evaluate job_subdir_key lazily. 82 # this is running on the client side in sweep and contains things such as job:id which 83 # are only available there. 84 subdir = str(OmegaConf.select(config, job_subdir_key)) 85 working_dir = os.path.join(working_dir, subdir) 86 try: 87 ret = JobReturn() 88 ret.working_dir = working_dir 89 task_cfg = copy.deepcopy(config) 90 with open_dict(task_cfg): 91 del task_cfg["hydra"] 92 ret.cfg = task_cfg 93 ret.hydra_cfg = OmegaConf.create({"hydra": HydraConfig.get()}) 94 overrides = OmegaConf.to_container(config.hydra.overrides.task) 95 assert isinstance(overrides, list) 96 ret.overrides = overrides 97 # handle output directories here 98 Path(str(working_dir)).mkdir(parents=True, exist_ok=True) 99 os.chdir(working_dir) 100 hydra_output = Path(config.hydra.output_subdir) 101 102 configure_log(config.hydra.job_logging, config.hydra.verbose) 103 104 hydra_cfg = OmegaConf.masked_copy(config, "hydra") 105 assert isinstance(hydra_cfg, DictConfig) 106 107 _save_config(task_cfg, "config.yaml", hydra_output) 108 _save_config(hydra_cfg, "hydra.yaml", hydra_output) 109 _save_config(config.hydra.overrides.task, "overrides.yaml", hydra_output) 110 with env_override(hydra_cfg.hydra.job.env_set): 111 ret.return_value = task_function(task_cfg) 112 ret.task_name = JobRuntime.instance().get("name") 113 114 # shut down logging to ensure job log files are closed. 115 # If logging is still required after run_job caller is responsible to re-initialize it. 116 logging.shutdown() 117 118 return ret 119 finally: 120 os.chdir(old_cwd) 121 122 123 def get_valid_filename(s: str) -> str: 124 s = str(s).strip().replace(" ", "_") 125 return re.sub(r"(?u)[^-\w.]", "", s) 126 127 128 def setup_globals() -> None: 129 try: 130 OmegaConf.register_resolver( 131 "now", lambda pattern: strftime(pattern, localtime()) 132 ) 133 except AssertionError: 134 # calling it again in no_workers mode will throw. safe to ignore. 
135 pass 136 137 138 @dataclass 139 class JobReturn: 140 overrides: Optional[Sequence[str]] = None 141 return_value: Any = None 142 cfg: Optional[DictConfig] = None 143 hydra_cfg: Optional[DictConfig] = None 144 working_dir: Optional[str] = None 145 task_name: Optional[str] = None 146 147 148 class JobRuntime(metaclass=Singleton): 149 def __init__(self) -> None: 150 self.conf: DictConfig = OmegaConf.create() 151 self.set("name", "UNKNOWN_NAME") 152 153 def get(self, key: str) -> Any: 154 ret = OmegaConf.select(self.conf, key) 155 if ret is None: 156 raise KeyError(f"Key not found in {type(self).__name__}: {key}") 157 return ret 158 159 def set(self, key: str, value: Any) -> None: 160 log.debug(f"Setting {type(self).__name__}:{key}={value}") 161 self.conf[key] = value 162 163 164 def split_config_path( 165 config_path: Optional[str], config_name: Optional[str] 166 ) -> Tuple[Optional[str], Optional[str]]: 167 if config_path is None or config_path == "": 168 return None, config_name 169 split_file = splitext(config_path) 170 if split_file[1] in (".yaml", ".yml"): 171 # assuming dir/config.yaml form 172 config_file: Optional[str] = basename(config_path) 173 config_dir: Optional[str] = dirname(config_path) 174 msg = ( 175 "\nUsing config_path to specify the config name is deprecated, specify the config name via config_name" 176 "\nSee https://hydra.cc/next/upgrades/0.11_to_1.0/config_path_changes" 177 ) 178 warnings.warn( 179 category=UserWarning, message=msg, 180 ) 181 else: 182 # assuming dir form without a config file. 183 config_file = None 184 config_dir = config_path 185 186 if config_dir == "": 187 config_dir = None 188 189 if config_file == "": 190 config_file = None 191 192 if config_file is not None: 193 if config_name is not None: 194 raise ValueError( 195 "Config name should be specified in either normalized_config_path or config_name, but not both" 196 ) 197 config_name = config_file 198 199 return config_dir, config_name 200 201 202 @contextmanager 203 def env_override(env: Dict[str, str]) -> Any: 204 """Temporarily set environment variables inside the context manager and 205 fully restore previous environment afterwards 206 """ 207 original_env = {key: os.getenv(key) for key in env} 208 os.environ.update(env) 209 try: 210 yield 211 finally: 212 for key, value in original_env.items(): 213 if value is None: 214 del os.environ[key] 215 else: 216 os.environ[key] = value 217 ``` Path: `hydra/core/hydra_config.py` Content: ``` 1 # Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved 2 from typing import Any, Optional 3 4 from omegaconf import DictConfig, OmegaConf 5 6 from hydra.conf import HydraConf 7 from hydra.core.singleton import Singleton 8 9 10 class HydraConfig(metaclass=Singleton): 11 def __init__(self) -> None: 12 self.cfg: Optional[HydraConf] = None 13 14 def set_config(self, cfg: DictConfig) -> None: 15 assert cfg is not None 16 OmegaConf.set_readonly(cfg.hydra, True) 17 assert OmegaConf.get_type(cfg, "hydra") == HydraConf 18 self.cfg = cfg # type: ignore 19 20 @staticmethod 21 def get() -> HydraConf: 22 instance = HydraConfig.instance() 23 if instance.cfg is None: 24 raise ValueError("HydraConfig was not set") 25 return instance.cfg.hydra # type: ignore 26 27 @staticmethod 28 def initialized() -> bool: 29 instance = HydraConfig.instance() 30 return instance.cfg is not None 31 32 @staticmethod 33 def instance(*args: Any, **kwargs: Any) -> "HydraConfig": 34 return Singleton.instance(HydraConfig, *args, **kwargs) # type: ignore 35 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/hydra/core/hydra_config.py b/hydra/core/hydra_config.py --- a/hydra/core/hydra_config.py +++ b/hydra/core/hydra_config.py @@ -15,7 +15,7 @@ assert cfg is not None OmegaConf.set_readonly(cfg.hydra, True) assert OmegaConf.get_type(cfg, "hydra") == HydraConf - self.cfg = cfg # type: ignore + self.cfg = OmegaConf.masked_copy(cfg, "hydra") # type: ignore @staticmethod def get() -> HydraConf: diff --git a/hydra/core/utils.py b/hydra/core/utils.py --- a/hydra/core/utils.py +++ b/hydra/core/utils.py @@ -10,7 +10,7 @@ from os.path import basename, dirname, splitext from pathlib import Path from time import localtime, strftime -from typing import Any, Dict, Optional, Sequence, Tuple, Union +from typing import Any, Dict, Optional, Sequence, Tuple, Union, cast from omegaconf import DictConfig, OmegaConf, open_dict @@ -126,13 +126,18 @@ def setup_globals() -> None: - try: - OmegaConf.register_resolver( - "now", lambda pattern: strftime(pattern, localtime()) - ) - except AssertionError: - # calling it again in no_workers mode will throw. safe to ignore. - pass + def register(name: str, f: Any) -> None: + try: + OmegaConf.register_resolver(name, f) + except AssertionError: + # calling it again in no_workers mode will throw. safe to ignore. + pass + + register("now", lambda pattern: strftime(pattern, localtime())) + register( + "hydra", + lambda path: OmegaConf.select(cast(DictConfig, HydraConfig.get()), path), + ) @dataclass
{"golden_diff": "diff --git a/hydra/core/hydra_config.py b/hydra/core/hydra_config.py\n--- a/hydra/core/hydra_config.py\n+++ b/hydra/core/hydra_config.py\n@@ -15,7 +15,7 @@\n assert cfg is not None\n OmegaConf.set_readonly(cfg.hydra, True)\n assert OmegaConf.get_type(cfg, \"hydra\") == HydraConf\n- self.cfg = cfg # type: ignore\n+ self.cfg = OmegaConf.masked_copy(cfg, \"hydra\") # type: ignore\n \n @staticmethod\n def get() -> HydraConf:\ndiff --git a/hydra/core/utils.py b/hydra/core/utils.py\n--- a/hydra/core/utils.py\n+++ b/hydra/core/utils.py\n@@ -10,7 +10,7 @@\n from os.path import basename, dirname, splitext\n from pathlib import Path\n from time import localtime, strftime\n-from typing import Any, Dict, Optional, Sequence, Tuple, Union\n+from typing import Any, Dict, Optional, Sequence, Tuple, Union, cast\n \n from omegaconf import DictConfig, OmegaConf, open_dict\n \n@@ -126,13 +126,18 @@\n \n \n def setup_globals() -> None:\n- try:\n- OmegaConf.register_resolver(\n- \"now\", lambda pattern: strftime(pattern, localtime())\n- )\n- except AssertionError:\n- # calling it again in no_workers mode will throw. safe to ignore.\n- pass\n+ def register(name: str, f: Any) -> None:\n+ try:\n+ OmegaConf.register_resolver(name, f)\n+ except AssertionError:\n+ # calling it again in no_workers mode will throw. safe to ignore.\n+ pass\n+\n+ register(\"now\", lambda pattern: strftime(pattern, localtime()))\n+ register(\n+ \"hydra\",\n+ lambda path: OmegaConf.select(cast(DictConfig, HydraConfig.get()), path),\n+ )\n \n \n @dataclass\n", "issue": "Option to access hydra configs from interpolation\nThe hydra config node is removed from the tree before the program stats, which makes it impossible to rely on it at runtime.\r\nThe reason it's removed is that otherwise the user config would be very large even for a simple program.\r\nThat config however is still available at runtime through the HydraConfig singleston.\r\n\r\nThe idea here is to simply register a resolver function by the name of hydra, which would provide access to hydra config.\r\n\r\nUser usage would look like:\r\n```yaml\r\ngpu_id: ${hydra:job.num}\r\n```\r\n\r\nThis can be useful for accessing everything inside the hydra config, including parameters only available at runtime.\r\n\r\nIn addition, provide an option to call to_absolute_dir from interpolations.\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\nimport copy\nimport logging\nimport os\nimport re\nimport sys\nimport warnings\nfrom contextlib import contextmanager\nfrom dataclasses import dataclass\nfrom os.path import basename, dirname, splitext\nfrom pathlib import Path\nfrom time import localtime, strftime\nfrom typing import Any, Dict, Optional, Sequence, Tuple, Union\n\nfrom omegaconf import DictConfig, OmegaConf, open_dict\n\nfrom hydra.core.hydra_config import HydraConfig\nfrom hydra.core.singleton import Singleton\nfrom hydra.types import TaskFunction\n\nlog = logging.getLogger(__name__)\n\n\ndef configure_log(\n log_config: DictConfig, verbose_config: Union[bool, str, Sequence[str]]\n) -> None:\n assert isinstance(verbose_config, (bool, str)) or OmegaConf.is_list(verbose_config)\n if log_config is not None:\n conf: Dict[str, Any] = OmegaConf.to_container( # type: ignore\n log_config, resolve=True\n )\n logging.config.dictConfig(conf)\n else:\n # default logging to stdout\n root = logging.getLogger()\n root.setLevel(logging.INFO)\n handler = logging.StreamHandler(sys.stdout)\n formatter = logging.Formatter(\n \"[%(asctime)s][%(name)s][%(levelname)s] - %(message)s\"\n )\n handler.setFormatter(formatter)\n root.addHandler(handler)\n if isinstance(verbose_config, bool):\n if verbose_config:\n logging.getLogger().setLevel(logging.DEBUG)\n else:\n if isinstance(verbose_config, str):\n verbose_list = OmegaConf.create([verbose_config])\n elif OmegaConf.is_list(verbose_config):\n verbose_list = verbose_config # type: ignore\n else:\n assert False\n\n for logger in verbose_list:\n logging.getLogger(logger).setLevel(logging.DEBUG)\n\n\ndef _save_config(cfg: DictConfig, filename: str, output_dir: Path) -> None:\n output_dir.mkdir(parents=True, exist_ok=True)\n with open(str(output_dir / filename), \"w\") as file:\n file.write(cfg.pretty())\n\n\ndef filter_overrides(overrides: Sequence[str]) -> Sequence[str]:\n \"\"\"\n :param overrides: overrides list\n :return: returning a new overrides list with all the keys starting with hydra. 
filtered.\n \"\"\"\n return [x for x in overrides if not x.startswith(\"hydra.\")]\n\n\ndef run_job(\n config: DictConfig,\n task_function: TaskFunction,\n job_dir_key: str,\n job_subdir_key: Optional[str],\n) -> \"JobReturn\":\n old_cwd = os.getcwd()\n working_dir = str(OmegaConf.select(config, job_dir_key))\n if job_subdir_key is not None:\n # evaluate job_subdir_key lazily.\n # this is running on the client side in sweep and contains things such as job:id which\n # are only available there.\n subdir = str(OmegaConf.select(config, job_subdir_key))\n working_dir = os.path.join(working_dir, subdir)\n try:\n ret = JobReturn()\n ret.working_dir = working_dir\n task_cfg = copy.deepcopy(config)\n with open_dict(task_cfg):\n del task_cfg[\"hydra\"]\n ret.cfg = task_cfg\n ret.hydra_cfg = OmegaConf.create({\"hydra\": HydraConfig.get()})\n overrides = OmegaConf.to_container(config.hydra.overrides.task)\n assert isinstance(overrides, list)\n ret.overrides = overrides\n # handle output directories here\n Path(str(working_dir)).mkdir(parents=True, exist_ok=True)\n os.chdir(working_dir)\n hydra_output = Path(config.hydra.output_subdir)\n\n configure_log(config.hydra.job_logging, config.hydra.verbose)\n\n hydra_cfg = OmegaConf.masked_copy(config, \"hydra\")\n assert isinstance(hydra_cfg, DictConfig)\n\n _save_config(task_cfg, \"config.yaml\", hydra_output)\n _save_config(hydra_cfg, \"hydra.yaml\", hydra_output)\n _save_config(config.hydra.overrides.task, \"overrides.yaml\", hydra_output)\n with env_override(hydra_cfg.hydra.job.env_set):\n ret.return_value = task_function(task_cfg)\n ret.task_name = JobRuntime.instance().get(\"name\")\n\n # shut down logging to ensure job log files are closed.\n # If logging is still required after run_job caller is responsible to re-initialize it.\n logging.shutdown()\n\n return ret\n finally:\n os.chdir(old_cwd)\n\n\ndef get_valid_filename(s: str) -> str:\n s = str(s).strip().replace(\" \", \"_\")\n return re.sub(r\"(?u)[^-\\w.]\", \"\", s)\n\n\ndef setup_globals() -> None:\n try:\n OmegaConf.register_resolver(\n \"now\", lambda pattern: strftime(pattern, localtime())\n )\n except AssertionError:\n # calling it again in no_workers mode will throw. 
safe to ignore.\n pass\n\n\n@dataclass\nclass JobReturn:\n overrides: Optional[Sequence[str]] = None\n return_value: Any = None\n cfg: Optional[DictConfig] = None\n hydra_cfg: Optional[DictConfig] = None\n working_dir: Optional[str] = None\n task_name: Optional[str] = None\n\n\nclass JobRuntime(metaclass=Singleton):\n def __init__(self) -> None:\n self.conf: DictConfig = OmegaConf.create()\n self.set(\"name\", \"UNKNOWN_NAME\")\n\n def get(self, key: str) -> Any:\n ret = OmegaConf.select(self.conf, key)\n if ret is None:\n raise KeyError(f\"Key not found in {type(self).__name__}: {key}\")\n return ret\n\n def set(self, key: str, value: Any) -> None:\n log.debug(f\"Setting {type(self).__name__}:{key}={value}\")\n self.conf[key] = value\n\n\ndef split_config_path(\n config_path: Optional[str], config_name: Optional[str]\n) -> Tuple[Optional[str], Optional[str]]:\n if config_path is None or config_path == \"\":\n return None, config_name\n split_file = splitext(config_path)\n if split_file[1] in (\".yaml\", \".yml\"):\n # assuming dir/config.yaml form\n config_file: Optional[str] = basename(config_path)\n config_dir: Optional[str] = dirname(config_path)\n msg = (\n \"\\nUsing config_path to specify the config name is deprecated, specify the config name via config_name\"\n \"\\nSee https://hydra.cc/next/upgrades/0.11_to_1.0/config_path_changes\"\n )\n warnings.warn(\n category=UserWarning, message=msg,\n )\n else:\n # assuming dir form without a config file.\n config_file = None\n config_dir = config_path\n\n if config_dir == \"\":\n config_dir = None\n\n if config_file == \"\":\n config_file = None\n\n if config_file is not None:\n if config_name is not None:\n raise ValueError(\n \"Config name should be specified in either normalized_config_path or config_name, but not both\"\n )\n config_name = config_file\n\n return config_dir, config_name\n\n\n@contextmanager\ndef env_override(env: Dict[str, str]) -> Any:\n \"\"\"Temporarily set environment variables inside the context manager and\n fully restore previous environment afterwards\n \"\"\"\n original_env = {key: os.getenv(key) for key in env}\n os.environ.update(env)\n try:\n yield\n finally:\n for key, value in original_env.items():\n if value is None:\n del os.environ[key]\n else:\n os.environ[key] = value\n", "path": "hydra/core/utils.py"}, {"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nfrom typing import Any, Optional\n\nfrom omegaconf import DictConfig, OmegaConf\n\nfrom hydra.conf import HydraConf\nfrom hydra.core.singleton import Singleton\n\n\nclass HydraConfig(metaclass=Singleton):\n def __init__(self) -> None:\n self.cfg: Optional[HydraConf] = None\n\n def set_config(self, cfg: DictConfig) -> None:\n assert cfg is not None\n OmegaConf.set_readonly(cfg.hydra, True)\n assert OmegaConf.get_type(cfg, \"hydra\") == HydraConf\n self.cfg = cfg # type: ignore\n\n @staticmethod\n def get() -> HydraConf:\n instance = HydraConfig.instance()\n if instance.cfg is None:\n raise ValueError(\"HydraConfig was not set\")\n return instance.cfg.hydra # type: ignore\n\n @staticmethod\n def initialized() -> bool:\n instance = HydraConfig.instance()\n return instance.cfg is not None\n\n @staticmethod\n def instance(*args: Any, **kwargs: Any) -> \"HydraConfig\":\n return Singleton.instance(HydraConfig, *args, **kwargs) # type: ignore\n", "path": "hydra/core/hydra_config.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\nimport copy\nimport logging\nimport os\nimport re\nimport sys\nimport warnings\nfrom contextlib import contextmanager\nfrom dataclasses import dataclass\nfrom os.path import basename, dirname, splitext\nfrom pathlib import Path\nfrom time import localtime, strftime\nfrom typing import Any, Dict, Optional, Sequence, Tuple, Union, cast\n\nfrom omegaconf import DictConfig, OmegaConf, open_dict\n\nfrom hydra.core.hydra_config import HydraConfig\nfrom hydra.core.singleton import Singleton\nfrom hydra.types import TaskFunction\n\nlog = logging.getLogger(__name__)\n\n\ndef configure_log(\n log_config: DictConfig, verbose_config: Union[bool, str, Sequence[str]]\n) -> None:\n assert isinstance(verbose_config, (bool, str)) or OmegaConf.is_list(verbose_config)\n if log_config is not None:\n conf: Dict[str, Any] = OmegaConf.to_container( # type: ignore\n log_config, resolve=True\n )\n logging.config.dictConfig(conf)\n else:\n # default logging to stdout\n root = logging.getLogger()\n root.setLevel(logging.INFO)\n handler = logging.StreamHandler(sys.stdout)\n formatter = logging.Formatter(\n \"[%(asctime)s][%(name)s][%(levelname)s] - %(message)s\"\n )\n handler.setFormatter(formatter)\n root.addHandler(handler)\n if isinstance(verbose_config, bool):\n if verbose_config:\n logging.getLogger().setLevel(logging.DEBUG)\n else:\n if isinstance(verbose_config, str):\n verbose_list = OmegaConf.create([verbose_config])\n elif OmegaConf.is_list(verbose_config):\n verbose_list = verbose_config # type: ignore\n else:\n assert False\n\n for logger in verbose_list:\n logging.getLogger(logger).setLevel(logging.DEBUG)\n\n\ndef _save_config(cfg: DictConfig, filename: str, output_dir: Path) -> None:\n output_dir.mkdir(parents=True, exist_ok=True)\n with open(str(output_dir / filename), \"w\") as file:\n file.write(cfg.pretty())\n\n\ndef filter_overrides(overrides: Sequence[str]) -> Sequence[str]:\n \"\"\"\n :param overrides: overrides list\n :return: returning a new overrides list with all the keys starting with hydra. 
filtered.\n \"\"\"\n return [x for x in overrides if not x.startswith(\"hydra.\")]\n\n\ndef run_job(\n config: DictConfig,\n task_function: TaskFunction,\n job_dir_key: str,\n job_subdir_key: Optional[str],\n) -> \"JobReturn\":\n old_cwd = os.getcwd()\n working_dir = str(OmegaConf.select(config, job_dir_key))\n if job_subdir_key is not None:\n # evaluate job_subdir_key lazily.\n # this is running on the client side in sweep and contains things such as job:id which\n # are only available there.\n subdir = str(OmegaConf.select(config, job_subdir_key))\n working_dir = os.path.join(working_dir, subdir)\n try:\n ret = JobReturn()\n ret.working_dir = working_dir\n task_cfg = copy.deepcopy(config)\n with open_dict(task_cfg):\n del task_cfg[\"hydra\"]\n ret.cfg = task_cfg\n ret.hydra_cfg = OmegaConf.create({\"hydra\": HydraConfig.get()})\n overrides = OmegaConf.to_container(config.hydra.overrides.task)\n assert isinstance(overrides, list)\n ret.overrides = overrides\n # handle output directories here\n Path(str(working_dir)).mkdir(parents=True, exist_ok=True)\n os.chdir(working_dir)\n hydra_output = Path(config.hydra.output_subdir)\n\n configure_log(config.hydra.job_logging, config.hydra.verbose)\n\n hydra_cfg = OmegaConf.masked_copy(config, \"hydra\")\n assert isinstance(hydra_cfg, DictConfig)\n\n _save_config(task_cfg, \"config.yaml\", hydra_output)\n _save_config(hydra_cfg, \"hydra.yaml\", hydra_output)\n _save_config(config.hydra.overrides.task, \"overrides.yaml\", hydra_output)\n with env_override(hydra_cfg.hydra.job.env_set):\n ret.return_value = task_function(task_cfg)\n ret.task_name = JobRuntime.instance().get(\"name\")\n\n # shut down logging to ensure job log files are closed.\n # If logging is still required after run_job caller is responsible to re-initialize it.\n logging.shutdown()\n\n return ret\n finally:\n os.chdir(old_cwd)\n\n\ndef get_valid_filename(s: str) -> str:\n s = str(s).strip().replace(\" \", \"_\")\n return re.sub(r\"(?u)[^-\\w.]\", \"\", s)\n\n\ndef setup_globals() -> None:\n def register(name: str, f: Any) -> None:\n try:\n OmegaConf.register_resolver(name, f)\n except AssertionError:\n # calling it again in no_workers mode will throw. 
safe to ignore.\n pass\n\n register(\"now\", lambda pattern: strftime(pattern, localtime()))\n register(\n \"hydra\",\n lambda path: OmegaConf.select(cast(DictConfig, HydraConfig.get()), path),\n )\n\n\n@dataclass\nclass JobReturn:\n overrides: Optional[Sequence[str]] = None\n return_value: Any = None\n cfg: Optional[DictConfig] = None\n hydra_cfg: Optional[DictConfig] = None\n working_dir: Optional[str] = None\n task_name: Optional[str] = None\n\n\nclass JobRuntime(metaclass=Singleton):\n def __init__(self) -> None:\n self.conf: DictConfig = OmegaConf.create()\n self.set(\"name\", \"UNKNOWN_NAME\")\n\n def get(self, key: str) -> Any:\n ret = OmegaConf.select(self.conf, key)\n if ret is None:\n raise KeyError(f\"Key not found in {type(self).__name__}: {key}\")\n return ret\n\n def set(self, key: str, value: Any) -> None:\n log.debug(f\"Setting {type(self).__name__}:{key}={value}\")\n self.conf[key] = value\n\n\ndef split_config_path(\n config_path: Optional[str], config_name: Optional[str]\n) -> Tuple[Optional[str], Optional[str]]:\n if config_path is None or config_path == \"\":\n return None, config_name\n split_file = splitext(config_path)\n if split_file[1] in (\".yaml\", \".yml\"):\n # assuming dir/config.yaml form\n config_file: Optional[str] = basename(config_path)\n config_dir: Optional[str] = dirname(config_path)\n msg = (\n \"\\nUsing config_path to specify the config name is deprecated, specify the config name via config_name\"\n \"\\nSee https://hydra.cc/next/upgrades/0.11_to_1.0/config_path_changes\"\n )\n warnings.warn(\n category=UserWarning, message=msg,\n )\n else:\n # assuming dir form without a config file.\n config_file = None\n config_dir = config_path\n\n if config_dir == \"\":\n config_dir = None\n\n if config_file == \"\":\n config_file = None\n\n if config_file is not None:\n if config_name is not None:\n raise ValueError(\n \"Config name should be specified in either normalized_config_path or config_name, but not both\"\n )\n config_name = config_file\n\n return config_dir, config_name\n\n\n@contextmanager\ndef env_override(env: Dict[str, str]) -> Any:\n \"\"\"Temporarily set environment variables inside the context manager and\n fully restore previous environment afterwards\n \"\"\"\n original_env = {key: os.getenv(key) for key in env}\n os.environ.update(env)\n try:\n yield\n finally:\n for key, value in original_env.items():\n if value is None:\n del os.environ[key]\n else:\n os.environ[key] = value\n", "path": "hydra/core/utils.py"}, {"content": "# Copyright (c) Facebook, Inc. and its affiliates. 
All Rights Reserved\nfrom typing import Any, Optional\n\nfrom omegaconf import DictConfig, OmegaConf\n\nfrom hydra.conf import HydraConf\nfrom hydra.core.singleton import Singleton\n\n\nclass HydraConfig(metaclass=Singleton):\n def __init__(self) -> None:\n self.cfg: Optional[HydraConf] = None\n\n def set_config(self, cfg: DictConfig) -> None:\n assert cfg is not None\n OmegaConf.set_readonly(cfg.hydra, True)\n assert OmegaConf.get_type(cfg, \"hydra\") == HydraConf\n self.cfg = OmegaConf.masked_copy(cfg, \"hydra\") # type: ignore\n\n @staticmethod\n def get() -> HydraConf:\n instance = HydraConfig.instance()\n if instance.cfg is None:\n raise ValueError(\"HydraConfig was not set\")\n return instance.cfg.hydra # type: ignore\n\n @staticmethod\n def initialized() -> bool:\n instance = HydraConfig.instance()\n return instance.cfg is not None\n\n @staticmethod\n def instance(*args: Any, **kwargs: Any) -> \"HydraConfig\":\n return Singleton.instance(HydraConfig, *args, **kwargs) # type: ignore\n", "path": "hydra/core/hydra_config.py"}]}
2,972
num_tokens_diff: 443

problem_id: gh_patches_debug_11800
source: rasdani/github-patches
task_type: git_diff
in_source_id: Lightning-AI__torchmetrics-2017
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Backwards incompatible change to MSE for pixelwise regression ## 🐛 Bug #1937 introduces an unintended consequence: pixelwise regression is no longer supported. ### To Reproduce Run the following script: ```python import torch import torchmetrics B = 4 H = W = 3 x = torch.rand(B, H, W) y = torch.rand(B, H, W) torchmetrics.functional.mean_squared_error(x, y) ``` This results in the following error msg: ``` Traceback (most recent call last): File "test.py", line 10, in <module> torchmetrics.functional.mean_squared_error(x, y, num_outputs=H * W) File "lib/python3.10/site-packages/torchmetrics/functional/regression/mse.py", line 84, in mean_squared_error sum_squared_error, n_obs = _mean_squared_error_update(preds, target, num_outputs=num_outputs) File "lib/python3.10/site-packages/torchmetrics/functional/regression/mse.py", line 35, in _mean_squared_error_update _check_data_shape_to_num_outputs(preds, target, num_outputs, allow_1d_reshape=True) File "lib/python3.10/site-packages/torchmetrics/functional/regression/utils.py", line 31, in _check_data_shape_to_num_outputs raise ValueError( ValueError: Expected both predictions and target to be either 1- or 2-dimensional tensors, but got 3 and 3. ``` ### Expected behavior I would expect the MSE metrics to support pixelwise regression (predicting a single regression value for each pixel in an image). The above script works fine with torchmetrics 1.0.3. ### Environment - TorchMetrics version: 1.1.0, spack - Python & PyTorch Version: 3.10.10, 2.1.0 - Any other relevant information such as OS: macOS ### Additional context @SkafteNicki @Borda @justusschock --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/torchmetrics/functional/regression/mse.py` Content: ``` 1 # Copyright The Lightning team. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 from typing import Tuple, Union 15 16 import torch 17 from torch import Tensor 18 19 from torchmetrics.functional.regression.utils import _check_data_shape_to_num_outputs 20 from torchmetrics.utilities.checks import _check_same_shape 21 22 23 def _mean_squared_error_update(preds: Tensor, target: Tensor, num_outputs: int) -> Tuple[Tensor, int]: 24 """Update and returns variables required to compute Mean Squared Error. 25 26 Check for same shape of input tensors. 
27 28 Args: 29 preds: Predicted tensor 30 target: Ground truth tensor 31 num_outputs: Number of outputs in multioutput setting 32 33 """ 34 _check_same_shape(preds, target) 35 _check_data_shape_to_num_outputs(preds, target, num_outputs, allow_1d_reshape=True) 36 if num_outputs == 1: 37 preds = preds.view(-1) 38 target = target.view(-1) 39 diff = preds - target 40 sum_squared_error = torch.sum(diff * diff, dim=0) 41 n_obs = target.shape[0] 42 return sum_squared_error, n_obs 43 44 45 def _mean_squared_error_compute(sum_squared_error: Tensor, n_obs: Union[int, Tensor], squared: bool = True) -> Tensor: 46 """Compute Mean Squared Error. 47 48 Args: 49 sum_squared_error: Sum of square of errors over all observations 50 n_obs: Number of predictions or observations 51 squared: Returns RMSE value if set to False. 52 53 Example: 54 >>> preds = torch.tensor([0., 1, 2, 3]) 55 >>> target = torch.tensor([0., 1, 2, 2]) 56 >>> sum_squared_error, n_obs = _mean_squared_error_update(preds, target, num_outputs=1) 57 >>> _mean_squared_error_compute(sum_squared_error, n_obs) 58 tensor(0.2500) 59 60 """ 61 return sum_squared_error / n_obs if squared else torch.sqrt(sum_squared_error / n_obs) 62 63 64 def mean_squared_error(preds: Tensor, target: Tensor, squared: bool = True, num_outputs: int = 1) -> Tensor: 65 """Compute mean squared error. 66 67 Args: 68 preds: estimated labels 69 target: ground truth labels 70 squared: returns RMSE value if set to False 71 num_outputs: Number of outputs in multioutput setting 72 73 Return: 74 Tensor with MSE 75 76 Example: 77 >>> from torchmetrics.functional.regression import mean_squared_error 78 >>> x = torch.tensor([0., 1, 2, 3]) 79 >>> y = torch.tensor([0., 1, 2, 2]) 80 >>> mean_squared_error(x, y) 81 tensor(0.2500) 82 83 """ 84 sum_squared_error, n_obs = _mean_squared_error_update(preds, target, num_outputs=num_outputs) 85 return _mean_squared_error_compute(sum_squared_error, n_obs, squared=squared) 86 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/torchmetrics/functional/regression/mse.py b/src/torchmetrics/functional/regression/mse.py --- a/src/torchmetrics/functional/regression/mse.py +++ b/src/torchmetrics/functional/regression/mse.py @@ -16,7 +16,6 @@ import torch from torch import Tensor -from torchmetrics.functional.regression.utils import _check_data_shape_to_num_outputs from torchmetrics.utilities.checks import _check_same_shape @@ -32,7 +31,6 @@ """ _check_same_shape(preds, target) - _check_data_shape_to_num_outputs(preds, target, num_outputs, allow_1d_reshape=True) if num_outputs == 1: preds = preds.view(-1) target = target.view(-1)
{"golden_diff": "diff --git a/src/torchmetrics/functional/regression/mse.py b/src/torchmetrics/functional/regression/mse.py\n--- a/src/torchmetrics/functional/regression/mse.py\n+++ b/src/torchmetrics/functional/regression/mse.py\n@@ -16,7 +16,6 @@\n import torch\n from torch import Tensor\n \n-from torchmetrics.functional.regression.utils import _check_data_shape_to_num_outputs\n from torchmetrics.utilities.checks import _check_same_shape\n \n \n@@ -32,7 +31,6 @@\n \n \"\"\"\n _check_same_shape(preds, target)\n- _check_data_shape_to_num_outputs(preds, target, num_outputs, allow_1d_reshape=True)\n if num_outputs == 1:\n preds = preds.view(-1)\n target = target.view(-1)\n", "issue": "Backwards incompatible change to MSE for pixelwise regression\n## \ud83d\udc1b Bug\r\n\r\n#1937 introduces an unintended consequence: pixelwise regression is no longer supported.\r\n\r\n### To Reproduce\r\n\r\nRun the following script:\r\n```python\r\nimport torch\r\nimport torchmetrics\r\n\r\nB = 4\r\nH = W = 3\r\n\r\nx = torch.rand(B, H, W)\r\ny = torch.rand(B, H, W)\r\n\r\ntorchmetrics.functional.mean_squared_error(x, y)\r\n```\r\nThis results in the following error msg:\r\n```\r\nTraceback (most recent call last):\r\n File \"test.py\", line 10, in <module>\r\n torchmetrics.functional.mean_squared_error(x, y, num_outputs=H * W)\r\n File \"lib/python3.10/site-packages/torchmetrics/functional/regression/mse.py\", line 84, in mean_squared_error\r\n sum_squared_error, n_obs = _mean_squared_error_update(preds, target, num_outputs=num_outputs)\r\n File \"lib/python3.10/site-packages/torchmetrics/functional/regression/mse.py\", line 35, in _mean_squared_error_update\r\n _check_data_shape_to_num_outputs(preds, target, num_outputs, allow_1d_reshape=True)\r\n File \"lib/python3.10/site-packages/torchmetrics/functional/regression/utils.py\", line 31, in _check_data_shape_to_num_outputs\r\n raise ValueError(\r\nValueError: Expected both predictions and target to be either 1- or 2-dimensional tensors, but got 3 and 3.\r\n```\r\n\r\n### Expected behavior\r\n\r\nI would expect the MSE metrics to support pixelwise regression (predicting a single regression value for each pixel in an image). 
The above script works fine with torchmetrics 1.0.3.\r\n\r\n### Environment\r\n\r\n- TorchMetrics version: 1.1.0, spack\r\n- Python & PyTorch Version: 3.10.10, 2.1.0\r\n- Any other relevant information such as OS: macOS\r\n\r\n### Additional context\r\n\r\n@SkafteNicki @Borda @justusschock \n", "before_files": [{"content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Tuple, Union\n\nimport torch\nfrom torch import Tensor\n\nfrom torchmetrics.functional.regression.utils import _check_data_shape_to_num_outputs\nfrom torchmetrics.utilities.checks import _check_same_shape\n\n\ndef _mean_squared_error_update(preds: Tensor, target: Tensor, num_outputs: int) -> Tuple[Tensor, int]:\n \"\"\"Update and returns variables required to compute Mean Squared Error.\n\n Check for same shape of input tensors.\n\n Args:\n preds: Predicted tensor\n target: Ground truth tensor\n num_outputs: Number of outputs in multioutput setting\n\n \"\"\"\n _check_same_shape(preds, target)\n _check_data_shape_to_num_outputs(preds, target, num_outputs, allow_1d_reshape=True)\n if num_outputs == 1:\n preds = preds.view(-1)\n target = target.view(-1)\n diff = preds - target\n sum_squared_error = torch.sum(diff * diff, dim=0)\n n_obs = target.shape[0]\n return sum_squared_error, n_obs\n\n\ndef _mean_squared_error_compute(sum_squared_error: Tensor, n_obs: Union[int, Tensor], squared: bool = True) -> Tensor:\n \"\"\"Compute Mean Squared Error.\n\n Args:\n sum_squared_error: Sum of square of errors over all observations\n n_obs: Number of predictions or observations\n squared: Returns RMSE value if set to False.\n\n Example:\n >>> preds = torch.tensor([0., 1, 2, 3])\n >>> target = torch.tensor([0., 1, 2, 2])\n >>> sum_squared_error, n_obs = _mean_squared_error_update(preds, target, num_outputs=1)\n >>> _mean_squared_error_compute(sum_squared_error, n_obs)\n tensor(0.2500)\n\n \"\"\"\n return sum_squared_error / n_obs if squared else torch.sqrt(sum_squared_error / n_obs)\n\n\ndef mean_squared_error(preds: Tensor, target: Tensor, squared: bool = True, num_outputs: int = 1) -> Tensor:\n \"\"\"Compute mean squared error.\n\n Args:\n preds: estimated labels\n target: ground truth labels\n squared: returns RMSE value if set to False\n num_outputs: Number of outputs in multioutput setting\n\n Return:\n Tensor with MSE\n\n Example:\n >>> from torchmetrics.functional.regression import mean_squared_error\n >>> x = torch.tensor([0., 1, 2, 3])\n >>> y = torch.tensor([0., 1, 2, 2])\n >>> mean_squared_error(x, y)\n tensor(0.2500)\n\n \"\"\"\n sum_squared_error, n_obs = _mean_squared_error_update(preds, target, num_outputs=num_outputs)\n return _mean_squared_error_compute(sum_squared_error, n_obs, squared=squared)\n", "path": "src/torchmetrics/functional/regression/mse.py"}], "after_files": [{"content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may 
obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Tuple, Union\n\nimport torch\nfrom torch import Tensor\n\nfrom torchmetrics.utilities.checks import _check_same_shape\n\n\ndef _mean_squared_error_update(preds: Tensor, target: Tensor, num_outputs: int) -> Tuple[Tensor, int]:\n \"\"\"Update and returns variables required to compute Mean Squared Error.\n\n Check for same shape of input tensors.\n\n Args:\n preds: Predicted tensor\n target: Ground truth tensor\n num_outputs: Number of outputs in multioutput setting\n\n \"\"\"\n _check_same_shape(preds, target)\n if num_outputs == 1:\n preds = preds.view(-1)\n target = target.view(-1)\n diff = preds - target\n sum_squared_error = torch.sum(diff * diff, dim=0)\n n_obs = target.shape[0]\n return sum_squared_error, n_obs\n\n\ndef _mean_squared_error_compute(sum_squared_error: Tensor, n_obs: Union[int, Tensor], squared: bool = True) -> Tensor:\n \"\"\"Compute Mean Squared Error.\n\n Args:\n sum_squared_error: Sum of square of errors over all observations\n n_obs: Number of predictions or observations\n squared: Returns RMSE value if set to False.\n\n Example:\n >>> preds = torch.tensor([0., 1, 2, 3])\n >>> target = torch.tensor([0., 1, 2, 2])\n >>> sum_squared_error, n_obs = _mean_squared_error_update(preds, target, num_outputs=1)\n >>> _mean_squared_error_compute(sum_squared_error, n_obs)\n tensor(0.2500)\n\n \"\"\"\n return sum_squared_error / n_obs if squared else torch.sqrt(sum_squared_error / n_obs)\n\n\ndef mean_squared_error(preds: Tensor, target: Tensor, squared: bool = True, num_outputs: int = 1) -> Tensor:\n \"\"\"Compute mean squared error.\n\n Args:\n preds: estimated labels\n target: ground truth labels\n squared: returns RMSE value if set to False\n num_outputs: Number of outputs in multioutput setting\n\n Return:\n Tensor with MSE\n\n Example:\n >>> from torchmetrics.functional.regression import mean_squared_error\n >>> x = torch.tensor([0., 1, 2, 3])\n >>> y = torch.tensor([0., 1, 2, 2])\n >>> mean_squared_error(x, y)\n tensor(0.2500)\n\n \"\"\"\n sum_squared_error, n_obs = _mean_squared_error_update(preds, target, num_outputs=num_outputs)\n return _mean_squared_error_compute(sum_squared_error, n_obs, squared=squared)\n", "path": "src/torchmetrics/functional/regression/mse.py"}]}
num_tokens: 1,626
num_tokens_diff: 179
problem_id: gh_patches_debug_41543
source: rasdani/github-patches
task_type: git_diff
in_source_id: quantumlib__Cirq-3366
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- float32 parameters leads to significant performance loss of parameter resolver **Description of the issue** Calling `cirq.resolve_parameters` to resolve the parameters of a symbolic circuit with `np.float32` parameters leads to a significant performance loss. I ran into this bug while working on loading circuit parameters from a `.npy` file on [this TensorQuantum Dataset PR](https://github.com/tensorflow/quantum/pull/304). Verifying that the circuits produced the expected wave functions took ~30 minutes on the CI, since we have to check hundreds of circuits. On further investigation, I discovered that the parameters were 32-bit, and casting them to 64-bit decreased the parameter resolve time by a large margin. **How to reproduce the issue** ```python import cirq import sympy import time import numpy as np from collections import namedtuple import matplotlib.pyplot as plt def unique_name(): """Generator to generate an infinite number of unique names. Yields: Python `str` of the form "theta_<integer>". """ num = 0 while True: yield "theta_" + str(num) num += 1 def time_resolve_param(dtype, nspins, max_depth): depth_grid = list(range(2, max_depth, 2)) times = [] for depth in depth_grid: qubits = cirq.GridQubit.rect(nspins, 1) # Symbolic parameters name_generator = unique_name() symbol_names = [next(name_generator) for _ in range(depth)] symbols = [sympy.Symbol(name) for name in symbol_names] # Random values with dtype params = np.random.randn(2 * depth).astype(dtype) params = dict(zip(symbol_names, params.flatten())) # Define the circuit. circuit = cirq.Circuit(cirq.H.on_each(qubits)) zipped_qubits = list(zip(qubits, qubits[1:])) for d in range(depth): for q1, q2 in zipped_qubits: circuit.append(cirq.ZZ(q1, q2)**(symbols[d])) # Resolve parameters and time resolve_start = time.time() param_resolver = cirq.resolve_parameters(circuit, params) t_param = time.time() - resolve_start times.append(t_param) return depth_grid, times if __name__ == "__main__": NSPINS = 10 MAXDEPTH = 50 grid, times_32 = time_resolve_param(np.float32, NSPINS, MAXDEPTH) _, times_64 = time_resolve_param(np.float64, NSPINS, MAXDEPTH) plt.plot(grid, times_32, label='np.float32') plt.plot(grid, times_64, label='np.float64') plt.xlabel('circuit depth') plt.ylabel('time (s)') plt.legend() plt.grid() plt.show() ``` This produces the following figure ![float32_vs_float64_paramresolver](https://user-images.githubusercontent.com/32705838/94204975-4edbe100-fe90-11ea-9d2a-630f10e31aba.png) As we can see, with increasing circuit depth the time it takes to resolve the 32-bit parameters creates a massive slowdown. **Cirq version** Cirq 0.8.2 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `cirq/study/resolver.py` Content: ``` 1 # Copyright 2018 The Cirq Developers 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # https://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 
14 15 """Resolves ParameterValues to assigned values.""" 16 17 from typing import Any, Dict, Iterator, Optional, TYPE_CHECKING, Union, cast 18 import numpy as np 19 import sympy 20 from cirq._compat import proper_repr 21 from cirq._doc import document 22 23 if TYPE_CHECKING: 24 import cirq 25 26 27 ParamDictType = Dict['cirq.TParamKey', 'cirq.TParamVal'] 28 document( 29 ParamDictType, # type: ignore 30 """Dictionary from symbols to values.""") 31 32 ParamResolverOrSimilarType = Union['cirq.ParamResolver', ParamDictType, None] 33 document( 34 ParamResolverOrSimilarType, # type: ignore 35 """Something that can be used to turn parameters into values.""") 36 37 38 class ParamResolver: 39 """Resolves parameters to actual values. 40 41 A parameter is a variable whose value has not been determined. 42 A ParamResolver is an object that can be used to assign values for these 43 variables. 44 45 ParamResolvers are hashable. 46 47 Attributes: 48 param_dict: A dictionary from the ParameterValue key (str) to its 49 assigned value. 50 """ 51 52 def __new__(cls, param_dict: 'cirq.ParamResolverOrSimilarType' = None): 53 if isinstance(param_dict, ParamResolver): 54 return param_dict 55 return super().__new__(cls) 56 57 def __init__(self, 58 param_dict: 'cirq.ParamResolverOrSimilarType' = None) -> None: 59 if hasattr(self, 'param_dict'): 60 return # Already initialized. Got wrapped as part of the __new__. 61 62 self._param_hash: Optional[int] = None 63 self.param_dict = cast(ParamDictType, 64 {} if param_dict is None else param_dict) 65 66 def value_of(self, 67 value: Union['cirq.TParamKey', float]) -> 'cirq.TParamVal': 68 """Attempt to resolve a parameter to its assigned value. 69 70 Floats are returned without modification. Strings are resolved via 71 the parameter dictionary with exact match only. Otherwise, strings 72 are considered to be sympy.Symbols with the name as the input string. 73 74 A sympy.Symbol is first checked for exact match in the parameter 75 dictionary. Otherwise, it is treated as a sympy.Basic. 76 77 A sympy.Basic is resolved using sympy substitution. 78 79 Note that passing a formula to this resolver can be slow due to the 80 underlying sympy library. For circuits relying on quick performance, 81 it is recommended that all formulas are flattened before-hand using 82 cirq.flatten or other means so that formula resolution is avoided. 83 If unable to resolve a sympy.Symbol, returns it unchanged. 84 If unable to resolve a name, returns a sympy.Symbol with that name. 85 86 Args: 87 value: The parameter to try to resolve. 88 89 Returns: 90 The value of the parameter as resolved by this resolver. 91 """ 92 # Input is a float, no resolution needed: return early 93 if isinstance(value, float): 94 return value 95 96 # Handles 2 cases: 97 # Input is a string and maps to a number in the dictionary 98 # Input is a symbol and maps to a number in the dictionary 99 # In both cases, return it directly. 100 if value in self.param_dict: 101 param_value = self.param_dict[value] 102 if isinstance(param_value, (float, int)): 103 return param_value 104 105 # Input is a string and is not in the dictionary. 106 # Treat it as a symbol instead. 107 if isinstance(value, str): 108 # If the string is in the param_dict as a value, return it. 109 # Otherwise, try using the symbol instead. 110 return self.value_of(sympy.Symbol(value)) 111 112 # Input is a symbol (sympy.Symbol('a')) and its string maps to a number 113 # in the dictionary ({'a': 1.0}). Return it. 
114 if (isinstance(value, sympy.Symbol) and value.name in self.param_dict): 115 param_value = self.param_dict[value.name] 116 if isinstance(param_value, (float, int)): 117 return param_value 118 119 # The following resolves common sympy expressions 120 # If sympy did its job and wasn't slower than molasses, 121 # we wouldn't need the following block. 122 if isinstance(value, sympy.Add): 123 summation = self.value_of(value.args[0]) 124 for addend in value.args[1:]: 125 summation += self.value_of(addend) 126 return summation 127 if isinstance(value, sympy.Mul): 128 product = self.value_of(value.args[0]) 129 for factor in value.args[1:]: 130 product *= self.value_of(factor) 131 return product 132 if isinstance(value, sympy.Pow) and len(value.args) == 2: 133 return np.power(self.value_of(value.args[0]), 134 self.value_of(value.args[1])) 135 if value == sympy.pi: 136 return np.pi 137 if value == sympy.S.NegativeOne: 138 return -1 139 140 # Input is either a sympy formula or the dictionary maps to a 141 # formula. Use sympy to resolve the value. 142 # Note that sympy.subs() is slow, so we want to avoid this and 143 # only use it for cases that require complicated resolution. 144 if isinstance(value, sympy.Basic): 145 v = value.subs(self.param_dict) 146 if v.free_symbols: 147 return v 148 elif sympy.im(v): 149 return complex(v) 150 else: 151 return float(v) 152 153 # No known way to resolve this variable, return unchanged. 154 return value 155 156 def __iter__(self) -> Iterator[Union[str, sympy.Symbol]]: 157 return iter(self.param_dict) 158 159 def __bool__(self) -> bool: 160 return bool(self.param_dict) 161 162 def __getitem__(self, 163 key: Union[sympy.Basic, float, str]) -> 'cirq.TParamVal': 164 return self.value_of(key) 165 166 def __hash__(self) -> int: 167 if self._param_hash is None: 168 self._param_hash = hash(frozenset(self.param_dict.items())) 169 return self._param_hash 170 171 def __eq__(self, other): 172 if not isinstance(other, ParamResolver): 173 return NotImplemented 174 return self.param_dict == other.param_dict 175 176 def __ne__(self, other): 177 return not self == other 178 179 def __repr__(self) -> str: 180 param_dict_repr = ('{' + ', '.join([ 181 f'{proper_repr(k)}: {proper_repr(v)}' 182 for k, v in self.param_dict.items() 183 ]) + '}') 184 return f'cirq.ParamResolver({param_dict_repr})' 185 186 def _json_dict_(self) -> Dict[str, Any]: 187 return { 188 'cirq_type': self.__class__.__name__, 189 # JSON requires mappings to have keys of basic types. 190 'param_dict': list(self.param_dict.items()) 191 } 192 193 @classmethod 194 def _from_json_dict_(cls, param_dict, **kwargs): 195 return cls(dict(param_dict)) 196 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/cirq/study/resolver.py b/cirq/study/resolver.py --- a/cirq/study/resolver.py +++ b/cirq/study/resolver.py @@ -13,7 +13,7 @@ # limitations under the License. """Resolves ParameterValues to assigned values.""" - +import numbers from typing import Any, Dict, Iterator, Optional, TYPE_CHECKING, Union, cast import numpy as np import sympy @@ -89,9 +89,11 @@ Returns: The value of the parameter as resolved by this resolver. """ - # Input is a float, no resolution needed: return early - if isinstance(value, float): - return value + + # Input is a pass through type, no resolution needed: return early + v = _sympy_pass_through(value) + if v is not None: + return v # Handles 2 cases: # Input is a string and maps to a number in the dictionary @@ -99,8 +101,9 @@ # In both cases, return it directly. if value in self.param_dict: param_value = self.param_dict[value] - if isinstance(param_value, (float, int)): - return param_value + v = _sympy_pass_through(param_value) + if v is not None: + return v # Input is a string and is not in the dictionary. # Treat it as a symbol instead. @@ -111,10 +114,11 @@ # Input is a symbol (sympy.Symbol('a')) and its string maps to a number # in the dictionary ({'a': 1.0}). Return it. - if (isinstance(value, sympy.Symbol) and value.name in self.param_dict): + if isinstance(value, sympy.Symbol) and value.name in self.param_dict: param_value = self.param_dict[value.name] - if isinstance(param_value, (float, int)): - return param_value + v = _sympy_pass_through(param_value) + if v is not None: + return v # The following resolves common sympy expressions # If sympy did its job and wasn't slower than molasses, @@ -132,10 +136,6 @@ if isinstance(value, sympy.Pow) and len(value.args) == 2: return np.power(self.value_of(value.args[0]), self.value_of(value.args[1])) - if value == sympy.pi: - return np.pi - if value == sympy.S.NegativeOne: - return -1 # Input is either a sympy formula or the dictionary maps to a # formula. Use sympy to resolve the value. @@ -193,3 +193,15 @@ @classmethod def _from_json_dict_(cls, param_dict, **kwargs): return cls(dict(param_dict)) + + +def _sympy_pass_through(val: Any) -> Optional[Any]: + if isinstance(val, numbers.Number) and not isinstance(val, sympy.Basic): + return val + if isinstance(val, sympy.core.numbers.IntegerConstant): + return val.p + if isinstance(val, sympy.core.numbers.RationalConstant): + return val.p / val.q + if val == sympy.pi: + return np.pi + return None
{"golden_diff": "diff --git a/cirq/study/resolver.py b/cirq/study/resolver.py\n--- a/cirq/study/resolver.py\n+++ b/cirq/study/resolver.py\n@@ -13,7 +13,7 @@\n # limitations under the License.\n \n \"\"\"Resolves ParameterValues to assigned values.\"\"\"\n-\n+import numbers\n from typing import Any, Dict, Iterator, Optional, TYPE_CHECKING, Union, cast\n import numpy as np\n import sympy\n@@ -89,9 +89,11 @@\n Returns:\n The value of the parameter as resolved by this resolver.\n \"\"\"\n- # Input is a float, no resolution needed: return early\n- if isinstance(value, float):\n- return value\n+\n+ # Input is a pass through type, no resolution needed: return early\n+ v = _sympy_pass_through(value)\n+ if v is not None:\n+ return v\n \n # Handles 2 cases:\n # Input is a string and maps to a number in the dictionary\n@@ -99,8 +101,9 @@\n # In both cases, return it directly.\n if value in self.param_dict:\n param_value = self.param_dict[value]\n- if isinstance(param_value, (float, int)):\n- return param_value\n+ v = _sympy_pass_through(param_value)\n+ if v is not None:\n+ return v\n \n # Input is a string and is not in the dictionary.\n # Treat it as a symbol instead.\n@@ -111,10 +114,11 @@\n \n # Input is a symbol (sympy.Symbol('a')) and its string maps to a number\n # in the dictionary ({'a': 1.0}). Return it.\n- if (isinstance(value, sympy.Symbol) and value.name in self.param_dict):\n+ if isinstance(value, sympy.Symbol) and value.name in self.param_dict:\n param_value = self.param_dict[value.name]\n- if isinstance(param_value, (float, int)):\n- return param_value\n+ v = _sympy_pass_through(param_value)\n+ if v is not None:\n+ return v\n \n # The following resolves common sympy expressions\n # If sympy did its job and wasn't slower than molasses,\n@@ -132,10 +136,6 @@\n if isinstance(value, sympy.Pow) and len(value.args) == 2:\n return np.power(self.value_of(value.args[0]),\n self.value_of(value.args[1]))\n- if value == sympy.pi:\n- return np.pi\n- if value == sympy.S.NegativeOne:\n- return -1\n \n # Input is either a sympy formula or the dictionary maps to a\n # formula. Use sympy to resolve the value.\n@@ -193,3 +193,15 @@\n @classmethod\n def _from_json_dict_(cls, param_dict, **kwargs):\n return cls(dict(param_dict))\n+\n+\n+def _sympy_pass_through(val: Any) -> Optional[Any]:\n+ if isinstance(val, numbers.Number) and not isinstance(val, sympy.Basic):\n+ return val\n+ if isinstance(val, sympy.core.numbers.IntegerConstant):\n+ return val.p\n+ if isinstance(val, sympy.core.numbers.RationalConstant):\n+ return val.p / val.q\n+ if val == sympy.pi:\n+ return np.pi\n+ return None\n", "issue": "float32 parameters leads to significant performance loss of parameter resolver\n**Description of the issue**\r\nCalling `cirq.resolve_parameters` to resolve the parameters of a symbolic circuit with `np.float32` parameters leads to a significant performance loss. I ran into this bug while working on loading circuit parameters from a `.npy` file on [this TensorQuantum Dataset PR](https://github.com/tensorflow/quantum/pull/304). Verifying that the circuits produced the expected wave functions took ~30 minutes on the CI, since we have to check hundreds of circuits. 
On further investigation, I discovered that the parameters were 32-bit, and casting them to 64-bit decreased the parameter resolve time by a large margin.\r\n\r\n**How to reproduce the issue**\r\n\r\n```python\r\n\r\nimport cirq\r\nimport sympy\r\nimport time\r\nimport numpy as np\r\nfrom collections import namedtuple\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\ndef unique_name():\r\n \"\"\"Generator to generate an infinite number of unique names.\r\n\r\n Yields:\r\n Python `str` of the form \"theta_<integer>\".\r\n\r\n \"\"\"\r\n num = 0\r\n while True:\r\n yield \"theta_\" + str(num)\r\n num += 1\r\n\r\n\r\ndef time_resolve_param(dtype, nspins, max_depth):\r\n depth_grid = list(range(2, max_depth, 2))\r\n times = []\r\n for depth in depth_grid:\r\n qubits = cirq.GridQubit.rect(nspins, 1)\r\n\r\n # Symbolic parameters\r\n name_generator = unique_name()\r\n symbol_names = [next(name_generator) for _ in range(depth)]\r\n symbols = [sympy.Symbol(name) for name in symbol_names]\r\n\r\n # Random values with dtype\r\n params = np.random.randn(2 * depth).astype(dtype)\r\n params = dict(zip(symbol_names, params.flatten()))\r\n\r\n # Define the circuit.\r\n circuit = cirq.Circuit(cirq.H.on_each(qubits))\r\n zipped_qubits = list(zip(qubits, qubits[1:]))\r\n for d in range(depth):\r\n for q1, q2 in zipped_qubits:\r\n circuit.append(cirq.ZZ(q1, q2)**(symbols[d]))\r\n\r\n # Resolve parameters and time\r\n resolve_start = time.time()\r\n param_resolver = cirq.resolve_parameters(circuit, params)\r\n t_param = time.time() - resolve_start\r\n times.append(t_param)\r\n\r\n return depth_grid, times\r\n\r\n\r\nif __name__ == \"__main__\":\r\n NSPINS = 10\r\n MAXDEPTH = 50\r\n grid, times_32 = time_resolve_param(np.float32, NSPINS, MAXDEPTH)\r\n _, times_64 = time_resolve_param(np.float64, NSPINS, MAXDEPTH)\r\n\r\n plt.plot(grid, times_32, label='np.float32')\r\n plt.plot(grid, times_64, label='np.float64')\r\n plt.xlabel('circuit depth')\r\n plt.ylabel('time (s)')\r\n plt.legend()\r\n plt.grid()\r\n plt.show()\r\n\r\n\r\n```\r\nThis produces the following figure\r\n\r\n![float32_vs_float64_paramresolver](https://user-images.githubusercontent.com/32705838/94204975-4edbe100-fe90-11ea-9d2a-630f10e31aba.png)\r\n\r\nAs we can see, with increasing circuit depth the time it takes to resolve the 32-bit parameters creates a massive slowdown.\r\n\r\n**Cirq version**\r\nCirq 0.8.2\r\n\n", "before_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Resolves ParameterValues to assigned values.\"\"\"\n\nfrom typing import Any, Dict, Iterator, Optional, TYPE_CHECKING, Union, cast\nimport numpy as np\nimport sympy\nfrom cirq._compat import proper_repr\nfrom cirq._doc import document\n\nif TYPE_CHECKING:\n import cirq\n\n\nParamDictType = Dict['cirq.TParamKey', 'cirq.TParamVal']\ndocument(\n ParamDictType, # type: ignore\n \"\"\"Dictionary from symbols to values.\"\"\")\n\nParamResolverOrSimilarType = Union['cirq.ParamResolver', ParamDictType, 
None]\ndocument(\n ParamResolverOrSimilarType, # type: ignore\n \"\"\"Something that can be used to turn parameters into values.\"\"\")\n\n\nclass ParamResolver:\n \"\"\"Resolves parameters to actual values.\n\n A parameter is a variable whose value has not been determined.\n A ParamResolver is an object that can be used to assign values for these\n variables.\n\n ParamResolvers are hashable.\n\n Attributes:\n param_dict: A dictionary from the ParameterValue key (str) to its\n assigned value.\n \"\"\"\n\n def __new__(cls, param_dict: 'cirq.ParamResolverOrSimilarType' = None):\n if isinstance(param_dict, ParamResolver):\n return param_dict\n return super().__new__(cls)\n\n def __init__(self,\n param_dict: 'cirq.ParamResolverOrSimilarType' = None) -> None:\n if hasattr(self, 'param_dict'):\n return # Already initialized. Got wrapped as part of the __new__.\n\n self._param_hash: Optional[int] = None\n self.param_dict = cast(ParamDictType,\n {} if param_dict is None else param_dict)\n\n def value_of(self,\n value: Union['cirq.TParamKey', float]) -> 'cirq.TParamVal':\n \"\"\"Attempt to resolve a parameter to its assigned value.\n\n Floats are returned without modification. Strings are resolved via\n the parameter dictionary with exact match only. Otherwise, strings\n are considered to be sympy.Symbols with the name as the input string.\n\n A sympy.Symbol is first checked for exact match in the parameter\n dictionary. Otherwise, it is treated as a sympy.Basic.\n\n A sympy.Basic is resolved using sympy substitution.\n\n Note that passing a formula to this resolver can be slow due to the\n underlying sympy library. For circuits relying on quick performance,\n it is recommended that all formulas are flattened before-hand using\n cirq.flatten or other means so that formula resolution is avoided.\n If unable to resolve a sympy.Symbol, returns it unchanged.\n If unable to resolve a name, returns a sympy.Symbol with that name.\n\n Args:\n value: The parameter to try to resolve.\n\n Returns:\n The value of the parameter as resolved by this resolver.\n \"\"\"\n # Input is a float, no resolution needed: return early\n if isinstance(value, float):\n return value\n\n # Handles 2 cases:\n # Input is a string and maps to a number in the dictionary\n # Input is a symbol and maps to a number in the dictionary\n # In both cases, return it directly.\n if value in self.param_dict:\n param_value = self.param_dict[value]\n if isinstance(param_value, (float, int)):\n return param_value\n\n # Input is a string and is not in the dictionary.\n # Treat it as a symbol instead.\n if isinstance(value, str):\n # If the string is in the param_dict as a value, return it.\n # Otherwise, try using the symbol instead.\n return self.value_of(sympy.Symbol(value))\n\n # Input is a symbol (sympy.Symbol('a')) and its string maps to a number\n # in the dictionary ({'a': 1.0}). 
Return it.\n if (isinstance(value, sympy.Symbol) and value.name in self.param_dict):\n param_value = self.param_dict[value.name]\n if isinstance(param_value, (float, int)):\n return param_value\n\n # The following resolves common sympy expressions\n # If sympy did its job and wasn't slower than molasses,\n # we wouldn't need the following block.\n if isinstance(value, sympy.Add):\n summation = self.value_of(value.args[0])\n for addend in value.args[1:]:\n summation += self.value_of(addend)\n return summation\n if isinstance(value, sympy.Mul):\n product = self.value_of(value.args[0])\n for factor in value.args[1:]:\n product *= self.value_of(factor)\n return product\n if isinstance(value, sympy.Pow) and len(value.args) == 2:\n return np.power(self.value_of(value.args[0]),\n self.value_of(value.args[1]))\n if value == sympy.pi:\n return np.pi\n if value == sympy.S.NegativeOne:\n return -1\n\n # Input is either a sympy formula or the dictionary maps to a\n # formula. Use sympy to resolve the value.\n # Note that sympy.subs() is slow, so we want to avoid this and\n # only use it for cases that require complicated resolution.\n if isinstance(value, sympy.Basic):\n v = value.subs(self.param_dict)\n if v.free_symbols:\n return v\n elif sympy.im(v):\n return complex(v)\n else:\n return float(v)\n\n # No known way to resolve this variable, return unchanged.\n return value\n\n def __iter__(self) -> Iterator[Union[str, sympy.Symbol]]:\n return iter(self.param_dict)\n\n def __bool__(self) -> bool:\n return bool(self.param_dict)\n\n def __getitem__(self,\n key: Union[sympy.Basic, float, str]) -> 'cirq.TParamVal':\n return self.value_of(key)\n\n def __hash__(self) -> int:\n if self._param_hash is None:\n self._param_hash = hash(frozenset(self.param_dict.items()))\n return self._param_hash\n\n def __eq__(self, other):\n if not isinstance(other, ParamResolver):\n return NotImplemented\n return self.param_dict == other.param_dict\n\n def __ne__(self, other):\n return not self == other\n\n def __repr__(self) -> str:\n param_dict_repr = ('{' + ', '.join([\n f'{proper_repr(k)}: {proper_repr(v)}'\n for k, v in self.param_dict.items()\n ]) + '}')\n return f'cirq.ParamResolver({param_dict_repr})'\n\n def _json_dict_(self) -> Dict[str, Any]:\n return {\n 'cirq_type': self.__class__.__name__,\n # JSON requires mappings to have keys of basic types.\n 'param_dict': list(self.param_dict.items())\n }\n\n @classmethod\n def _from_json_dict_(cls, param_dict, **kwargs):\n return cls(dict(param_dict))\n", "path": "cirq/study/resolver.py"}], "after_files": [{"content": "# Copyright 2018 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Resolves ParameterValues to assigned values.\"\"\"\nimport numbers\nfrom typing import Any, Dict, Iterator, Optional, TYPE_CHECKING, Union, cast\nimport numpy as np\nimport sympy\nfrom cirq._compat import proper_repr\nfrom cirq._doc import document\n\nif TYPE_CHECKING:\n import cirq\n\n\nParamDictType = Dict['cirq.TParamKey', 'cirq.TParamVal']\ndocument(\n 
ParamDictType, # type: ignore\n \"\"\"Dictionary from symbols to values.\"\"\")\n\nParamResolverOrSimilarType = Union['cirq.ParamResolver', ParamDictType, None]\ndocument(\n ParamResolverOrSimilarType, # type: ignore\n \"\"\"Something that can be used to turn parameters into values.\"\"\")\n\n\nclass ParamResolver:\n \"\"\"Resolves parameters to actual values.\n\n A parameter is a variable whose value has not been determined.\n A ParamResolver is an object that can be used to assign values for these\n variables.\n\n ParamResolvers are hashable.\n\n Attributes:\n param_dict: A dictionary from the ParameterValue key (str) to its\n assigned value.\n \"\"\"\n\n def __new__(cls, param_dict: 'cirq.ParamResolverOrSimilarType' = None):\n if isinstance(param_dict, ParamResolver):\n return param_dict\n return super().__new__(cls)\n\n def __init__(self,\n param_dict: 'cirq.ParamResolverOrSimilarType' = None) -> None:\n if hasattr(self, 'param_dict'):\n return # Already initialized. Got wrapped as part of the __new__.\n\n self._param_hash: Optional[int] = None\n self.param_dict = cast(ParamDictType,\n {} if param_dict is None else param_dict)\n\n def value_of(self,\n value: Union['cirq.TParamKey', float]) -> 'cirq.TParamVal':\n \"\"\"Attempt to resolve a parameter to its assigned value.\n\n Floats are returned without modification. Strings are resolved via\n the parameter dictionary with exact match only. Otherwise, strings\n are considered to be sympy.Symbols with the name as the input string.\n\n A sympy.Symbol is first checked for exact match in the parameter\n dictionary. Otherwise, it is treated as a sympy.Basic.\n\n A sympy.Basic is resolved using sympy substitution.\n\n Note that passing a formula to this resolver can be slow due to the\n underlying sympy library. For circuits relying on quick performance,\n it is recommended that all formulas are flattened before-hand using\n cirq.flatten or other means so that formula resolution is avoided.\n If unable to resolve a sympy.Symbol, returns it unchanged.\n If unable to resolve a name, returns a sympy.Symbol with that name.\n\n Args:\n value: The parameter to try to resolve.\n\n Returns:\n The value of the parameter as resolved by this resolver.\n \"\"\"\n\n # Input is a pass through type, no resolution needed: return early\n v = _sympy_pass_through(value)\n if v is not None:\n return v\n\n # Handles 2 cases:\n # Input is a string and maps to a number in the dictionary\n # Input is a symbol and maps to a number in the dictionary\n # In both cases, return it directly.\n if value in self.param_dict:\n param_value = self.param_dict[value]\n v = _sympy_pass_through(param_value)\n if v is not None:\n return v\n\n # Input is a string and is not in the dictionary.\n # Treat it as a symbol instead.\n if isinstance(value, str):\n # If the string is in the param_dict as a value, return it.\n # Otherwise, try using the symbol instead.\n return self.value_of(sympy.Symbol(value))\n\n # Input is a symbol (sympy.Symbol('a')) and its string maps to a number\n # in the dictionary ({'a': 1.0}). 
Return it.\n if isinstance(value, sympy.Symbol) and value.name in self.param_dict:\n param_value = self.param_dict[value.name]\n v = _sympy_pass_through(param_value)\n if v is not None:\n return v\n\n # The following resolves common sympy expressions\n # If sympy did its job and wasn't slower than molasses,\n # we wouldn't need the following block.\n if isinstance(value, sympy.Add):\n summation = self.value_of(value.args[0])\n for addend in value.args[1:]:\n summation += self.value_of(addend)\n return summation\n if isinstance(value, sympy.Mul):\n product = self.value_of(value.args[0])\n for factor in value.args[1:]:\n product *= self.value_of(factor)\n return product\n if isinstance(value, sympy.Pow) and len(value.args) == 2:\n return np.power(self.value_of(value.args[0]),\n self.value_of(value.args[1]))\n\n # Input is either a sympy formula or the dictionary maps to a\n # formula. Use sympy to resolve the value.\n # Note that sympy.subs() is slow, so we want to avoid this and\n # only use it for cases that require complicated resolution.\n if isinstance(value, sympy.Basic):\n v = value.subs(self.param_dict)\n if v.free_symbols:\n return v\n elif sympy.im(v):\n return complex(v)\n else:\n return float(v)\n\n # No known way to resolve this variable, return unchanged.\n return value\n\n def __iter__(self) -> Iterator[Union[str, sympy.Symbol]]:\n return iter(self.param_dict)\n\n def __bool__(self) -> bool:\n return bool(self.param_dict)\n\n def __getitem__(self,\n key: Union[sympy.Basic, float, str]) -> 'cirq.TParamVal':\n return self.value_of(key)\n\n def __hash__(self) -> int:\n if self._param_hash is None:\n self._param_hash = hash(frozenset(self.param_dict.items()))\n return self._param_hash\n\n def __eq__(self, other):\n if not isinstance(other, ParamResolver):\n return NotImplemented\n return self.param_dict == other.param_dict\n\n def __ne__(self, other):\n return not self == other\n\n def __repr__(self) -> str:\n param_dict_repr = ('{' + ', '.join([\n f'{proper_repr(k)}: {proper_repr(v)}'\n for k, v in self.param_dict.items()\n ]) + '}')\n return f'cirq.ParamResolver({param_dict_repr})'\n\n def _json_dict_(self) -> Dict[str, Any]:\n return {\n 'cirq_type': self.__class__.__name__,\n # JSON requires mappings to have keys of basic types.\n 'param_dict': list(self.param_dict.items())\n }\n\n @classmethod\n def _from_json_dict_(cls, param_dict, **kwargs):\n return cls(dict(param_dict))\n\n\ndef _sympy_pass_through(val: Any) -> Optional[Any]:\n if isinstance(val, numbers.Number) and not isinstance(val, sympy.Basic):\n return val\n if isinstance(val, sympy.core.numbers.IntegerConstant):\n return val.p\n if isinstance(val, sympy.core.numbers.RationalConstant):\n return val.p / val.q\n if val == sympy.pi:\n return np.pi\n return None\n", "path": "cirq/study/resolver.py"}]}
num_tokens: 3,168
num_tokens_diff: 767
problem_id: gh_patches_debug_31092
source: rasdani/github-patches
task_type: git_diff
in_source_id: pwndbg__pwndbg-1888
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Refactor EventWrapper It seems we can maybe remove EventWrapper class now since we depend on GDB >= 9.2? https://github.com/pwndbg/pwndbg/issues/1854#issuecomment-1664527390 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pwndbg/gdblib/events.py` Content: ``` 1 """ 2 Enables callbacks into functions to be automatically invoked 3 when various events occur to the debuggee (e.g. STOP on SIGINT) 4 by using a decorator. 5 """ 6 7 from __future__ import annotations 8 9 import sys 10 from functools import partial 11 from functools import wraps 12 from typing import Any 13 from typing import Callable 14 15 import gdb 16 17 from pwndbg.gdblib.config import config 18 19 debug = config.add_param("debug-events", False, "display internal event debugging info") 20 21 22 # There is no GDB way to get a notification when the binary itself 23 # is loaded from disk, by the operating system, before absolutely 24 # anything happens 25 # 26 # However, we get an Objfile event when the binary is loaded, before 27 # its entry point is invoked. 28 # 29 # We also get an Objfile event when we load up GDB, so we need 30 # to detect when the binary is running or not. 31 # 32 # Additionally, when attaching to a process running under QEMU, the 33 # very first event which is fired is a 'stop' event. We need to 34 # capture this so that we can fire off all of the 'start' events first. 35 class StartEvent: 36 def __init__(self) -> None: 37 self.registered: list[Callable] = [] 38 self.running = False 39 40 def connect(self, function) -> None: 41 if function not in self.registered: 42 self.registered.append(function) 43 44 def disconnect(self, function) -> None: 45 if function in self.registered: 46 self.registered.remove(function) 47 48 def on_new_objfile(self) -> None: 49 if self.running or not gdb.selected_thread(): 50 return 51 52 self.running = True 53 54 for function in self.registered: 55 if debug: 56 sys.stdout.write( 57 "{!r} {}.{}\n".format("start", function.__module__, function.__name__) 58 ) 59 function() 60 61 def on_exited(self) -> None: 62 self.running = False 63 64 def on_stop(self) -> None: 65 self.on_new_objfile() 66 67 68 gdb.events.start = StartEvent() 69 70 71 class EventWrapper: 72 """ 73 Wrapper for GDB events which may not exist on older GDB versions but we still can 74 fire them manually (to invoke them you have to call `invoke_callbacks`). 75 """ 76 77 def __init__(self, name: str) -> None: 78 self.name = name 79 80 self._event = getattr(gdb.events, self.name, None) 81 self._is_real_event = self._event is not None 82 83 def connect(self, func) -> None: 84 if self._event is not None: 85 self._event.connect(func) 86 87 def disconnect(self, func) -> None: 88 if self._event is not None: 89 self._event.disconnect(func) 90 91 @property 92 def is_real_event(self) -> bool: 93 return self._is_real_event 94 95 def invoke_callbacks(self) -> None: 96 """ 97 As an optimization please don't call this if your GDB has this event (check `.is_real_event`). 
98 """ 99 for f in registered[self]: 100 f() 101 102 103 # Old GDBs doesn't have gdb.events.before_prompt, so we will emulate it using gdb.prompt_hook 104 before_prompt_event = EventWrapper("before_prompt") 105 gdb.events.before_prompt = before_prompt_event 106 107 108 # In order to support reloading, we must be able to re-fire 109 # all 'objfile' and 'stop' events. 110 registered: dict[Any, list[Callable]] = { 111 gdb.events.exited: [], 112 gdb.events.cont: [], 113 gdb.events.new_objfile: [], 114 gdb.events.stop: [], 115 gdb.events.start: [], 116 gdb.events.new_thread: [], 117 gdb.events.before_prompt: [], # The real event might not exist, but we wrap it 118 gdb.events.memory_changed: [], 119 gdb.events.register_changed: [], 120 } 121 122 123 # When performing remote debugging, gdbserver is very noisy about which 124 # objects are loaded. This greatly slows down the debugging session. 125 # In order to combat this, we keep track of which objfiles have been loaded 126 # this session, and only emit objfile events for each *new* file. 127 objfile_cache: dict[str, set[str]] = {} 128 129 130 def connect(func, event_handler, name=""): 131 if debug: 132 print("Connecting", func.__name__, event_handler) 133 134 @wraps(func) 135 def caller(*a): 136 if debug: 137 sys.stdout.write(f"{name!r} {func.__module__}.{func.__name__} {a!r}\n") 138 139 if a and isinstance(a[0], gdb.NewObjFileEvent): 140 objfile = a[0].new_objfile 141 handler = f"{func.__module__}.{func.__name__}" 142 path = objfile.filename 143 dispatched = objfile_cache.get(path, set()) 144 145 if handler in dispatched: 146 return 147 148 dispatched.add(handler) 149 objfile_cache[path] = dispatched 150 151 try: 152 func() 153 except Exception as e: 154 import pwndbg.exception 155 156 pwndbg.exception.handle() 157 raise e 158 159 registered[event_handler].append(caller) 160 event_handler.connect(caller) 161 return func 162 163 164 def exit(func): 165 return connect(func, gdb.events.exited, "exit") 166 167 168 def cont(func): 169 return connect(func, gdb.events.cont, "cont") 170 171 172 def new_objfile(func): 173 return connect(func, gdb.events.new_objfile, "obj") 174 175 176 def stop(func): 177 return connect(func, gdb.events.stop, "stop") 178 179 180 def start(func): 181 return connect(func, gdb.events.start, "start") 182 183 184 def thread(func): 185 return connect(func, gdb.events.new_thread, "thread") 186 187 188 before_prompt = partial(connect, event_handler=gdb.events.before_prompt, name="before_prompt") 189 190 191 def reg_changed(func): 192 try: 193 return connect(func, gdb.events.register_changed, "reg_changed") 194 except AttributeError: 195 return func 196 197 198 def mem_changed(func): 199 try: 200 return connect(func, gdb.events.memory_changed, "mem_changed") 201 except AttributeError: 202 return func 203 204 205 def log_objfiles(ofile=None) -> None: 206 if not (debug and ofile): 207 return 208 209 name = ofile.new_objfile.filename 210 211 print("objfile: %r" % name) 212 gdb.execute("info sharedlibrary") 213 214 215 gdb.events.new_objfile.connect(log_objfiles) 216 217 218 def after_reload(start=True) -> None: 219 if gdb.selected_inferior().pid: 220 for f in registered[gdb.events.stop]: 221 f() 222 for f in registered[gdb.events.start]: 223 if start: 224 f() 225 for f in registered[gdb.events.new_objfile]: 226 f() 227 for f in registered[gdb.events.before_prompt]: 228 f() 229 230 231 def on_reload() -> None: 232 for event, functions in registered.items(): 233 for function in functions: 234 event.disconnect(function) 235 
registered[event] = [] 236 237 238 @new_objfile 239 def _start_newobjfile() -> None: 240 gdb.events.start.on_new_objfile() 241 242 243 @exit 244 def _start_exit() -> None: 245 gdb.events.start.on_exited() 246 247 248 @stop 249 def _start_stop() -> None: 250 gdb.events.start.on_stop() 251 252 253 @exit 254 def _reset_objfiles() -> None: 255 global objfile_cache 256 objfile_cache = {} 257 ``` Path: `pwndbg/gdblib/prompt.py` Content: ``` 1 from __future__ import annotations 2 3 from os import environ 4 5 import gdb 6 7 import pwndbg.decorators 8 import pwndbg.gdblib.events 9 import pwndbg.gdblib.functions 10 import pwndbg.lib.cache 11 import pwndbg.profiling 12 from pwndbg.color import disable_colors 13 from pwndbg.color import message 14 from pwndbg.lib.tips import color_tip 15 from pwndbg.lib.tips import get_tip_of_the_day 16 17 funcs_list_str = ", ".join(message.notice("$" + f.name) for f in pwndbg.gdblib.functions.functions) 18 19 num_pwndbg_cmds = sum( 20 1 for _ in filter(lambda c: not (c.shell or c.is_alias), pwndbg.commands.commands) 21 ) 22 num_shell_cmds = sum(1 for _ in filter(lambda c: c.shell, pwndbg.commands.commands)) 23 hint_lines = ( 24 "loaded %i pwndbg commands and %i shell commands. Type %s for a list." 25 % (num_pwndbg_cmds, num_shell_cmds, message.notice("pwndbg [--shell | --all] [filter]")), 26 f"created {funcs_list_str} GDB functions (can be used with print/break)", 27 ) 28 29 for line in hint_lines: 30 print(message.prompt("pwndbg: ") + message.system(line)) 31 32 # noinspection PyPackageRequirements 33 show_tip = pwndbg.gdblib.config.add_param( 34 "show-tips", True, "whether to display the tip of the day on startup" 35 ) 36 37 cur = None 38 39 40 def initial_hook(*a) -> None: 41 if show_tip and not pwndbg.decorators.first_prompt: 42 colored_tip = color_tip(get_tip_of_the_day()) 43 print( 44 message.prompt("------- tip of the day") 45 + message.system(" (disable with %s)" % message.notice("set show-tips off")) 46 + message.prompt(" -------") 47 ) 48 print(colored_tip) 49 pwndbg.decorators.first_prompt = True 50 51 prompt_hook(*a) 52 53 if environ.get("PWNDBG_PROFILE") == "1": 54 pwndbg.profiling.profiler.stop("pwndbg-first-prompt.pstats") 55 56 gdb.prompt_hook = prompt_hook 57 58 59 context_shown = False 60 61 62 def prompt_hook(*a) -> None: 63 global cur, context_shown 64 65 new = (gdb.selected_inferior(), gdb.selected_thread()) 66 67 if cur != new: 68 pwndbg.gdblib.events.after_reload(start=cur is None) 69 cur = new 70 71 if pwndbg.gdblib.proc.alive and pwndbg.gdblib.proc.thread_is_stopped and not context_shown: 72 pwndbg.commands.context.context() 73 context_shown = True 74 75 76 @pwndbg.gdblib.events.cont 77 def reset_context_shown(*a) -> None: 78 global context_shown 79 context_shown = False 80 81 82 @pwndbg.gdblib.config.trigger(message.config_prompt_color, disable_colors) 83 def set_prompt() -> None: 84 prompt = "pwndbg> " 85 86 if not disable_colors: 87 prompt = "\x02" + prompt + "\x01" # STX + prompt + SOH 88 prompt = message.prompt(prompt) 89 prompt = "\x01" + prompt + "\x02" # SOH + prompt + STX 90 91 gdb.execute(f"set prompt {prompt}") 92 93 94 if pwndbg.gdblib.events.before_prompt_event.is_real_event: 95 gdb.prompt_hook = initial_hook 96 97 else: 98 # Old GDBs doesn't have gdb.events.before_prompt, so we will emulate it using gdb.prompt_hook 99 def extended_prompt_hook(*a): 100 pwndbg.gdblib.events.before_prompt_event.invoke_callbacks() 101 return prompt_hook(*a) 102 103 gdb.prompt_hook = extended_prompt_hook 104 ``` --- END FILES --- Please first 
localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pwndbg/gdblib/events.py b/pwndbg/gdblib/events.py --- a/pwndbg/gdblib/events.py +++ b/pwndbg/gdblib/events.py @@ -67,44 +67,6 @@ gdb.events.start = StartEvent() - -class EventWrapper: - """ - Wrapper for GDB events which may not exist on older GDB versions but we still can - fire them manually (to invoke them you have to call `invoke_callbacks`). - """ - - def __init__(self, name: str) -> None: - self.name = name - - self._event = getattr(gdb.events, self.name, None) - self._is_real_event = self._event is not None - - def connect(self, func) -> None: - if self._event is not None: - self._event.connect(func) - - def disconnect(self, func) -> None: - if self._event is not None: - self._event.disconnect(func) - - @property - def is_real_event(self) -> bool: - return self._is_real_event - - def invoke_callbacks(self) -> None: - """ - As an optimization please don't call this if your GDB has this event (check `.is_real_event`). - """ - for f in registered[self]: - f() - - -# Old GDBs doesn't have gdb.events.before_prompt, so we will emulate it using gdb.prompt_hook -before_prompt_event = EventWrapper("before_prompt") -gdb.events.before_prompt = before_prompt_event - - # In order to support reloading, we must be able to re-fire # all 'objfile' and 'stop' events. registered: dict[Any, list[Callable]] = { diff --git a/pwndbg/gdblib/prompt.py b/pwndbg/gdblib/prompt.py --- a/pwndbg/gdblib/prompt.py +++ b/pwndbg/gdblib/prompt.py @@ -89,15 +89,4 @@ prompt = "\x01" + prompt + "\x02" # SOH + prompt + STX gdb.execute(f"set prompt {prompt}") - - -if pwndbg.gdblib.events.before_prompt_event.is_real_event: - gdb.prompt_hook = initial_hook - -else: - # Old GDBs doesn't have gdb.events.before_prompt, so we will emulate it using gdb.prompt_hook - def extended_prompt_hook(*a): - pwndbg.gdblib.events.before_prompt_event.invoke_callbacks() - return prompt_hook(*a) - - gdb.prompt_hook = extended_prompt_hook +gdb.prompt_hook = initial_hook
{"golden_diff": "diff --git a/pwndbg/gdblib/events.py b/pwndbg/gdblib/events.py\n--- a/pwndbg/gdblib/events.py\n+++ b/pwndbg/gdblib/events.py\n@@ -67,44 +67,6 @@\n \n gdb.events.start = StartEvent()\n \n-\n-class EventWrapper:\n- \"\"\"\n- Wrapper for GDB events which may not exist on older GDB versions but we still can\n- fire them manually (to invoke them you have to call `invoke_callbacks`).\n- \"\"\"\n-\n- def __init__(self, name: str) -> None:\n- self.name = name\n-\n- self._event = getattr(gdb.events, self.name, None)\n- self._is_real_event = self._event is not None\n-\n- def connect(self, func) -> None:\n- if self._event is not None:\n- self._event.connect(func)\n-\n- def disconnect(self, func) -> None:\n- if self._event is not None:\n- self._event.disconnect(func)\n-\n- @property\n- def is_real_event(self) -> bool:\n- return self._is_real_event\n-\n- def invoke_callbacks(self) -> None:\n- \"\"\"\n- As an optimization please don't call this if your GDB has this event (check `.is_real_event`).\n- \"\"\"\n- for f in registered[self]:\n- f()\n-\n-\n-# Old GDBs doesn't have gdb.events.before_prompt, so we will emulate it using gdb.prompt_hook\n-before_prompt_event = EventWrapper(\"before_prompt\")\n-gdb.events.before_prompt = before_prompt_event\n-\n-\n # In order to support reloading, we must be able to re-fire\n # all 'objfile' and 'stop' events.\n registered: dict[Any, list[Callable]] = {\ndiff --git a/pwndbg/gdblib/prompt.py b/pwndbg/gdblib/prompt.py\n--- a/pwndbg/gdblib/prompt.py\n+++ b/pwndbg/gdblib/prompt.py\n@@ -89,15 +89,4 @@\n prompt = \"\\x01\" + prompt + \"\\x02\" # SOH + prompt + STX\n \n gdb.execute(f\"set prompt {prompt}\")\n-\n-\n-if pwndbg.gdblib.events.before_prompt_event.is_real_event:\n- gdb.prompt_hook = initial_hook\n-\n-else:\n- # Old GDBs doesn't have gdb.events.before_prompt, so we will emulate it using gdb.prompt_hook\n- def extended_prompt_hook(*a):\n- pwndbg.gdblib.events.before_prompt_event.invoke_callbacks()\n- return prompt_hook(*a)\n-\n- gdb.prompt_hook = extended_prompt_hook\n+gdb.prompt_hook = initial_hook\n", "issue": "Refactor EventWrapper\nIt seems we can maybe remove EventWrapper class now since we depend on GDB >= 9.2?\r\n\r\nhttps://github.com/pwndbg/pwndbg/issues/1854#issuecomment-1664527390\n", "before_files": [{"content": "\"\"\"\nEnables callbacks into functions to be automatically invoked\nwhen various events occur to the debuggee (e.g. STOP on SIGINT)\nby using a decorator.\n\"\"\"\n\nfrom __future__ import annotations\n\nimport sys\nfrom functools import partial\nfrom functools import wraps\nfrom typing import Any\nfrom typing import Callable\n\nimport gdb\n\nfrom pwndbg.gdblib.config import config\n\ndebug = config.add_param(\"debug-events\", False, \"display internal event debugging info\")\n\n\n# There is no GDB way to get a notification when the binary itself\n# is loaded from disk, by the operating system, before absolutely\n# anything happens\n#\n# However, we get an Objfile event when the binary is loaded, before\n# its entry point is invoked.\n#\n# We also get an Objfile event when we load up GDB, so we need\n# to detect when the binary is running or not.\n#\n# Additionally, when attaching to a process running under QEMU, the\n# very first event which is fired is a 'stop' event. 
We need to\n# capture this so that we can fire off all of the 'start' events first.\nclass StartEvent:\n def __init__(self) -> None:\n self.registered: list[Callable] = []\n self.running = False\n\n def connect(self, function) -> None:\n if function not in self.registered:\n self.registered.append(function)\n\n def disconnect(self, function) -> None:\n if function in self.registered:\n self.registered.remove(function)\n\n def on_new_objfile(self) -> None:\n if self.running or not gdb.selected_thread():\n return\n\n self.running = True\n\n for function in self.registered:\n if debug:\n sys.stdout.write(\n \"{!r} {}.{}\\n\".format(\"start\", function.__module__, function.__name__)\n )\n function()\n\n def on_exited(self) -> None:\n self.running = False\n\n def on_stop(self) -> None:\n self.on_new_objfile()\n\n\ngdb.events.start = StartEvent()\n\n\nclass EventWrapper:\n \"\"\"\n Wrapper for GDB events which may not exist on older GDB versions but we still can\n fire them manually (to invoke them you have to call `invoke_callbacks`).\n \"\"\"\n\n def __init__(self, name: str) -> None:\n self.name = name\n\n self._event = getattr(gdb.events, self.name, None)\n self._is_real_event = self._event is not None\n\n def connect(self, func) -> None:\n if self._event is not None:\n self._event.connect(func)\n\n def disconnect(self, func) -> None:\n if self._event is not None:\n self._event.disconnect(func)\n\n @property\n def is_real_event(self) -> bool:\n return self._is_real_event\n\n def invoke_callbacks(self) -> None:\n \"\"\"\n As an optimization please don't call this if your GDB has this event (check `.is_real_event`).\n \"\"\"\n for f in registered[self]:\n f()\n\n\n# Old GDBs doesn't have gdb.events.before_prompt, so we will emulate it using gdb.prompt_hook\nbefore_prompt_event = EventWrapper(\"before_prompt\")\ngdb.events.before_prompt = before_prompt_event\n\n\n# In order to support reloading, we must be able to re-fire\n# all 'objfile' and 'stop' events.\nregistered: dict[Any, list[Callable]] = {\n gdb.events.exited: [],\n gdb.events.cont: [],\n gdb.events.new_objfile: [],\n gdb.events.stop: [],\n gdb.events.start: [],\n gdb.events.new_thread: [],\n gdb.events.before_prompt: [], # The real event might not exist, but we wrap it\n gdb.events.memory_changed: [],\n gdb.events.register_changed: [],\n}\n\n\n# When performing remote debugging, gdbserver is very noisy about which\n# objects are loaded. 
This greatly slows down the debugging session.\n# In order to combat this, we keep track of which objfiles have been loaded\n# this session, and only emit objfile events for each *new* file.\nobjfile_cache: dict[str, set[str]] = {}\n\n\ndef connect(func, event_handler, name=\"\"):\n if debug:\n print(\"Connecting\", func.__name__, event_handler)\n\n @wraps(func)\n def caller(*a):\n if debug:\n sys.stdout.write(f\"{name!r} {func.__module__}.{func.__name__} {a!r}\\n\")\n\n if a and isinstance(a[0], gdb.NewObjFileEvent):\n objfile = a[0].new_objfile\n handler = f\"{func.__module__}.{func.__name__}\"\n path = objfile.filename\n dispatched = objfile_cache.get(path, set())\n\n if handler in dispatched:\n return\n\n dispatched.add(handler)\n objfile_cache[path] = dispatched\n\n try:\n func()\n except Exception as e:\n import pwndbg.exception\n\n pwndbg.exception.handle()\n raise e\n\n registered[event_handler].append(caller)\n event_handler.connect(caller)\n return func\n\n\ndef exit(func):\n return connect(func, gdb.events.exited, \"exit\")\n\n\ndef cont(func):\n return connect(func, gdb.events.cont, \"cont\")\n\n\ndef new_objfile(func):\n return connect(func, gdb.events.new_objfile, \"obj\")\n\n\ndef stop(func):\n return connect(func, gdb.events.stop, \"stop\")\n\n\ndef start(func):\n return connect(func, gdb.events.start, \"start\")\n\n\ndef thread(func):\n return connect(func, gdb.events.new_thread, \"thread\")\n\n\nbefore_prompt = partial(connect, event_handler=gdb.events.before_prompt, name=\"before_prompt\")\n\n\ndef reg_changed(func):\n try:\n return connect(func, gdb.events.register_changed, \"reg_changed\")\n except AttributeError:\n return func\n\n\ndef mem_changed(func):\n try:\n return connect(func, gdb.events.memory_changed, \"mem_changed\")\n except AttributeError:\n return func\n\n\ndef log_objfiles(ofile=None) -> None:\n if not (debug and ofile):\n return\n\n name = ofile.new_objfile.filename\n\n print(\"objfile: %r\" % name)\n gdb.execute(\"info sharedlibrary\")\n\n\ngdb.events.new_objfile.connect(log_objfiles)\n\n\ndef after_reload(start=True) -> None:\n if gdb.selected_inferior().pid:\n for f in registered[gdb.events.stop]:\n f()\n for f in registered[gdb.events.start]:\n if start:\n f()\n for f in registered[gdb.events.new_objfile]:\n f()\n for f in registered[gdb.events.before_prompt]:\n f()\n\n\ndef on_reload() -> None:\n for event, functions in registered.items():\n for function in functions:\n event.disconnect(function)\n registered[event] = []\n\n\n@new_objfile\ndef _start_newobjfile() -> None:\n gdb.events.start.on_new_objfile()\n\n\n@exit\ndef _start_exit() -> None:\n gdb.events.start.on_exited()\n\n\n@stop\ndef _start_stop() -> None:\n gdb.events.start.on_stop()\n\n\n@exit\ndef _reset_objfiles() -> None:\n global objfile_cache\n objfile_cache = {}\n", "path": "pwndbg/gdblib/events.py"}, {"content": "from __future__ import annotations\n\nfrom os import environ\n\nimport gdb\n\nimport pwndbg.decorators\nimport pwndbg.gdblib.events\nimport pwndbg.gdblib.functions\nimport pwndbg.lib.cache\nimport pwndbg.profiling\nfrom pwndbg.color import disable_colors\nfrom pwndbg.color import message\nfrom pwndbg.lib.tips import color_tip\nfrom pwndbg.lib.tips import get_tip_of_the_day\n\nfuncs_list_str = \", \".join(message.notice(\"$\" + f.name) for f in pwndbg.gdblib.functions.functions)\n\nnum_pwndbg_cmds = sum(\n 1 for _ in filter(lambda c: not (c.shell or c.is_alias), pwndbg.commands.commands)\n)\nnum_shell_cmds = sum(1 for _ in filter(lambda c: c.shell, 
pwndbg.commands.commands))\nhint_lines = (\n \"loaded %i pwndbg commands and %i shell commands. Type %s for a list.\"\n % (num_pwndbg_cmds, num_shell_cmds, message.notice(\"pwndbg [--shell | --all] [filter]\")),\n f\"created {funcs_list_str} GDB functions (can be used with print/break)\",\n)\n\nfor line in hint_lines:\n print(message.prompt(\"pwndbg: \") + message.system(line))\n\n# noinspection PyPackageRequirements\nshow_tip = pwndbg.gdblib.config.add_param(\n \"show-tips\", True, \"whether to display the tip of the day on startup\"\n)\n\ncur = None\n\n\ndef initial_hook(*a) -> None:\n if show_tip and not pwndbg.decorators.first_prompt:\n colored_tip = color_tip(get_tip_of_the_day())\n print(\n message.prompt(\"------- tip of the day\")\n + message.system(\" (disable with %s)\" % message.notice(\"set show-tips off\"))\n + message.prompt(\" -------\")\n )\n print(colored_tip)\n pwndbg.decorators.first_prompt = True\n\n prompt_hook(*a)\n\n if environ.get(\"PWNDBG_PROFILE\") == \"1\":\n pwndbg.profiling.profiler.stop(\"pwndbg-first-prompt.pstats\")\n\n gdb.prompt_hook = prompt_hook\n\n\ncontext_shown = False\n\n\ndef prompt_hook(*a) -> None:\n global cur, context_shown\n\n new = (gdb.selected_inferior(), gdb.selected_thread())\n\n if cur != new:\n pwndbg.gdblib.events.after_reload(start=cur is None)\n cur = new\n\n if pwndbg.gdblib.proc.alive and pwndbg.gdblib.proc.thread_is_stopped and not context_shown:\n pwndbg.commands.context.context()\n context_shown = True\n\n\[email protected]\ndef reset_context_shown(*a) -> None:\n global context_shown\n context_shown = False\n\n\[email protected](message.config_prompt_color, disable_colors)\ndef set_prompt() -> None:\n prompt = \"pwndbg> \"\n\n if not disable_colors:\n prompt = \"\\x02\" + prompt + \"\\x01\" # STX + prompt + SOH\n prompt = message.prompt(prompt)\n prompt = \"\\x01\" + prompt + \"\\x02\" # SOH + prompt + STX\n\n gdb.execute(f\"set prompt {prompt}\")\n\n\nif pwndbg.gdblib.events.before_prompt_event.is_real_event:\n gdb.prompt_hook = initial_hook\n\nelse:\n # Old GDBs doesn't have gdb.events.before_prompt, so we will emulate it using gdb.prompt_hook\n def extended_prompt_hook(*a):\n pwndbg.gdblib.events.before_prompt_event.invoke_callbacks()\n return prompt_hook(*a)\n\n gdb.prompt_hook = extended_prompt_hook\n", "path": "pwndbg/gdblib/prompt.py"}], "after_files": [{"content": "\"\"\"\nEnables callbacks into functions to be automatically invoked\nwhen various events occur to the debuggee (e.g. STOP on SIGINT)\nby using a decorator.\n\"\"\"\n\nfrom __future__ import annotations\n\nimport sys\nfrom functools import partial\nfrom functools import wraps\nfrom typing import Any\nfrom typing import Callable\n\nimport gdb\n\nfrom pwndbg.gdblib.config import config\n\ndebug = config.add_param(\"debug-events\", False, \"display internal event debugging info\")\n\n\n# There is no GDB way to get a notification when the binary itself\n# is loaded from disk, by the operating system, before absolutely\n# anything happens\n#\n# However, we get an Objfile event when the binary is loaded, before\n# its entry point is invoked.\n#\n# We also get an Objfile event when we load up GDB, so we need\n# to detect when the binary is running or not.\n#\n# Additionally, when attaching to a process running under QEMU, the\n# very first event which is fired is a 'stop' event. 
We need to\n# capture this so that we can fire off all of the 'start' events first.\nclass StartEvent:\n def __init__(self) -> None:\n self.registered: list[Callable] = []\n self.running = False\n\n def connect(self, function) -> None:\n if function not in self.registered:\n self.registered.append(function)\n\n def disconnect(self, function) -> None:\n if function in self.registered:\n self.registered.remove(function)\n\n def on_new_objfile(self) -> None:\n if self.running or not gdb.selected_thread():\n return\n\n self.running = True\n\n for function in self.registered:\n if debug:\n sys.stdout.write(\n \"{!r} {}.{}\\n\".format(\"start\", function.__module__, function.__name__)\n )\n function()\n\n def on_exited(self) -> None:\n self.running = False\n\n def on_stop(self) -> None:\n self.on_new_objfile()\n\n\ngdb.events.start = StartEvent()\n\n# In order to support reloading, we must be able to re-fire\n# all 'objfile' and 'stop' events.\nregistered: dict[Any, list[Callable]] = {\n gdb.events.exited: [],\n gdb.events.cont: [],\n gdb.events.new_objfile: [],\n gdb.events.stop: [],\n gdb.events.start: [],\n gdb.events.new_thread: [],\n gdb.events.before_prompt: [], # The real event might not exist, but we wrap it\n gdb.events.memory_changed: [],\n gdb.events.register_changed: [],\n}\n\n\n# When performing remote debugging, gdbserver is very noisy about which\n# objects are loaded. This greatly slows down the debugging session.\n# In order to combat this, we keep track of which objfiles have been loaded\n# this session, and only emit objfile events for each *new* file.\nobjfile_cache: dict[str, set[str]] = {}\n\n\ndef connect(func, event_handler, name=\"\"):\n if debug:\n print(\"Connecting\", func.__name__, event_handler)\n\n @wraps(func)\n def caller(*a):\n if debug:\n sys.stdout.write(f\"{name!r} {func.__module__}.{func.__name__} {a!r}\\n\")\n\n if a and isinstance(a[0], gdb.NewObjFileEvent):\n objfile = a[0].new_objfile\n handler = f\"{func.__module__}.{func.__name__}\"\n path = objfile.filename\n dispatched = objfile_cache.get(path, set())\n\n if handler in dispatched:\n return\n\n dispatched.add(handler)\n objfile_cache[path] = dispatched\n\n try:\n func()\n except Exception as e:\n import pwndbg.exception\n\n pwndbg.exception.handle()\n raise e\n\n registered[event_handler].append(caller)\n event_handler.connect(caller)\n return func\n\n\ndef exit(func):\n return connect(func, gdb.events.exited, \"exit\")\n\n\ndef cont(func):\n return connect(func, gdb.events.cont, \"cont\")\n\n\ndef new_objfile(func):\n return connect(func, gdb.events.new_objfile, \"obj\")\n\n\ndef stop(func):\n return connect(func, gdb.events.stop, \"stop\")\n\n\ndef start(func):\n return connect(func, gdb.events.start, \"start\")\n\n\ndef thread(func):\n return connect(func, gdb.events.new_thread, \"thread\")\n\n\nbefore_prompt = partial(connect, event_handler=gdb.events.before_prompt, name=\"before_prompt\")\n\n\ndef reg_changed(func):\n try:\n return connect(func, gdb.events.register_changed, \"reg_changed\")\n except AttributeError:\n return func\n\n\ndef mem_changed(func):\n try:\n return connect(func, gdb.events.memory_changed, \"mem_changed\")\n except AttributeError:\n return func\n\n\ndef log_objfiles(ofile=None) -> None:\n if not (debug and ofile):\n return\n\n name = ofile.new_objfile.filename\n\n print(\"objfile: %r\" % name)\n gdb.execute(\"info sharedlibrary\")\n\n\ngdb.events.new_objfile.connect(log_objfiles)\n\n\ndef after_reload(start=True) -> None:\n if gdb.selected_inferior().pid:\n for f in 
registered[gdb.events.stop]:\n f()\n for f in registered[gdb.events.start]:\n if start:\n f()\n for f in registered[gdb.events.new_objfile]:\n f()\n for f in registered[gdb.events.before_prompt]:\n f()\n\n\ndef on_reload() -> None:\n for event, functions in registered.items():\n for function in functions:\n event.disconnect(function)\n registered[event] = []\n\n\n@new_objfile\ndef _start_newobjfile() -> None:\n gdb.events.start.on_new_objfile()\n\n\n@exit\ndef _start_exit() -> None:\n gdb.events.start.on_exited()\n\n\n@stop\ndef _start_stop() -> None:\n gdb.events.start.on_stop()\n\n\n@exit\ndef _reset_objfiles() -> None:\n global objfile_cache\n objfile_cache = {}\n", "path": "pwndbg/gdblib/events.py"}, {"content": "from __future__ import annotations\n\nfrom os import environ\n\nimport gdb\n\nimport pwndbg.decorators\nimport pwndbg.gdblib.events\nimport pwndbg.gdblib.functions\nimport pwndbg.lib.cache\nimport pwndbg.profiling\nfrom pwndbg.color import disable_colors\nfrom pwndbg.color import message\nfrom pwndbg.lib.tips import color_tip\nfrom pwndbg.lib.tips import get_tip_of_the_day\n\nfuncs_list_str = \", \".join(message.notice(\"$\" + f.name) for f in pwndbg.gdblib.functions.functions)\n\nnum_pwndbg_cmds = sum(\n 1 for _ in filter(lambda c: not (c.shell or c.is_alias), pwndbg.commands.commands)\n)\nnum_shell_cmds = sum(1 for _ in filter(lambda c: c.shell, pwndbg.commands.commands))\nhint_lines = (\n \"loaded %i pwndbg commands and %i shell commands. Type %s for a list.\"\n % (num_pwndbg_cmds, num_shell_cmds, message.notice(\"pwndbg [--shell | --all] [filter]\")),\n f\"created {funcs_list_str} GDB functions (can be used with print/break)\",\n)\n\nfor line in hint_lines:\n print(message.prompt(\"pwndbg: \") + message.system(line))\n\n# noinspection PyPackageRequirements\nshow_tip = pwndbg.gdblib.config.add_param(\n \"show-tips\", True, \"whether to display the tip of the day on startup\"\n)\n\ncur = None\n\n\ndef initial_hook(*a) -> None:\n if show_tip and not pwndbg.decorators.first_prompt:\n colored_tip = color_tip(get_tip_of_the_day())\n print(\n message.prompt(\"------- tip of the day\")\n + message.system(\" (disable with %s)\" % message.notice(\"set show-tips off\"))\n + message.prompt(\" -------\")\n )\n print(colored_tip)\n pwndbg.decorators.first_prompt = True\n\n prompt_hook(*a)\n\n if environ.get(\"PWNDBG_PROFILE\") == \"1\":\n pwndbg.profiling.profiler.stop(\"pwndbg-first-prompt.pstats\")\n\n gdb.prompt_hook = prompt_hook\n\n\ncontext_shown = False\n\n\ndef prompt_hook(*a) -> None:\n global cur, context_shown\n\n new = (gdb.selected_inferior(), gdb.selected_thread())\n\n if cur != new:\n pwndbg.gdblib.events.after_reload(start=cur is None)\n cur = new\n\n if pwndbg.gdblib.proc.alive and pwndbg.gdblib.proc.thread_is_stopped and not context_shown:\n pwndbg.commands.context.context()\n context_shown = True\n\n\[email protected]\ndef reset_context_shown(*a) -> None:\n global context_shown\n context_shown = False\n\n\[email protected](message.config_prompt_color, disable_colors)\ndef set_prompt() -> None:\n prompt = \"pwndbg> \"\n\n if not disable_colors:\n prompt = \"\\x02\" + prompt + \"\\x01\" # STX + prompt + SOH\n prompt = message.prompt(prompt)\n prompt = \"\\x01\" + prompt + \"\\x02\" # SOH + prompt + STX\n\n gdb.execute(f\"set prompt {prompt}\")\ngdb.prompt_hook = initial_hook\n", "path": "pwndbg/gdblib/prompt.py"}]}
3634
601
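The EventWrapper removal in the record above works because, per the linked issue, pwndbg now depends on GDB >= 9.2, and every such release exposes `gdb.events.before_prompt` natively — so `gdb.prompt_hook` can simply be set to `initial_hook` and callbacks can attach to the real event registry instead of an emulation layer. A minimal sketch of that direct registration, assuming a GDB >= 9.2 Python API (illustrative only, not code from the record):

```python
# Sketch assuming GDB >= 9.2, where gdb.events.before_prompt is always present.
# No emulation through gdb.prompt_hook is needed; callbacks attach to the
# native event registry and are invoked with no arguments.
import gdb

def on_before_prompt() -> None:
    # Runs each time GDB is about to display its prompt.
    print("refreshing cached state before prompt")

gdb.events.before_prompt.connect(on_before_prompt)      # register
# gdb.events.before_prompt.disconnect(on_before_prompt) # detach when no longer needed
```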
gh_patches_debug_25572
rasdani/github-patches
git_diff
freedomofpress__securedrop-5039
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Tails NetworkManager hook still running under Python2 ## Description Despite the shebang line in the `securedrop_init.py` pointing to Python3, the script is still run via Python2 due to the calling script hardcoding `/usr/bin/python` in the NetworkManager hook: https://github.com/freedomofpress/securedrop/blob/1d8484e3d42f63b9cec68be14e587175fa01adfc/install_files/ansible-base/roles/tails-config/files/65-configure-tor-for-securedrop.sh#L17 ## Steps to Reproduce Review diffs and relevant files above. ## Expected Behavior All Tails 4 admin tooling is Python 3. ## Actual Behavior We're still calling the nm hook via Python 2. ## Comments The PR to resolve is simple, but we'll have to factor in the QA time to verify we haven't introduced side-effects. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `install_files/ansible-base/roles/tails-config/files/securedrop_init.py` Content: ``` 1 #!/usr/bin/python3 2 3 import grp 4 import os 5 import io 6 import pwd 7 import sys 8 import subprocess 9 10 from shutil import copyfile 11 12 13 # check for root 14 if os.geteuid() != 0: 15 sys.exit('You need to run this as root') 16 17 # paths 18 path_torrc_additions = '/home/amnesia/Persistent/.securedrop/torrc_additions' 19 path_torrc_backup = '/etc/tor/torrc.bak' 20 path_torrc = '/etc/tor/torrc' 21 path_desktop = '/home/amnesia/Desktop/' 22 path_persistent_desktop = '/lib/live/mount/persistence/TailsData_unlocked/dotfiles/Desktop/' # noqa: E501 23 path_securedrop_root = '/home/amnesia/Persistent/securedrop' 24 path_securedrop_admin_venv = os.path.join(path_securedrop_root, 25 'admin/.venv3/bin/python') 26 path_securedrop_admin_init = os.path.join(path_securedrop_root, 27 'admin/securedrop_admin/__init__.py') 28 path_gui_updater = os.path.join(path_securedrop_root, 29 'journalist_gui/SecureDropUpdater') 30 31 paths_v3_authfiles = { 32 "app-journalist": os.path.join(path_securedrop_root, 33 'install_files/ansible-base/app-journalist.auth_private'), 34 "app-ssh": os.path.join(path_securedrop_root, 35 'install_files/ansible-base/app-ssh.auth_private'), 36 "mon-ssh": os.path.join(path_securedrop_root, 37 'install_files/ansible-base/mon-ssh.auth_private') 38 } 39 path_onion_auth_dir = '/var/lib/tor/onion_auth' 40 41 # load torrc_additions 42 if os.path.isfile(path_torrc_additions): 43 with io.open(path_torrc_additions) as f: 44 torrc_additions = f.read() 45 else: 46 sys.exit('Error opening {0} for reading'.format(path_torrc_additions)) 47 48 # load torrc 49 if os.path.isfile(path_torrc_backup): 50 with io.open(path_torrc_backup) as f: 51 torrc = f.read() 52 else: 53 if os.path.isfile(path_torrc): 54 with io.open(path_torrc) as f: 55 torrc = f.read() 56 else: 57 sys.exit('Error opening {0} for reading'.format(path_torrc)) 58 59 # save a backup 60 with io.open(path_torrc_backup, 'w') as f: 61 f.write(torrc) 62 63 # append the additions 64 with io.open(path_torrc, 'w') as f: 65 f.write(torrc + torrc_additions) 66 67 # check for v3 aths files 68 v3_authfiles_present = False 69 for f in paths_v3_authfiles.values(): 70 if os.path.isfile(f): 71 v3_authfiles_present = True 72 73 # if there are v3 authfiles, make dir and copy them into place 74 debian_tor_uid = pwd.getpwnam("debian-tor").pw_uid 75 debian_tor_gid = grp.getgrnam("debian-tor").gr_gid 76 77 if not os.path.isdir(path_onion_auth_dir): 78 
os.mkdir(path_onion_auth_dir) 79 80 os.chmod(path_onion_auth_dir, 0o700) 81 os.chown(path_onion_auth_dir, debian_tor_uid, debian_tor_gid) 82 83 for key, f in paths_v3_authfiles.items(): 84 if os.path.isfile(f): 85 filename = os.path.basename(f) 86 new_f = os.path.join(path_onion_auth_dir, filename) 87 copyfile(f, new_f) 88 os.chmod(new_f, 0o400) 89 os.chown(new_f, debian_tor_uid, debian_tor_gid) 90 91 # restart tor 92 try: 93 subprocess.check_call(['systemctl', 'restart', '[email protected]']) 94 except subprocess.CalledProcessError: 95 sys.exit('Error restarting Tor') 96 97 # Turn off "automatic-decompression" in Nautilus to ensure the original 98 # submission filename is restored (see 99 # https://github.com/freedomofpress/securedrop/issues/1862#issuecomment-311519750). 100 subprocess.call(['/usr/bin/dconf', 'write', 101 '/org/gnome/nautilus/preferences/automatic-decompression', 102 'false']) 103 104 # Set journalist.desktop and source.desktop links as trusted with Nautilus (see 105 # https://github.com/freedomofpress/securedrop/issues/2586) 106 # set euid and env variables to amnesia user 107 amnesia_gid = grp.getgrnam('amnesia').gr_gid 108 amnesia_uid = pwd.getpwnam('amnesia').pw_uid 109 os.setresgid(amnesia_gid, amnesia_gid, -1) 110 os.setresuid(amnesia_uid, amnesia_uid, -1) 111 env = os.environ.copy() 112 env['XDG_CURRENT_DESKTOP'] = 'GNOME' 113 env['DESKTOP_SESSION'] = 'default' 114 env['DISPLAY'] = ':1' 115 env['XDG_RUNTIME_DIR'] = '/run/user/{}'.format(amnesia_uid) 116 env['XDG_DATA_DIR'] = '/usr/share/gnome:/usr/local/share/:/usr/share/' 117 env['HOME'] = '/home/amnesia' 118 env['LOGNAME'] = 'amnesia' 119 env['DBUS_SESSION_BUS_ADDRESS'] = 'unix:path=/run/user/{}/bus'.format( 120 amnesia_uid) 121 122 # remove existing shortcut, recreate symlink and change metadata attribute 123 # to trust .desktop 124 for shortcut in ['source.desktop', 'journalist.desktop']: 125 subprocess.call(['rm', path_desktop + shortcut], env=env) 126 subprocess.call(['ln', '-s', path_persistent_desktop + shortcut, 127 path_desktop + shortcut], env=env) 128 subprocess.call(['gio', 'set', path_desktop + shortcut, 129 'metadata::trusted', 'true'], env=env) 130 131 # in Tails 4, reload gnome-shell desktop icons extension to update with changes above 132 cmd = ["lsb_release", "--id", "--short"] 133 p = subprocess.check_output(cmd) 134 distro_id = p.rstrip() 135 if distro_id == 'Debian' and os.uname()[1] == 'amnesia': 136 subprocess.call(['gnome-shell-extension-tool', '-r', 'desktop-icons@csoriano'], env=env) 137 138 # reacquire uid0 and notify the user 139 os.setresuid(0, 0, -1) 140 os.setresgid(0, 0, -1) 141 subprocess.call(['tails-notify-user', 142 'SecureDrop successfully auto-configured!', 143 'You can now access the Journalist Interface.\n', 144 'If you are an admin, you can now SSH to the servers.']) 145 146 # As the amnesia user, check for SecureDrop workstation updates. 147 os.setresgid(amnesia_gid, amnesia_gid, -1) 148 os.setresuid(amnesia_uid, amnesia_uid, -1) 149 output = subprocess.check_output([path_securedrop_admin_venv, 150 path_securedrop_admin_init, 151 '--root', path_securedrop_root, 152 'check_for_updates'], env=env) 153 154 flag_location = "/home/amnesia/Persistent/.securedrop/securedrop_update.flag" 155 if 'Update needed' in output or os.path.exists(flag_location): 156 # Start the SecureDrop updater GUI. 
157 subprocess.Popen(['python3', path_gui_updater], env=env) 158 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/install_files/ansible-base/roles/tails-config/files/securedrop_init.py b/install_files/ansible-base/roles/tails-config/files/securedrop_init.py --- a/install_files/ansible-base/roles/tails-config/files/securedrop_init.py +++ b/install_files/ansible-base/roles/tails-config/files/securedrop_init.py @@ -138,10 +138,10 @@ # reacquire uid0 and notify the user os.setresuid(0, 0, -1) os.setresgid(0, 0, -1) +success_message = 'You can now access the Journalist Interface.\nIf you are an admin, you can now SSH to the servers.' # noqa: E501 subprocess.call(['tails-notify-user', 'SecureDrop successfully auto-configured!', - 'You can now access the Journalist Interface.\n', - 'If you are an admin, you can now SSH to the servers.']) + success_message]) # As the amnesia user, check for SecureDrop workstation updates. os.setresgid(amnesia_gid, amnesia_gid, -1) @@ -152,6 +152,6 @@ 'check_for_updates'], env=env) flag_location = "/home/amnesia/Persistent/.securedrop/securedrop_update.flag" -if 'Update needed' in output or os.path.exists(flag_location): +if b'Update needed' in output or os.path.exists(flag_location): # Start the SecureDrop updater GUI. subprocess.Popen(['python3', path_gui_updater], env=env)
{"golden_diff": "diff --git a/install_files/ansible-base/roles/tails-config/files/securedrop_init.py b/install_files/ansible-base/roles/tails-config/files/securedrop_init.py\n--- a/install_files/ansible-base/roles/tails-config/files/securedrop_init.py\n+++ b/install_files/ansible-base/roles/tails-config/files/securedrop_init.py\n@@ -138,10 +138,10 @@\n # reacquire uid0 and notify the user\n os.setresuid(0, 0, -1)\n os.setresgid(0, 0, -1)\n+success_message = 'You can now access the Journalist Interface.\\nIf you are an admin, you can now SSH to the servers.' # noqa: E501\n subprocess.call(['tails-notify-user',\n 'SecureDrop successfully auto-configured!',\n- 'You can now access the Journalist Interface.\\n',\n- 'If you are an admin, you can now SSH to the servers.'])\n+ success_message])\n \n # As the amnesia user, check for SecureDrop workstation updates.\n os.setresgid(amnesia_gid, amnesia_gid, -1)\n@@ -152,6 +152,6 @@\n 'check_for_updates'], env=env)\n \n flag_location = \"/home/amnesia/Persistent/.securedrop/securedrop_update.flag\"\n-if 'Update needed' in output or os.path.exists(flag_location):\n+if b'Update needed' in output or os.path.exists(flag_location):\n # Start the SecureDrop updater GUI.\n subprocess.Popen(['python3', path_gui_updater], env=env)\n", "issue": "Tails NetworkManager hook still running under Python2\n## Description\r\nDespite the shebang line in the `securedrop_init.py` pointing to Python3, the script is still run via Python2 due to the calling script hardcoding `/usr/bin/python` in the NetworkManager hook:\r\n\r\nhttps://github.com/freedomofpress/securedrop/blob/1d8484e3d42f63b9cec68be14e587175fa01adfc/install_files/ansible-base/roles/tails-config/files/65-configure-tor-for-securedrop.sh#L17\r\n\r\n## Steps to Reproduce\r\n\r\nReview diffs and relevant files above.\r\n\r\n## Expected Behavior\r\n\r\nAll Tails 4 admin tooling is Python 3. \r\n\r\n## Actual Behavior\r\nWe're still calling the nm hook via Python 2. 
\r\n\r\n## Comments\r\nThe PR to resolve is simple, but we'll have to factor in the QA time to verify we haven't introduced side-effects.\r\n\n", "before_files": [{"content": "#!/usr/bin/python3\n\nimport grp\nimport os\nimport io\nimport pwd\nimport sys\nimport subprocess\n\nfrom shutil import copyfile\n\n\n# check for root\nif os.geteuid() != 0:\n sys.exit('You need to run this as root')\n\n# paths\npath_torrc_additions = '/home/amnesia/Persistent/.securedrop/torrc_additions'\npath_torrc_backup = '/etc/tor/torrc.bak'\npath_torrc = '/etc/tor/torrc'\npath_desktop = '/home/amnesia/Desktop/'\npath_persistent_desktop = '/lib/live/mount/persistence/TailsData_unlocked/dotfiles/Desktop/' # noqa: E501\npath_securedrop_root = '/home/amnesia/Persistent/securedrop'\npath_securedrop_admin_venv = os.path.join(path_securedrop_root,\n 'admin/.venv3/bin/python')\npath_securedrop_admin_init = os.path.join(path_securedrop_root,\n 'admin/securedrop_admin/__init__.py')\npath_gui_updater = os.path.join(path_securedrop_root,\n 'journalist_gui/SecureDropUpdater')\n\npaths_v3_authfiles = {\n \"app-journalist\": os.path.join(path_securedrop_root,\n 'install_files/ansible-base/app-journalist.auth_private'),\n \"app-ssh\": os.path.join(path_securedrop_root,\n 'install_files/ansible-base/app-ssh.auth_private'),\n \"mon-ssh\": os.path.join(path_securedrop_root,\n 'install_files/ansible-base/mon-ssh.auth_private')\n}\npath_onion_auth_dir = '/var/lib/tor/onion_auth'\n\n# load torrc_additions\nif os.path.isfile(path_torrc_additions):\n with io.open(path_torrc_additions) as f:\n torrc_additions = f.read()\nelse:\n sys.exit('Error opening {0} for reading'.format(path_torrc_additions))\n\n# load torrc\nif os.path.isfile(path_torrc_backup):\n with io.open(path_torrc_backup) as f:\n torrc = f.read()\nelse:\n if os.path.isfile(path_torrc):\n with io.open(path_torrc) as f:\n torrc = f.read()\n else:\n sys.exit('Error opening {0} for reading'.format(path_torrc))\n\n # save a backup\n with io.open(path_torrc_backup, 'w') as f:\n f.write(torrc)\n\n# append the additions\nwith io.open(path_torrc, 'w') as f:\n f.write(torrc + torrc_additions)\n\n# check for v3 aths files\nv3_authfiles_present = False\nfor f in paths_v3_authfiles.values():\n if os.path.isfile(f):\n v3_authfiles_present = True\n\n# if there are v3 authfiles, make dir and copy them into place\ndebian_tor_uid = pwd.getpwnam(\"debian-tor\").pw_uid\ndebian_tor_gid = grp.getgrnam(\"debian-tor\").gr_gid\n\nif not os.path.isdir(path_onion_auth_dir):\n os.mkdir(path_onion_auth_dir)\n\nos.chmod(path_onion_auth_dir, 0o700)\nos.chown(path_onion_auth_dir, debian_tor_uid, debian_tor_gid)\n\nfor key, f in paths_v3_authfiles.items():\n if os.path.isfile(f):\n filename = os.path.basename(f)\n new_f = os.path.join(path_onion_auth_dir, filename)\n copyfile(f, new_f)\n os.chmod(new_f, 0o400)\n os.chown(new_f, debian_tor_uid, debian_tor_gid)\n\n# restart tor\ntry:\n subprocess.check_call(['systemctl', 'restart', '[email protected]'])\nexcept subprocess.CalledProcessError:\n sys.exit('Error restarting Tor')\n\n# Turn off \"automatic-decompression\" in Nautilus to ensure the original\n# submission filename is restored (see\n# https://github.com/freedomofpress/securedrop/issues/1862#issuecomment-311519750).\nsubprocess.call(['/usr/bin/dconf', 'write',\n '/org/gnome/nautilus/preferences/automatic-decompression',\n 'false'])\n\n# Set journalist.desktop and source.desktop links as trusted with Nautilus (see\n# https://github.com/freedomofpress/securedrop/issues/2586)\n# set euid and env 
variables to amnesia user\namnesia_gid = grp.getgrnam('amnesia').gr_gid\namnesia_uid = pwd.getpwnam('amnesia').pw_uid\nos.setresgid(amnesia_gid, amnesia_gid, -1)\nos.setresuid(amnesia_uid, amnesia_uid, -1)\nenv = os.environ.copy()\nenv['XDG_CURRENT_DESKTOP'] = 'GNOME'\nenv['DESKTOP_SESSION'] = 'default'\nenv['DISPLAY'] = ':1'\nenv['XDG_RUNTIME_DIR'] = '/run/user/{}'.format(amnesia_uid)\nenv['XDG_DATA_DIR'] = '/usr/share/gnome:/usr/local/share/:/usr/share/'\nenv['HOME'] = '/home/amnesia'\nenv['LOGNAME'] = 'amnesia'\nenv['DBUS_SESSION_BUS_ADDRESS'] = 'unix:path=/run/user/{}/bus'.format(\n amnesia_uid)\n\n# remove existing shortcut, recreate symlink and change metadata attribute\n# to trust .desktop\nfor shortcut in ['source.desktop', 'journalist.desktop']:\n subprocess.call(['rm', path_desktop + shortcut], env=env)\n subprocess.call(['ln', '-s', path_persistent_desktop + shortcut,\n path_desktop + shortcut], env=env)\n subprocess.call(['gio', 'set', path_desktop + shortcut,\n 'metadata::trusted', 'true'], env=env)\n\n# in Tails 4, reload gnome-shell desktop icons extension to update with changes above\ncmd = [\"lsb_release\", \"--id\", \"--short\"]\np = subprocess.check_output(cmd)\ndistro_id = p.rstrip()\nif distro_id == 'Debian' and os.uname()[1] == 'amnesia':\n subprocess.call(['gnome-shell-extension-tool', '-r', 'desktop-icons@csoriano'], env=env)\n\n# reacquire uid0 and notify the user\nos.setresuid(0, 0, -1)\nos.setresgid(0, 0, -1)\nsubprocess.call(['tails-notify-user',\n 'SecureDrop successfully auto-configured!',\n 'You can now access the Journalist Interface.\\n',\n 'If you are an admin, you can now SSH to the servers.'])\n\n# As the amnesia user, check for SecureDrop workstation updates.\nos.setresgid(amnesia_gid, amnesia_gid, -1)\nos.setresuid(amnesia_uid, amnesia_uid, -1)\noutput = subprocess.check_output([path_securedrop_admin_venv,\n path_securedrop_admin_init,\n '--root', path_securedrop_root,\n 'check_for_updates'], env=env)\n\nflag_location = \"/home/amnesia/Persistent/.securedrop/securedrop_update.flag\"\nif 'Update needed' in output or os.path.exists(flag_location):\n # Start the SecureDrop updater GUI.\n subprocess.Popen(['python3', path_gui_updater], env=env)\n", "path": "install_files/ansible-base/roles/tails-config/files/securedrop_init.py"}], "after_files": [{"content": "#!/usr/bin/python3\n\nimport grp\nimport os\nimport io\nimport pwd\nimport sys\nimport subprocess\n\nfrom shutil import copyfile\n\n\n# check for root\nif os.geteuid() != 0:\n sys.exit('You need to run this as root')\n\n# paths\npath_torrc_additions = '/home/amnesia/Persistent/.securedrop/torrc_additions'\npath_torrc_backup = '/etc/tor/torrc.bak'\npath_torrc = '/etc/tor/torrc'\npath_desktop = '/home/amnesia/Desktop/'\npath_persistent_desktop = '/lib/live/mount/persistence/TailsData_unlocked/dotfiles/Desktop/' # noqa: E501\npath_securedrop_root = '/home/amnesia/Persistent/securedrop'\npath_securedrop_admin_venv = os.path.join(path_securedrop_root,\n 'admin/.venv3/bin/python')\npath_securedrop_admin_init = os.path.join(path_securedrop_root,\n 'admin/securedrop_admin/__init__.py')\npath_gui_updater = os.path.join(path_securedrop_root,\n 'journalist_gui/SecureDropUpdater')\n\npaths_v3_authfiles = {\n \"app-journalist\": os.path.join(path_securedrop_root,\n 'install_files/ansible-base/app-journalist.auth_private'),\n \"app-ssh\": os.path.join(path_securedrop_root,\n 'install_files/ansible-base/app-ssh.auth_private'),\n \"mon-ssh\": os.path.join(path_securedrop_root,\n 
'install_files/ansible-base/mon-ssh.auth_private')\n}\npath_onion_auth_dir = '/var/lib/tor/onion_auth'\n\n# load torrc_additions\nif os.path.isfile(path_torrc_additions):\n with io.open(path_torrc_additions) as f:\n torrc_additions = f.read()\nelse:\n sys.exit('Error opening {0} for reading'.format(path_torrc_additions))\n\n# load torrc\nif os.path.isfile(path_torrc_backup):\n with io.open(path_torrc_backup) as f:\n torrc = f.read()\nelse:\n if os.path.isfile(path_torrc):\n with io.open(path_torrc) as f:\n torrc = f.read()\n else:\n sys.exit('Error opening {0} for reading'.format(path_torrc))\n\n # save a backup\n with io.open(path_torrc_backup, 'w') as f:\n f.write(torrc)\n\n# append the additions\nwith io.open(path_torrc, 'w') as f:\n f.write(torrc + torrc_additions)\n\n# check for v3 aths files\nv3_authfiles_present = False\nfor f in paths_v3_authfiles.values():\n if os.path.isfile(f):\n v3_authfiles_present = True\n\n# if there are v3 authfiles, make dir and copy them into place\ndebian_tor_uid = pwd.getpwnam(\"debian-tor\").pw_uid\ndebian_tor_gid = grp.getgrnam(\"debian-tor\").gr_gid\n\nif not os.path.isdir(path_onion_auth_dir):\n os.mkdir(path_onion_auth_dir)\n\nos.chmod(path_onion_auth_dir, 0o700)\nos.chown(path_onion_auth_dir, debian_tor_uid, debian_tor_gid)\n\nfor key, f in paths_v3_authfiles.items():\n if os.path.isfile(f):\n filename = os.path.basename(f)\n new_f = os.path.join(path_onion_auth_dir, filename)\n copyfile(f, new_f)\n os.chmod(new_f, 0o400)\n os.chown(new_f, debian_tor_uid, debian_tor_gid)\n\n# restart tor\ntry:\n subprocess.check_call(['systemctl', 'restart', '[email protected]'])\nexcept subprocess.CalledProcessError:\n sys.exit('Error restarting Tor')\n\n# Turn off \"automatic-decompression\" in Nautilus to ensure the original\n# submission filename is restored (see\n# https://github.com/freedomofpress/securedrop/issues/1862#issuecomment-311519750).\nsubprocess.call(['/usr/bin/dconf', 'write',\n '/org/gnome/nautilus/preferences/automatic-decompression',\n 'false'])\n\n# Set journalist.desktop and source.desktop links as trusted with Nautilus (see\n# https://github.com/freedomofpress/securedrop/issues/2586)\n# set euid and env variables to amnesia user\namnesia_gid = grp.getgrnam('amnesia').gr_gid\namnesia_uid = pwd.getpwnam('amnesia').pw_uid\nos.setresgid(amnesia_gid, amnesia_gid, -1)\nos.setresuid(amnesia_uid, amnesia_uid, -1)\nenv = os.environ.copy()\nenv['XDG_CURRENT_DESKTOP'] = 'GNOME'\nenv['DESKTOP_SESSION'] = 'default'\nenv['DISPLAY'] = ':1'\nenv['XDG_RUNTIME_DIR'] = '/run/user/{}'.format(amnesia_uid)\nenv['XDG_DATA_DIR'] = '/usr/share/gnome:/usr/local/share/:/usr/share/'\nenv['HOME'] = '/home/amnesia'\nenv['LOGNAME'] = 'amnesia'\nenv['DBUS_SESSION_BUS_ADDRESS'] = 'unix:path=/run/user/{}/bus'.format(\n amnesia_uid)\n\n# remove existing shortcut, recreate symlink and change metadata attribute\n# to trust .desktop\nfor shortcut in ['source.desktop', 'journalist.desktop']:\n subprocess.call(['rm', path_desktop + shortcut], env=env)\n subprocess.call(['ln', '-s', path_persistent_desktop + shortcut,\n path_desktop + shortcut], env=env)\n subprocess.call(['gio', 'set', path_desktop + shortcut,\n 'metadata::trusted', 'true'], env=env)\n\n# in Tails 4, reload gnome-shell desktop icons extension to update with changes above\ncmd = [\"lsb_release\", \"--id\", \"--short\"]\np = subprocess.check_output(cmd)\ndistro_id = p.rstrip()\nif distro_id == 'Debian' and os.uname()[1] == 'amnesia':\n subprocess.call(['gnome-shell-extension-tool', '-r', 'desktop-icons@csoriano'], 
env=env)\n\n# reacquire uid0 and notify the user\nos.setresuid(0, 0, -1)\nos.setresgid(0, 0, -1)\nsuccess_message = 'You can now access the Journalist Interface.\\nIf you are an admin, you can now SSH to the servers.' # noqa: E501\nsubprocess.call(['tails-notify-user',\n 'SecureDrop successfully auto-configured!',\n success_message])\n\n# As the amnesia user, check for SecureDrop workstation updates.\nos.setresgid(amnesia_gid, amnesia_gid, -1)\nos.setresuid(amnesia_uid, amnesia_uid, -1)\noutput = subprocess.check_output([path_securedrop_admin_venv,\n path_securedrop_admin_init,\n '--root', path_securedrop_root,\n 'check_for_updates'], env=env)\n\nflag_location = \"/home/amnesia/Persistent/.securedrop/securedrop_update.flag\"\nif b'Update needed' in output or os.path.exists(flag_location):\n # Start the SecureDrop updater GUI.\n subprocess.Popen(['python3', path_gui_updater], env=env)\n", "path": "install_files/ansible-base/roles/tails-config/files/securedrop_init.py"}]}
2450
349
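One detail in the gh_patches_debug_25572 (freedomofpress__securedrop-5039) record above that the issue text does not spell out: besides folding the notification text into a single argument, the golden diff changes `'Update needed' in output` to `b'Update needed' in output`. Under Python 3, `subprocess.check_output()` returns `bytes`, and testing a `str` for membership in `bytes` raises `TypeError`, so the check needs a bytes literal (or a decode first). A small self-contained sketch of both safe patterns — the `echo` command here is a stand-in, not something from the repository:

```python
# Bytes-vs-str pitfall behind the b'Update needed' change in the golden diff.
import subprocess

output = subprocess.check_output(["echo", "Update needed"])  # bytes on Python 3

# Option 1: compare bytes against bytes (what the patch does).
if b"Update needed" in output:
    print("update flagged")

# Option 2: decode once and keep working with str.
if "Update needed" in output.decode("utf-8"):
    print("update flagged")
```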
gh_patches_debug_1586
rasdani/github-patches
git_diff
numpy__numpy-12376
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- import_array hides true cause of import errors When compiling for Python 3, it would be useful to utilize exception chaining to explain why "numpy.core.multiarray failed to import". I think you can use [PyException_SetCause](https://docs.python.org/3/c-api/exceptions.html#c.PyException_SetCause) to do this? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `numpy/core/code_generators/generate_numpy_api.py` Content: ``` 1 from __future__ import division, print_function 2 3 import os 4 import genapi 5 6 from genapi import \ 7 TypeApi, GlobalVarApi, FunctionApi, BoolValuesApi 8 9 import numpy_api 10 11 # use annotated api when running under cpychecker 12 h_template = r""" 13 #if defined(_MULTIARRAYMODULE) || defined(WITH_CPYCHECKER_STEALS_REFERENCE_TO_ARG_ATTRIBUTE) 14 15 typedef struct { 16 PyObject_HEAD 17 npy_bool obval; 18 } PyBoolScalarObject; 19 20 extern NPY_NO_EXPORT PyTypeObject PyArrayMapIter_Type; 21 extern NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type; 22 extern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2]; 23 24 %s 25 26 #else 27 28 #if defined(PY_ARRAY_UNIQUE_SYMBOL) 29 #define PyArray_API PY_ARRAY_UNIQUE_SYMBOL 30 #endif 31 32 #if defined(NO_IMPORT) || defined(NO_IMPORT_ARRAY) 33 extern void **PyArray_API; 34 #else 35 #if defined(PY_ARRAY_UNIQUE_SYMBOL) 36 void **PyArray_API; 37 #else 38 static void **PyArray_API=NULL; 39 #endif 40 #endif 41 42 %s 43 44 #if !defined(NO_IMPORT_ARRAY) && !defined(NO_IMPORT) 45 static int 46 _import_array(void) 47 { 48 int st; 49 PyObject *numpy = PyImport_ImportModule("numpy.core._multiarray_umath"); 50 PyObject *c_api = NULL; 51 52 if (numpy == NULL) { 53 PyErr_SetString(PyExc_ImportError, "numpy.core._multiarray_umath failed to import"); 54 return -1; 55 } 56 c_api = PyObject_GetAttrString(numpy, "_ARRAY_API"); 57 Py_DECREF(numpy); 58 if (c_api == NULL) { 59 PyErr_SetString(PyExc_AttributeError, "_ARRAY_API not found"); 60 return -1; 61 } 62 63 #if PY_VERSION_HEX >= 0x03000000 64 if (!PyCapsule_CheckExact(c_api)) { 65 PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCapsule object"); 66 Py_DECREF(c_api); 67 return -1; 68 } 69 PyArray_API = (void **)PyCapsule_GetPointer(c_api, NULL); 70 #else 71 if (!PyCObject_Check(c_api)) { 72 PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is not PyCObject object"); 73 Py_DECREF(c_api); 74 return -1; 75 } 76 PyArray_API = (void **)PyCObject_AsVoidPtr(c_api); 77 #endif 78 Py_DECREF(c_api); 79 if (PyArray_API == NULL) { 80 PyErr_SetString(PyExc_RuntimeError, "_ARRAY_API is NULL pointer"); 81 return -1; 82 } 83 84 /* Perform runtime check of C API version */ 85 if (NPY_VERSION != PyArray_GetNDArrayCVersion()) { 86 PyErr_Format(PyExc_RuntimeError, "module compiled against "\ 87 "ABI version 0x%%x but this version of numpy is 0x%%x", \ 88 (int) NPY_VERSION, (int) PyArray_GetNDArrayCVersion()); 89 return -1; 90 } 91 if (NPY_FEATURE_VERSION > PyArray_GetNDArrayCFeatureVersion()) { 92 PyErr_Format(PyExc_RuntimeError, "module compiled against "\ 93 "API version 0x%%x but this version of numpy is 0x%%x", \ 94 (int) NPY_FEATURE_VERSION, (int) PyArray_GetNDArrayCFeatureVersion()); 95 return -1; 96 } 97 98 /* 99 * Perform runtime check of endianness and check it matches the one set by 100 * the headers (npy_endian.h) as a safeguard 101 */ 102 st = PyArray_GetEndianness(); 103 if (st 
== NPY_CPU_UNKNOWN_ENDIAN) { 104 PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as unknown endian"); 105 return -1; 106 } 107 #if NPY_BYTE_ORDER == NPY_BIG_ENDIAN 108 if (st != NPY_CPU_BIG) { 109 PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as "\ 110 "big endian, but detected different endianness at runtime"); 111 return -1; 112 } 113 #elif NPY_BYTE_ORDER == NPY_LITTLE_ENDIAN 114 if (st != NPY_CPU_LITTLE) { 115 PyErr_Format(PyExc_RuntimeError, "FATAL: module compiled as "\ 116 "little endian, but detected different endianness at runtime"); 117 return -1; 118 } 119 #endif 120 121 return 0; 122 } 123 124 #if PY_VERSION_HEX >= 0x03000000 125 #define NUMPY_IMPORT_ARRAY_RETVAL NULL 126 #else 127 #define NUMPY_IMPORT_ARRAY_RETVAL 128 #endif 129 130 #define import_array() {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return NUMPY_IMPORT_ARRAY_RETVAL; } } 131 132 #define import_array1(ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, "numpy.core.multiarray failed to import"); return ret; } } 133 134 #define import_array2(msg, ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, msg); return ret; } } 135 136 #endif 137 138 #endif 139 """ 140 141 142 c_template = r""" 143 /* These pointers will be stored in the C-object for use in other 144 extension modules 145 */ 146 147 void *PyArray_API[] = { 148 %s 149 }; 150 """ 151 152 c_api_header = """ 153 =========== 154 NumPy C-API 155 =========== 156 """ 157 158 def generate_api(output_dir, force=False): 159 basename = 'multiarray_api' 160 161 h_file = os.path.join(output_dir, '__%s.h' % basename) 162 c_file = os.path.join(output_dir, '__%s.c' % basename) 163 d_file = os.path.join(output_dir, '%s.txt' % basename) 164 targets = (h_file, c_file, d_file) 165 166 sources = numpy_api.multiarray_api 167 168 if (not force and not genapi.should_rebuild(targets, [numpy_api.__file__, __file__])): 169 return targets 170 else: 171 do_generate_api(targets, sources) 172 173 return targets 174 175 def do_generate_api(targets, sources): 176 header_file = targets[0] 177 c_file = targets[1] 178 doc_file = targets[2] 179 180 global_vars = sources[0] 181 scalar_bool_values = sources[1] 182 types_api = sources[2] 183 multiarray_funcs = sources[3] 184 185 multiarray_api = sources[:] 186 187 module_list = [] 188 extension_list = [] 189 init_list = [] 190 191 # Check multiarray api indexes 192 multiarray_api_index = genapi.merge_api_dicts(multiarray_api) 193 genapi.check_api_dict(multiarray_api_index) 194 195 numpyapi_list = genapi.get_api_functions('NUMPY_API', 196 multiarray_funcs) 197 ordered_funcs_api = genapi.order_dict(multiarray_funcs) 198 199 # Create dict name -> *Api instance 200 api_name = 'PyArray_API' 201 multiarray_api_dict = {} 202 for f in numpyapi_list: 203 name = f.name 204 index = multiarray_funcs[name][0] 205 annotations = multiarray_funcs[name][1:] 206 multiarray_api_dict[f.name] = FunctionApi(f.name, index, annotations, 207 f.return_type, 208 f.args, api_name) 209 210 for name, val in global_vars.items(): 211 index, type = val 212 multiarray_api_dict[name] = GlobalVarApi(name, index, type, api_name) 213 214 for name, val in scalar_bool_values.items(): 215 index = val[0] 216 multiarray_api_dict[name] = BoolValuesApi(name, index, api_name) 217 218 for name, val in types_api.items(): 219 index = val[0] 220 multiarray_api_dict[name] = TypeApi(name, index, 'PyTypeObject', api_name) 221 222 if 
len(multiarray_api_dict) != len(multiarray_api_index): 223 keys_dict = set(multiarray_api_dict.keys()) 224 keys_index = set(multiarray_api_index.keys()) 225 raise AssertionError( 226 "Multiarray API size mismatch - " 227 "index has extra keys {}, dict has extra keys {}" 228 .format(keys_index - keys_dict, keys_dict - keys_index) 229 ) 230 231 extension_list = [] 232 for name, index in genapi.order_dict(multiarray_api_index): 233 api_item = multiarray_api_dict[name] 234 extension_list.append(api_item.define_from_array_api_string()) 235 init_list.append(api_item.array_api_define()) 236 module_list.append(api_item.internal_define()) 237 238 # Write to header 239 s = h_template % ('\n'.join(module_list), '\n'.join(extension_list)) 240 genapi.write_file(header_file, s) 241 242 # Write to c-code 243 s = c_template % ',\n'.join(init_list) 244 genapi.write_file(c_file, s) 245 246 # write to documentation 247 s = c_api_header 248 for func in numpyapi_list: 249 s += func.to_ReST() 250 s += '\n\n' 251 genapi.write_file(doc_file, s) 252 253 return targets 254 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/numpy/core/code_generators/generate_numpy_api.py b/numpy/core/code_generators/generate_numpy_api.py --- a/numpy/core/code_generators/generate_numpy_api.py +++ b/numpy/core/code_generators/generate_numpy_api.py @@ -50,7 +50,6 @@ PyObject *c_api = NULL; if (numpy == NULL) { - PyErr_SetString(PyExc_ImportError, "numpy.core._multiarray_umath failed to import"); return -1; } c_api = PyObject_GetAttrString(numpy, "_ARRAY_API");
{"golden_diff": "diff --git a/numpy/core/code_generators/generate_numpy_api.py b/numpy/core/code_generators/generate_numpy_api.py\n--- a/numpy/core/code_generators/generate_numpy_api.py\n+++ b/numpy/core/code_generators/generate_numpy_api.py\n@@ -50,7 +50,6 @@\n PyObject *c_api = NULL;\n \n if (numpy == NULL) {\n- PyErr_SetString(PyExc_ImportError, \"numpy.core._multiarray_umath failed to import\");\n return -1;\n }\n c_api = PyObject_GetAttrString(numpy, \"_ARRAY_API\");\n", "issue": "import_array hides true cause of import errors\nWhen compiling for Python 3, it would be useful to utilize exception chaining to explain why \"numpy.core.multiarray failed to import\". I think you can use [PyException_SetCause](https://docs.python.org/3/c-api/exceptions.html#c.PyException_SetCause) to do this?\n\n", "before_files": [{"content": "from __future__ import division, print_function\n\nimport os\nimport genapi\n\nfrom genapi import \\\n TypeApi, GlobalVarApi, FunctionApi, BoolValuesApi\n\nimport numpy_api\n\n# use annotated api when running under cpychecker\nh_template = r\"\"\"\n#if defined(_MULTIARRAYMODULE) || defined(WITH_CPYCHECKER_STEALS_REFERENCE_TO_ARG_ATTRIBUTE)\n\ntypedef struct {\n PyObject_HEAD\n npy_bool obval;\n} PyBoolScalarObject;\n\nextern NPY_NO_EXPORT PyTypeObject PyArrayMapIter_Type;\nextern NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type;\nextern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2];\n\n%s\n\n#else\n\n#if defined(PY_ARRAY_UNIQUE_SYMBOL)\n#define PyArray_API PY_ARRAY_UNIQUE_SYMBOL\n#endif\n\n#if defined(NO_IMPORT) || defined(NO_IMPORT_ARRAY)\nextern void **PyArray_API;\n#else\n#if defined(PY_ARRAY_UNIQUE_SYMBOL)\nvoid **PyArray_API;\n#else\nstatic void **PyArray_API=NULL;\n#endif\n#endif\n\n%s\n\n#if !defined(NO_IMPORT_ARRAY) && !defined(NO_IMPORT)\nstatic int\n_import_array(void)\n{\n int st;\n PyObject *numpy = PyImport_ImportModule(\"numpy.core._multiarray_umath\");\n PyObject *c_api = NULL;\n\n if (numpy == NULL) {\n PyErr_SetString(PyExc_ImportError, \"numpy.core._multiarray_umath failed to import\");\n return -1;\n }\n c_api = PyObject_GetAttrString(numpy, \"_ARRAY_API\");\n Py_DECREF(numpy);\n if (c_api == NULL) {\n PyErr_SetString(PyExc_AttributeError, \"_ARRAY_API not found\");\n return -1;\n }\n\n#if PY_VERSION_HEX >= 0x03000000\n if (!PyCapsule_CheckExact(c_api)) {\n PyErr_SetString(PyExc_RuntimeError, \"_ARRAY_API is not PyCapsule object\");\n Py_DECREF(c_api);\n return -1;\n }\n PyArray_API = (void **)PyCapsule_GetPointer(c_api, NULL);\n#else\n if (!PyCObject_Check(c_api)) {\n PyErr_SetString(PyExc_RuntimeError, \"_ARRAY_API is not PyCObject object\");\n Py_DECREF(c_api);\n return -1;\n }\n PyArray_API = (void **)PyCObject_AsVoidPtr(c_api);\n#endif\n Py_DECREF(c_api);\n if (PyArray_API == NULL) {\n PyErr_SetString(PyExc_RuntimeError, \"_ARRAY_API is NULL pointer\");\n return -1;\n }\n\n /* Perform runtime check of C API version */\n if (NPY_VERSION != PyArray_GetNDArrayCVersion()) {\n PyErr_Format(PyExc_RuntimeError, \"module compiled against \"\\\n \"ABI version 0x%%x but this version of numpy is 0x%%x\", \\\n (int) NPY_VERSION, (int) PyArray_GetNDArrayCVersion());\n return -1;\n }\n if (NPY_FEATURE_VERSION > PyArray_GetNDArrayCFeatureVersion()) {\n PyErr_Format(PyExc_RuntimeError, \"module compiled against \"\\\n \"API version 0x%%x but this version of numpy is 0x%%x\", \\\n (int) NPY_FEATURE_VERSION, (int) PyArray_GetNDArrayCFeatureVersion());\n return -1;\n }\n\n /*\n * Perform runtime check of endianness and check it matches the 
one set by\n * the headers (npy_endian.h) as a safeguard\n */\n st = PyArray_GetEndianness();\n if (st == NPY_CPU_UNKNOWN_ENDIAN) {\n PyErr_Format(PyExc_RuntimeError, \"FATAL: module compiled as unknown endian\");\n return -1;\n }\n#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN\n if (st != NPY_CPU_BIG) {\n PyErr_Format(PyExc_RuntimeError, \"FATAL: module compiled as \"\\\n \"big endian, but detected different endianness at runtime\");\n return -1;\n }\n#elif NPY_BYTE_ORDER == NPY_LITTLE_ENDIAN\n if (st != NPY_CPU_LITTLE) {\n PyErr_Format(PyExc_RuntimeError, \"FATAL: module compiled as \"\\\n \"little endian, but detected different endianness at runtime\");\n return -1;\n }\n#endif\n\n return 0;\n}\n\n#if PY_VERSION_HEX >= 0x03000000\n#define NUMPY_IMPORT_ARRAY_RETVAL NULL\n#else\n#define NUMPY_IMPORT_ARRAY_RETVAL\n#endif\n\n#define import_array() {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, \"numpy.core.multiarray failed to import\"); return NUMPY_IMPORT_ARRAY_RETVAL; } }\n\n#define import_array1(ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, \"numpy.core.multiarray failed to import\"); return ret; } }\n\n#define import_array2(msg, ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, msg); return ret; } }\n\n#endif\n\n#endif\n\"\"\"\n\n\nc_template = r\"\"\"\n/* These pointers will be stored in the C-object for use in other\n extension modules\n*/\n\nvoid *PyArray_API[] = {\n%s\n};\n\"\"\"\n\nc_api_header = \"\"\"\n===========\nNumPy C-API\n===========\n\"\"\"\n\ndef generate_api(output_dir, force=False):\n basename = 'multiarray_api'\n\n h_file = os.path.join(output_dir, '__%s.h' % basename)\n c_file = os.path.join(output_dir, '__%s.c' % basename)\n d_file = os.path.join(output_dir, '%s.txt' % basename)\n targets = (h_file, c_file, d_file)\n\n sources = numpy_api.multiarray_api\n\n if (not force and not genapi.should_rebuild(targets, [numpy_api.__file__, __file__])):\n return targets\n else:\n do_generate_api(targets, sources)\n\n return targets\n\ndef do_generate_api(targets, sources):\n header_file = targets[0]\n c_file = targets[1]\n doc_file = targets[2]\n\n global_vars = sources[0]\n scalar_bool_values = sources[1]\n types_api = sources[2]\n multiarray_funcs = sources[3]\n\n multiarray_api = sources[:]\n\n module_list = []\n extension_list = []\n init_list = []\n\n # Check multiarray api indexes\n multiarray_api_index = genapi.merge_api_dicts(multiarray_api)\n genapi.check_api_dict(multiarray_api_index)\n\n numpyapi_list = genapi.get_api_functions('NUMPY_API',\n multiarray_funcs)\n ordered_funcs_api = genapi.order_dict(multiarray_funcs)\n\n # Create dict name -> *Api instance\n api_name = 'PyArray_API'\n multiarray_api_dict = {}\n for f in numpyapi_list:\n name = f.name\n index = multiarray_funcs[name][0]\n annotations = multiarray_funcs[name][1:]\n multiarray_api_dict[f.name] = FunctionApi(f.name, index, annotations,\n f.return_type,\n f.args, api_name)\n\n for name, val in global_vars.items():\n index, type = val\n multiarray_api_dict[name] = GlobalVarApi(name, index, type, api_name)\n\n for name, val in scalar_bool_values.items():\n index = val[0]\n multiarray_api_dict[name] = BoolValuesApi(name, index, api_name)\n\n for name, val in types_api.items():\n index = val[0]\n multiarray_api_dict[name] = TypeApi(name, index, 'PyTypeObject', api_name)\n\n if len(multiarray_api_dict) != len(multiarray_api_index):\n keys_dict = set(multiarray_api_dict.keys())\n keys_index = 
set(multiarray_api_index.keys())\n raise AssertionError(\n \"Multiarray API size mismatch - \"\n \"index has extra keys {}, dict has extra keys {}\"\n .format(keys_index - keys_dict, keys_dict - keys_index)\n )\n\n extension_list = []\n for name, index in genapi.order_dict(multiarray_api_index):\n api_item = multiarray_api_dict[name]\n extension_list.append(api_item.define_from_array_api_string())\n init_list.append(api_item.array_api_define())\n module_list.append(api_item.internal_define())\n\n # Write to header\n s = h_template % ('\\n'.join(module_list), '\\n'.join(extension_list))\n genapi.write_file(header_file, s)\n\n # Write to c-code\n s = c_template % ',\\n'.join(init_list)\n genapi.write_file(c_file, s)\n\n # write to documentation\n s = c_api_header\n for func in numpyapi_list:\n s += func.to_ReST()\n s += '\\n\\n'\n genapi.write_file(doc_file, s)\n\n return targets\n", "path": "numpy/core/code_generators/generate_numpy_api.py"}], "after_files": [{"content": "from __future__ import division, print_function\n\nimport os\nimport genapi\n\nfrom genapi import \\\n TypeApi, GlobalVarApi, FunctionApi, BoolValuesApi\n\nimport numpy_api\n\n# use annotated api when running under cpychecker\nh_template = r\"\"\"\n#if defined(_MULTIARRAYMODULE) || defined(WITH_CPYCHECKER_STEALS_REFERENCE_TO_ARG_ATTRIBUTE)\n\ntypedef struct {\n PyObject_HEAD\n npy_bool obval;\n} PyBoolScalarObject;\n\nextern NPY_NO_EXPORT PyTypeObject PyArrayMapIter_Type;\nextern NPY_NO_EXPORT PyTypeObject PyArrayNeighborhoodIter_Type;\nextern NPY_NO_EXPORT PyBoolScalarObject _PyArrayScalar_BoolValues[2];\n\n%s\n\n#else\n\n#if defined(PY_ARRAY_UNIQUE_SYMBOL)\n#define PyArray_API PY_ARRAY_UNIQUE_SYMBOL\n#endif\n\n#if defined(NO_IMPORT) || defined(NO_IMPORT_ARRAY)\nextern void **PyArray_API;\n#else\n#if defined(PY_ARRAY_UNIQUE_SYMBOL)\nvoid **PyArray_API;\n#else\nstatic void **PyArray_API=NULL;\n#endif\n#endif\n\n%s\n\n#if !defined(NO_IMPORT_ARRAY) && !defined(NO_IMPORT)\nstatic int\n_import_array(void)\n{\n int st;\n PyObject *numpy = PyImport_ImportModule(\"numpy.core._multiarray_umath\");\n PyObject *c_api = NULL;\n\n if (numpy == NULL) {\n return -1;\n }\n c_api = PyObject_GetAttrString(numpy, \"_ARRAY_API\");\n Py_DECREF(numpy);\n if (c_api == NULL) {\n PyErr_SetString(PyExc_AttributeError, \"_ARRAY_API not found\");\n return -1;\n }\n\n#if PY_VERSION_HEX >= 0x03000000\n if (!PyCapsule_CheckExact(c_api)) {\n PyErr_SetString(PyExc_RuntimeError, \"_ARRAY_API is not PyCapsule object\");\n Py_DECREF(c_api);\n return -1;\n }\n PyArray_API = (void **)PyCapsule_GetPointer(c_api, NULL);\n#else\n if (!PyCObject_Check(c_api)) {\n PyErr_SetString(PyExc_RuntimeError, \"_ARRAY_API is not PyCObject object\");\n Py_DECREF(c_api);\n return -1;\n }\n PyArray_API = (void **)PyCObject_AsVoidPtr(c_api);\n#endif\n Py_DECREF(c_api);\n if (PyArray_API == NULL) {\n PyErr_SetString(PyExc_RuntimeError, \"_ARRAY_API is NULL pointer\");\n return -1;\n }\n\n /* Perform runtime check of C API version */\n if (NPY_VERSION != PyArray_GetNDArrayCVersion()) {\n PyErr_Format(PyExc_RuntimeError, \"module compiled against \"\\\n \"ABI version 0x%%x but this version of numpy is 0x%%x\", \\\n (int) NPY_VERSION, (int) PyArray_GetNDArrayCVersion());\n return -1;\n }\n if (NPY_FEATURE_VERSION > PyArray_GetNDArrayCFeatureVersion()) {\n PyErr_Format(PyExc_RuntimeError, \"module compiled against \"\\\n \"API version 0x%%x but this version of numpy is 0x%%x\", \\\n (int) NPY_FEATURE_VERSION, (int) PyArray_GetNDArrayCFeatureVersion());\n return -1;\n }\n\n /*\n * 
Perform runtime check of endianness and check it matches the one set by\n * the headers (npy_endian.h) as a safeguard\n */\n st = PyArray_GetEndianness();\n if (st == NPY_CPU_UNKNOWN_ENDIAN) {\n PyErr_Format(PyExc_RuntimeError, \"FATAL: module compiled as unknown endian\");\n return -1;\n }\n#if NPY_BYTE_ORDER == NPY_BIG_ENDIAN\n if (st != NPY_CPU_BIG) {\n PyErr_Format(PyExc_RuntimeError, \"FATAL: module compiled as \"\\\n \"big endian, but detected different endianness at runtime\");\n return -1;\n }\n#elif NPY_BYTE_ORDER == NPY_LITTLE_ENDIAN\n if (st != NPY_CPU_LITTLE) {\n PyErr_Format(PyExc_RuntimeError, \"FATAL: module compiled as \"\\\n \"little endian, but detected different endianness at runtime\");\n return -1;\n }\n#endif\n\n return 0;\n}\n\n#if PY_VERSION_HEX >= 0x03000000\n#define NUMPY_IMPORT_ARRAY_RETVAL NULL\n#else\n#define NUMPY_IMPORT_ARRAY_RETVAL\n#endif\n\n#define import_array() {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, \"numpy.core.multiarray failed to import\"); return NUMPY_IMPORT_ARRAY_RETVAL; } }\n\n#define import_array1(ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, \"numpy.core.multiarray failed to import\"); return ret; } }\n\n#define import_array2(msg, ret) {if (_import_array() < 0) {PyErr_Print(); PyErr_SetString(PyExc_ImportError, msg); return ret; } }\n\n#endif\n\n#endif\n\"\"\"\n\n\nc_template = r\"\"\"\n/* These pointers will be stored in the C-object for use in other\n extension modules\n*/\n\nvoid *PyArray_API[] = {\n%s\n};\n\"\"\"\n\nc_api_header = \"\"\"\n===========\nNumPy C-API\n===========\n\"\"\"\n\ndef generate_api(output_dir, force=False):\n basename = 'multiarray_api'\n\n h_file = os.path.join(output_dir, '__%s.h' % basename)\n c_file = os.path.join(output_dir, '__%s.c' % basename)\n d_file = os.path.join(output_dir, '%s.txt' % basename)\n targets = (h_file, c_file, d_file)\n\n sources = numpy_api.multiarray_api\n\n if (not force and not genapi.should_rebuild(targets, [numpy_api.__file__, __file__])):\n return targets\n else:\n do_generate_api(targets, sources)\n\n return targets\n\ndef do_generate_api(targets, sources):\n header_file = targets[0]\n c_file = targets[1]\n doc_file = targets[2]\n\n global_vars = sources[0]\n scalar_bool_values = sources[1]\n types_api = sources[2]\n multiarray_funcs = sources[3]\n\n multiarray_api = sources[:]\n\n module_list = []\n extension_list = []\n init_list = []\n\n # Check multiarray api indexes\n multiarray_api_index = genapi.merge_api_dicts(multiarray_api)\n genapi.check_api_dict(multiarray_api_index)\n\n numpyapi_list = genapi.get_api_functions('NUMPY_API',\n multiarray_funcs)\n ordered_funcs_api = genapi.order_dict(multiarray_funcs)\n\n # Create dict name -> *Api instance\n api_name = 'PyArray_API'\n multiarray_api_dict = {}\n for f in numpyapi_list:\n name = f.name\n index = multiarray_funcs[name][0]\n annotations = multiarray_funcs[name][1:]\n multiarray_api_dict[f.name] = FunctionApi(f.name, index, annotations,\n f.return_type,\n f.args, api_name)\n\n for name, val in global_vars.items():\n index, type = val\n multiarray_api_dict[name] = GlobalVarApi(name, index, type, api_name)\n\n for name, val in scalar_bool_values.items():\n index = val[0]\n multiarray_api_dict[name] = BoolValuesApi(name, index, api_name)\n\n for name, val in types_api.items():\n index = val[0]\n multiarray_api_dict[name] = TypeApi(name, index, 'PyTypeObject', api_name)\n\n if len(multiarray_api_dict) != len(multiarray_api_index):\n keys_dict = 
set(multiarray_api_dict.keys())\n keys_index = set(multiarray_api_index.keys())\n raise AssertionError(\n \"Multiarray API size mismatch - \"\n \"index has extra keys {}, dict has extra keys {}\"\n .format(keys_index - keys_dict, keys_dict - keys_index)\n )\n\n extension_list = []\n for name, index in genapi.order_dict(multiarray_api_index):\n api_item = multiarray_api_dict[name]\n extension_list.append(api_item.define_from_array_api_string())\n init_list.append(api_item.array_api_define())\n module_list.append(api_item.internal_define())\n\n # Write to header\n s = h_template % ('\\n'.join(module_list), '\\n'.join(extension_list))\n genapi.write_file(header_file, s)\n\n # Write to c-code\n s = c_template % ',\\n'.join(init_list)\n genapi.write_file(c_file, s)\n\n # write to documentation\n s = c_api_header\n for func in numpyapi_list:\n s += func.to_ReST()\n s += '\\n\\n'\n genapi.write_file(doc_file, s)\n\n return targets\n", "path": "numpy/core/code_generators/generate_numpy_api.py"}]}
2,973
129
gh_patches_debug_2978
rasdani/github-patches
git_diff
frappe__frappe-20434
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Enable Scheduler from desk Feature to enable scheduler from desk. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `frappe/utils/scheduler.py` Content: ``` 1 # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors 2 # License: MIT. See LICENSE 3 """ 4 Events: 5 always 6 daily 7 monthly 8 weekly 9 """ 10 11 # imports - standard imports 12 import os 13 import time 14 from typing import NoReturn 15 16 # imports - module imports 17 import frappe 18 from frappe.installer import update_site_config 19 from frappe.utils import cint, get_datetime, get_sites, now_datetime 20 from frappe.utils.background_jobs import get_jobs 21 22 DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S" 23 24 25 def cprint(*args, **kwargs): 26 """Prints only if called from STDOUT""" 27 try: 28 os.get_terminal_size() 29 print(*args, **kwargs) 30 except Exception: 31 pass 32 33 34 def start_scheduler() -> NoReturn: 35 """Run enqueue_events_for_all_sites based on scheduler tick. 36 Specify scheduler_interval in seconds in common_site_config.json""" 37 38 tick = cint(frappe.get_conf().scheduler_tick_interval) or 60 39 40 while True: 41 time.sleep(tick) 42 enqueue_events_for_all_sites() 43 44 45 def enqueue_events_for_all_sites() -> None: 46 """Loop through sites and enqueue events that are not already queued""" 47 48 if os.path.exists(os.path.join(".", ".restarting")): 49 # Don't add task to queue if webserver is in restart mode 50 return 51 52 with frappe.init_site(): 53 sites = get_sites() 54 55 for site in sites: 56 try: 57 enqueue_events_for_site(site=site) 58 except Exception: 59 frappe.logger("scheduler").debug(f"Failed to enqueue events for site: {site}", exc_info=True) 60 61 62 def enqueue_events_for_site(site: str) -> None: 63 def log_exc(): 64 frappe.logger("scheduler").error(f"Exception in Enqueue Events for Site {site}", exc_info=True) 65 66 try: 67 frappe.init(site=site) 68 frappe.connect() 69 if is_scheduler_inactive(): 70 return 71 72 enqueue_events(site=site) 73 74 frappe.logger("scheduler").debug(f"Queued events for site {site}") 75 except Exception as e: 76 if frappe.db.is_access_denied(e): 77 frappe.logger("scheduler").debug(f"Access denied for site {site}") 78 log_exc() 79 80 finally: 81 frappe.destroy() 82 83 84 def enqueue_events(site: str) -> list[str] | None: 85 if schedule_jobs_based_on_activity(): 86 enqueued_jobs = [] 87 for job_type in frappe.get_all("Scheduled Job Type", ("name", "method"), {"stopped": 0}): 88 job_type = frappe.get_cached_doc("Scheduled Job Type", job_type.name) 89 if _enqueued := job_type.enqueue(): 90 enqueued_jobs.append(job_type.method) 91 92 return enqueued_jobs 93 94 95 def is_scheduler_inactive(verbose=True) -> bool: 96 if frappe.local.conf.maintenance_mode: 97 if verbose: 98 cprint(f"{frappe.local.site}: Maintenance mode is ON") 99 return True 100 101 if frappe.local.conf.pause_scheduler: 102 if verbose: 103 cprint(f"{frappe.local.site}: frappe.conf.pause_scheduler is SET") 104 return True 105 106 if is_scheduler_disabled(verbose=verbose): 107 return True 108 109 return False 110 111 112 def is_scheduler_disabled(verbose=True) -> bool: 113 if frappe.conf.disable_scheduler: 114 if verbose: 115 cprint(f"{frappe.local.site}: frappe.conf.disable_scheduler is SET") 116 return True 117 118 scheduler_disabled = not frappe.utils.cint( 119 frappe.db.get_single_value("System 
Settings", "enable_scheduler") 120 ) 121 if scheduler_disabled: 122 if verbose: 123 cprint(f"{frappe.local.site}: SystemSettings.enable_scheduler is UNSET") 124 return scheduler_disabled 125 126 127 def toggle_scheduler(enable): 128 frappe.db.set_single_value("System Settings", "enable_scheduler", int(enable)) 129 130 131 def enable_scheduler(): 132 toggle_scheduler(True) 133 134 135 def disable_scheduler(): 136 toggle_scheduler(False) 137 138 139 def schedule_jobs_based_on_activity(check_time=None): 140 """Returns True for active sites defined by Activity Log 141 Returns True for inactive sites once in 24 hours""" 142 if is_dormant(check_time=check_time): 143 # ensure last job is one day old 144 last_job_timestamp = _get_last_modified_timestamp("Scheduled Job Log") 145 if not last_job_timestamp: 146 return True 147 else: 148 if ((check_time or now_datetime()) - last_job_timestamp).total_seconds() >= 86400: 149 # one day is passed since jobs are run, so lets do this 150 return True 151 else: 152 # schedulers run in the last 24 hours, do nothing 153 return False 154 else: 155 # site active, lets run the jobs 156 return True 157 158 159 def is_dormant(check_time=None): 160 last_activity_log_timestamp = _get_last_modified_timestamp("Activity Log") 161 since = (frappe.get_system_settings("dormant_days") or 4) * 86400 162 if not last_activity_log_timestamp: 163 return True 164 if ((check_time or now_datetime()) - last_activity_log_timestamp).total_seconds() >= since: 165 return True 166 return False 167 168 169 def _get_last_modified_timestamp(doctype): 170 timestamp = frappe.db.get_value( 171 doctype, filters={}, fieldname="modified", order_by="modified desc" 172 ) 173 if timestamp: 174 return get_datetime(timestamp) 175 176 177 @frappe.whitelist() 178 def activate_scheduler(): 179 if is_scheduler_disabled(): 180 enable_scheduler() 181 if frappe.conf.pause_scheduler: 182 update_site_config("pause_scheduler", 0) 183 184 185 @frappe.whitelist() 186 def get_scheduler_status(): 187 if is_scheduler_inactive(): 188 return {"status": "inactive"} 189 return {"status": "active"} 190 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/frappe/utils/scheduler.py b/frappe/utils/scheduler.py --- a/frappe/utils/scheduler.py +++ b/frappe/utils/scheduler.py @@ -176,6 +176,11 @@ @frappe.whitelist() def activate_scheduler(): + frappe.only_for("Administrator") + + if frappe.local.conf.maintenance_mode: + frappe.throw(frappe._("Scheduler can not be re-enabled when maintenance mode is active.")) + if is_scheduler_disabled(): enable_scheduler() if frappe.conf.pause_scheduler:
{"golden_diff": "diff --git a/frappe/utils/scheduler.py b/frappe/utils/scheduler.py\n--- a/frappe/utils/scheduler.py\n+++ b/frappe/utils/scheduler.py\n@@ -176,6 +176,11 @@\n \n @frappe.whitelist()\n def activate_scheduler():\n+\tfrappe.only_for(\"Administrator\")\n+\n+\tif frappe.local.conf.maintenance_mode:\n+\t\tfrappe.throw(frappe._(\"Scheduler can not be re-enabled when maintenance mode is active.\"))\n+\n \tif is_scheduler_disabled():\n \t\tenable_scheduler()\n \tif frappe.conf.pause_scheduler:\n", "issue": "Enable Scheduler from desk\nFeature to enable scheduler from desk.\n", "before_files": [{"content": "# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors\n# License: MIT. See LICENSE\n\"\"\"\nEvents:\n\talways\n\tdaily\n\tmonthly\n\tweekly\n\"\"\"\n\n# imports - standard imports\nimport os\nimport time\nfrom typing import NoReturn\n\n# imports - module imports\nimport frappe\nfrom frappe.installer import update_site_config\nfrom frappe.utils import cint, get_datetime, get_sites, now_datetime\nfrom frappe.utils.background_jobs import get_jobs\n\nDATETIME_FORMAT = \"%Y-%m-%d %H:%M:%S\"\n\n\ndef cprint(*args, **kwargs):\n\t\"\"\"Prints only if called from STDOUT\"\"\"\n\ttry:\n\t\tos.get_terminal_size()\n\t\tprint(*args, **kwargs)\n\texcept Exception:\n\t\tpass\n\n\ndef start_scheduler() -> NoReturn:\n\t\"\"\"Run enqueue_events_for_all_sites based on scheduler tick.\n\tSpecify scheduler_interval in seconds in common_site_config.json\"\"\"\n\n\ttick = cint(frappe.get_conf().scheduler_tick_interval) or 60\n\n\twhile True:\n\t\ttime.sleep(tick)\n\t\tenqueue_events_for_all_sites()\n\n\ndef enqueue_events_for_all_sites() -> None:\n\t\"\"\"Loop through sites and enqueue events that are not already queued\"\"\"\n\n\tif os.path.exists(os.path.join(\".\", \".restarting\")):\n\t\t# Don't add task to queue if webserver is in restart mode\n\t\treturn\n\n\twith frappe.init_site():\n\t\tsites = get_sites()\n\n\tfor site in sites:\n\t\ttry:\n\t\t\tenqueue_events_for_site(site=site)\n\t\texcept Exception:\n\t\t\tfrappe.logger(\"scheduler\").debug(f\"Failed to enqueue events for site: {site}\", exc_info=True)\n\n\ndef enqueue_events_for_site(site: str) -> None:\n\tdef log_exc():\n\t\tfrappe.logger(\"scheduler\").error(f\"Exception in Enqueue Events for Site {site}\", exc_info=True)\n\n\ttry:\n\t\tfrappe.init(site=site)\n\t\tfrappe.connect()\n\t\tif is_scheduler_inactive():\n\t\t\treturn\n\n\t\tenqueue_events(site=site)\n\n\t\tfrappe.logger(\"scheduler\").debug(f\"Queued events for site {site}\")\n\texcept Exception as e:\n\t\tif frappe.db.is_access_denied(e):\n\t\t\tfrappe.logger(\"scheduler\").debug(f\"Access denied for site {site}\")\n\t\tlog_exc()\n\n\tfinally:\n\t\tfrappe.destroy()\n\n\ndef enqueue_events(site: str) -> list[str] | None:\n\tif schedule_jobs_based_on_activity():\n\t\tenqueued_jobs = []\n\t\tfor job_type in frappe.get_all(\"Scheduled Job Type\", (\"name\", \"method\"), {\"stopped\": 0}):\n\t\t\tjob_type = frappe.get_cached_doc(\"Scheduled Job Type\", job_type.name)\n\t\t\tif _enqueued := job_type.enqueue():\n\t\t\t\tenqueued_jobs.append(job_type.method)\n\n\t\treturn enqueued_jobs\n\n\ndef is_scheduler_inactive(verbose=True) -> bool:\n\tif frappe.local.conf.maintenance_mode:\n\t\tif verbose:\n\t\t\tcprint(f\"{frappe.local.site}: Maintenance mode is ON\")\n\t\treturn True\n\n\tif frappe.local.conf.pause_scheduler:\n\t\tif verbose:\n\t\t\tcprint(f\"{frappe.local.site}: frappe.conf.pause_scheduler is SET\")\n\t\treturn True\n\n\tif 
is_scheduler_disabled(verbose=verbose):\n\t\treturn True\n\n\treturn False\n\n\ndef is_scheduler_disabled(verbose=True) -> bool:\n\tif frappe.conf.disable_scheduler:\n\t\tif verbose:\n\t\t\tcprint(f\"{frappe.local.site}: frappe.conf.disable_scheduler is SET\")\n\t\treturn True\n\n\tscheduler_disabled = not frappe.utils.cint(\n\t\tfrappe.db.get_single_value(\"System Settings\", \"enable_scheduler\")\n\t)\n\tif scheduler_disabled:\n\t\tif verbose:\n\t\t\tcprint(f\"{frappe.local.site}: SystemSettings.enable_scheduler is UNSET\")\n\treturn scheduler_disabled\n\n\ndef toggle_scheduler(enable):\n\tfrappe.db.set_single_value(\"System Settings\", \"enable_scheduler\", int(enable))\n\n\ndef enable_scheduler():\n\ttoggle_scheduler(True)\n\n\ndef disable_scheduler():\n\ttoggle_scheduler(False)\n\n\ndef schedule_jobs_based_on_activity(check_time=None):\n\t\"\"\"Returns True for active sites defined by Activity Log\n\tReturns True for inactive sites once in 24 hours\"\"\"\n\tif is_dormant(check_time=check_time):\n\t\t# ensure last job is one day old\n\t\tlast_job_timestamp = _get_last_modified_timestamp(\"Scheduled Job Log\")\n\t\tif not last_job_timestamp:\n\t\t\treturn True\n\t\telse:\n\t\t\tif ((check_time or now_datetime()) - last_job_timestamp).total_seconds() >= 86400:\n\t\t\t\t# one day is passed since jobs are run, so lets do this\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\t# schedulers run in the last 24 hours, do nothing\n\t\t\t\treturn False\n\telse:\n\t\t# site active, lets run the jobs\n\t\treturn True\n\n\ndef is_dormant(check_time=None):\n\tlast_activity_log_timestamp = _get_last_modified_timestamp(\"Activity Log\")\n\tsince = (frappe.get_system_settings(\"dormant_days\") or 4) * 86400\n\tif not last_activity_log_timestamp:\n\t\treturn True\n\tif ((check_time or now_datetime()) - last_activity_log_timestamp).total_seconds() >= since:\n\t\treturn True\n\treturn False\n\n\ndef _get_last_modified_timestamp(doctype):\n\ttimestamp = frappe.db.get_value(\n\t\tdoctype, filters={}, fieldname=\"modified\", order_by=\"modified desc\"\n\t)\n\tif timestamp:\n\t\treturn get_datetime(timestamp)\n\n\[email protected]()\ndef activate_scheduler():\n\tif is_scheduler_disabled():\n\t\tenable_scheduler()\n\tif frappe.conf.pause_scheduler:\n\t\tupdate_site_config(\"pause_scheduler\", 0)\n\n\[email protected]()\ndef get_scheduler_status():\n\tif is_scheduler_inactive():\n\t\treturn {\"status\": \"inactive\"}\n\treturn {\"status\": \"active\"}\n", "path": "frappe/utils/scheduler.py"}], "after_files": [{"content": "# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors\n# License: MIT. 
See LICENSE\n\"\"\"\nEvents:\n\talways\n\tdaily\n\tmonthly\n\tweekly\n\"\"\"\n\n# imports - standard imports\nimport os\nimport time\nfrom typing import NoReturn\n\n# imports - module imports\nimport frappe\nfrom frappe.installer import update_site_config\nfrom frappe.utils import cint, get_datetime, get_sites, now_datetime\nfrom frappe.utils.background_jobs import get_jobs\n\nDATETIME_FORMAT = \"%Y-%m-%d %H:%M:%S\"\n\n\ndef cprint(*args, **kwargs):\n\t\"\"\"Prints only if called from STDOUT\"\"\"\n\ttry:\n\t\tos.get_terminal_size()\n\t\tprint(*args, **kwargs)\n\texcept Exception:\n\t\tpass\n\n\ndef start_scheduler() -> NoReturn:\n\t\"\"\"Run enqueue_events_for_all_sites based on scheduler tick.\n\tSpecify scheduler_interval in seconds in common_site_config.json\"\"\"\n\n\ttick = cint(frappe.get_conf().scheduler_tick_interval) or 60\n\n\twhile True:\n\t\ttime.sleep(tick)\n\t\tenqueue_events_for_all_sites()\n\n\ndef enqueue_events_for_all_sites() -> None:\n\t\"\"\"Loop through sites and enqueue events that are not already queued\"\"\"\n\n\tif os.path.exists(os.path.join(\".\", \".restarting\")):\n\t\t# Don't add task to queue if webserver is in restart mode\n\t\treturn\n\n\twith frappe.init_site():\n\t\tsites = get_sites()\n\n\tfor site in sites:\n\t\ttry:\n\t\t\tenqueue_events_for_site(site=site)\n\t\texcept Exception:\n\t\t\tfrappe.logger(\"scheduler\").debug(f\"Failed to enqueue events for site: {site}\", exc_info=True)\n\n\ndef enqueue_events_for_site(site: str) -> None:\n\tdef log_exc():\n\t\tfrappe.logger(\"scheduler\").error(f\"Exception in Enqueue Events for Site {site}\", exc_info=True)\n\n\ttry:\n\t\tfrappe.init(site=site)\n\t\tfrappe.connect()\n\t\tif is_scheduler_inactive():\n\t\t\treturn\n\n\t\tenqueue_events(site=site)\n\n\t\tfrappe.logger(\"scheduler\").debug(f\"Queued events for site {site}\")\n\texcept Exception as e:\n\t\tif frappe.db.is_access_denied(e):\n\t\t\tfrappe.logger(\"scheduler\").debug(f\"Access denied for site {site}\")\n\t\tlog_exc()\n\n\tfinally:\n\t\tfrappe.destroy()\n\n\ndef enqueue_events(site: str) -> list[str] | None:\n\tif schedule_jobs_based_on_activity():\n\t\tenqueued_jobs = []\n\t\tfor job_type in frappe.get_all(\"Scheduled Job Type\", (\"name\", \"method\"), {\"stopped\": 0}):\n\t\t\tjob_type = frappe.get_cached_doc(\"Scheduled Job Type\", job_type.name)\n\t\t\tif _enqueued := job_type.enqueue():\n\t\t\t\tenqueued_jobs.append(job_type.method)\n\n\t\treturn enqueued_jobs\n\n\ndef is_scheduler_inactive(verbose=True) -> bool:\n\tif frappe.local.conf.maintenance_mode:\n\t\tif verbose:\n\t\t\tcprint(f\"{frappe.local.site}: Maintenance mode is ON\")\n\t\treturn True\n\n\tif frappe.local.conf.pause_scheduler:\n\t\tif verbose:\n\t\t\tcprint(f\"{frappe.local.site}: frappe.conf.pause_scheduler is SET\")\n\t\treturn True\n\n\tif is_scheduler_disabled(verbose=verbose):\n\t\treturn True\n\n\treturn False\n\n\ndef is_scheduler_disabled(verbose=True) -> bool:\n\tif frappe.conf.disable_scheduler:\n\t\tif verbose:\n\t\t\tcprint(f\"{frappe.local.site}: frappe.conf.disable_scheduler is SET\")\n\t\treturn True\n\n\tscheduler_disabled = not frappe.utils.cint(\n\t\tfrappe.db.get_single_value(\"System Settings\", \"enable_scheduler\")\n\t)\n\tif scheduler_disabled:\n\t\tif verbose:\n\t\t\tcprint(f\"{frappe.local.site}: SystemSettings.enable_scheduler is UNSET\")\n\treturn scheduler_disabled\n\n\ndef toggle_scheduler(enable):\n\tfrappe.db.set_single_value(\"System Settings\", \"enable_scheduler\", int(enable))\n\n\ndef 
enable_scheduler():\n\ttoggle_scheduler(True)\n\n\ndef disable_scheduler():\n\ttoggle_scheduler(False)\n\n\ndef schedule_jobs_based_on_activity(check_time=None):\n\t\"\"\"Returns True for active sites defined by Activity Log\n\tReturns True for inactive sites once in 24 hours\"\"\"\n\tif is_dormant(check_time=check_time):\n\t\t# ensure last job is one day old\n\t\tlast_job_timestamp = _get_last_modified_timestamp(\"Scheduled Job Log\")\n\t\tif not last_job_timestamp:\n\t\t\treturn True\n\t\telse:\n\t\t\tif ((check_time or now_datetime()) - last_job_timestamp).total_seconds() >= 86400:\n\t\t\t\t# one day is passed since jobs are run, so lets do this\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\t# schedulers run in the last 24 hours, do nothing\n\t\t\t\treturn False\n\telse:\n\t\t# site active, lets run the jobs\n\t\treturn True\n\n\ndef is_dormant(check_time=None):\n\tlast_activity_log_timestamp = _get_last_modified_timestamp(\"Activity Log\")\n\tsince = (frappe.get_system_settings(\"dormant_days\") or 4) * 86400\n\tif not last_activity_log_timestamp:\n\t\treturn True\n\tif ((check_time or now_datetime()) - last_activity_log_timestamp).total_seconds() >= since:\n\t\treturn True\n\treturn False\n\n\ndef _get_last_modified_timestamp(doctype):\n\ttimestamp = frappe.db.get_value(\n\t\tdoctype, filters={}, fieldname=\"modified\", order_by=\"modified desc\"\n\t)\n\tif timestamp:\n\t\treturn get_datetime(timestamp)\n\n\[email protected]()\ndef activate_scheduler():\n\tfrappe.only_for(\"Administrator\")\n\n\tif frappe.local.conf.maintenance_mode:\n\t\tfrappe.throw(frappe._(\"Scheduler can not be re-enabled when maintenance mode is active.\"))\n\n\tif is_scheduler_disabled():\n\t\tenable_scheduler()\n\tif frappe.conf.pause_scheduler:\n\t\tupdate_site_config(\"pause_scheduler\", 0)\n\n\[email protected]()\ndef get_scheduler_status():\n\tif is_scheduler_inactive():\n\t\treturn {\"status\": \"inactive\"}\n\treturn {\"status\": \"active\"}\n", "path": "frappe/utils/scheduler.py"}]}
2,031
126
gh_patches_debug_24938
rasdani/github-patches
git_diff
edgedb__edgedb-3127
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Drop support for type union operator for scalars. At the moment it's possible to apply type union operator to scalars, like `int64 | bigint`. This functions less intuitively than the same operation for object types. ``` edgedb> select 5 is int64; {true} edgedb> select 5 is (int64 | bigint); {false} edgedb> select (introspect (int64 | float32).name); {'std::float64'} ``` The issue is that what the operation seems to do is answer the question of "what type would be the result set if you try to UNION values of these two disparate types?" For object types, all objects are fundamentally related and therefore compatible enough to combine into a single set. The union type may not have all the links and properties directly accessible, but the underlying objects are of their specific types and remain unchanged. In practice it can meaningfully be used to denote a link which can validly take otherwise mostly unrelated object types (say `link attachment -> File | Link | Receipt`). For scalar types we _cast_ the values, changing them, in order to produce a homogeneous set. So all values in the resulting set are strictly of a specific scalar type without any connection to their "former" type. This makes finding practical usage for scalar type unions much harder. In addition to the above, we might want to introduce discriminated unions of scalars and the way the current `|` works with scalars could be counter-productive to that. For all of the above reasons we can restrict `|` and `&` to only work for object types and potentially relax this in the future as the need arises. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `edb/edgeql/compiler/typegen.py` Content: ``` 1 # 2 # This source file is part of the EdgeDB open source project. 3 # 4 # Copyright 2008-present MagicStack Inc. and the EdgeDB authors. 5 # 6 # Licensed under the Apache License, Version 2.0 (the "License"); 7 # you may not use this file except in compliance with the License. 8 # You may obtain a copy of the License at 9 # 10 # http://www.apache.org/licenses/LICENSE-2.0 11 # 12 # Unless required by applicable law or agreed to in writing, software 13 # distributed under the License is distributed on an "AS IS" BASIS, 14 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 # See the License for the specific language governing permissions and 16 # limitations under the License. 17 # 18 19 20 """EdgeQL compiler type-related helpers.""" 21 22 23 from __future__ import annotations 24 25 from typing import * 26 27 from edb import errors 28 29 from edb.ir import ast as irast 30 from edb.ir import typeutils as irtyputils 31 from edb.ir import utils as irutils 32 33 from edb.schema import abc as s_abc 34 from edb.schema import pointers as s_pointers 35 from edb.schema import types as s_types 36 from edb.schema import utils as s_utils 37 38 from edb.edgeql import ast as qlast 39 40 from . import context 41 from . import dispatch 42 from . import schemactx 43 from . 
import setgen 44 45 46 def type_to_ql_typeref( 47 t: s_types.Type, 48 *, 49 _name: Optional[str] = None, 50 ctx: context.ContextLevel, 51 ) -> qlast.TypeExpr: 52 return s_utils.typeref_to_ast( 53 ctx.env.schema, 54 t, 55 disambiguate_std='std' in ctx.modaliases, 56 ) 57 58 59 def ql_typeexpr_to_ir_typeref( 60 ql_t: qlast.TypeExpr, *, 61 ctx: context.ContextLevel) -> irast.TypeRef: 62 63 stype = ql_typeexpr_to_type(ql_t, ctx=ctx) 64 return irtyputils.type_to_typeref( 65 ctx.env.schema, stype, cache=ctx.env.type_ref_cache 66 ) 67 68 69 def ql_typeexpr_to_type( 70 ql_t: qlast.TypeExpr, *, 71 ctx: context.ContextLevel) -> s_types.Type: 72 73 types = _ql_typeexpr_to_type(ql_t, ctx=ctx) 74 if len(types) > 1: 75 return schemactx.get_union_type(types, ctx=ctx) 76 else: 77 return types[0] 78 79 80 def _ql_typeexpr_to_type( 81 ql_t: qlast.TypeExpr, *, 82 ctx: context.ContextLevel) -> List[s_types.Type]: 83 84 if isinstance(ql_t, qlast.TypeOf): 85 with ctx.new() as subctx: 86 # Use an empty scope tree, to avoid polluting things pointlessly 87 subctx.path_scope = irast.ScopeTreeNode() 88 ir_set = dispatch.compile(ql_t.expr, ctx=subctx) 89 stype = setgen.get_set_type(ir_set, ctx=subctx) 90 91 return [stype] 92 93 elif isinstance(ql_t, qlast.TypeOp): 94 if ql_t.op == '|': 95 return (_ql_typeexpr_to_type(ql_t.left, ctx=ctx) + 96 _ql_typeexpr_to_type(ql_t.right, ctx=ctx)) 97 98 raise errors.UnsupportedFeatureError( 99 f'type operator {ql_t.op!r} is not implemented', 100 context=ql_t.context) 101 102 elif isinstance(ql_t, qlast.TypeName): 103 return [_ql_typename_to_type(ql_t, ctx=ctx)] 104 105 else: 106 raise errors.EdgeQLSyntaxError("Unexpected type expression", 107 context=ql_t.context) 108 109 110 def _ql_typename_to_type( 111 ql_t: qlast.TypeName, *, 112 ctx: context.ContextLevel) -> s_types.Type: 113 if ql_t.subtypes: 114 assert isinstance(ql_t.maintype, qlast.ObjectRef) 115 coll = s_types.Collection.get_class(ql_t.maintype.name) 116 ct: s_types.Type 117 118 if issubclass(coll, s_abc.Tuple): 119 t_subtypes = {} 120 named = False 121 for si, st in enumerate(ql_t.subtypes): 122 if st.name: 123 named = True 124 type_name = st.name 125 else: 126 type_name = str(si) 127 128 t_subtypes[type_name] = ql_typeexpr_to_type(st, ctx=ctx) 129 130 ctx.env.schema, ct = coll.from_subtypes( 131 ctx.env.schema, t_subtypes, {'named': named}) 132 return ct 133 else: 134 a_subtypes = [] 135 for st in ql_t.subtypes: 136 a_subtypes.append(ql_typeexpr_to_type(st, ctx=ctx)) 137 138 ctx.env.schema, ct = coll.from_subtypes(ctx.env.schema, a_subtypes) 139 return ct 140 else: 141 return schemactx.get_schema_type(ql_t.maintype, ctx=ctx) 142 143 144 @overload 145 def ptrcls_from_ptrref( # NoQA: F811 146 ptrref: irast.PointerRef, *, 147 ctx: context.ContextLevel, 148 ) -> s_pointers.Pointer: 149 ... 150 151 152 @overload 153 def ptrcls_from_ptrref( # NoQA: F811 154 ptrref: irast.TupleIndirectionPointerRef, *, 155 ctx: context.ContextLevel, 156 ) -> irast.TupleIndirectionLink: 157 ... 158 159 160 @overload 161 def ptrcls_from_ptrref( # NoQA: F811 162 ptrref: irast.TypeIntersectionPointerRef, *, 163 ctx: context.ContextLevel, 164 ) -> irast.TypeIntersectionLink: 165 ... 166 167 168 @overload 169 def ptrcls_from_ptrref( # NoQA: F811 170 ptrref: irast.BasePointerRef, *, 171 ctx: context.ContextLevel, 172 ) -> s_pointers.PointerLike: 173 ... 
174 175 176 def ptrcls_from_ptrref( # NoQA: F811 177 ptrref: irast.BasePointerRef, *, 178 ctx: context.ContextLevel, 179 ) -> s_pointers.PointerLike: 180 181 cached = ctx.env.ptr_ref_cache.get_ptrcls_for_ref(ptrref) 182 if cached is not None: 183 return cached 184 185 ctx.env.schema, ptr = irtyputils.ptrcls_from_ptrref( 186 ptrref, schema=ctx.env.schema) 187 188 return ptr 189 190 191 def collapse_type_intersection_rptr( 192 ir_set: irast.Set, *, 193 ctx: context.ContextLevel, 194 ) -> Tuple[irast.Set, List[s_pointers.Pointer]]: 195 196 ind_prefix, ind_ptrs = irutils.collapse_type_intersection(ir_set) 197 if not ind_ptrs: 198 return ir_set, [] 199 200 rptr_specialization: Set[irast.PointerRef] = set() 201 for ind_ptr in ind_ptrs: 202 for ind_ptr in ind_ptrs: 203 if ind_ptr.ptrref.rptr_specialization: 204 rptr_specialization.update( 205 ind_ptr.ptrref.rptr_specialization) 206 elif ( 207 not ind_ptr.ptrref.is_empty 208 and ind_ptr.source.rptr is not None 209 ): 210 assert isinstance(ind_ptr.source.rptr.ptrref, irast.PointerRef) 211 rptr_specialization.add(ind_ptr.source.rptr.ptrref) 212 213 ptrs = [ptrcls_from_ptrref(ptrref, ctx=ctx) 214 for ptrref in rptr_specialization] 215 216 return ind_prefix, ptrs 217 218 219 def type_to_typeref( 220 t: s_types.Type, 221 env: context.Environment, 222 ) -> irast.TypeRef: 223 schema = env.schema 224 cache = env.type_ref_cache 225 expr_type = t.get_expr_type(env.schema) 226 include_descendants = ( 227 expr_type is s_types.ExprType.Update 228 or expr_type is s_types.ExprType.Delete 229 ) 230 include_ancestors = ( 231 expr_type is s_types.ExprType.Insert 232 or expr_type is s_types.ExprType.Update 233 or expr_type is s_types.ExprType.Delete 234 ) 235 return irtyputils.type_to_typeref( 236 schema, 237 t, 238 include_descendants=include_descendants, 239 include_ancestors=include_ancestors, 240 cache=cache, 241 ) 242 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/edb/edgeql/compiler/typegen.py b/edb/edgeql/compiler/typegen.py --- a/edb/edgeql/compiler/typegen.py +++ b/edb/edgeql/compiler/typegen.py @@ -92,8 +92,27 @@ elif isinstance(ql_t, qlast.TypeOp): if ql_t.op == '|': - return (_ql_typeexpr_to_type(ql_t.left, ctx=ctx) + - _ql_typeexpr_to_type(ql_t.right, ctx=ctx)) + # We need to validate that type ops are applied only to + # object types. So we check the base case here, when the + # left or right operand is a single type, because if it's + # a longer list, then we know that it was already composed + # of "|" or "&", or it is the result of inference by + # "typeof" and is a list of object types anyway. + left = _ql_typeexpr_to_type(ql_t.left, ctx=ctx) + right = _ql_typeexpr_to_type(ql_t.right, ctx=ctx) + + if len(left) == 1 and not left[0].is_object_type(): + raise errors.UnsupportedFeatureError( + f'cannot use type operator {ql_t.op!r} with non-object ' + f'type {left[0].get_displayname(ctx.env.schema)}', + context=ql_t.left.context) + if len(right) == 1 and not right[0].is_object_type(): + raise errors.UnsupportedFeatureError( + f'cannot use type operator {ql_t.op!r} with non-object ' + f'type {right[0].get_displayname(ctx.env.schema)}', + context=ql_t.right.context) + + return left + right raise errors.UnsupportedFeatureError( f'type operator {ql_t.op!r} is not implemented',
{"golden_diff": "diff --git a/edb/edgeql/compiler/typegen.py b/edb/edgeql/compiler/typegen.py\n--- a/edb/edgeql/compiler/typegen.py\n+++ b/edb/edgeql/compiler/typegen.py\n@@ -92,8 +92,27 @@\n \n elif isinstance(ql_t, qlast.TypeOp):\n if ql_t.op == '|':\n- return (_ql_typeexpr_to_type(ql_t.left, ctx=ctx) +\n- _ql_typeexpr_to_type(ql_t.right, ctx=ctx))\n+ # We need to validate that type ops are applied only to\n+ # object types. So we check the base case here, when the\n+ # left or right operand is a single type, because if it's\n+ # a longer list, then we know that it was already composed\n+ # of \"|\" or \"&\", or it is the result of inference by\n+ # \"typeof\" and is a list of object types anyway.\n+ left = _ql_typeexpr_to_type(ql_t.left, ctx=ctx)\n+ right = _ql_typeexpr_to_type(ql_t.right, ctx=ctx)\n+\n+ if len(left) == 1 and not left[0].is_object_type():\n+ raise errors.UnsupportedFeatureError(\n+ f'cannot use type operator {ql_t.op!r} with non-object '\n+ f'type {left[0].get_displayname(ctx.env.schema)}',\n+ context=ql_t.left.context)\n+ if len(right) == 1 and not right[0].is_object_type():\n+ raise errors.UnsupportedFeatureError(\n+ f'cannot use type operator {ql_t.op!r} with non-object '\n+ f'type {right[0].get_displayname(ctx.env.schema)}',\n+ context=ql_t.right.context)\n+\n+ return left + right\n \n raise errors.UnsupportedFeatureError(\n f'type operator {ql_t.op!r} is not implemented',\n", "issue": "Drop support for type union operator for scalars.\nAt the moment it's possible to apply type union operator to scalars, like `int64 | bigint`. This functions less intuitively than the same operation for object types.\r\n```\r\nedgedb> select 5 is int64;\r\n{true}\r\nedgedb> select 5 is (int64 | bigint);\r\n{false}\r\nedgedb> select (introspect (int64 | float32).name);\r\n{'std::float64'}\r\n```\r\nThe issue is that what the operation seems to do is answer the question of \"what type would be the result set if you try to UNION values of these two disparate types?\"\r\n\r\nFor object types, all objects are fundamentally related and therefore compatible enough to combine into a single set. The union type may not have all the links and properties directly accessible, but the underlying objects are of their specific types and remain unchanged. In practice it can meaningfully be used to denote a link which can validly take otherwise mostly unrelated object types (say `link attachment -> File | Link | Receipt`).\r\n\r\nFor scalar types we _cast_ the values, changing them, in order to produce a homogeneous set. So all values in the resulting set are strictly of a specific scalar type without any connection to their \"former\" type. This makes finding practical usage for scalar type unions much harder.\r\n\r\nIn addition to the above, we might want to introduce discriminated unions of scalars and the way the current `|` works with scalars could be counter-productive to that.\r\n\r\nFor all of the above reasons we can restrict `|` and `&` to only work for object types and potentially relax this in the future as the need arises.\n", "before_files": [{"content": "#\n# This source file is part of the EdgeDB open source project.\n#\n# Copyright 2008-present MagicStack Inc. 
and the EdgeDB authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\n\"\"\"EdgeQL compiler type-related helpers.\"\"\"\n\n\nfrom __future__ import annotations\n\nfrom typing import *\n\nfrom edb import errors\n\nfrom edb.ir import ast as irast\nfrom edb.ir import typeutils as irtyputils\nfrom edb.ir import utils as irutils\n\nfrom edb.schema import abc as s_abc\nfrom edb.schema import pointers as s_pointers\nfrom edb.schema import types as s_types\nfrom edb.schema import utils as s_utils\n\nfrom edb.edgeql import ast as qlast\n\nfrom . import context\nfrom . import dispatch\nfrom . import schemactx\nfrom . import setgen\n\n\ndef type_to_ql_typeref(\n t: s_types.Type,\n *,\n _name: Optional[str] = None,\n ctx: context.ContextLevel,\n) -> qlast.TypeExpr:\n return s_utils.typeref_to_ast(\n ctx.env.schema,\n t,\n disambiguate_std='std' in ctx.modaliases,\n )\n\n\ndef ql_typeexpr_to_ir_typeref(\n ql_t: qlast.TypeExpr, *,\n ctx: context.ContextLevel) -> irast.TypeRef:\n\n stype = ql_typeexpr_to_type(ql_t, ctx=ctx)\n return irtyputils.type_to_typeref(\n ctx.env.schema, stype, cache=ctx.env.type_ref_cache\n )\n\n\ndef ql_typeexpr_to_type(\n ql_t: qlast.TypeExpr, *,\n ctx: context.ContextLevel) -> s_types.Type:\n\n types = _ql_typeexpr_to_type(ql_t, ctx=ctx)\n if len(types) > 1:\n return schemactx.get_union_type(types, ctx=ctx)\n else:\n return types[0]\n\n\ndef _ql_typeexpr_to_type(\n ql_t: qlast.TypeExpr, *,\n ctx: context.ContextLevel) -> List[s_types.Type]:\n\n if isinstance(ql_t, qlast.TypeOf):\n with ctx.new() as subctx:\n # Use an empty scope tree, to avoid polluting things pointlessly\n subctx.path_scope = irast.ScopeTreeNode()\n ir_set = dispatch.compile(ql_t.expr, ctx=subctx)\n stype = setgen.get_set_type(ir_set, ctx=subctx)\n\n return [stype]\n\n elif isinstance(ql_t, qlast.TypeOp):\n if ql_t.op == '|':\n return (_ql_typeexpr_to_type(ql_t.left, ctx=ctx) +\n _ql_typeexpr_to_type(ql_t.right, ctx=ctx))\n\n raise errors.UnsupportedFeatureError(\n f'type operator {ql_t.op!r} is not implemented',\n context=ql_t.context)\n\n elif isinstance(ql_t, qlast.TypeName):\n return [_ql_typename_to_type(ql_t, ctx=ctx)]\n\n else:\n raise errors.EdgeQLSyntaxError(\"Unexpected type expression\",\n context=ql_t.context)\n\n\ndef _ql_typename_to_type(\n ql_t: qlast.TypeName, *,\n ctx: context.ContextLevel) -> s_types.Type:\n if ql_t.subtypes:\n assert isinstance(ql_t.maintype, qlast.ObjectRef)\n coll = s_types.Collection.get_class(ql_t.maintype.name)\n ct: s_types.Type\n\n if issubclass(coll, s_abc.Tuple):\n t_subtypes = {}\n named = False\n for si, st in enumerate(ql_t.subtypes):\n if st.name:\n named = True\n type_name = st.name\n else:\n type_name = str(si)\n\n t_subtypes[type_name] = ql_typeexpr_to_type(st, ctx=ctx)\n\n ctx.env.schema, ct = coll.from_subtypes(\n ctx.env.schema, t_subtypes, {'named': named})\n return ct\n else:\n a_subtypes = []\n for st in ql_t.subtypes:\n a_subtypes.append(ql_typeexpr_to_type(st, ctx=ctx))\n\n ctx.env.schema, ct = coll.from_subtypes(ctx.env.schema, 
a_subtypes)\n return ct\n else:\n return schemactx.get_schema_type(ql_t.maintype, ctx=ctx)\n\n\n@overload\ndef ptrcls_from_ptrref( # NoQA: F811\n ptrref: irast.PointerRef, *,\n ctx: context.ContextLevel,\n) -> s_pointers.Pointer:\n ...\n\n\n@overload\ndef ptrcls_from_ptrref( # NoQA: F811\n ptrref: irast.TupleIndirectionPointerRef, *,\n ctx: context.ContextLevel,\n) -> irast.TupleIndirectionLink:\n ...\n\n\n@overload\ndef ptrcls_from_ptrref( # NoQA: F811\n ptrref: irast.TypeIntersectionPointerRef, *,\n ctx: context.ContextLevel,\n) -> irast.TypeIntersectionLink:\n ...\n\n\n@overload\ndef ptrcls_from_ptrref( # NoQA: F811\n ptrref: irast.BasePointerRef, *,\n ctx: context.ContextLevel,\n) -> s_pointers.PointerLike:\n ...\n\n\ndef ptrcls_from_ptrref( # NoQA: F811\n ptrref: irast.BasePointerRef, *,\n ctx: context.ContextLevel,\n) -> s_pointers.PointerLike:\n\n cached = ctx.env.ptr_ref_cache.get_ptrcls_for_ref(ptrref)\n if cached is not None:\n return cached\n\n ctx.env.schema, ptr = irtyputils.ptrcls_from_ptrref(\n ptrref, schema=ctx.env.schema)\n\n return ptr\n\n\ndef collapse_type_intersection_rptr(\n ir_set: irast.Set, *,\n ctx: context.ContextLevel,\n) -> Tuple[irast.Set, List[s_pointers.Pointer]]:\n\n ind_prefix, ind_ptrs = irutils.collapse_type_intersection(ir_set)\n if not ind_ptrs:\n return ir_set, []\n\n rptr_specialization: Set[irast.PointerRef] = set()\n for ind_ptr in ind_ptrs:\n for ind_ptr in ind_ptrs:\n if ind_ptr.ptrref.rptr_specialization:\n rptr_specialization.update(\n ind_ptr.ptrref.rptr_specialization)\n elif (\n not ind_ptr.ptrref.is_empty\n and ind_ptr.source.rptr is not None\n ):\n assert isinstance(ind_ptr.source.rptr.ptrref, irast.PointerRef)\n rptr_specialization.add(ind_ptr.source.rptr.ptrref)\n\n ptrs = [ptrcls_from_ptrref(ptrref, ctx=ctx)\n for ptrref in rptr_specialization]\n\n return ind_prefix, ptrs\n\n\ndef type_to_typeref(\n t: s_types.Type,\n env: context.Environment,\n) -> irast.TypeRef:\n schema = env.schema\n cache = env.type_ref_cache\n expr_type = t.get_expr_type(env.schema)\n include_descendants = (\n expr_type is s_types.ExprType.Update\n or expr_type is s_types.ExprType.Delete\n )\n include_ancestors = (\n expr_type is s_types.ExprType.Insert\n or expr_type is s_types.ExprType.Update\n or expr_type is s_types.ExprType.Delete\n )\n return irtyputils.type_to_typeref(\n schema,\n t,\n include_descendants=include_descendants,\n include_ancestors=include_ancestors,\n cache=cache,\n )\n", "path": "edb/edgeql/compiler/typegen.py"}], "after_files": [{"content": "#\n# This source file is part of the EdgeDB open source project.\n#\n# Copyright 2008-present MagicStack Inc. 
and the EdgeDB authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\n\"\"\"EdgeQL compiler type-related helpers.\"\"\"\n\n\nfrom __future__ import annotations\n\nfrom typing import *\n\nfrom edb import errors\n\nfrom edb.ir import ast as irast\nfrom edb.ir import typeutils as irtyputils\nfrom edb.ir import utils as irutils\n\nfrom edb.schema import abc as s_abc\nfrom edb.schema import pointers as s_pointers\nfrom edb.schema import types as s_types\nfrom edb.schema import utils as s_utils\n\nfrom edb.edgeql import ast as qlast\n\nfrom . import context\nfrom . import dispatch\nfrom . import schemactx\nfrom . import setgen\n\n\ndef type_to_ql_typeref(\n t: s_types.Type,\n *,\n _name: Optional[str] = None,\n ctx: context.ContextLevel,\n) -> qlast.TypeExpr:\n return s_utils.typeref_to_ast(\n ctx.env.schema,\n t,\n disambiguate_std='std' in ctx.modaliases,\n )\n\n\ndef ql_typeexpr_to_ir_typeref(\n ql_t: qlast.TypeExpr, *,\n ctx: context.ContextLevel) -> irast.TypeRef:\n\n stype = ql_typeexpr_to_type(ql_t, ctx=ctx)\n return irtyputils.type_to_typeref(\n ctx.env.schema, stype, cache=ctx.env.type_ref_cache\n )\n\n\ndef ql_typeexpr_to_type(\n ql_t: qlast.TypeExpr, *,\n ctx: context.ContextLevel) -> s_types.Type:\n\n types = _ql_typeexpr_to_type(ql_t, ctx=ctx)\n if len(types) > 1:\n return schemactx.get_union_type(types, ctx=ctx)\n else:\n return types[0]\n\n\ndef _ql_typeexpr_to_type(\n ql_t: qlast.TypeExpr, *,\n ctx: context.ContextLevel) -> List[s_types.Type]:\n\n if isinstance(ql_t, qlast.TypeOf):\n with ctx.new() as subctx:\n # Use an empty scope tree, to avoid polluting things pointlessly\n subctx.path_scope = irast.ScopeTreeNode()\n ir_set = dispatch.compile(ql_t.expr, ctx=subctx)\n stype = setgen.get_set_type(ir_set, ctx=subctx)\n\n return [stype]\n\n elif isinstance(ql_t, qlast.TypeOp):\n if ql_t.op == '|':\n # We need to validate that type ops are applied only to\n # object types. 
So we check the base case here, when the\n # left or right operand is a single type, because if it's\n # a longer list, then we know that it was already composed\n # of \"|\" or \"&\", or it is the result of inference by\n # \"typeof\" and is a list of object types anyway.\n left = _ql_typeexpr_to_type(ql_t.left, ctx=ctx)\n right = _ql_typeexpr_to_type(ql_t.right, ctx=ctx)\n\n if len(left) == 1 and not left[0].is_object_type():\n raise errors.UnsupportedFeatureError(\n f'cannot use type operator {ql_t.op!r} with non-object '\n f'type {left[0].get_displayname(ctx.env.schema)}',\n context=ql_t.left.context)\n if len(right) == 1 and not right[0].is_object_type():\n raise errors.UnsupportedFeatureError(\n f'cannot use type operator {ql_t.op!r} with non-object '\n f'type {right[0].get_displayname(ctx.env.schema)}',\n context=ql_t.right.context)\n\n return left + right\n\n raise errors.UnsupportedFeatureError(\n f'type operator {ql_t.op!r} is not implemented',\n context=ql_t.context)\n\n elif isinstance(ql_t, qlast.TypeName):\n return [_ql_typename_to_type(ql_t, ctx=ctx)]\n\n else:\n raise errors.EdgeQLSyntaxError(\"Unexpected type expression\",\n context=ql_t.context)\n\n\ndef _ql_typename_to_type(\n ql_t: qlast.TypeName, *,\n ctx: context.ContextLevel) -> s_types.Type:\n if ql_t.subtypes:\n assert isinstance(ql_t.maintype, qlast.ObjectRef)\n coll = s_types.Collection.get_class(ql_t.maintype.name)\n ct: s_types.Type\n\n if issubclass(coll, s_abc.Tuple):\n t_subtypes = {}\n named = False\n for si, st in enumerate(ql_t.subtypes):\n if st.name:\n named = True\n type_name = st.name\n else:\n type_name = str(si)\n\n t_subtypes[type_name] = ql_typeexpr_to_type(st, ctx=ctx)\n\n ctx.env.schema, ct = coll.from_subtypes(\n ctx.env.schema, t_subtypes, {'named': named})\n return ct\n else:\n a_subtypes = []\n for st in ql_t.subtypes:\n a_subtypes.append(ql_typeexpr_to_type(st, ctx=ctx))\n\n ctx.env.schema, ct = coll.from_subtypes(ctx.env.schema, a_subtypes)\n return ct\n else:\n return schemactx.get_schema_type(ql_t.maintype, ctx=ctx)\n\n\n@overload\ndef ptrcls_from_ptrref( # NoQA: F811\n ptrref: irast.PointerRef, *,\n ctx: context.ContextLevel,\n) -> s_pointers.Pointer:\n ...\n\n\n@overload\ndef ptrcls_from_ptrref( # NoQA: F811\n ptrref: irast.TupleIndirectionPointerRef, *,\n ctx: context.ContextLevel,\n) -> irast.TupleIndirectionLink:\n ...\n\n\n@overload\ndef ptrcls_from_ptrref( # NoQA: F811\n ptrref: irast.TypeIntersectionPointerRef, *,\n ctx: context.ContextLevel,\n) -> irast.TypeIntersectionLink:\n ...\n\n\n@overload\ndef ptrcls_from_ptrref( # NoQA: F811\n ptrref: irast.BasePointerRef, *,\n ctx: context.ContextLevel,\n) -> s_pointers.PointerLike:\n ...\n\n\ndef ptrcls_from_ptrref( # NoQA: F811\n ptrref: irast.BasePointerRef, *,\n ctx: context.ContextLevel,\n) -> s_pointers.PointerLike:\n\n cached = ctx.env.ptr_ref_cache.get_ptrcls_for_ref(ptrref)\n if cached is not None:\n return cached\n\n ctx.env.schema, ptr = irtyputils.ptrcls_from_ptrref(\n ptrref, schema=ctx.env.schema)\n\n return ptr\n\n\ndef collapse_type_intersection_rptr(\n ir_set: irast.Set, *,\n ctx: context.ContextLevel,\n) -> Tuple[irast.Set, List[s_pointers.Pointer]]:\n\n ind_prefix, ind_ptrs = irutils.collapse_type_intersection(ir_set)\n if not ind_ptrs:\n return ir_set, []\n\n rptr_specialization: Set[irast.PointerRef] = set()\n for ind_ptr in ind_ptrs:\n for ind_ptr in ind_ptrs:\n if ind_ptr.ptrref.rptr_specialization:\n rptr_specialization.update(\n ind_ptr.ptrref.rptr_specialization)\n elif (\n not ind_ptr.ptrref.is_empty\n and 
ind_ptr.source.rptr is not None\n ):\n assert isinstance(ind_ptr.source.rptr.ptrref, irast.PointerRef)\n rptr_specialization.add(ind_ptr.source.rptr.ptrref)\n\n ptrs = [ptrcls_from_ptrref(ptrref, ctx=ctx)\n for ptrref in rptr_specialization]\n\n return ind_prefix, ptrs\n\n\ndef type_to_typeref(\n t: s_types.Type,\n env: context.Environment,\n) -> irast.TypeRef:\n schema = env.schema\n cache = env.type_ref_cache\n expr_type = t.get_expr_type(env.schema)\n include_descendants = (\n expr_type is s_types.ExprType.Update\n or expr_type is s_types.ExprType.Delete\n )\n include_ancestors = (\n expr_type is s_types.ExprType.Insert\n or expr_type is s_types.ExprType.Update\n or expr_type is s_types.ExprType.Delete\n )\n return irtyputils.type_to_typeref(\n schema,\n t,\n include_descendants=include_descendants,\n include_ancestors=include_ancestors,\n cache=cache,\n )\n", "path": "edb/edgeql/compiler/typegen.py"}]}
3,008
431
gh_patches_debug_31889
rasdani/github-patches
git_diff
pre-commit__pre-commit-575
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- git unadd changes lost if hook fails on windows ``` D:\CubeadProjects\devops [test +0 ~2 -0 | +0 ~1 -0 !]> git cm "asd" [WARNING] Unstaged files detected. [INFO] Stashing unstaged files to C:\Users\56929\.pre-commit\patch1501482991. run pylint...............................................................Failed hookid: python-pylint ************* Module install C: 10, 0: Exactly one space required around assignment a=1 ^ (bad-whitespace) C: 46, 0: Line too long (108/100) (line-too-long) W: 39, 4: Unused variable 'stylelint_root' (unused-variable) W: 37, 4: Unused variable 'node_root' (unused-variable) W: 24, 8: Unused variable 'checks' (unused-variable) [WARNING] Stashed changes conflicted with hook auto-fixes... Rolling back fixes... An unexpected error has occurred: CalledProcessError: Command: ('C:\\Program Files\\Git\\mingw64\\libexec\\git-core\\git.exe', 'apply', 'C:\\Users\\56929\\.pre-commit\\patch1501483011') Return code: 1 Expected return code: 0 Output: (none) Errors: error: patch failed: svnchecker_stylelint_support/checks/Stylelint.py:20 error: svnchecker_stylelint_support/checks/Stylelint.py: patch does not apply Check the log at ~/.pre-commit/pre-commit.log ``` ### ~/.pre-commit/pre-commit.log ``` An unexpected error has occurred: CalledProcessError: Command: ('C:\\Program Files\\Git\\mingw64\\libexec\\git-core\\git.exe', 'apply', 'C:\\Users\\56929\\.pre-commit\\patch1501483011') Return code: 1 Expected return code: 0 Output: (none) Errors: error: patch failed: svnchecker_stylelint_support/checks/Stylelint.py:20 error: svnchecker_stylelint_support/checks/Stylelint.py: patch does not apply Traceback (most recent call last): File "c:\python27\lib\site-packages\pre_commit\error_handler.py", line 48, in error_handler yield File "c:\python27\lib\site-packages\pre_commit\main.py", line 231, in main return run(runner, args) File "c:\python27\lib\site-packages\pre_commit\commands\run.py", line 273, in run return _run_hooks(repo_hooks, args, environ) File "c:\python27\lib\contextlib.py", line 24, in __exit__ self.gen.next() File "c:\python27\lib\site-packages\pre_commit\staged_files_only.py", line 58, in staged_files_only cmd_runner.run(('git', 'apply', patch_filename), encoding=None) File "c:\python27\lib\site-packages\pre_commit\prefixed_command_runner.py", line 38, in run return cmd_output(*replaced_cmd, __popen=self.__popen, **kwargs) File "c:\python27\lib\site-packages\pre_commit\util.py", line 189, in cmd_output returncode, cmd, retcode, output=(stdout, stderr), CalledProcessError: Command: ('C:\\Program Files\\Git\\mingw64\\libexec\\git-core\\git.exe', 'apply', 'C:\\Users\\56929\\.pre-commit\\patch1501483011') Return code: 1 Expected return code: 0 Output: (none) Errors: error: patch failed: svnchecker_stylelint_support/checks/Stylelint.py:20 error: svnchecker_stylelint_support/checks/Stylelint.py: patch does not apply ``` Then, I open the patch file. 
(C:\\Users\\56929\\.pre-commit\\patch1501483011),it looks like ```diff diff --git a/svnchecker_stylelint_support/checks/Stylelint.py b/svnchecker_stylelint_support/checks/Stylelint.py index 4422b4d..f85ecb1 100644 --- a/svnchecker_stylelint_support/checks/Stylelint.py +++ b/svnchecker_stylelint_support/checks/Stylelint.py @@ -20,3 +20,5 @@ def run(transaction, config): return ('{}\n{}'.format(stdoutdata, stderrdata), 1)^M ^M return ("", 0)^M ^M ^M ^M ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pre_commit/staged_files_only.py` Content: ``` 1 from __future__ import unicode_literals 2 3 import contextlib 4 import io 5 import logging 6 import time 7 8 from pre_commit.util import CalledProcessError 9 10 11 logger = logging.getLogger('pre_commit') 12 13 14 @contextlib.contextmanager 15 def staged_files_only(cmd_runner): 16 """Clear any unstaged changes from the git working directory inside this 17 context. 18 19 Args: 20 cmd_runner - PrefixedCommandRunner 21 """ 22 # Determine if there are unstaged files 23 tree = cmd_runner.run(('git', 'write-tree'))[1].strip() 24 retcode, diff_stdout_binary, _ = cmd_runner.run( 25 ( 26 'git', 'diff-index', '--ignore-submodules', '--binary', 27 '--exit-code', '--no-color', '--no-ext-diff', tree, '--', 28 ), 29 retcode=None, 30 encoding=None, 31 ) 32 if retcode and diff_stdout_binary.strip(): 33 patch_filename = cmd_runner.path('patch{}'.format(int(time.time()))) 34 logger.warning('Unstaged files detected.') 35 logger.info( 36 'Stashing unstaged files to {}.'.format(patch_filename), 37 ) 38 # Save the current unstaged changes as a patch 39 with io.open(patch_filename, 'wb') as patch_file: 40 patch_file.write(diff_stdout_binary) 41 42 # Clear the working directory of unstaged changes 43 cmd_runner.run(('git', 'checkout', '--', '.')) 44 try: 45 yield 46 finally: 47 # Try to apply the patch we saved 48 try: 49 cmd_runner.run( 50 ('git', 'apply', '--whitespace=nowarn', patch_filename), 51 encoding=None, 52 ) 53 except CalledProcessError: 54 logger.warning( 55 'Stashed changes conflicted with hook auto-fixes... ' 56 'Rolling back fixes...', 57 ) 58 # We failed to apply the patch, presumably due to fixes made 59 # by hooks. 60 # Roll back the changes made by hooks. 61 cmd_runner.run(('git', 'checkout', '--', '.')) 62 cmd_runner.run( 63 ('git', 'apply', patch_filename, '--whitespace=nowarn'), 64 encoding=None, 65 ) 66 logger.info('Restored changes from {}.'.format(patch_filename)) 67 else: 68 # There weren't any staged files so we don't need to do anything 69 # special 70 yield 71 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pre_commit/staged_files_only.py b/pre_commit/staged_files_only.py --- a/pre_commit/staged_files_only.py +++ b/pre_commit/staged_files_only.py @@ -11,6 +11,16 @@ logger = logging.getLogger('pre_commit') +def _git_apply(cmd_runner, patch): + args = ('apply', '--whitespace=nowarn', patch) + try: + cmd_runner.run(('git',) + args, encoding=None) + except CalledProcessError: + # Retry with autocrlf=false -- see #570 + cmd = ('git', '-c', 'core.autocrlf=false') + args + cmd_runner.run(cmd, encoding=None) + + @contextlib.contextmanager def staged_files_only(cmd_runner): """Clear any unstaged changes from the git working directory inside this @@ -46,10 +56,7 @@ finally: # Try to apply the patch we saved try: - cmd_runner.run( - ('git', 'apply', '--whitespace=nowarn', patch_filename), - encoding=None, - ) + _git_apply(cmd_runner, patch_filename) except CalledProcessError: logger.warning( 'Stashed changes conflicted with hook auto-fixes... ' @@ -59,10 +66,7 @@ # by hooks. # Roll back the changes made by hooks. cmd_runner.run(('git', 'checkout', '--', '.')) - cmd_runner.run( - ('git', 'apply', patch_filename, '--whitespace=nowarn'), - encoding=None, - ) + _git_apply(cmd_runner, patch_filename) logger.info('Restored changes from {}.'.format(patch_filename)) else: # There weren't any staged files so we don't need to do anything
{"golden_diff": "diff --git a/pre_commit/staged_files_only.py b/pre_commit/staged_files_only.py\n--- a/pre_commit/staged_files_only.py\n+++ b/pre_commit/staged_files_only.py\n@@ -11,6 +11,16 @@\n logger = logging.getLogger('pre_commit')\n \n \n+def _git_apply(cmd_runner, patch):\n+ args = ('apply', '--whitespace=nowarn', patch)\n+ try:\n+ cmd_runner.run(('git',) + args, encoding=None)\n+ except CalledProcessError:\n+ # Retry with autocrlf=false -- see #570\n+ cmd = ('git', '-c', 'core.autocrlf=false') + args\n+ cmd_runner.run(cmd, encoding=None)\n+\n+\n @contextlib.contextmanager\n def staged_files_only(cmd_runner):\n \"\"\"Clear any unstaged changes from the git working directory inside this\n@@ -46,10 +56,7 @@\n finally:\n # Try to apply the patch we saved\n try:\n- cmd_runner.run(\n- ('git', 'apply', '--whitespace=nowarn', patch_filename),\n- encoding=None,\n- )\n+ _git_apply(cmd_runner, patch_filename)\n except CalledProcessError:\n logger.warning(\n 'Stashed changes conflicted with hook auto-fixes... '\n@@ -59,10 +66,7 @@\n # by hooks.\n # Roll back the changes made by hooks.\n cmd_runner.run(('git', 'checkout', '--', '.'))\n- cmd_runner.run(\n- ('git', 'apply', patch_filename, '--whitespace=nowarn'),\n- encoding=None,\n- )\n+ _git_apply(cmd_runner, patch_filename)\n logger.info('Restored changes from {}.'.format(patch_filename))\n else:\n # There weren't any staged files so we don't need to do anything\n", "issue": "git unadd changes lost if hook fails on windows\n```\r\nD:\\CubeadProjects\\devops [test +0 ~2 -0 | +0 ~1 -0 !]> git cm \"asd\"\r\n[WARNING] Unstaged files detected.\r\n[INFO] Stashing unstaged files to C:\\Users\\56929\\.pre-commit\\patch1501482991.\r\nrun pylint...............................................................Failed\r\nhookid: python-pylint\r\n\r\n************* Module install\r\nC: 10, 0: Exactly one space required around assignment\r\na=1\r\n ^ (bad-whitespace)\r\nC: 46, 0: Line too long (108/100) (line-too-long)\r\nW: 39, 4: Unused variable 'stylelint_root' (unused-variable)\r\nW: 37, 4: Unused variable 'node_root' (unused-variable)\r\nW: 24, 8: Unused variable 'checks' (unused-variable)\r\n\r\n[WARNING] Stashed changes conflicted with hook auto-fixes... 
Rolling back fixes...\r\nAn unexpected error has occurred: CalledProcessError: Command: ('C:\\\\Program Files\\\\Git\\\\mingw64\\\\libexec\\\\git-core\\\\git.exe', 'apply', 'C:\\\\Users\\\\56929\\\\.pre-commit\\\\patch1501483011')\r\nReturn code: 1\r\nExpected return code: 0\r\nOutput: (none)\r\nErrors:\r\n error: patch failed: svnchecker_stylelint_support/checks/Stylelint.py:20\r\n error: svnchecker_stylelint_support/checks/Stylelint.py: patch does not apply\r\n\r\n\r\nCheck the log at ~/.pre-commit/pre-commit.log\r\n```\r\n\r\n### ~/.pre-commit/pre-commit.log\r\n```\r\nAn unexpected error has occurred: CalledProcessError: Command: ('C:\\\\Program Files\\\\Git\\\\mingw64\\\\libexec\\\\git-core\\\\git.exe', 'apply', 'C:\\\\Users\\\\56929\\\\.pre-commit\\\\patch1501483011')\r\nReturn code: 1\r\nExpected return code: 0\r\nOutput: (none)\r\nErrors: \r\n error: patch failed: svnchecker_stylelint_support/checks/Stylelint.py:20\r\n error: svnchecker_stylelint_support/checks/Stylelint.py: patch does not apply\r\n \r\n\r\nTraceback (most recent call last):\r\n File \"c:\\python27\\lib\\site-packages\\pre_commit\\error_handler.py\", line 48, in error_handler\r\n yield\r\n File \"c:\\python27\\lib\\site-packages\\pre_commit\\main.py\", line 231, in main\r\n return run(runner, args)\r\n File \"c:\\python27\\lib\\site-packages\\pre_commit\\commands\\run.py\", line 273, in run\r\n return _run_hooks(repo_hooks, args, environ)\r\n File \"c:\\python27\\lib\\contextlib.py\", line 24, in __exit__\r\n self.gen.next()\r\n File \"c:\\python27\\lib\\site-packages\\pre_commit\\staged_files_only.py\", line 58, in staged_files_only\r\n cmd_runner.run(('git', 'apply', patch_filename), encoding=None)\r\n File \"c:\\python27\\lib\\site-packages\\pre_commit\\prefixed_command_runner.py\", line 38, in run\r\n return cmd_output(*replaced_cmd, __popen=self.__popen, **kwargs)\r\n File \"c:\\python27\\lib\\site-packages\\pre_commit\\util.py\", line 189, in cmd_output\r\n returncode, cmd, retcode, output=(stdout, stderr),\r\nCalledProcessError: Command: ('C:\\\\Program Files\\\\Git\\\\mingw64\\\\libexec\\\\git-core\\\\git.exe', 'apply', 'C:\\\\Users\\\\56929\\\\.pre-commit\\\\patch1501483011')\r\nReturn code: 1\r\nExpected return code: 0\r\nOutput: (none)\r\nErrors: \r\n error: patch failed: svnchecker_stylelint_support/checks/Stylelint.py:20\r\n error: svnchecker_stylelint_support/checks/Stylelint.py: patch does not apply\r\n```\r\nThen, I open the patch file. 
(C:\\\\Users\\\\56929\\\\.pre-commit\\\\patch1501483011),it looks like \r\n\r\n```diff\r\ndiff --git a/svnchecker_stylelint_support/checks/Stylelint.py b/svnchecker_stylelint_support/checks/Stylelint.py\r\nindex 4422b4d..f85ecb1 100644\r\n--- a/svnchecker_stylelint_support/checks/Stylelint.py\r\n+++ b/svnchecker_stylelint_support/checks/Stylelint.py\r\n@@ -20,3 +20,5 @@ def run(transaction, config):\r\n return ('{}\\n{}'.format(stdoutdata, stderrdata), 1)^M\r\n^M\r\n return (\"\", 0)^M\r\n^M\r\n^M\r\n^M\r\n```\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport contextlib\nimport io\nimport logging\nimport time\n\nfrom pre_commit.util import CalledProcessError\n\n\nlogger = logging.getLogger('pre_commit')\n\n\[email protected]\ndef staged_files_only(cmd_runner):\n \"\"\"Clear any unstaged changes from the git working directory inside this\n context.\n\n Args:\n cmd_runner - PrefixedCommandRunner\n \"\"\"\n # Determine if there are unstaged files\n tree = cmd_runner.run(('git', 'write-tree'))[1].strip()\n retcode, diff_stdout_binary, _ = cmd_runner.run(\n (\n 'git', 'diff-index', '--ignore-submodules', '--binary',\n '--exit-code', '--no-color', '--no-ext-diff', tree, '--',\n ),\n retcode=None,\n encoding=None,\n )\n if retcode and diff_stdout_binary.strip():\n patch_filename = cmd_runner.path('patch{}'.format(int(time.time())))\n logger.warning('Unstaged files detected.')\n logger.info(\n 'Stashing unstaged files to {}.'.format(patch_filename),\n )\n # Save the current unstaged changes as a patch\n with io.open(patch_filename, 'wb') as patch_file:\n patch_file.write(diff_stdout_binary)\n\n # Clear the working directory of unstaged changes\n cmd_runner.run(('git', 'checkout', '--', '.'))\n try:\n yield\n finally:\n # Try to apply the patch we saved\n try:\n cmd_runner.run(\n ('git', 'apply', '--whitespace=nowarn', patch_filename),\n encoding=None,\n )\n except CalledProcessError:\n logger.warning(\n 'Stashed changes conflicted with hook auto-fixes... 
'\n 'Rolling back fixes...',\n )\n # We failed to apply the patch, presumably due to fixes made\n # by hooks.\n # Roll back the changes made by hooks.\n cmd_runner.run(('git', 'checkout', '--', '.'))\n cmd_runner.run(\n ('git', 'apply', patch_filename, '--whitespace=nowarn'),\n encoding=None,\n )\n logger.info('Restored changes from {}.'.format(patch_filename))\n else:\n # There weren't any staged files so we don't need to do anything\n # special\n yield\n", "path": "pre_commit/staged_files_only.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nimport contextlib\nimport io\nimport logging\nimport time\n\nfrom pre_commit.util import CalledProcessError\n\n\nlogger = logging.getLogger('pre_commit')\n\n\ndef _git_apply(cmd_runner, patch):\n args = ('apply', '--whitespace=nowarn', patch)\n try:\n cmd_runner.run(('git',) + args, encoding=None)\n except CalledProcessError:\n # Retry with autocrlf=false -- see #570\n cmd = ('git', '-c', 'core.autocrlf=false') + args\n cmd_runner.run(cmd, encoding=None)\n\n\[email protected]\ndef staged_files_only(cmd_runner):\n \"\"\"Clear any unstaged changes from the git working directory inside this\n context.\n\n Args:\n cmd_runner - PrefixedCommandRunner\n \"\"\"\n # Determine if there are unstaged files\n tree = cmd_runner.run(('git', 'write-tree'))[1].strip()\n retcode, diff_stdout_binary, _ = cmd_runner.run(\n (\n 'git', 'diff-index', '--ignore-submodules', '--binary',\n '--exit-code', '--no-color', '--no-ext-diff', tree, '--',\n ),\n retcode=None,\n encoding=None,\n )\n if retcode and diff_stdout_binary.strip():\n patch_filename = cmd_runner.path('patch{}'.format(int(time.time())))\n logger.warning('Unstaged files detected.')\n logger.info(\n 'Stashing unstaged files to {}.'.format(patch_filename),\n )\n # Save the current unstaged changes as a patch\n with io.open(patch_filename, 'wb') as patch_file:\n patch_file.write(diff_stdout_binary)\n\n # Clear the working directory of unstaged changes\n cmd_runner.run(('git', 'checkout', '--', '.'))\n try:\n yield\n finally:\n # Try to apply the patch we saved\n try:\n _git_apply(cmd_runner, patch_filename)\n except CalledProcessError:\n logger.warning(\n 'Stashed changes conflicted with hook auto-fixes... '\n 'Rolling back fixes...',\n )\n # We failed to apply the patch, presumably due to fixes made\n # by hooks.\n # Roll back the changes made by hooks.\n cmd_runner.run(('git', 'checkout', '--', '.'))\n _git_apply(cmd_runner, patch_filename)\n logger.info('Restored changes from {}.'.format(patch_filename))\n else:\n # There weren't any staged files so we don't need to do anything\n # special\n yield\n", "path": "pre_commit/staged_files_only.py"}]}
1,994
406
gh_patches_debug_7053
rasdani/github-patches
git_diff
zulip__zulip-21237
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- docs: make links equally browsable on both GitHub and ReadTheDocs Once upstream bug https://github.com/readthedocs/recommonmark/issues/179 is fixed, we can replace the `.html` part in links of the form `file_name.html#anchor` with `.md`. This is a followup to https://github.com/zulip/zulip/pull/13232. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `version.py` Content: ``` 1 import os 2 3 ZULIP_VERSION = "5.0-dev+git" 4 5 # Add information on number of commits and commit hash to version, if available 6 zulip_git_version_file = os.path.join( 7 os.path.dirname(os.path.abspath(__file__)), "zulip-git-version" 8 ) 9 lines = [ZULIP_VERSION, ""] 10 if os.path.exists(zulip_git_version_file): 11 with open(zulip_git_version_file) as f: 12 lines = f.readlines() + ["", ""] 13 ZULIP_VERSION = lines.pop(0).strip() 14 ZULIP_MERGE_BASE = lines.pop(0).strip() 15 16 LATEST_MAJOR_VERSION = "4.0" 17 LATEST_RELEASE_VERSION = "4.10" 18 LATEST_RELEASE_ANNOUNCEMENT = "https://blog.zulip.com/2021/05/13/zulip-4-0-released/" 19 20 # Versions of the desktop app below DESKTOP_MINIMUM_VERSION will be 21 # prevented from connecting to the Zulip server. Versions above 22 # DESKTOP_MINIMUM_VERSION but below DESKTOP_WARNING_VERSION will have 23 # a banner at the top of the page asking the user to upgrade. 24 DESKTOP_MINIMUM_VERSION = "5.2.0" 25 DESKTOP_WARNING_VERSION = "5.4.3" 26 27 # Bump the API_FEATURE_LEVEL whenever an API change is made 28 # that clients might want to condition on. If we forget at 29 # the time we make the change, then bump it later as soon 30 # as we notice; clients using API_FEATURE_LEVEL will just not 31 # use the new feature/API until the bump. 32 # 33 # Changes should be accompanied by documentation explaining what the 34 # new level means in templates/zerver/api/changelog.md, as well as 35 # "**Changes**" entries in the endpoint's documentation in `zulip.yaml`. 36 API_FEATURE_LEVEL = 117 37 38 # Bump the minor PROVISION_VERSION to indicate that folks should provision 39 # only when going from an old version of the code to a newer version. Bump 40 # the major version to indicate that folks should provision in both 41 # directions. 42 43 # Typically, 44 # * adding a dependency only requires a minor version bump; 45 # * removing a dependency requires a major version bump; 46 # * upgrading a dependency requires a major version bump, unless the 47 # upgraded dependency is backwards compatible with all of our 48 # historical commits sharing the same major version, in which case a 49 # minor version bump suffices. 
50 51 PROVISION_VERSION = "179.0" 52 ``` Path: `docs/conf.py` Content: ``` 1 # For documentation on Sphinx configuration options, see: 2 # https://www.sphinx-doc.org/en/master/usage/configuration.html 3 # https://myst-parser.readthedocs.io/en/latest/sphinx/reference.html 4 # https://sphinx-rtd-theme.readthedocs.io/en/stable/configuring.html 5 6 import os 7 import sys 8 from typing import Any 9 10 sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))) 11 from version import LATEST_RELEASE_VERSION, ZULIP_VERSION 12 13 on_rtd = os.environ.get("READTHEDOCS") == "True" 14 15 # General configuration 16 17 extensions = [ 18 "myst_parser", 19 "sphinx_rtd_theme", 20 ] 21 templates_path = ["_templates"] 22 project = "Zulip" 23 copyright = "2012–2015 Dropbox, Inc., 2015–2021 Kandra Labs, Inc., and contributors" 24 author = "The Zulip Team" 25 version = ZULIP_VERSION 26 release = ZULIP_VERSION 27 exclude_patterns = ["_build", "README.md"] 28 suppress_warnings = [ 29 "myst.header", 30 ] 31 pygments_style = "sphinx" 32 33 # Options for Markdown parser 34 35 myst_enable_extensions = [ 36 "colon_fence", 37 "substitution", 38 ] 39 myst_substitutions = { 40 "LATEST_RELEASE_VERSION": LATEST_RELEASE_VERSION, 41 } 42 43 # Options for HTML output 44 45 html_theme = "sphinx_rtd_theme" 46 html_theme_options = { 47 "collapse_navigation": not on_rtd, # makes local builds much faster 48 "logo_only": True, 49 } 50 html_logo = "images/zulip-logo.svg" 51 html_static_path = ["_static"] 52 53 54 def setup(app: Any) -> None: 55 # overrides for wide tables in RTD theme 56 app.add_css_file("theme_overrides.css") # path relative to _static 57 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/docs/conf.py b/docs/conf.py --- a/docs/conf.py +++ b/docs/conf.py @@ -36,6 +36,7 @@ "colon_fence", "substitution", ] +myst_heading_anchors = 6 myst_substitutions = { "LATEST_RELEASE_VERSION": LATEST_RELEASE_VERSION, } diff --git a/version.py b/version.py --- a/version.py +++ b/version.py @@ -48,4 +48,4 @@ # historical commits sharing the same major version, in which case a # minor version bump suffices. -PROVISION_VERSION = "179.0" +PROVISION_VERSION = "180.0"
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -36,6 +36,7 @@\n \"colon_fence\",\n \"substitution\",\n ]\n+myst_heading_anchors = 6\n myst_substitutions = {\n \"LATEST_RELEASE_VERSION\": LATEST_RELEASE_VERSION,\n }\ndiff --git a/version.py b/version.py\n--- a/version.py\n+++ b/version.py\n@@ -48,4 +48,4 @@\n # historical commits sharing the same major version, in which case a\n # minor version bump suffices.\n \n-PROVISION_VERSION = \"179.0\"\n+PROVISION_VERSION = \"180.0\"\n", "issue": "docs: make links equally browsable on both GitHub and ReadTheDocs\nOnce upstream bug https://github.com/readthedocs/recommonmark/issues/179 is fixed, we can replace the `.html` part in links of the form `file_name.html#anchor` with `.md`.\r\n\r\nThis is a followup to https://github.com/zulip/zulip/pull/13232.\n", "before_files": [{"content": "import os\n\nZULIP_VERSION = \"5.0-dev+git\"\n\n# Add information on number of commits and commit hash to version, if available\nzulip_git_version_file = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"zulip-git-version\"\n)\nlines = [ZULIP_VERSION, \"\"]\nif os.path.exists(zulip_git_version_file):\n with open(zulip_git_version_file) as f:\n lines = f.readlines() + [\"\", \"\"]\nZULIP_VERSION = lines.pop(0).strip()\nZULIP_MERGE_BASE = lines.pop(0).strip()\n\nLATEST_MAJOR_VERSION = \"4.0\"\nLATEST_RELEASE_VERSION = \"4.10\"\nLATEST_RELEASE_ANNOUNCEMENT = \"https://blog.zulip.com/2021/05/13/zulip-4-0-released/\"\n\n# Versions of the desktop app below DESKTOP_MINIMUM_VERSION will be\n# prevented from connecting to the Zulip server. Versions above\n# DESKTOP_MINIMUM_VERSION but below DESKTOP_WARNING_VERSION will have\n# a banner at the top of the page asking the user to upgrade.\nDESKTOP_MINIMUM_VERSION = \"5.2.0\"\nDESKTOP_WARNING_VERSION = \"5.4.3\"\n\n# Bump the API_FEATURE_LEVEL whenever an API change is made\n# that clients might want to condition on. If we forget at\n# the time we make the change, then bump it later as soon\n# as we notice; clients using API_FEATURE_LEVEL will just not\n# use the new feature/API until the bump.\n#\n# Changes should be accompanied by documentation explaining what the\n# new level means in templates/zerver/api/changelog.md, as well as\n# \"**Changes**\" entries in the endpoint's documentation in `zulip.yaml`.\nAPI_FEATURE_LEVEL = 117\n\n# Bump the minor PROVISION_VERSION to indicate that folks should provision\n# only when going from an old version of the code to a newer version. 
Bump\n# the major version to indicate that folks should provision in both\n# directions.\n\n# Typically,\n# * adding a dependency only requires a minor version bump;\n# * removing a dependency requires a major version bump;\n# * upgrading a dependency requires a major version bump, unless the\n# upgraded dependency is backwards compatible with all of our\n# historical commits sharing the same major version, in which case a\n# minor version bump suffices.\n\nPROVISION_VERSION = \"179.0\"\n", "path": "version.py"}, {"content": "# For documentation on Sphinx configuration options, see:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n# https://myst-parser.readthedocs.io/en/latest/sphinx/reference.html\n# https://sphinx-rtd-theme.readthedocs.io/en/stable/configuring.html\n\nimport os\nimport sys\nfrom typing import Any\n\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\")))\nfrom version import LATEST_RELEASE_VERSION, ZULIP_VERSION\n\non_rtd = os.environ.get(\"READTHEDOCS\") == \"True\"\n\n# General configuration\n\nextensions = [\n \"myst_parser\",\n \"sphinx_rtd_theme\",\n]\ntemplates_path = [\"_templates\"]\nproject = \"Zulip\"\ncopyright = \"2012\u20132015 Dropbox, Inc., 2015\u20132021 Kandra Labs, Inc., and contributors\"\nauthor = \"The Zulip Team\"\nversion = ZULIP_VERSION\nrelease = ZULIP_VERSION\nexclude_patterns = [\"_build\", \"README.md\"]\nsuppress_warnings = [\n \"myst.header\",\n]\npygments_style = \"sphinx\"\n\n# Options for Markdown parser\n\nmyst_enable_extensions = [\n \"colon_fence\",\n \"substitution\",\n]\nmyst_substitutions = {\n \"LATEST_RELEASE_VERSION\": LATEST_RELEASE_VERSION,\n}\n\n# Options for HTML output\n\nhtml_theme = \"sphinx_rtd_theme\"\nhtml_theme_options = {\n \"collapse_navigation\": not on_rtd, # makes local builds much faster\n \"logo_only\": True,\n}\nhtml_logo = \"images/zulip-logo.svg\"\nhtml_static_path = [\"_static\"]\n\n\ndef setup(app: Any) -> None:\n # overrides for wide tables in RTD theme\n app.add_css_file(\"theme_overrides.css\") # path relative to _static\n", "path": "docs/conf.py"}], "after_files": [{"content": "import os\n\nZULIP_VERSION = \"5.0-dev+git\"\n\n# Add information on number of commits and commit hash to version, if available\nzulip_git_version_file = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"zulip-git-version\"\n)\nlines = [ZULIP_VERSION, \"\"]\nif os.path.exists(zulip_git_version_file):\n with open(zulip_git_version_file) as f:\n lines = f.readlines() + [\"\", \"\"]\nZULIP_VERSION = lines.pop(0).strip()\nZULIP_MERGE_BASE = lines.pop(0).strip()\n\nLATEST_MAJOR_VERSION = \"4.0\"\nLATEST_RELEASE_VERSION = \"4.9\"\nLATEST_RELEASE_ANNOUNCEMENT = \"https://blog.zulip.com/2021/05/13/zulip-4-0-released/\"\n\n# Versions of the desktop app below DESKTOP_MINIMUM_VERSION will be\n# prevented from connecting to the Zulip server. Versions above\n# DESKTOP_MINIMUM_VERSION but below DESKTOP_WARNING_VERSION will have\n# a banner at the top of the page asking the user to upgrade.\nDESKTOP_MINIMUM_VERSION = \"5.2.0\"\nDESKTOP_WARNING_VERSION = \"5.4.3\"\n\n# Bump the API_FEATURE_LEVEL whenever an API change is made\n# that clients might want to condition on. 
If we forget at\n# the time we make the change, then bump it later as soon\n# as we notice; clients using API_FEATURE_LEVEL will just not\n# use the new feature/API until the bump.\n#\n# Changes should be accompanied by documentation explaining what the\n# new level means in templates/zerver/api/changelog.md, as well as\n# \"**Changes**\" entries in the endpoint's documentation in `zulip.yaml`.\nAPI_FEATURE_LEVEL = 116\n\n# Bump the minor PROVISION_VERSION to indicate that folks should provision\n# only when going from an old version of the code to a newer version. Bump\n# the major version to indicate that folks should provision in both\n# directions.\n\n# Typically,\n# * adding a dependency only requires a minor version bump;\n# * removing a dependency requires a major version bump;\n# * upgrading a dependency requires a major version bump, unless the\n# upgraded dependency is backwards compatible with all of our\n# historical commits sharing the same major version, in which case a\n# minor version bump suffices.\n\nPROVISION_VERSION = \"180.0\"\n", "path": "version.py"}, {"content": "# For documentation on Sphinx configuration options, see:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n# https://myst-parser.readthedocs.io/en/latest/sphinx/reference.html\n# https://sphinx-rtd-theme.readthedocs.io/en/stable/configuring.html\n\nimport os\nimport sys\nfrom typing import Any\n\nsys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), \"..\")))\nfrom version import LATEST_RELEASE_VERSION, ZULIP_VERSION\n\non_rtd = os.environ.get(\"READTHEDOCS\") == \"True\"\n\n# General configuration\n\nextensions = [\n \"myst_parser\",\n \"sphinx_rtd_theme\",\n]\ntemplates_path = [\"_templates\"]\nproject = \"Zulip\"\ncopyright = \"2012\u20132015 Dropbox, Inc., 2015\u20132021 Kandra Labs, Inc., and contributors\"\nauthor = \"The Zulip Team\"\nversion = ZULIP_VERSION\nrelease = ZULIP_VERSION\nexclude_patterns = [\"_build\", \"README.md\"]\nsuppress_warnings = [\n \"myst.header\",\n]\npygments_style = \"sphinx\"\n\n# Options for Markdown parser\n\nmyst_enable_extensions = [\n \"colon_fence\",\n \"substitution\",\n]\nmyst_heading_anchors = 6\nmyst_substitutions = {\n \"LATEST_RELEASE_VERSION\": LATEST_RELEASE_VERSION,\n}\n\n# Options for HTML output\n\nhtml_theme = \"sphinx_rtd_theme\"\nhtml_theme_options = {\n \"collapse_navigation\": not on_rtd, # makes local builds much faster\n \"logo_only\": True,\n}\nhtml_logo = \"images/zulip-logo.svg\"\nhtml_static_path = [\"_static\"]\n\n\ndef setup(app: Any) -> None:\n # overrides for wide tables in RTD theme\n app.add_css_file(\"theme_overrides.css\") # path relative to _static\n", "path": "docs/conf.py"}]}
1,508
154
gh_patches_debug_22323
rasdani/github-patches
git_diff
scikit-hep__pyhf-937
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Document simplemodels API # Description In discussion today with @coolalexzb, I realized that the [`pyhf.simplemodels`](https://github.com/scikit-hep/pyhf/blob/79984be837ef6e53bdd12a82163c34d47d507dba/src/pyhf/simplemodels.py) API is not documented in our docs. Even thought this isn't something we want people to really use, we still show it in our examples and so it needs documentation. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/pyhf/simplemodels.py` Content: ``` 1 from . import Model 2 3 4 def hepdata_like(signal_data, bkg_data, bkg_uncerts, batch_size=None): 5 spec = { 6 'channels': [ 7 { 8 'name': 'singlechannel', 9 'samples': [ 10 { 11 'name': 'signal', 12 'data': signal_data, 13 'modifiers': [ 14 {'name': 'mu', 'type': 'normfactor', 'data': None} 15 ], 16 }, 17 { 18 'name': 'background', 19 'data': bkg_data, 20 'modifiers': [ 21 { 22 'name': 'uncorr_bkguncrt', 23 'type': 'shapesys', 24 'data': bkg_uncerts, 25 } 26 ], 27 }, 28 ], 29 } 30 ] 31 } 32 return Model(spec, batch_size=batch_size) 33 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/pyhf/simplemodels.py b/src/pyhf/simplemodels.py --- a/src/pyhf/simplemodels.py +++ b/src/pyhf/simplemodels.py @@ -2,6 +2,38 @@ def hepdata_like(signal_data, bkg_data, bkg_uncerts, batch_size=None): + """ + Construct a simple single channel :class:`~pyhf.pdf.Model` with a + :class:`~pyhf.modifiers.shapesys` modifier representing an uncorrelated + background uncertainty. + + Example: + >>> import pyhf + >>> pyhf.set_backend("numpy") + >>> model = pyhf.simplemodels.hepdata_like( + ... signal_data=[12.0, 11.0], bkg_data=[50.0, 52.0], bkg_uncerts=[3.0, 7.0] + ... ) + >>> model.schema + 'model.json' + >>> model.config.channels + ['singlechannel'] + >>> model.config.samples + ['background', 'signal'] + >>> model.config.parameters + ['mu', 'uncorr_bkguncrt'] + >>> model.expected_data(model.config.suggested_init()) + array([ 62. , 63. , 277.77777778, 55.18367347]) + + Args: + signal_data (`list`): The data in the signal sample + bkg_data (`list`): The data in the background sample + bkg_uncerts (`list`): The statistical uncertainty on the background sample counts + batch_size (`None` or `int`): Number of simultaneous (batched) Models to compute + + Returns: + ~pyhf.pdf.Model: The statistical model adhering to the :obj:`model.json` schema + + """ spec = { 'channels': [ {
{"golden_diff": "diff --git a/src/pyhf/simplemodels.py b/src/pyhf/simplemodels.py\n--- a/src/pyhf/simplemodels.py\n+++ b/src/pyhf/simplemodels.py\n@@ -2,6 +2,38 @@\n \n \n def hepdata_like(signal_data, bkg_data, bkg_uncerts, batch_size=None):\n+ \"\"\"\n+ Construct a simple single channel :class:`~pyhf.pdf.Model` with a\n+ :class:`~pyhf.modifiers.shapesys` modifier representing an uncorrelated\n+ background uncertainty.\n+\n+ Example:\n+ >>> import pyhf\n+ >>> pyhf.set_backend(\"numpy\")\n+ >>> model = pyhf.simplemodels.hepdata_like(\n+ ... signal_data=[12.0, 11.0], bkg_data=[50.0, 52.0], bkg_uncerts=[3.0, 7.0]\n+ ... )\n+ >>> model.schema\n+ 'model.json'\n+ >>> model.config.channels\n+ ['singlechannel']\n+ >>> model.config.samples\n+ ['background', 'signal']\n+ >>> model.config.parameters\n+ ['mu', 'uncorr_bkguncrt']\n+ >>> model.expected_data(model.config.suggested_init())\n+ array([ 62. , 63. , 277.77777778, 55.18367347])\n+\n+ Args:\n+ signal_data (`list`): The data in the signal sample\n+ bkg_data (`list`): The data in the background sample\n+ bkg_uncerts (`list`): The statistical uncertainty on the background sample counts\n+ batch_size (`None` or `int`): Number of simultaneous (batched) Models to compute\n+\n+ Returns:\n+ ~pyhf.pdf.Model: The statistical model adhering to the :obj:`model.json` schema\n+\n+ \"\"\"\n spec = {\n 'channels': [\n {\n", "issue": "Document simplemodels API\n# Description\r\n\r\nIn discussion today with @coolalexzb, I realized that the [`pyhf.simplemodels`](https://github.com/scikit-hep/pyhf/blob/79984be837ef6e53bdd12a82163c34d47d507dba/src/pyhf/simplemodels.py) API is not documented in our docs. Even thought this isn't something we want people to really use, we still show it in our examples and so it needs documentation.\n", "before_files": [{"content": "from . import Model\n\n\ndef hepdata_like(signal_data, bkg_data, bkg_uncerts, batch_size=None):\n spec = {\n 'channels': [\n {\n 'name': 'singlechannel',\n 'samples': [\n {\n 'name': 'signal',\n 'data': signal_data,\n 'modifiers': [\n {'name': 'mu', 'type': 'normfactor', 'data': None}\n ],\n },\n {\n 'name': 'background',\n 'data': bkg_data,\n 'modifiers': [\n {\n 'name': 'uncorr_bkguncrt',\n 'type': 'shapesys',\n 'data': bkg_uncerts,\n }\n ],\n },\n ],\n }\n ]\n }\n return Model(spec, batch_size=batch_size)\n", "path": "src/pyhf/simplemodels.py"}], "after_files": [{"content": "from . import Model\n\n\ndef hepdata_like(signal_data, bkg_data, bkg_uncerts, batch_size=None):\n \"\"\"\n Construct a simple single channel :class:`~pyhf.pdf.Model` with a\n :class:`~pyhf.modifiers.shapesys` modifier representing an uncorrelated\n background uncertainty.\n\n Example:\n >>> import pyhf\n >>> pyhf.set_backend(\"numpy\")\n >>> model = pyhf.simplemodels.hepdata_like(\n ... signal_data=[12.0, 11.0], bkg_data=[50.0, 52.0], bkg_uncerts=[3.0, 7.0]\n ... )\n >>> model.schema\n 'model.json'\n >>> model.config.channels\n ['singlechannel']\n >>> model.config.samples\n ['background', 'signal']\n >>> model.config.parameters\n ['mu', 'uncorr_bkguncrt']\n >>> model.expected_data(model.config.suggested_init())\n array([ 62. , 63. 
, 277.77777778, 55.18367347])\n\n Args:\n signal_data (`list`): The data in the signal sample\n bkg_data (`list`): The data in the background sample\n bkg_uncerts (`list`): The statistical uncertainty on the background sample counts\n batch_size (`None` or `int`): Number of simultaneous (batched) Models to compute\n\n Returns:\n ~pyhf.pdf.Model: The statistical model adhering to the :obj:`model.json` schema\n\n \"\"\"\n spec = {\n 'channels': [\n {\n 'name': 'singlechannel',\n 'samples': [\n {\n 'name': 'signal',\n 'data': signal_data,\n 'modifiers': [\n {'name': 'mu', 'type': 'normfactor', 'data': None}\n ],\n },\n {\n 'name': 'background',\n 'data': bkg_data,\n 'modifiers': [\n {\n 'name': 'uncorr_bkguncrt',\n 'type': 'shapesys',\n 'data': bkg_uncerts,\n }\n ],\n },\n ],\n }\n ]\n }\n return Model(spec, batch_size=batch_size)\n", "path": "src/pyhf/simplemodels.py"}]}
603
437
gh_patches_debug_22770
rasdani/github-patches
git_diff
DataDog__dd-trace-py-334
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Error log still occurs when tracer is disabled (Django) The tracer is logging the following error when disabled: > 2017-07-05 12:54:36,552:[none]:[ddtrace.writer:134]:ERROR cannot send services: [Errno 111] Connection refused This is occurring when integrated with Django with the following configuration: ```python DATADOG_TRACE = { 'ENABLED': False } ``` From reading the [documentation](http://pypi.datadoghq.com/trace/docs/#module-ddtrace.contrib.django) which states: > ENABLED (default: not django_settings.DEBUG): defines if the tracer is enabled or not. If set to false, the code is still instrumented but no spans are sent to the trace agent. This setting cannot be changed at runtime and a restart is required. By default the tracer is disabled when in DEBUG mode, enabled otherwise. It seems this log should not occur. If no spans are sent to the trace agent then presumably a connection should not be established? Package Info ------------------ > datadog==0.15.0 > ddtrace==0.8.5 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ddtrace/contrib/django/apps.py` Content: ``` 1 import logging 2 3 # 3rd party 4 from django.apps import AppConfig 5 6 # project 7 from .db import patch_db 8 from .conf import settings 9 from .cache import patch_cache 10 from .templates import patch_template 11 from .middleware import insert_exception_middleware 12 13 from ...ext import AppTypes 14 15 16 log = logging.getLogger(__name__) 17 18 19 class TracerConfig(AppConfig): 20 name = 'ddtrace.contrib.django' 21 label = 'datadog_django' 22 23 def ready(self): 24 """ 25 Ready is called as soon as the registry is fully populated. 26 Tracing capabilities must be enabled in this function so that 27 all Django internals are properly configured. 28 """ 29 tracer = settings.TRACER 30 31 if settings.TAGS: 32 tracer.set_tags(settings.TAGS) 33 34 # define the service details 35 tracer.set_service_info( 36 app='django', 37 app_type=AppTypes.web, 38 service=settings.DEFAULT_SERVICE, 39 ) 40 41 # configure the tracer instance 42 # TODO[manu]: we may use configure() but because it creates a new 43 # AgentWriter, it breaks all tests. The configure() behavior must 44 # be changed to use it in this integration 45 tracer.enabled = settings.ENABLED 46 tracer.writer.api.hostname = settings.AGENT_HOSTNAME 47 tracer.writer.api.port = settings.AGENT_PORT 48 49 if settings.AUTO_INSTRUMENT: 50 # trace Django internals 51 insert_exception_middleware() 52 try: 53 patch_db(tracer) 54 except Exception: 55 log.exception('error patching Django database connections') 56 57 try: 58 patch_template(tracer) 59 except Exception: 60 log.exception('error patching Django template rendering') 61 62 try: 63 patch_cache(tracer) 64 except Exception: 65 log.exception('error patching Django cache') 66 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ddtrace/contrib/django/apps.py b/ddtrace/contrib/django/apps.py --- a/ddtrace/contrib/django/apps.py +++ b/ddtrace/contrib/django/apps.py @@ -31,13 +31,6 @@ if settings.TAGS: tracer.set_tags(settings.TAGS) - # define the service details - tracer.set_service_info( - app='django', - app_type=AppTypes.web, - service=settings.DEFAULT_SERVICE, - ) - # configure the tracer instance # TODO[manu]: we may use configure() but because it creates a new # AgentWriter, it breaks all tests. The configure() behavior must @@ -46,6 +39,13 @@ tracer.writer.api.hostname = settings.AGENT_HOSTNAME tracer.writer.api.port = settings.AGENT_PORT + # define the service details + tracer.set_service_info( + app='django', + app_type=AppTypes.web, + service=settings.DEFAULT_SERVICE, + ) + if settings.AUTO_INSTRUMENT: # trace Django internals insert_exception_middleware()
{"golden_diff": "diff --git a/ddtrace/contrib/django/apps.py b/ddtrace/contrib/django/apps.py\n--- a/ddtrace/contrib/django/apps.py\n+++ b/ddtrace/contrib/django/apps.py\n@@ -31,13 +31,6 @@\n if settings.TAGS:\n tracer.set_tags(settings.TAGS)\n \n- # define the service details\n- tracer.set_service_info(\n- app='django',\n- app_type=AppTypes.web,\n- service=settings.DEFAULT_SERVICE,\n- )\n-\n # configure the tracer instance\n # TODO[manu]: we may use configure() but because it creates a new\n # AgentWriter, it breaks all tests. The configure() behavior must\n@@ -46,6 +39,13 @@\n tracer.writer.api.hostname = settings.AGENT_HOSTNAME\n tracer.writer.api.port = settings.AGENT_PORT\n \n+ # define the service details\n+ tracer.set_service_info(\n+ app='django',\n+ app_type=AppTypes.web,\n+ service=settings.DEFAULT_SERVICE,\n+ )\n+\n if settings.AUTO_INSTRUMENT:\n # trace Django internals\n insert_exception_middleware()\n", "issue": "Error log still occurs when tracer is disabled (Django)\nThe tracer is logging the following error when disabled:\r\n\r\n> 2017-07-05 12:54:36,552:[none]:[ddtrace.writer:134]:ERROR cannot send services: [Errno 111] Connection refused\r\n\r\nThis is occurring when integrated with Django with the following configuration:\r\n\r\n```python\r\nDATADOG_TRACE = {\r\n 'ENABLED': False\r\n}\r\n```\r\nFrom reading the [documentation](http://pypi.datadoghq.com/trace/docs/#module-ddtrace.contrib.django) which states:\r\n> ENABLED (default: not django_settings.DEBUG): defines if the tracer is enabled or not. If set to false, the code is still instrumented but no spans are sent to the trace agent. This setting cannot be changed at runtime and a restart is required. By default the tracer is disabled when in DEBUG mode, enabled otherwise.\r\n\r\nIt seems this log should not occur. If no spans are sent to the trace agent then presumably a connection should not be established?\r\n\r\nPackage Info\r\n------------------\r\n\r\n> datadog==0.15.0\r\n> ddtrace==0.8.5 \r\n\n", "before_files": [{"content": "import logging\n\n# 3rd party\nfrom django.apps import AppConfig\n\n# project\nfrom .db import patch_db\nfrom .conf import settings\nfrom .cache import patch_cache\nfrom .templates import patch_template\nfrom .middleware import insert_exception_middleware\n\nfrom ...ext import AppTypes\n\n\nlog = logging.getLogger(__name__)\n\n\nclass TracerConfig(AppConfig):\n name = 'ddtrace.contrib.django'\n label = 'datadog_django'\n\n def ready(self):\n \"\"\"\n Ready is called as soon as the registry is fully populated.\n Tracing capabilities must be enabled in this function so that\n all Django internals are properly configured.\n \"\"\"\n tracer = settings.TRACER\n\n if settings.TAGS:\n tracer.set_tags(settings.TAGS)\n\n # define the service details\n tracer.set_service_info(\n app='django',\n app_type=AppTypes.web,\n service=settings.DEFAULT_SERVICE,\n )\n\n # configure the tracer instance\n # TODO[manu]: we may use configure() but because it creates a new\n # AgentWriter, it breaks all tests. 
The configure() behavior must\n # be changed to use it in this integration\n tracer.enabled = settings.ENABLED\n tracer.writer.api.hostname = settings.AGENT_HOSTNAME\n tracer.writer.api.port = settings.AGENT_PORT\n\n if settings.AUTO_INSTRUMENT:\n # trace Django internals\n insert_exception_middleware()\n try:\n patch_db(tracer)\n except Exception:\n log.exception('error patching Django database connections')\n\n try:\n patch_template(tracer)\n except Exception:\n log.exception('error patching Django template rendering')\n\n try:\n patch_cache(tracer)\n except Exception:\n log.exception('error patching Django cache')\n", "path": "ddtrace/contrib/django/apps.py"}], "after_files": [{"content": "import logging\n\n# 3rd party\nfrom django.apps import AppConfig\n\n# project\nfrom .db import patch_db\nfrom .conf import settings\nfrom .cache import patch_cache\nfrom .templates import patch_template\nfrom .middleware import insert_exception_middleware\n\nfrom ...ext import AppTypes\n\n\nlog = logging.getLogger(__name__)\n\n\nclass TracerConfig(AppConfig):\n name = 'ddtrace.contrib.django'\n label = 'datadog_django'\n\n def ready(self):\n \"\"\"\n Ready is called as soon as the registry is fully populated.\n Tracing capabilities must be enabled in this function so that\n all Django internals are properly configured.\n \"\"\"\n tracer = settings.TRACER\n\n if settings.TAGS:\n tracer.set_tags(settings.TAGS)\n\n # configure the tracer instance\n # TODO[manu]: we may use configure() but because it creates a new\n # AgentWriter, it breaks all tests. The configure() behavior must\n # be changed to use it in this integration\n tracer.enabled = settings.ENABLED\n tracer.writer.api.hostname = settings.AGENT_HOSTNAME\n tracer.writer.api.port = settings.AGENT_PORT\n\n # define the service details\n tracer.set_service_info(\n app='django',\n app_type=AppTypes.web,\n service=settings.DEFAULT_SERVICE,\n )\n\n if settings.AUTO_INSTRUMENT:\n # trace Django internals\n insert_exception_middleware()\n try:\n patch_db(tracer)\n except Exception:\n log.exception('error patching Django database connections')\n\n try:\n patch_template(tracer)\n except Exception:\n log.exception('error patching Django template rendering')\n\n try:\n patch_cache(tracer)\n except Exception:\n log.exception('error patching Django cache')\n", "path": "ddtrace/contrib/django/apps.py"}]}
1,031
254
gh_patches_debug_16404
rasdani/github-patches
git_diff
airctic__icevision-796
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- show_results with yolov5 + lightning training throws error ## 🐛 Bug **Describe the bug** `show_results` with yolo don't work if the model was training with pytorch lightning **To Reproduce** Train a yolo model with pytorch-lightning and try to call `show_results` **Full stacktrace** ```python --------------------------------------------------------------------------- RuntimeError Traceback (most recent call last) <ipython-input-19-2cf4276b061d> in <module>() ----> 1 model_type.show_results(model, valid_ds, detection_threshold=.5) 9 frames /usr/local/lib/python3.7/dist-packages/icevision/models/ultralytics/yolov5/show_results.py in show_results(model, dataset, detection_threshold, num_samples, ncols, denormalize_fn, show, device) 41 show=show, 42 detection_threshold=detection_threshold, ---> 43 device=device, 44 ) 45 /usr/local/lib/python3.7/dist-packages/icevision/models/base_show_results.py in base_show_results(predict_fn, model, dataset, num_samples, ncols, denormalize_fn, show, **predict_kwargs) 19 ) -> None: 20 records = random.choices(dataset, k=num_samples) ---> 21 preds = predict_fn(model, records, **predict_kwargs) 22 23 show_preds( /usr/local/lib/python3.7/dist-packages/icevision/models/ultralytics/yolov5/prediction.py in predict(model, dataset, detection_threshold, keep_images, device) 48 detection_threshold=detection_threshold, 49 keep_images=keep_images, ---> 50 device=device, 51 ) 52 /usr/local/lib/python3.7/dist-packages/torch/autograd/grad_mode.py in decorate_context(*args, **kwargs) 24 def decorate_context(*args, **kwargs): 25 with self.__class__(): ---> 26 return func(*args, **kwargs) 27 return cast(F, decorate_context) 28 /usr/local/lib/python3.7/dist-packages/icevision/models/ultralytics/yolov5/prediction.py in _predict_batch(model, batch, records, detection_threshold, keep_images, device) 24 model = model.eval().to(device) 25 ---> 26 raw_preds = model(batch)[0] 27 return convert_raw_predictions( 28 batch=batch, /usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs) 725 result = self._slow_forward(*input, **kwargs) 726 else: --> 727 result = self.forward(*input, **kwargs) 728 for hook in itertools.chain( 729 _global_forward_hooks.values(), /usr/local/lib/python3.7/dist-packages/yolov5/models/yolo.py in forward(self, x, augment, profile) 121 return torch.cat(y, 1), None # augmented inference, train 122 else: --> 123 return self.forward_once(x, profile) # single-scale inference, train 124 125 def forward_once(self, x, profile=False): /usr/local/lib/python3.7/dist-packages/yolov5/models/yolo.py in forward_once(self, x, profile) 137 print('%10.1f%10.0f%10.1fms %-40s' % (o, m.np, dt[-1], m.type)) 138 --> 139 x = m(x) # run 140 y.append(x if m.i in self.save else None) # save output 141 /usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs) 725 result = self._slow_forward(*input, **kwargs) 726 else: --> 727 result = self.forward(*input, **kwargs) 728 for hook in itertools.chain( 729 _global_forward_hooks.values(), /usr/local/lib/python3.7/dist-packages/yolov5/models/yolo.py in forward(self, x) 52 53 y = x[i].sigmoid() ---> 54 y[..., 0:2] = (y[..., 0:2] * 2. 
- 0.5 + self.grid[i]) * self.stride[i] # xy 55 y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh 56 z.append(y.view(bs, -1, self.no)) RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu! ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `icevision/models/ultralytics/yolov5/prediction.py` Content: ``` 1 __all__ = ["predict", "predict_dl", "convert_raw_predictions"] 2 3 from icevision.imports import * 4 from icevision.utils import * 5 from icevision.core import * 6 from icevision.data import * 7 from icevision.models.utils import _predict_dl 8 from icevision.models.ultralytics.yolov5.dataloaders import * 9 from yolov5.utils.general import non_max_suppression 10 11 12 @torch.no_grad() 13 def _predict_batch( 14 model: nn.Module, 15 batch: Sequence[torch.Tensor], 16 records: Sequence[BaseRecord], 17 detection_threshold: float = 0.25, 18 nms_iou_threshold: float = 0.45, 19 keep_images: bool = False, 20 device: Optional[torch.device] = None, 21 ) -> List[Prediction]: 22 device = device or model_device(model) 23 24 batch = batch[0].to(device) 25 model = model.eval().to(device) 26 27 raw_preds = model(batch)[0] 28 return convert_raw_predictions( 29 batch=batch, 30 raw_preds=raw_preds, 31 records=records, 32 detection_threshold=detection_threshold, 33 nms_iou_threshold=nms_iou_threshold, 34 keep_images=keep_images, 35 ) 36 37 38 def predict( 39 model: nn.Module, 40 dataset: Dataset, 41 detection_threshold: float = 0.25, 42 nms_iou_threshold: float = 0.45, 43 keep_images: bool = False, 44 device: Optional[torch.device] = None, 45 ) -> List[Prediction]: 46 batch, records = build_infer_batch(dataset) 47 return _predict_batch( 48 model=model, 49 batch=batch, 50 records=records, 51 detection_threshold=detection_threshold, 52 nms_iou_threshold=nms_iou_threshold, 53 keep_images=keep_images, 54 device=device, 55 ) 56 57 58 def predict_dl( 59 model: nn.Module, 60 infer_dl: DataLoader, 61 show_pbar: bool = True, 62 keep_images: bool = False, 63 **predict_kwargs, 64 ): 65 return _predict_dl( 66 predict_fn=_predict_batch, 67 model=model, 68 infer_dl=infer_dl, 69 show_pbar=show_pbar, 70 keep_images=keep_images, 71 **predict_kwargs, 72 ) 73 74 75 def convert_raw_predictions( 76 batch, 77 raw_preds: torch.Tensor, 78 records: Sequence[BaseRecord], 79 detection_threshold: float, 80 nms_iou_threshold: float, 81 keep_images: bool = False, 82 ) -> List[Prediction]: 83 dets = non_max_suppression( 84 raw_preds, conf_thres=detection_threshold, iou_thres=nms_iou_threshold 85 ) 86 dets = [d.detach().cpu().numpy() for d in dets] 87 preds = [] 88 for det, record, tensor_image in zip(dets, records, batch): 89 90 pred = BaseRecord( 91 ( 92 ScoresRecordComponent(), 93 ImageRecordComponent(), 94 InstancesLabelsRecordComponent(), 95 BBoxesRecordComponent(), 96 ) 97 ) 98 99 pred.detection.set_class_map(record.detection.class_map) 100 pred.detection.set_labels_by_id(det[:, 5].astype(int)) 101 pred.detection.set_bboxes([BBox.from_xyxy(*xyxy) for xyxy in det[:, :4]]) 102 pred.detection.set_scores(det[:, 4]) 103 104 if keep_images: 105 record.set_img(tensor_to_image(tensor_image)) 106 107 preds.append(Prediction(pred=pred, ground_truth=record)) 108 109 return preds 110 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/icevision/models/ultralytics/yolov5/prediction.py b/icevision/models/ultralytics/yolov5/prediction.py --- a/icevision/models/ultralytics/yolov5/prediction.py +++ b/icevision/models/ultralytics/yolov5/prediction.py @@ -19,7 +19,17 @@ keep_images: bool = False, device: Optional[torch.device] = None, ) -> List[Prediction]: - device = device or model_device(model) + # device issue addressed on discord: https://discord.com/channels/735877944085446747/770279401791160400/832361687855923250 + if device is not None: + raise ValueError( + "For YOLOv5 device can only be specified during model creation, " + "for more info take a look at the discussion here: " + "https://discord.com/channels/735877944085446747/770279401791160400/832361687855923250" + ) + grid = model.model[-1].grid[-1] + # if `grid.numel() == 1` it means the grid isn't initialized yet and we can't + # trust it's device (will always be CPU) + device = grid.device if grid.numel() > 1 else model_device(model) batch = batch[0].to(device) model = model.eval().to(device)
{"golden_diff": "diff --git a/icevision/models/ultralytics/yolov5/prediction.py b/icevision/models/ultralytics/yolov5/prediction.py\n--- a/icevision/models/ultralytics/yolov5/prediction.py\n+++ b/icevision/models/ultralytics/yolov5/prediction.py\n@@ -19,7 +19,17 @@\n keep_images: bool = False,\n device: Optional[torch.device] = None,\n ) -> List[Prediction]:\n- device = device or model_device(model)\n+ # device issue addressed on discord: https://discord.com/channels/735877944085446747/770279401791160400/832361687855923250\n+ if device is not None:\n+ raise ValueError(\n+ \"For YOLOv5 device can only be specified during model creation, \"\n+ \"for more info take a look at the discussion here: \"\n+ \"https://discord.com/channels/735877944085446747/770279401791160400/832361687855923250\"\n+ )\n+ grid = model.model[-1].grid[-1]\n+ # if `grid.numel() == 1` it means the grid isn't initialized yet and we can't\n+ # trust it's device (will always be CPU)\n+ device = grid.device if grid.numel() > 1 else model_device(model)\n \n batch = batch[0].to(device)\n model = model.eval().to(device)\n", "issue": "show_results with yolov5 + lightning training throws error\n## \ud83d\udc1b Bug\r\n**Describe the bug**\r\n`show_results` with yolo don't work if the model was training with pytorch lightning\r\n\r\n**To Reproduce**\r\nTrain a yolo model with pytorch-lightning and try to call `show_results`\r\n\r\n**Full stacktrace**\r\n```python\r\n---------------------------------------------------------------------------\r\n\r\nRuntimeError Traceback (most recent call last)\r\n\r\n<ipython-input-19-2cf4276b061d> in <module>()\r\n----> 1 model_type.show_results(model, valid_ds, detection_threshold=.5)\r\n\r\n9 frames\r\n\r\n/usr/local/lib/python3.7/dist-packages/icevision/models/ultralytics/yolov5/show_results.py in show_results(model, dataset, detection_threshold, num_samples, ncols, denormalize_fn, show, device)\r\n 41 show=show,\r\n 42 detection_threshold=detection_threshold,\r\n---> 43 device=device,\r\n 44 )\r\n 45 \r\n\r\n/usr/local/lib/python3.7/dist-packages/icevision/models/base_show_results.py in base_show_results(predict_fn, model, dataset, num_samples, ncols, denormalize_fn, show, **predict_kwargs)\r\n 19 ) -> None:\r\n 20 records = random.choices(dataset, k=num_samples)\r\n---> 21 preds = predict_fn(model, records, **predict_kwargs)\r\n 22 \r\n 23 show_preds(\r\n\r\n/usr/local/lib/python3.7/dist-packages/icevision/models/ultralytics/yolov5/prediction.py in predict(model, dataset, detection_threshold, keep_images, device)\r\n 48 detection_threshold=detection_threshold,\r\n 49 keep_images=keep_images,\r\n---> 50 device=device,\r\n 51 )\r\n 52 \r\n\r\n/usr/local/lib/python3.7/dist-packages/torch/autograd/grad_mode.py in decorate_context(*args, **kwargs)\r\n 24 def decorate_context(*args, **kwargs):\r\n 25 with self.__class__():\r\n---> 26 return func(*args, **kwargs)\r\n 27 return cast(F, decorate_context)\r\n 28 \r\n\r\n/usr/local/lib/python3.7/dist-packages/icevision/models/ultralytics/yolov5/prediction.py in _predict_batch(model, batch, records, detection_threshold, keep_images, device)\r\n 24 model = model.eval().to(device)\r\n 25 \r\n---> 26 raw_preds = model(batch)[0]\r\n 27 return convert_raw_predictions(\r\n 28 batch=batch,\r\n\r\n/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)\r\n 725 result = self._slow_forward(*input, **kwargs)\r\n 726 else:\r\n--> 727 result = self.forward(*input, **kwargs)\r\n 728 for hook in itertools.chain(\r\n 729 
_global_forward_hooks.values(),\r\n\r\n/usr/local/lib/python3.7/dist-packages/yolov5/models/yolo.py in forward(self, x, augment, profile)\r\n 121 return torch.cat(y, 1), None # augmented inference, train\r\n 122 else:\r\n--> 123 return self.forward_once(x, profile) # single-scale inference, train\r\n 124 \r\n 125 def forward_once(self, x, profile=False):\r\n\r\n/usr/local/lib/python3.7/dist-packages/yolov5/models/yolo.py in forward_once(self, x, profile)\r\n 137 print('%10.1f%10.0f%10.1fms %-40s' % (o, m.np, dt[-1], m.type))\r\n 138 \r\n--> 139 x = m(x) # run\r\n 140 y.append(x if m.i in self.save else None) # save output\r\n 141 \r\n\r\n/usr/local/lib/python3.7/dist-packages/torch/nn/modules/module.py in _call_impl(self, *input, **kwargs)\r\n 725 result = self._slow_forward(*input, **kwargs)\r\n 726 else:\r\n--> 727 result = self.forward(*input, **kwargs)\r\n 728 for hook in itertools.chain(\r\n 729 _global_forward_hooks.values(),\r\n\r\n/usr/local/lib/python3.7/dist-packages/yolov5/models/yolo.py in forward(self, x)\r\n 52 \r\n 53 y = x[i].sigmoid()\r\n---> 54 y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i]) * self.stride[i] # xy\r\n 55 y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i] # wh\r\n 56 z.append(y.view(bs, -1, self.no))\r\n\r\nRuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu!\r\n```\r\n\n", "before_files": [{"content": "__all__ = [\"predict\", \"predict_dl\", \"convert_raw_predictions\"]\n\nfrom icevision.imports import *\nfrom icevision.utils import *\nfrom icevision.core import *\nfrom icevision.data import *\nfrom icevision.models.utils import _predict_dl\nfrom icevision.models.ultralytics.yolov5.dataloaders import *\nfrom yolov5.utils.general import non_max_suppression\n\n\[email protected]_grad()\ndef _predict_batch(\n model: nn.Module,\n batch: Sequence[torch.Tensor],\n records: Sequence[BaseRecord],\n detection_threshold: float = 0.25,\n nms_iou_threshold: float = 0.45,\n keep_images: bool = False,\n device: Optional[torch.device] = None,\n) -> List[Prediction]:\n device = device or model_device(model)\n\n batch = batch[0].to(device)\n model = model.eval().to(device)\n\n raw_preds = model(batch)[0]\n return convert_raw_predictions(\n batch=batch,\n raw_preds=raw_preds,\n records=records,\n detection_threshold=detection_threshold,\n nms_iou_threshold=nms_iou_threshold,\n keep_images=keep_images,\n )\n\n\ndef predict(\n model: nn.Module,\n dataset: Dataset,\n detection_threshold: float = 0.25,\n nms_iou_threshold: float = 0.45,\n keep_images: bool = False,\n device: Optional[torch.device] = None,\n) -> List[Prediction]:\n batch, records = build_infer_batch(dataset)\n return _predict_batch(\n model=model,\n batch=batch,\n records=records,\n detection_threshold=detection_threshold,\n nms_iou_threshold=nms_iou_threshold,\n keep_images=keep_images,\n device=device,\n )\n\n\ndef predict_dl(\n model: nn.Module,\n infer_dl: DataLoader,\n show_pbar: bool = True,\n keep_images: bool = False,\n **predict_kwargs,\n):\n return _predict_dl(\n predict_fn=_predict_batch,\n model=model,\n infer_dl=infer_dl,\n show_pbar=show_pbar,\n keep_images=keep_images,\n **predict_kwargs,\n )\n\n\ndef convert_raw_predictions(\n batch,\n raw_preds: torch.Tensor,\n records: Sequence[BaseRecord],\n detection_threshold: float,\n nms_iou_threshold: float,\n keep_images: bool = False,\n) -> List[Prediction]:\n dets = non_max_suppression(\n raw_preds, conf_thres=detection_threshold, iou_thres=nms_iou_threshold\n )\n dets = 
[d.detach().cpu().numpy() for d in dets]\n preds = []\n for det, record, tensor_image in zip(dets, records, batch):\n\n pred = BaseRecord(\n (\n ScoresRecordComponent(),\n ImageRecordComponent(),\n InstancesLabelsRecordComponent(),\n BBoxesRecordComponent(),\n )\n )\n\n pred.detection.set_class_map(record.detection.class_map)\n pred.detection.set_labels_by_id(det[:, 5].astype(int))\n pred.detection.set_bboxes([BBox.from_xyxy(*xyxy) for xyxy in det[:, :4]])\n pred.detection.set_scores(det[:, 4])\n\n if keep_images:\n record.set_img(tensor_to_image(tensor_image))\n\n preds.append(Prediction(pred=pred, ground_truth=record))\n\n return preds\n", "path": "icevision/models/ultralytics/yolov5/prediction.py"}], "after_files": [{"content": "__all__ = [\"predict\", \"predict_dl\", \"convert_raw_predictions\"]\n\nfrom icevision.imports import *\nfrom icevision.utils import *\nfrom icevision.core import *\nfrom icevision.data import *\nfrom icevision.models.utils import _predict_dl\nfrom icevision.models.ultralytics.yolov5.dataloaders import *\nfrom yolov5.utils.general import non_max_suppression\n\n\[email protected]_grad()\ndef _predict_batch(\n model: nn.Module,\n batch: Sequence[torch.Tensor],\n records: Sequence[BaseRecord],\n detection_threshold: float = 0.25,\n nms_iou_threshold: float = 0.45,\n keep_images: bool = False,\n device: Optional[torch.device] = None,\n) -> List[Prediction]:\n # device issue addressed on discord: https://discord.com/channels/735877944085446747/770279401791160400/832361687855923250\n if device is not None:\n raise ValueError(\n \"For YOLOv5 device can only be specified during model creation, \"\n \"for more info take a look at the discussion here: \"\n \"https://discord.com/channels/735877944085446747/770279401791160400/832361687855923250\"\n )\n grid = model.model[-1].grid[-1]\n # if `grid.numel() == 1` it means the grid isn't initialized yet and we can't\n # trust it's device (will always be CPU)\n device = grid.device if grid.numel() > 1 else model_device(model)\n\n batch = batch[0].to(device)\n model = model.eval().to(device)\n\n raw_preds = model(batch)[0]\n return convert_raw_predictions(\n batch=batch,\n raw_preds=raw_preds,\n records=records,\n detection_threshold=detection_threshold,\n nms_iou_threshold=nms_iou_threshold,\n keep_images=keep_images,\n )\n\n\ndef predict(\n model: nn.Module,\n dataset: Dataset,\n detection_threshold: float = 0.25,\n nms_iou_threshold: float = 0.45,\n keep_images: bool = False,\n device: Optional[torch.device] = None,\n) -> List[Prediction]:\n batch, records = build_infer_batch(dataset)\n return _predict_batch(\n model=model,\n batch=batch,\n records=records,\n detection_threshold=detection_threshold,\n nms_iou_threshold=nms_iou_threshold,\n keep_images=keep_images,\n device=device,\n )\n\n\ndef predict_dl(\n model: nn.Module,\n infer_dl: DataLoader,\n show_pbar: bool = True,\n keep_images: bool = False,\n **predict_kwargs,\n):\n return _predict_dl(\n predict_fn=_predict_batch,\n model=model,\n infer_dl=infer_dl,\n show_pbar=show_pbar,\n keep_images=keep_images,\n **predict_kwargs,\n )\n\n\ndef convert_raw_predictions(\n batch,\n raw_preds: torch.Tensor,\n records: Sequence[BaseRecord],\n detection_threshold: float,\n nms_iou_threshold: float,\n keep_images: bool = False,\n) -> List[Prediction]:\n dets = non_max_suppression(\n raw_preds, conf_thres=detection_threshold, iou_thres=nms_iou_threshold\n )\n dets = [d.detach().cpu().numpy() for d in dets]\n preds = []\n for det, record, tensor_image in zip(dets, records, batch):\n\n 
pred = BaseRecord(\n (\n ScoresRecordComponent(),\n ImageRecordComponent(),\n InstancesLabelsRecordComponent(),\n BBoxesRecordComponent(),\n )\n )\n\n pred.detection.set_class_map(record.detection.class_map)\n pred.detection.set_labels_by_id(det[:, 5].astype(int))\n pred.detection.set_bboxes([BBox.from_xyxy(*xyxy) for xyxy in det[:, :4]])\n pred.detection.set_scores(det[:, 4])\n\n if keep_images:\n record.set_img(tensor_to_image(tensor_image))\n\n preds.append(Prediction(pred=pred, ground_truth=record))\n\n return preds\n", "path": "icevision/models/ultralytics/yolov5/prediction.py"}]}
2,393
406
gh_patches_debug_6739
rasdani/github-patches
git_diff
TheAlgorithms__Python-5734
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Rewrite *other* fibonacci.py I opened issue #5665 to have `fibonacci.py` rewritten, but apparently there are multiple files with that name. The PR that @citharus made (#5677) revamps the file `dynamic_programming/fibonacci.py` (thanks for your contributions btw!) whereas this issue seeks to revamp the file `maths/fibonacci.py`. I'm opening this as a new issue since it's technically a different algorithm file and the two `fibonacci.py` files each use different algorithms to calculate the Fibonacci sequence. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `maths/fibonacci.py` Content: ``` 1 # fibonacci.py 2 """ 3 Calculates the Fibonacci sequence using iteration, recursion, and a simplified 4 form of Binet's formula 5 6 NOTE 1: the iterative and recursive functions are more accurate than the Binet's 7 formula function because the iterative function doesn't use floats 8 9 NOTE 2: the Binet's formula function is much more limited in the size of inputs 10 that it can handle due to the size limitations of Python floats 11 """ 12 13 from math import sqrt 14 from time import time 15 16 17 def time_func(func, *args, **kwargs): 18 """ 19 Times the execution of a function with parameters 20 """ 21 start = time() 22 output = func(*args, **kwargs) 23 end = time() 24 if int(end - start) > 0: 25 print(f"{func.__name__} runtime: {(end - start):0.4f} s") 26 else: 27 print(f"{func.__name__} runtime: {(end - start) * 1000:0.4f} ms") 28 return output 29 30 31 def fib_iterative(n: int) -> list[int]: 32 """ 33 Calculates the first n (0-indexed) Fibonacci numbers using iteration 34 >>> fib_iterative(0) 35 [0] 36 >>> fib_iterative(1) 37 [0, 1] 38 >>> fib_iterative(5) 39 [0, 1, 1, 2, 3, 5] 40 >>> fib_iterative(10) 41 [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55] 42 >>> fib_iterative(-1) 43 Traceback (most recent call last): 44 ... 45 Exception: n is negative 46 """ 47 if n < 0: 48 raise Exception("n is negative") 49 if n == 0: 50 return [0] 51 fib = [0, 1] 52 for _ in range(n - 1): 53 fib.append(fib[-1] + fib[-2]) 54 return fib 55 56 57 def fib_recursive(n: int) -> list[int]: 58 """ 59 Calculates the first n (0-indexed) Fibonacci numbers using recursion 60 >>> fib_iterative(0) 61 [0] 62 >>> fib_iterative(1) 63 [0, 1] 64 >>> fib_iterative(5) 65 [0, 1, 1, 2, 3, 5] 66 >>> fib_iterative(10) 67 [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55] 68 >>> fib_iterative(-1) 69 Traceback (most recent call last): 70 ... 
71 Exception: n is negative 72 """ 73 74 def fib_recursive_term(i: int) -> int: 75 """ 76 Calculates the i-th (0-indexed) Fibonacci number using recursion 77 """ 78 if i < 0: 79 raise Exception("n is negative") 80 if i < 2: 81 return i 82 return fib_recursive_term(i - 1) + fib_recursive_term(i - 2) 83 84 if n < 0: 85 raise Exception("n is negative") 86 return [fib_recursive_term(i) for i in range(n + 1)] 87 88 89 def fib_binet(n: int) -> list[int]: 90 """ 91 Calculates the first n (0-indexed) Fibonacci numbers using a simplified form 92 of Binet's formula: 93 https://en.m.wikipedia.org/wiki/Fibonacci_number#Computation_by_rounding 94 95 NOTE 1: this function diverges from fib_iterative at around n = 71, likely 96 due to compounding floating-point arithmetic errors 97 98 NOTE 2: this function overflows on n >= 1475 because of the size limitations 99 of Python floats 100 >>> fib_binet(0) 101 [0] 102 >>> fib_binet(1) 103 [0, 1] 104 >>> fib_binet(5) 105 [0, 1, 1, 2, 3, 5] 106 >>> fib_binet(10) 107 [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55] 108 >>> fib_binet(-1) 109 Traceback (most recent call last): 110 ... 111 Exception: n is negative 112 >>> fib_binet(1475) 113 Traceback (most recent call last): 114 ... 115 Exception: n is too large 116 """ 117 if n < 0: 118 raise Exception("n is negative") 119 if n >= 1475: 120 raise Exception("n is too large") 121 sqrt_5 = sqrt(5) 122 phi = (1 + sqrt_5) / 2 123 return [round(phi ** i / sqrt_5) for i in range(n + 1)] 124 125 126 if __name__ == "__main__": 127 num = 20 128 time_func(fib_iterative, num) 129 time_func(fib_recursive, num) 130 time_func(fib_binet, num) 131 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/maths/fibonacci.py b/maths/fibonacci.py --- a/maths/fibonacci.py +++ b/maths/fibonacci.py @@ -95,8 +95,8 @@ NOTE 1: this function diverges from fib_iterative at around n = 71, likely due to compounding floating-point arithmetic errors - NOTE 2: this function overflows on n >= 1475 because of the size limitations - of Python floats + NOTE 2: this function doesn't accept n >= 1475 because it overflows + thereafter due to the size limitations of Python floats >>> fib_binet(0) [0] >>> fib_binet(1)
{"golden_diff": "diff --git a/maths/fibonacci.py b/maths/fibonacci.py\n--- a/maths/fibonacci.py\n+++ b/maths/fibonacci.py\n@@ -95,8 +95,8 @@\n NOTE 1: this function diverges from fib_iterative at around n = 71, likely\n due to compounding floating-point arithmetic errors\n \n- NOTE 2: this function overflows on n >= 1475 because of the size limitations\n- of Python floats\n+ NOTE 2: this function doesn't accept n >= 1475 because it overflows\n+ thereafter due to the size limitations of Python floats\n >>> fib_binet(0)\n [0]\n >>> fib_binet(1)\n", "issue": "Rewrite *other* fibonacci.py\nI opened issue #5665 to have `fibonacci.py` rewritten, but apparently there are multiple files with that name. The PR that @citharus made (#5677) revamps the file `dynamic_programming/fibonacci.py` (thanks for your contributions btw!) whereas this issue seeks to revamp the file `maths/fibonacci.py`.\r\n\r\nI'm opening this as a new issue since it's technically a different algorithm file and the two `fibonacci.py` files each use different algorithms to calculate the Fibonacci sequence.\n", "before_files": [{"content": "# fibonacci.py\n\"\"\"\nCalculates the Fibonacci sequence using iteration, recursion, and a simplified\nform of Binet's formula\n\nNOTE 1: the iterative and recursive functions are more accurate than the Binet's\nformula function because the iterative function doesn't use floats\n\nNOTE 2: the Binet's formula function is much more limited in the size of inputs\nthat it can handle due to the size limitations of Python floats\n\"\"\"\n\nfrom math import sqrt\nfrom time import time\n\n\ndef time_func(func, *args, **kwargs):\n \"\"\"\n Times the execution of a function with parameters\n \"\"\"\n start = time()\n output = func(*args, **kwargs)\n end = time()\n if int(end - start) > 0:\n print(f\"{func.__name__} runtime: {(end - start):0.4f} s\")\n else:\n print(f\"{func.__name__} runtime: {(end - start) * 1000:0.4f} ms\")\n return output\n\n\ndef fib_iterative(n: int) -> list[int]:\n \"\"\"\n Calculates the first n (0-indexed) Fibonacci numbers using iteration\n >>> fib_iterative(0)\n [0]\n >>> fib_iterative(1)\n [0, 1]\n >>> fib_iterative(5)\n [0, 1, 1, 2, 3, 5]\n >>> fib_iterative(10)\n [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55]\n >>> fib_iterative(-1)\n Traceback (most recent call last):\n ...\n Exception: n is negative\n \"\"\"\n if n < 0:\n raise Exception(\"n is negative\")\n if n == 0:\n return [0]\n fib = [0, 1]\n for _ in range(n - 1):\n fib.append(fib[-1] + fib[-2])\n return fib\n\n\ndef fib_recursive(n: int) -> list[int]:\n \"\"\"\n Calculates the first n (0-indexed) Fibonacci numbers using recursion\n >>> fib_iterative(0)\n [0]\n >>> fib_iterative(1)\n [0, 1]\n >>> fib_iterative(5)\n [0, 1, 1, 2, 3, 5]\n >>> fib_iterative(10)\n [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55]\n >>> fib_iterative(-1)\n Traceback (most recent call last):\n ...\n Exception: n is negative\n \"\"\"\n\n def fib_recursive_term(i: int) -> int:\n \"\"\"\n Calculates the i-th (0-indexed) Fibonacci number using recursion\n \"\"\"\n if i < 0:\n raise Exception(\"n is negative\")\n if i < 2:\n return i\n return fib_recursive_term(i - 1) + fib_recursive_term(i - 2)\n\n if n < 0:\n raise Exception(\"n is negative\")\n return [fib_recursive_term(i) for i in range(n + 1)]\n\n\ndef fib_binet(n: int) -> list[int]:\n \"\"\"\n Calculates the first n (0-indexed) Fibonacci numbers using a simplified form\n of Binet's formula:\n https://en.m.wikipedia.org/wiki/Fibonacci_number#Computation_by_rounding\n\n NOTE 1: this function diverges from 
fib_iterative at around n = 71, likely\n due to compounding floating-point arithmetic errors\n\n NOTE 2: this function overflows on n >= 1475 because of the size limitations\n of Python floats\n >>> fib_binet(0)\n [0]\n >>> fib_binet(1)\n [0, 1]\n >>> fib_binet(5)\n [0, 1, 1, 2, 3, 5]\n >>> fib_binet(10)\n [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55]\n >>> fib_binet(-1)\n Traceback (most recent call last):\n ...\n Exception: n is negative\n >>> fib_binet(1475)\n Traceback (most recent call last):\n ...\n Exception: n is too large\n \"\"\"\n if n < 0:\n raise Exception(\"n is negative\")\n if n >= 1475:\n raise Exception(\"n is too large\")\n sqrt_5 = sqrt(5)\n phi = (1 + sqrt_5) / 2\n return [round(phi ** i / sqrt_5) for i in range(n + 1)]\n\n\nif __name__ == \"__main__\":\n num = 20\n time_func(fib_iterative, num)\n time_func(fib_recursive, num)\n time_func(fib_binet, num)\n", "path": "maths/fibonacci.py"}], "after_files": [{"content": "# fibonacci.py\n\"\"\"\nCalculates the Fibonacci sequence using iteration, recursion, and a simplified\nform of Binet's formula\n\nNOTE 1: the iterative and recursive functions are more accurate than the Binet's\nformula function because the iterative function doesn't use floats\n\nNOTE 2: the Binet's formula function is much more limited in the size of inputs\nthat it can handle due to the size limitations of Python floats\n\"\"\"\n\nfrom math import sqrt\nfrom time import time\n\n\ndef time_func(func, *args, **kwargs):\n \"\"\"\n Times the execution of a function with parameters\n \"\"\"\n start = time()\n output = func(*args, **kwargs)\n end = time()\n if int(end - start) > 0:\n print(f\"{func.__name__} runtime: {(end - start):0.4f} s\")\n else:\n print(f\"{func.__name__} runtime: {(end - start) * 1000:0.4f} ms\")\n return output\n\n\ndef fib_iterative(n: int) -> list[int]:\n \"\"\"\n Calculates the first n (0-indexed) Fibonacci numbers using iteration\n >>> fib_iterative(0)\n [0]\n >>> fib_iterative(1)\n [0, 1]\n >>> fib_iterative(5)\n [0, 1, 1, 2, 3, 5]\n >>> fib_iterative(10)\n [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55]\n >>> fib_iterative(-1)\n Traceback (most recent call last):\n ...\n Exception: n is negative\n \"\"\"\n if n < 0:\n raise Exception(\"n is negative\")\n if n == 0:\n return [0]\n fib = [0, 1]\n for _ in range(n - 1):\n fib.append(fib[-1] + fib[-2])\n return fib\n\n\ndef fib_recursive(n: int) -> list[int]:\n \"\"\"\n Calculates the first n (0-indexed) Fibonacci numbers using recursion\n >>> fib_iterative(0)\n [0]\n >>> fib_iterative(1)\n [0, 1]\n >>> fib_iterative(5)\n [0, 1, 1, 2, 3, 5]\n >>> fib_iterative(10)\n [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55]\n >>> fib_iterative(-1)\n Traceback (most recent call last):\n ...\n Exception: n is negative\n \"\"\"\n\n def fib_recursive_term(i: int) -> int:\n \"\"\"\n Calculates the i-th (0-indexed) Fibonacci number using recursion\n \"\"\"\n if i < 0:\n raise Exception(\"n is negative\")\n if i < 2:\n return i\n return fib_recursive_term(i - 1) + fib_recursive_term(i - 2)\n\n if n < 0:\n raise Exception(\"n is negative\")\n return [fib_recursive_term(i) for i in range(n + 1)]\n\n\ndef fib_binet(n: int) -> list[int]:\n \"\"\"\n Calculates the first n (0-indexed) Fibonacci numbers using a simplified form\n of Binet's formula:\n https://en.m.wikipedia.org/wiki/Fibonacci_number#Computation_by_rounding\n\n NOTE 1: this function diverges from fib_iterative at around n = 71, likely\n due to compounding floating-point arithmetic errors\n\n NOTE 2: this function doesn't accept n >= 1475 because it overflows\n 
thereafter due to the size limitations of Python floats\n >>> fib_binet(0)\n [0]\n >>> fib_binet(1)\n [0, 1]\n >>> fib_binet(5)\n [0, 1, 1, 2, 3, 5]\n >>> fib_binet(10)\n [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55]\n >>> fib_binet(-1)\n Traceback (most recent call last):\n ...\n Exception: n is negative\n >>> fib_binet(1475)\n Traceback (most recent call last):\n ...\n Exception: n is too large\n \"\"\"\n if n < 0:\n raise Exception(\"n is negative\")\n if n >= 1475:\n raise Exception(\"n is too large\")\n sqrt_5 = sqrt(5)\n phi = (1 + sqrt_5) / 2\n return [round(phi ** i / sqrt_5) for i in range(n + 1)]\n\n\nif __name__ == \"__main__\":\n num = 20\n time_func(fib_iterative, num)\n time_func(fib_recursive, num)\n time_func(fib_binet, num)\n", "path": "maths/fibonacci.py"}]}
1,783
166
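The record above pins `fib_binet`'s limit at n >= 1475 because `phi ** n` exceeds the range of a double-precision Python float. A minimal standalone sketch of that boundary (illustrative only, not part of the record):

```python
# Why the record's fib_binet rejects n >= 1475: phi ** n overflows an IEEE-754
# double (max ~1.797e308) exactly at n = 1475.
from math import sqrt

phi = (1 + sqrt(5)) / 2


def binet_term(n: int) -> int:
    # Rounded Binet formula; inaccurate past n ~ 71, as the record's NOTE 1 says.
    return round(phi ** n / sqrt(5))


print(len(str(binet_term(1474))))  # ~308 digits; the float intermediate still fits

try:
    binet_term(1475)
except OverflowError:
    print("n = 1475 overflows the float intermediate, matching the docstring")
```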
gh_patches_debug_32824
rasdani/github-patches
git_diff
mozilla__pontoon-3056
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Project+locale AJAX endpoint logic is not uniform At the moment, `/:code/:slug/ajax/` and `/:code/:slug/ajax/tags/` check if the locale & project exist and are visible to the user, while `/:code/:slug/ajax/contributors/` and `/:code/:slug/ajax/insights/` check that the ProjectLocale exists. [All of these](https://github.com/mozilla/pontoon/blob/master/pontoon/localizations/views.py) should check both conditions. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pontoon/localizations/views.py` Content: ``` 1 import math 2 from operator import attrgetter 3 from django.conf import settings 4 from django.core.exceptions import ImproperlyConfigured 5 from django.db.models import Q 6 from django.http import Http404, HttpResponseRedirect 7 from django.shortcuts import get_object_or_404, render 8 from django.views.generic.detail import DetailView 9 10 from pontoon.base.models import ( 11 Locale, 12 Project, 13 ProjectLocale, 14 TranslatedResource, 15 ) 16 from pontoon.base.utils import ( 17 require_AJAX, 18 get_project_or_redirect, 19 get_locale_or_redirect, 20 ) 21 from pontoon.contributors.views import ContributorsMixin 22 from pontoon.insights.utils import get_insights 23 from pontoon.tags.utils import TagsTool 24 25 26 def localization(request, code, slug): 27 """Locale-project overview.""" 28 locale = get_locale_or_redirect( 29 code, "pontoon.localizations.localization", "code", slug=slug 30 ) 31 if isinstance(locale, HttpResponseRedirect): 32 return locale 33 34 project = get_project_or_redirect( 35 slug, "pontoon.localizations.localization", "slug", request.user, code=code 36 ) 37 if isinstance(project, HttpResponseRedirect): 38 return project 39 40 project_locale = get_object_or_404( 41 ProjectLocale, 42 locale=locale, 43 project=project, 44 ) 45 46 resource_count = len(locale.parts_stats(project)) - 1 47 48 return render( 49 request, 50 "localizations/localization.html", 51 { 52 "locale": locale, 53 "project": project, 54 "project_locale": project_locale, 55 "resource_count": resource_count, 56 "tags_count": ( 57 project.tag_set.filter(resources__isnull=False).distinct().count() 58 if project.tags_enabled 59 else None 60 ), 61 }, 62 ) 63 64 65 @require_AJAX 66 def ajax_resources(request, code, slug): 67 """Resources tab.""" 68 locale = get_object_or_404(Locale, code=code) 69 project = get_object_or_404( 70 Project.objects.visible_for(request.user).available(), 71 slug=slug, 72 ) 73 74 # Amend the parts dict with latest activity info. 
75 translatedresources_qs = TranslatedResource.objects.filter( 76 resource__project=project, locale=locale 77 ).prefetch_related("resource", "latest_translation__user") 78 79 if not len(translatedresources_qs): 80 raise Http404 81 82 translatedresources = {s.resource.path: s for s in translatedresources_qs} 83 translatedresources = dict(list(translatedresources.items())) 84 parts = locale.parts_stats(project) 85 86 resource_priority_map = project.resource_priority_map() 87 88 for part in parts: 89 part["resource__priority"] = resource_priority_map.get(part["title"], None) 90 91 translatedresource = translatedresources.get(part["title"], None) 92 if translatedresource and translatedresource.latest_translation: 93 part[ 94 "latest_activity" 95 ] = translatedresource.latest_translation.latest_activity 96 else: 97 part["latest_activity"] = None 98 99 part["chart"] = { 100 "unreviewed_strings": part["unreviewed_strings"], 101 "pretranslated_strings": part["pretranslated_strings"], 102 "strings_with_errors": part["strings_with_errors"], 103 "strings_with_warnings": part["strings_with_warnings"], 104 "total_strings": part["resource__total_strings"], 105 "approved_strings": part["approved_strings"], 106 "approved_share": round( 107 part["approved_strings"] / part["resource__total_strings"] * 100 108 ), 109 "unreviewed_share": round( 110 part["unreviewed_strings"] / part["resource__total_strings"] * 100 111 ), 112 "pretranslated_share": round( 113 part["pretranslated_strings"] / part["resource__total_strings"] * 100 114 ), 115 "errors_share": round( 116 part["strings_with_errors"] / part["resource__total_strings"] * 100 117 ), 118 "warnings_share": round( 119 part["strings_with_warnings"] / part["resource__total_strings"] * 100 120 ), 121 "completion_percent": int( 122 math.floor( 123 ( 124 part["approved_strings"] 125 + part["pretranslated_strings"] 126 + part["strings_with_warnings"] 127 ) 128 / part["resource__total_strings"] 129 * 100 130 ) 131 ), 132 } 133 134 return render( 135 request, 136 "localizations/includes/resources.html", 137 { 138 "locale": locale, 139 "project": project, 140 "resources": parts, 141 "deadline": any(part["resource__deadline"] for part in parts), 142 "priority": any(part["resource__priority"] for part in parts), 143 }, 144 ) 145 146 147 @require_AJAX 148 def ajax_tags(request, code, slug): 149 """Tags tab.""" 150 locale = get_object_or_404(Locale, code=code) 151 project = get_object_or_404(Project.objects.visible_for(request.user), slug=slug) 152 153 if not project.tags_enabled: 154 raise Http404 155 156 tags_tool = TagsTool( 157 locales=[locale], 158 projects=[project], 159 priority=True, 160 ) 161 162 tags = sorted(tags_tool, key=attrgetter("priority"), reverse=True) 163 164 return render( 165 request, 166 "localizations/includes/tags.html", 167 {"locale": locale, "project": project, "tags": tags}, 168 ) 169 170 171 @require_AJAX 172 def ajax_insights(request, code, slug): 173 """Insights tab.""" 174 if not settings.ENABLE_INSIGHTS: 175 raise ImproperlyConfigured("ENABLE_INSIGHTS variable not set in settings.") 176 177 pl = get_object_or_404(ProjectLocale, locale__code=code, project__slug=slug) 178 insights = get_insights(locale=pl.locale, project=pl.project) 179 180 return render(request, "localizations/includes/insights.html", insights) 181 182 183 class LocalizationContributorsView(ContributorsMixin, DetailView): 184 """ 185 Renders view of contributors for the localization. 
186 """ 187 188 template_name = "localizations/includes/contributors.html" 189 190 def get_object(self): 191 return get_object_or_404( 192 ProjectLocale, 193 locale__code=self.kwargs["code"], 194 project__slug=self.kwargs["slug"], 195 ) 196 197 def get_context_object_name(self, obj): 198 return "projectlocale" 199 200 def contributors_filter(self, **kwargs): 201 return Q( 202 entity__resource__project=self.object.project, locale=self.object.locale 203 ) 204 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pontoon/localizations/views.py b/pontoon/localizations/views.py --- a/pontoon/localizations/views.py +++ b/pontoon/localizations/views.py @@ -71,6 +71,9 @@ slug=slug, ) + # Check if ProjectLocale exists + get_object_or_404(ProjectLocale, locale=locale, project=project) + # Amend the parts dict with latest activity info. translatedresources_qs = TranslatedResource.objects.filter( resource__project=project, locale=locale @@ -150,6 +153,9 @@ locale = get_object_or_404(Locale, code=code) project = get_object_or_404(Project.objects.visible_for(request.user), slug=slug) + # Check if ProjectLocale exists + get_object_or_404(ProjectLocale, locale=locale, project=project) + if not project.tags_enabled: raise Http404 @@ -174,6 +180,8 @@ if not settings.ENABLE_INSIGHTS: raise ImproperlyConfigured("ENABLE_INSIGHTS variable not set in settings.") + get_object_or_404(Locale, code=code) + get_object_or_404(Project.objects.visible_for(request.user), slug=slug) pl = get_object_or_404(ProjectLocale, locale__code=code, project__slug=slug) insights = get_insights(locale=pl.locale, project=pl.project) @@ -188,6 +196,10 @@ template_name = "localizations/includes/contributors.html" def get_object(self): + get_object_or_404(Locale, code=self.kwargs["code"]) + get_object_or_404( + Project.objects.visible_for(self.request.user), slug=self.kwargs["slug"] + ) return get_object_or_404( ProjectLocale, locale__code=self.kwargs["code"],
{"golden_diff": "diff --git a/pontoon/localizations/views.py b/pontoon/localizations/views.py\n--- a/pontoon/localizations/views.py\n+++ b/pontoon/localizations/views.py\n@@ -71,6 +71,9 @@\n slug=slug,\n )\n \n+ # Check if ProjectLocale exists\n+ get_object_or_404(ProjectLocale, locale=locale, project=project)\n+\n # Amend the parts dict with latest activity info.\n translatedresources_qs = TranslatedResource.objects.filter(\n resource__project=project, locale=locale\n@@ -150,6 +153,9 @@\n locale = get_object_or_404(Locale, code=code)\n project = get_object_or_404(Project.objects.visible_for(request.user), slug=slug)\n \n+ # Check if ProjectLocale exists\n+ get_object_or_404(ProjectLocale, locale=locale, project=project)\n+\n if not project.tags_enabled:\n raise Http404\n \n@@ -174,6 +180,8 @@\n if not settings.ENABLE_INSIGHTS:\n raise ImproperlyConfigured(\"ENABLE_INSIGHTS variable not set in settings.\")\n \n+ get_object_or_404(Locale, code=code)\n+ get_object_or_404(Project.objects.visible_for(request.user), slug=slug)\n pl = get_object_or_404(ProjectLocale, locale__code=code, project__slug=slug)\n insights = get_insights(locale=pl.locale, project=pl.project)\n \n@@ -188,6 +196,10 @@\n template_name = \"localizations/includes/contributors.html\"\n \n def get_object(self):\n+ get_object_or_404(Locale, code=self.kwargs[\"code\"])\n+ get_object_or_404(\n+ Project.objects.visible_for(self.request.user), slug=self.kwargs[\"slug\"]\n+ )\n return get_object_or_404(\n ProjectLocale,\n locale__code=self.kwargs[\"code\"],\n", "issue": "Project+locale AJAX endpoint logic is not uniform\nAt the moment, `/:code/:slug/ajax/` and `/:code/:slug/ajax/tags/` check if the locale & project exist and are visible to the user, while `/:code/:slug/ajax/contributors/` and `/:code/:slug/ajax/insights/` check that the ProjectLocale exists. 
[All of these](https://github.com/mozilla/pontoon/blob/master/pontoon/localizations/views.py) should check both conditions.\n", "before_files": [{"content": "import math\nfrom operator import attrgetter\nfrom django.conf import settings\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.db.models import Q\nfrom django.http import Http404, HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404, render\nfrom django.views.generic.detail import DetailView\n\nfrom pontoon.base.models import (\n Locale,\n Project,\n ProjectLocale,\n TranslatedResource,\n)\nfrom pontoon.base.utils import (\n require_AJAX,\n get_project_or_redirect,\n get_locale_or_redirect,\n)\nfrom pontoon.contributors.views import ContributorsMixin\nfrom pontoon.insights.utils import get_insights\nfrom pontoon.tags.utils import TagsTool\n\n\ndef localization(request, code, slug):\n \"\"\"Locale-project overview.\"\"\"\n locale = get_locale_or_redirect(\n code, \"pontoon.localizations.localization\", \"code\", slug=slug\n )\n if isinstance(locale, HttpResponseRedirect):\n return locale\n\n project = get_project_or_redirect(\n slug, \"pontoon.localizations.localization\", \"slug\", request.user, code=code\n )\n if isinstance(project, HttpResponseRedirect):\n return project\n\n project_locale = get_object_or_404(\n ProjectLocale,\n locale=locale,\n project=project,\n )\n\n resource_count = len(locale.parts_stats(project)) - 1\n\n return render(\n request,\n \"localizations/localization.html\",\n {\n \"locale\": locale,\n \"project\": project,\n \"project_locale\": project_locale,\n \"resource_count\": resource_count,\n \"tags_count\": (\n project.tag_set.filter(resources__isnull=False).distinct().count()\n if project.tags_enabled\n else None\n ),\n },\n )\n\n\n@require_AJAX\ndef ajax_resources(request, code, slug):\n \"\"\"Resources tab.\"\"\"\n locale = get_object_or_404(Locale, code=code)\n project = get_object_or_404(\n Project.objects.visible_for(request.user).available(),\n slug=slug,\n )\n\n # Amend the parts dict with latest activity info.\n translatedresources_qs = TranslatedResource.objects.filter(\n resource__project=project, locale=locale\n ).prefetch_related(\"resource\", \"latest_translation__user\")\n\n if not len(translatedresources_qs):\n raise Http404\n\n translatedresources = {s.resource.path: s for s in translatedresources_qs}\n translatedresources = dict(list(translatedresources.items()))\n parts = locale.parts_stats(project)\n\n resource_priority_map = project.resource_priority_map()\n\n for part in parts:\n part[\"resource__priority\"] = resource_priority_map.get(part[\"title\"], None)\n\n translatedresource = translatedresources.get(part[\"title\"], None)\n if translatedresource and translatedresource.latest_translation:\n part[\n \"latest_activity\"\n ] = translatedresource.latest_translation.latest_activity\n else:\n part[\"latest_activity\"] = None\n\n part[\"chart\"] = {\n \"unreviewed_strings\": part[\"unreviewed_strings\"],\n \"pretranslated_strings\": part[\"pretranslated_strings\"],\n \"strings_with_errors\": part[\"strings_with_errors\"],\n \"strings_with_warnings\": part[\"strings_with_warnings\"],\n \"total_strings\": part[\"resource__total_strings\"],\n \"approved_strings\": part[\"approved_strings\"],\n \"approved_share\": round(\n part[\"approved_strings\"] / part[\"resource__total_strings\"] * 100\n ),\n \"unreviewed_share\": round(\n part[\"unreviewed_strings\"] / part[\"resource__total_strings\"] * 100\n ),\n \"pretranslated_share\": round(\n 
part[\"pretranslated_strings\"] / part[\"resource__total_strings\"] * 100\n ),\n \"errors_share\": round(\n part[\"strings_with_errors\"] / part[\"resource__total_strings\"] * 100\n ),\n \"warnings_share\": round(\n part[\"strings_with_warnings\"] / part[\"resource__total_strings\"] * 100\n ),\n \"completion_percent\": int(\n math.floor(\n (\n part[\"approved_strings\"]\n + part[\"pretranslated_strings\"]\n + part[\"strings_with_warnings\"]\n )\n / part[\"resource__total_strings\"]\n * 100\n )\n ),\n }\n\n return render(\n request,\n \"localizations/includes/resources.html\",\n {\n \"locale\": locale,\n \"project\": project,\n \"resources\": parts,\n \"deadline\": any(part[\"resource__deadline\"] for part in parts),\n \"priority\": any(part[\"resource__priority\"] for part in parts),\n },\n )\n\n\n@require_AJAX\ndef ajax_tags(request, code, slug):\n \"\"\"Tags tab.\"\"\"\n locale = get_object_or_404(Locale, code=code)\n project = get_object_or_404(Project.objects.visible_for(request.user), slug=slug)\n\n if not project.tags_enabled:\n raise Http404\n\n tags_tool = TagsTool(\n locales=[locale],\n projects=[project],\n priority=True,\n )\n\n tags = sorted(tags_tool, key=attrgetter(\"priority\"), reverse=True)\n\n return render(\n request,\n \"localizations/includes/tags.html\",\n {\"locale\": locale, \"project\": project, \"tags\": tags},\n )\n\n\n@require_AJAX\ndef ajax_insights(request, code, slug):\n \"\"\"Insights tab.\"\"\"\n if not settings.ENABLE_INSIGHTS:\n raise ImproperlyConfigured(\"ENABLE_INSIGHTS variable not set in settings.\")\n\n pl = get_object_or_404(ProjectLocale, locale__code=code, project__slug=slug)\n insights = get_insights(locale=pl.locale, project=pl.project)\n\n return render(request, \"localizations/includes/insights.html\", insights)\n\n\nclass LocalizationContributorsView(ContributorsMixin, DetailView):\n \"\"\"\n Renders view of contributors for the localization.\n \"\"\"\n\n template_name = \"localizations/includes/contributors.html\"\n\n def get_object(self):\n return get_object_or_404(\n ProjectLocale,\n locale__code=self.kwargs[\"code\"],\n project__slug=self.kwargs[\"slug\"],\n )\n\n def get_context_object_name(self, obj):\n return \"projectlocale\"\n\n def contributors_filter(self, **kwargs):\n return Q(\n entity__resource__project=self.object.project, locale=self.object.locale\n )\n", "path": "pontoon/localizations/views.py"}], "after_files": [{"content": "import math\nfrom operator import attrgetter\nfrom django.conf import settings\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.db.models import Q\nfrom django.http import Http404, HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404, render\nfrom django.views.generic.detail import DetailView\n\nfrom pontoon.base.models import (\n Locale,\n Project,\n ProjectLocale,\n TranslatedResource,\n)\nfrom pontoon.base.utils import (\n require_AJAX,\n get_project_or_redirect,\n get_locale_or_redirect,\n)\nfrom pontoon.contributors.views import ContributorsMixin\nfrom pontoon.insights.utils import get_insights\nfrom pontoon.tags.utils import TagsTool\n\n\ndef localization(request, code, slug):\n \"\"\"Locale-project overview.\"\"\"\n locale = get_locale_or_redirect(\n code, \"pontoon.localizations.localization\", \"code\", slug=slug\n )\n if isinstance(locale, HttpResponseRedirect):\n return locale\n\n project = get_project_or_redirect(\n slug, \"pontoon.localizations.localization\", \"slug\", request.user, code=code\n )\n if isinstance(project, HttpResponseRedirect):\n return 
project\n\n project_locale = get_object_or_404(\n ProjectLocale,\n locale=locale,\n project=project,\n )\n\n resource_count = len(locale.parts_stats(project)) - 1\n\n return render(\n request,\n \"localizations/localization.html\",\n {\n \"locale\": locale,\n \"project\": project,\n \"project_locale\": project_locale,\n \"resource_count\": resource_count,\n \"tags_count\": (\n project.tag_set.filter(resources__isnull=False).distinct().count()\n if project.tags_enabled\n else None\n ),\n },\n )\n\n\n@require_AJAX\ndef ajax_resources(request, code, slug):\n \"\"\"Resources tab.\"\"\"\n locale = get_object_or_404(Locale, code=code)\n project = get_object_or_404(\n Project.objects.visible_for(request.user).available(),\n slug=slug,\n )\n\n # Check if ProjectLocale exists\n get_object_or_404(ProjectLocale, locale=locale, project=project)\n\n # Amend the parts dict with latest activity info.\n translatedresources_qs = TranslatedResource.objects.filter(\n resource__project=project, locale=locale\n ).prefetch_related(\"resource\", \"latest_translation__user\")\n\n if not len(translatedresources_qs):\n raise Http404\n\n translatedresources = {s.resource.path: s for s in translatedresources_qs}\n translatedresources = dict(list(translatedresources.items()))\n parts = locale.parts_stats(project)\n\n resource_priority_map = project.resource_priority_map()\n\n for part in parts:\n part[\"resource__priority\"] = resource_priority_map.get(part[\"title\"], None)\n\n translatedresource = translatedresources.get(part[\"title\"], None)\n if translatedresource and translatedresource.latest_translation:\n part[\n \"latest_activity\"\n ] = translatedresource.latest_translation.latest_activity\n else:\n part[\"latest_activity\"] = None\n\n part[\"chart\"] = {\n \"unreviewed_strings\": part[\"unreviewed_strings\"],\n \"pretranslated_strings\": part[\"pretranslated_strings\"],\n \"strings_with_errors\": part[\"strings_with_errors\"],\n \"strings_with_warnings\": part[\"strings_with_warnings\"],\n \"total_strings\": part[\"resource__total_strings\"],\n \"approved_strings\": part[\"approved_strings\"],\n \"approved_share\": round(\n part[\"approved_strings\"] / part[\"resource__total_strings\"] * 100\n ),\n \"unreviewed_share\": round(\n part[\"unreviewed_strings\"] / part[\"resource__total_strings\"] * 100\n ),\n \"pretranslated_share\": round(\n part[\"pretranslated_strings\"] / part[\"resource__total_strings\"] * 100\n ),\n \"errors_share\": round(\n part[\"strings_with_errors\"] / part[\"resource__total_strings\"] * 100\n ),\n \"warnings_share\": round(\n part[\"strings_with_warnings\"] / part[\"resource__total_strings\"] * 100\n ),\n \"completion_percent\": int(\n math.floor(\n (\n part[\"approved_strings\"]\n + part[\"pretranslated_strings\"]\n + part[\"strings_with_warnings\"]\n )\n / part[\"resource__total_strings\"]\n * 100\n )\n ),\n }\n\n return render(\n request,\n \"localizations/includes/resources.html\",\n {\n \"locale\": locale,\n \"project\": project,\n \"resources\": parts,\n \"deadline\": any(part[\"resource__deadline\"] for part in parts),\n \"priority\": any(part[\"resource__priority\"] for part in parts),\n },\n )\n\n\n@require_AJAX\ndef ajax_tags(request, code, slug):\n \"\"\"Tags tab.\"\"\"\n locale = get_object_or_404(Locale, code=code)\n project = get_object_or_404(Project.objects.visible_for(request.user), slug=slug)\n\n # Check if ProjectLocale exists\n get_object_or_404(ProjectLocale, locale=locale, project=project)\n\n if not project.tags_enabled:\n raise Http404\n\n tags_tool = 
TagsTool(\n locales=[locale],\n projects=[project],\n priority=True,\n )\n\n tags = sorted(tags_tool, key=attrgetter(\"priority\"), reverse=True)\n\n return render(\n request,\n \"localizations/includes/tags.html\",\n {\"locale\": locale, \"project\": project, \"tags\": tags},\n )\n\n\n@require_AJAX\ndef ajax_insights(request, code, slug):\n \"\"\"Insights tab.\"\"\"\n if not settings.ENABLE_INSIGHTS:\n raise ImproperlyConfigured(\"ENABLE_INSIGHTS variable not set in settings.\")\n\n get_object_or_404(Locale, code=code)\n get_object_or_404(Project.objects.visible_for(request.user), slug=slug)\n pl = get_object_or_404(ProjectLocale, locale__code=code, project__slug=slug)\n insights = get_insights(locale=pl.locale, project=pl.project)\n\n return render(request, \"localizations/includes/insights.html\", insights)\n\n\nclass LocalizationContributorsView(ContributorsMixin, DetailView):\n \"\"\"\n Renders view of contributors for the localization.\n \"\"\"\n\n template_name = \"localizations/includes/contributors.html\"\n\n def get_object(self):\n get_object_or_404(Locale, code=self.kwargs[\"code\"])\n get_object_or_404(\n Project.objects.visible_for(self.request.user), slug=self.kwargs[\"slug\"]\n )\n return get_object_or_404(\n ProjectLocale,\n locale__code=self.kwargs[\"code\"],\n project__slug=self.kwargs[\"slug\"],\n )\n\n def get_context_object_name(self, obj):\n return \"projectlocale\"\n\n def contributors_filter(self, **kwargs):\n return Q(\n entity__resource__project=self.object.project, locale=self.object.locale\n )\n", "path": "pontoon/localizations/views.py"}]}
2,257
443
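The pontoon golden diff above makes every project+locale AJAX endpoint run the same two checks: the project must be visible to the requesting user, and the ProjectLocale must exist. A hedged sketch of how that shared guard could be factored into one helper; the helper name is invented here, but the models and the `visible_for()` manager method are the ones shown in the record:

```python
# Sketch of the uniform guard the golden diff adds endpoint-by-endpoint.
# Assumes the Locale, Project and ProjectLocale models from pontoon.base.models
# and Project.objects.visible_for() as used in the record's views.
from django.shortcuts import get_object_or_404

from pontoon.base.models import Locale, Project, ProjectLocale


def get_visible_project_locale(request, code, slug):
    """Raise Http404 unless the locale, a user-visible project, and their ProjectLocale all exist."""
    locale = get_object_or_404(Locale, code=code)
    project = get_object_or_404(Project.objects.visible_for(request.user), slug=slug)
    project_locale = get_object_or_404(ProjectLocale, locale=locale, project=project)
    return locale, project, project_locale
```

Each AJAX view (resources, tags, insights, contributors) could then open with a single `locale, project, _ = get_visible_project_locale(request, code, slug)` call instead of repeating the checks inline, which matches the issue's intent even though the accepted diff keeps the checks per-view.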
gh_patches_debug_6077
rasdani/github-patches
git_diff
learningequality__kolibri-3759
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- remove translations for user kinds on backend ### Observed behavior In role kinds we use the string "Classroom Assignable Coach": https://github.com/learningequality/kolibri/blob/develop/kolibri/auth/constants/role_kinds.py#L15 This string is not something that should be user-facing ### Expected behavior implementation details hidden from user ### User-facing consequences confusing, inconsistent terminology ### Context https://crowdin.com/translate/kolibri/498/en-es#37506 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `kolibri/auth/constants/role_kinds.py` Content: ``` 1 """ 2 This module contains constants representing the kinds of "roles" that a user can have with respect to a Collection. 3 """ 4 from __future__ import unicode_literals 5 6 from django.utils.translation import ugettext_lazy as _ 7 8 ADMIN = "admin" 9 COACH = "coach" 10 ASSIGNABLE_COACH = "classroom assignable coach" 11 12 choices = ( 13 (ADMIN, _("Admin")), 14 (COACH, _("Coach")), 15 (ASSIGNABLE_COACH, _("Classroom Assignable Coach")), 16 ) 17 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/kolibri/auth/constants/role_kinds.py b/kolibri/auth/constants/role_kinds.py --- a/kolibri/auth/constants/role_kinds.py +++ b/kolibri/auth/constants/role_kinds.py @@ -3,14 +3,12 @@ """ from __future__ import unicode_literals -from django.utils.translation import ugettext_lazy as _ - ADMIN = "admin" COACH = "coach" ASSIGNABLE_COACH = "classroom assignable coach" choices = ( - (ADMIN, _("Admin")), - (COACH, _("Coach")), - (ASSIGNABLE_COACH, _("Classroom Assignable Coach")), + (ADMIN, "Admin"), + (COACH, "Coach"), + (ASSIGNABLE_COACH, "Classroom Assignable Coach"), )
{"golden_diff": "diff --git a/kolibri/auth/constants/role_kinds.py b/kolibri/auth/constants/role_kinds.py\n--- a/kolibri/auth/constants/role_kinds.py\n+++ b/kolibri/auth/constants/role_kinds.py\n@@ -3,14 +3,12 @@\n \"\"\"\n from __future__ import unicode_literals\n \n-from django.utils.translation import ugettext_lazy as _\n-\n ADMIN = \"admin\"\n COACH = \"coach\"\n ASSIGNABLE_COACH = \"classroom assignable coach\"\n \n choices = (\n- (ADMIN, _(\"Admin\")),\n- (COACH, _(\"Coach\")),\n- (ASSIGNABLE_COACH, _(\"Classroom Assignable Coach\")),\n+ (ADMIN, \"Admin\"),\n+ (COACH, \"Coach\"),\n+ (ASSIGNABLE_COACH, \"Classroom Assignable Coach\"),\n )\n", "issue": "remove translations for user kinds on backend\n### Observed behavior\r\n\r\nIn role kinds we use the string \"Classroom Assignable Coach\": https://github.com/learningequality/kolibri/blob/develop/kolibri/auth/constants/role_kinds.py#L15\r\n\r\nThis string is not something that should be user-facing\r\n\r\n### Expected behavior\r\n\r\nimplementation details hidden from user\r\n\r\n### User-facing consequences\r\n\r\nconfusing, inconsistent terminology\r\n\r\n\r\n### Context\r\n\r\nhttps://crowdin.com/translate/kolibri/498/en-es#37506\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nThis module contains constants representing the kinds of \"roles\" that a user can have with respect to a Collection.\n\"\"\"\nfrom __future__ import unicode_literals\n\nfrom django.utils.translation import ugettext_lazy as _\n\nADMIN = \"admin\"\nCOACH = \"coach\"\nASSIGNABLE_COACH = \"classroom assignable coach\"\n\nchoices = (\n (ADMIN, _(\"Admin\")),\n (COACH, _(\"Coach\")),\n (ASSIGNABLE_COACH, _(\"Classroom Assignable Coach\")),\n)\n", "path": "kolibri/auth/constants/role_kinds.py"}], "after_files": [{"content": "\"\"\"\nThis module contains constants representing the kinds of \"roles\" that a user can have with respect to a Collection.\n\"\"\"\nfrom __future__ import unicode_literals\n\nADMIN = \"admin\"\nCOACH = \"coach\"\nASSIGNABLE_COACH = \"classroom assignable coach\"\n\nchoices = (\n (ADMIN, \"Admin\"),\n (COACH, \"Coach\"),\n (ASSIGNABLE_COACH, \"Classroom Assignable Coach\"),\n)\n", "path": "kolibri/auth/constants/role_kinds.py"}]}
504
177
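The kolibri diff above only strips `ugettext_lazy` from labels that were never meant to reach users; the tuples keep working anywhere plain `(value, label)` pairs are expected. A small sketch (outside any Django model, purely illustrative):

```python
# The untranslated role-kind constants from the record remain usable as-is.
ADMIN = "admin"
COACH = "coach"
ASSIGNABLE_COACH = "classroom assignable coach"

choices = (
    (ADMIN, "Admin"),
    (COACH, "Coach"),
    (ASSIGNABLE_COACH, "Classroom Assignable Coach"),
)

# Labels stay available server-side without pulling in the i18n machinery.
labels = dict(choices)
assert labels[ASSIGNABLE_COACH] == "Classroom Assignable Coach"
```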
gh_patches_debug_31669
rasdani/github-patches
git_diff
oobabooga__text-generation-webui-628
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- silero_tts will not load if I am not connected to the internet ### Describe the bug I used to be able to use this extension offline, but now I can't load the extension if I am not online. If I am online the extension loads just fine. The actual language models is saved on my machine via the .cache file: C:\Users\myself\.cache\torch\hub\snakers4_silero-models_master\src\silero\model The model name is called v3_en.pt, it's being cached on my machine and when I load the extension with an internet connection the miniconda console says that it's using the cached model, so I don't know why I NEED to be connected to the internet for it to work. ### Is there an existing issue for this? - [x] I have searched the existing issues ### Reproduction Run this (change your install location as necessary) with and without an internet connection. cd F:\OoBaboogaMarch17\text-generation-webui conda activate textgen python .\server.py --auto-devices --gptq-bits 4 --cai-chat --gptq-model-type LLaMa --extension silero_tts ### Screenshot I'm including two screenshots, one when I am connected to the internet, and one when I am not connected to the internet. ![internetConnection](https://user-images.githubusercontent.com/6488699/227263118-cf13d103-975d-4103-b1d7-a0763e492bc7.png) ![no internet connection](https://user-images.githubusercontent.com/6488699/227263135-017e10b7-7164-465f-b2be-af7f837a9d87.png) ### Logs ```shell See screenshots ``` ### System Info ```shell Window 10, 4090, i9 13900, windows mode not wsl ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `extensions/silero_tts/script.py` Content: ``` 1 import time 2 from pathlib import Path 3 4 import gradio as gr 5 import torch 6 from extensions.silero_tts import tts_preprocessor 7 from modules import chat, shared 8 from modules.html_generator import chat_html_wrapper 9 10 torch._C._jit_set_profiling_mode(False) 11 12 13 params = { 14 'activate': True, 15 'speaker': 'en_56', 16 'language': 'en', 17 'model_id': 'v3_en', 18 'sample_rate': 48000, 19 'device': 'cpu', 20 'show_text': False, 21 'autoplay': True, 22 'voice_pitch': 'medium', 23 'voice_speed': 'medium', 24 } 25 26 current_params = params.copy() 27 voices_by_gender = ['en_99', 'en_45', 'en_18', 'en_117', 'en_49', 'en_51', 'en_68', 'en_0', 'en_26', 'en_56', 'en_74', 'en_5', 'en_38', 'en_53', 'en_21', 'en_37', 'en_107', 'en_10', 'en_82', 'en_16', 'en_41', 'en_12', 'en_67', 'en_61', 'en_14', 'en_11', 'en_39', 'en_52', 'en_24', 'en_97', 'en_28', 'en_72', 'en_94', 'en_36', 'en_4', 'en_43', 'en_88', 'en_25', 'en_65', 'en_6', 'en_44', 'en_75', 'en_91', 'en_60', 'en_109', 'en_85', 'en_101', 'en_108', 'en_50', 'en_96', 'en_64', 'en_92', 'en_76', 'en_33', 'en_116', 'en_48', 'en_98', 'en_86', 'en_62', 'en_54', 'en_95', 'en_55', 'en_111', 'en_3', 'en_83', 'en_8', 'en_47', 'en_59', 'en_1', 'en_2', 'en_7', 'en_9', 'en_13', 'en_15', 'en_17', 'en_19', 'en_20', 'en_22', 'en_23', 'en_27', 'en_29', 'en_30', 'en_31', 'en_32', 'en_34', 'en_35', 'en_40', 'en_42', 'en_46', 'en_57', 'en_58', 'en_63', 'en_66', 'en_69', 'en_70', 'en_71', 'en_73', 'en_77', 'en_78', 'en_79', 'en_80', 'en_81', 'en_84', 'en_87', 'en_89', 'en_90', 'en_93', 'en_100', 'en_102', 'en_103', 'en_104', 'en_105', 'en_106', 'en_110', 'en_112', 'en_113', 'en_114', 'en_115'] 28 voice_pitches = ['x-low', 'low', 'medium', 'high', 'x-high'] 29 
voice_speeds = ['x-slow', 'slow', 'medium', 'fast', 'x-fast'] 30 streaming_state = shared.args.no_stream # remember if chat streaming was enabled 31 32 # Used for making text xml compatible, needed for voice pitch and speed control 33 table = str.maketrans({ 34 "<": "&lt;", 35 ">": "&gt;", 36 "&": "&amp;", 37 "'": "&apos;", 38 '"': "&quot;", 39 }) 40 41 42 def xmlesc(txt): 43 return txt.translate(table) 44 45 46 def load_model(): 47 model, example_text = torch.hub.load(repo_or_dir='snakers4/silero-models', model='silero_tts', language=params['language'], speaker=params['model_id']) 48 model.to(params['device']) 49 return model 50 51 52 model = load_model() 53 54 55 def remove_tts_from_history(name1, name2, mode): 56 for i, entry in enumerate(shared.history['internal']): 57 shared.history['visible'][i] = [shared.history['visible'][i][0], entry[1]] 58 return chat_html_wrapper(shared.history['visible'], name1, name2, mode) 59 60 61 def toggle_text_in_history(name1, name2, mode): 62 for i, entry in enumerate(shared.history['visible']): 63 visible_reply = entry[1] 64 if visible_reply.startswith('<audio'): 65 if params['show_text']: 66 reply = shared.history['internal'][i][1] 67 shared.history['visible'][i] = [shared.history['visible'][i][0], f"{visible_reply.split('</audio>')[0]}</audio>\n\n{reply}"] 68 else: 69 shared.history['visible'][i] = [shared.history['visible'][i][0], f"{visible_reply.split('</audio>')[0]}</audio>"] 70 return chat_html_wrapper(shared.history['visible'], name1, name2, mode) 71 72 73 def input_modifier(string): 74 """ 75 This function is applied to your text inputs before 76 they are fed into the model. 77 """ 78 79 # Remove autoplay from the last reply 80 if shared.is_chat() and len(shared.history['internal']) > 0: 81 shared.history['visible'][-1] = [shared.history['visible'][-1][0], shared.history['visible'][-1][1].replace('controls autoplay>', 'controls>')] 82 83 shared.processing_message = "*Is recording a voice message...*" 84 shared.args.no_stream = True # Disable streaming cause otherwise the audio output will stutter and begin anew every time the message is being updated 85 return string 86 87 88 def output_modifier(string): 89 """ 90 This function is applied to the model outputs. 91 """ 92 93 global model, current_params, streaming_state 94 95 for i in params: 96 if params[i] != current_params[i]: 97 model = load_model() 98 current_params = params.copy() 99 break 100 101 if not params['activate']: 102 return string 103 104 original_string = string 105 string = tts_preprocessor.preprocess(string) 106 107 if string == '': 108 string = '*Empty reply, try regenerating*' 109 else: 110 output_file = Path(f'extensions/silero_tts/outputs/{shared.character}_{int(time.time())}.wav') 111 prosody = '<prosody rate="{}" pitch="{}">'.format(params['voice_speed'], params['voice_pitch']) 112 silero_input = f'<speak>{prosody}{xmlesc(string)}</prosody></speak>' 113 model.save_wav(ssml_text=silero_input, speaker=params['speaker'], sample_rate=int(params['sample_rate']), audio_path=str(output_file)) 114 115 autoplay = 'autoplay' if params['autoplay'] else '' 116 string = f'<audio src="file/{output_file.as_posix()}" controls {autoplay}></audio>' 117 if params['show_text']: 118 string += f'\n\n{original_string}' 119 120 shared.processing_message = "*Is typing...*" 121 shared.args.no_stream = streaming_state # restore the streaming option to the previous value 122 return string 123 124 125 def bot_prefix_modifier(string): 126 """ 127 This function is only applied in chat mode. 
It modifies 128 the prefix text for the Bot and can be used to bias its 129 behavior. 130 """ 131 132 return string 133 134 135 def ui(): 136 # Gradio elements 137 with gr.Accordion("Silero TTS"): 138 with gr.Row(): 139 activate = gr.Checkbox(value=params['activate'], label='Activate TTS') 140 autoplay = gr.Checkbox(value=params['autoplay'], label='Play TTS automatically') 141 142 show_text = gr.Checkbox(value=params['show_text'], label='Show message text under audio player') 143 voice = gr.Dropdown(value=params['speaker'], choices=voices_by_gender, label='TTS voice') 144 with gr.Row(): 145 v_pitch = gr.Dropdown(value=params['voice_pitch'], choices=voice_pitches, label='Voice pitch') 146 v_speed = gr.Dropdown(value=params['voice_speed'], choices=voice_speeds, label='Voice speed') 147 148 with gr.Row(): 149 convert = gr.Button('Permanently replace audios with the message texts') 150 convert_cancel = gr.Button('Cancel', visible=False) 151 convert_confirm = gr.Button('Confirm (cannot be undone)', variant="stop", visible=False) 152 153 # Convert history with confirmation 154 convert_arr = [convert_confirm, convert, convert_cancel] 155 convert.click(lambda: [gr.update(visible=True), gr.update(visible=False), gr.update(visible=True)], None, convert_arr) 156 convert_confirm.click(lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, convert_arr) 157 convert_confirm.click(remove_tts_from_history, [shared.gradio[k] for k in ['name1', 'name2', 'Chat mode']], shared.gradio['display']) 158 convert_confirm.click(lambda: chat.save_history(timestamp=False), [], [], show_progress=False) 159 convert_cancel.click(lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, convert_arr) 160 161 # Toggle message text in history 162 show_text.change(lambda x: params.update({"show_text": x}), show_text, None) 163 show_text.change(toggle_text_in_history, [shared.gradio[k] for k in ['name1', 'name2', 'Chat mode']], shared.gradio['display']) 164 show_text.change(lambda: chat.save_history(timestamp=False), [], [], show_progress=False) 165 166 # Event functions to update the parameters in the backend 167 activate.change(lambda x: params.update({"activate": x}), activate, None) 168 autoplay.change(lambda x: params.update({"autoplay": x}), autoplay, None) 169 voice.change(lambda x: params.update({"speaker": x}), voice, None) 170 v_pitch.change(lambda x: params.update({"voice_pitch": x}), v_pitch, None) 171 v_speed.change(lambda x: params.update({"voice_speed": x}), v_speed, None) 172 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/extensions/silero_tts/script.py b/extensions/silero_tts/script.py --- a/extensions/silero_tts/script.py +++ b/extensions/silero_tts/script.py @@ -21,6 +21,7 @@ 'autoplay': True, 'voice_pitch': 'medium', 'voice_speed': 'medium', + 'local_cache_path': '' # User can override the default cache path to something other via settings.json } current_params = params.copy() @@ -44,14 +45,18 @@ def load_model(): - model, example_text = torch.hub.load(repo_or_dir='snakers4/silero-models', model='silero_tts', language=params['language'], speaker=params['model_id']) + torch_cache_path = torch.hub.get_dir() if params['local_cache_path'] == '' else params['local_cache_path'] + model_path = torch_cache_path + "/snakers4_silero-models_master/src/silero/model/" + params['model_id'] + ".pt" + if Path(model_path).is_file(): + print(f'\nUsing Silero TTS cached checkpoint found at {torch_cache_path}') + model, example_text = torch.hub.load(repo_or_dir=torch_cache_path + '/snakers4_silero-models_master/', model='silero_tts', language=params['language'], speaker=params['model_id'], source='local', path=model_path, force_reload=True) + else: + print(f'\nSilero TTS cache not found at {torch_cache_path}. Attempting to download...') + model, example_text = torch.hub.load(repo_or_dir='snakers4/silero-models', model='silero_tts', language=params['language'], speaker=params['model_id']) model.to(params['device']) return model -model = load_model() - - def remove_tts_from_history(name1, name2, mode): for i, entry in enumerate(shared.history['internal']): shared.history['visible'][i] = [shared.history['visible'][i][0], entry[1]] @@ -132,6 +137,11 @@ return string +def setup(): + global model + model = load_model() + + def ui(): # Gradio elements with gr.Accordion("Silero TTS"):
{"golden_diff": "diff --git a/extensions/silero_tts/script.py b/extensions/silero_tts/script.py\n--- a/extensions/silero_tts/script.py\n+++ b/extensions/silero_tts/script.py\n@@ -21,6 +21,7 @@\n 'autoplay': True,\n 'voice_pitch': 'medium',\n 'voice_speed': 'medium',\n+ 'local_cache_path': '' # User can override the default cache path to something other via settings.json\n }\n \n current_params = params.copy()\n@@ -44,14 +45,18 @@\n \n \n def load_model():\n- model, example_text = torch.hub.load(repo_or_dir='snakers4/silero-models', model='silero_tts', language=params['language'], speaker=params['model_id'])\n+ torch_cache_path = torch.hub.get_dir() if params['local_cache_path'] == '' else params['local_cache_path']\n+ model_path = torch_cache_path + \"/snakers4_silero-models_master/src/silero/model/\" + params['model_id'] + \".pt\"\n+ if Path(model_path).is_file():\n+ print(f'\\nUsing Silero TTS cached checkpoint found at {torch_cache_path}')\n+ model, example_text = torch.hub.load(repo_or_dir=torch_cache_path + '/snakers4_silero-models_master/', model='silero_tts', language=params['language'], speaker=params['model_id'], source='local', path=model_path, force_reload=True)\n+ else:\n+ print(f'\\nSilero TTS cache not found at {torch_cache_path}. Attempting to download...')\n+ model, example_text = torch.hub.load(repo_or_dir='snakers4/silero-models', model='silero_tts', language=params['language'], speaker=params['model_id'])\n model.to(params['device'])\n return model\n \n \n-model = load_model()\n-\n-\n def remove_tts_from_history(name1, name2, mode):\n for i, entry in enumerate(shared.history['internal']):\n shared.history['visible'][i] = [shared.history['visible'][i][0], entry[1]]\n@@ -132,6 +137,11 @@\n return string\n \n \n+def setup():\n+ global model\n+ model = load_model()\n+\n+\n def ui():\n # Gradio elements\n with gr.Accordion(\"Silero TTS\"):\n", "issue": "silero_tts will not load if I am not connected to the internet\n### Describe the bug\n\nI used to be able to use this extension offline, but now I can't load the extension if I am not online. If I am online the extension loads just fine. 
The actual language models is saved on my machine via the .cache file: C:\\Users\\myself\\.cache\\torch\\hub\\snakers4_silero-models_master\\src\\silero\\model\r\n\r\nThe model name is called v3_en.pt, it's being cached on my machine and when I load the extension with an internet connection the miniconda console says that it's using the cached model, so I don't know why I NEED to be connected to the internet for it to work.\n\n### Is there an existing issue for this?\n\n- [x] I have searched the existing issues\n\n### Reproduction\n\nRun this (change your install location as necessary) with and without an internet connection.\r\n\r\ncd F:\\OoBaboogaMarch17\\text-generation-webui\r\nconda activate textgen\r\npython .\\server.py --auto-devices --gptq-bits 4 --cai-chat --gptq-model-type LLaMa --extension silero_tts \n\n### Screenshot\n\nI'm including two screenshots, one when I am connected to the internet, and one when I am not connected to the internet.\r\n\r\n![internetConnection](https://user-images.githubusercontent.com/6488699/227263118-cf13d103-975d-4103-b1d7-a0763e492bc7.png)\r\n![no internet connection](https://user-images.githubusercontent.com/6488699/227263135-017e10b7-7164-465f-b2be-af7f837a9d87.png)\r\n\n\n### Logs\n\n```shell\nSee screenshots\n```\n\n\n### System Info\n\n```shell\nWindow 10, 4090, i9 13900, windows mode not wsl\n```\n\n", "before_files": [{"content": "import time\nfrom pathlib import Path\n\nimport gradio as gr\nimport torch\nfrom extensions.silero_tts import tts_preprocessor\nfrom modules import chat, shared\nfrom modules.html_generator import chat_html_wrapper\n\ntorch._C._jit_set_profiling_mode(False)\n\n\nparams = {\n 'activate': True,\n 'speaker': 'en_56',\n 'language': 'en',\n 'model_id': 'v3_en',\n 'sample_rate': 48000,\n 'device': 'cpu',\n 'show_text': False,\n 'autoplay': True,\n 'voice_pitch': 'medium',\n 'voice_speed': 'medium',\n}\n\ncurrent_params = params.copy()\nvoices_by_gender = ['en_99', 'en_45', 'en_18', 'en_117', 'en_49', 'en_51', 'en_68', 'en_0', 'en_26', 'en_56', 'en_74', 'en_5', 'en_38', 'en_53', 'en_21', 'en_37', 'en_107', 'en_10', 'en_82', 'en_16', 'en_41', 'en_12', 'en_67', 'en_61', 'en_14', 'en_11', 'en_39', 'en_52', 'en_24', 'en_97', 'en_28', 'en_72', 'en_94', 'en_36', 'en_4', 'en_43', 'en_88', 'en_25', 'en_65', 'en_6', 'en_44', 'en_75', 'en_91', 'en_60', 'en_109', 'en_85', 'en_101', 'en_108', 'en_50', 'en_96', 'en_64', 'en_92', 'en_76', 'en_33', 'en_116', 'en_48', 'en_98', 'en_86', 'en_62', 'en_54', 'en_95', 'en_55', 'en_111', 'en_3', 'en_83', 'en_8', 'en_47', 'en_59', 'en_1', 'en_2', 'en_7', 'en_9', 'en_13', 'en_15', 'en_17', 'en_19', 'en_20', 'en_22', 'en_23', 'en_27', 'en_29', 'en_30', 'en_31', 'en_32', 'en_34', 'en_35', 'en_40', 'en_42', 'en_46', 'en_57', 'en_58', 'en_63', 'en_66', 'en_69', 'en_70', 'en_71', 'en_73', 'en_77', 'en_78', 'en_79', 'en_80', 'en_81', 'en_84', 'en_87', 'en_89', 'en_90', 'en_93', 'en_100', 'en_102', 'en_103', 'en_104', 'en_105', 'en_106', 'en_110', 'en_112', 'en_113', 'en_114', 'en_115']\nvoice_pitches = ['x-low', 'low', 'medium', 'high', 'x-high']\nvoice_speeds = ['x-slow', 'slow', 'medium', 'fast', 'x-fast']\nstreaming_state = shared.args.no_stream # remember if chat streaming was enabled\n\n# Used for making text xml compatible, needed for voice pitch and speed control\ntable = str.maketrans({\n \"<\": \"&lt;\",\n \">\": \"&gt;\",\n \"&\": \"&amp;\",\n \"'\": \"&apos;\",\n '\"': \"&quot;\",\n})\n\n\ndef xmlesc(txt):\n return txt.translate(table)\n\n\ndef load_model():\n model, example_text = 
torch.hub.load(repo_or_dir='snakers4/silero-models', model='silero_tts', language=params['language'], speaker=params['model_id'])\n model.to(params['device'])\n return model\n\n\nmodel = load_model()\n\n\ndef remove_tts_from_history(name1, name2, mode):\n for i, entry in enumerate(shared.history['internal']):\n shared.history['visible'][i] = [shared.history['visible'][i][0], entry[1]]\n return chat_html_wrapper(shared.history['visible'], name1, name2, mode)\n\n\ndef toggle_text_in_history(name1, name2, mode):\n for i, entry in enumerate(shared.history['visible']):\n visible_reply = entry[1]\n if visible_reply.startswith('<audio'):\n if params['show_text']:\n reply = shared.history['internal'][i][1]\n shared.history['visible'][i] = [shared.history['visible'][i][0], f\"{visible_reply.split('</audio>')[0]}</audio>\\n\\n{reply}\"]\n else:\n shared.history['visible'][i] = [shared.history['visible'][i][0], f\"{visible_reply.split('</audio>')[0]}</audio>\"]\n return chat_html_wrapper(shared.history['visible'], name1, name2, mode)\n\n\ndef input_modifier(string):\n \"\"\"\n This function is applied to your text inputs before\n they are fed into the model.\n \"\"\"\n\n # Remove autoplay from the last reply\n if shared.is_chat() and len(shared.history['internal']) > 0:\n shared.history['visible'][-1] = [shared.history['visible'][-1][0], shared.history['visible'][-1][1].replace('controls autoplay>', 'controls>')]\n\n shared.processing_message = \"*Is recording a voice message...*\"\n shared.args.no_stream = True # Disable streaming cause otherwise the audio output will stutter and begin anew every time the message is being updated\n return string\n\n\ndef output_modifier(string):\n \"\"\"\n This function is applied to the model outputs.\n \"\"\"\n\n global model, current_params, streaming_state\n\n for i in params:\n if params[i] != current_params[i]:\n model = load_model()\n current_params = params.copy()\n break\n\n if not params['activate']:\n return string\n\n original_string = string\n string = tts_preprocessor.preprocess(string)\n\n if string == '':\n string = '*Empty reply, try regenerating*'\n else:\n output_file = Path(f'extensions/silero_tts/outputs/{shared.character}_{int(time.time())}.wav')\n prosody = '<prosody rate=\"{}\" pitch=\"{}\">'.format(params['voice_speed'], params['voice_pitch'])\n silero_input = f'<speak>{prosody}{xmlesc(string)}</prosody></speak>'\n model.save_wav(ssml_text=silero_input, speaker=params['speaker'], sample_rate=int(params['sample_rate']), audio_path=str(output_file))\n\n autoplay = 'autoplay' if params['autoplay'] else ''\n string = f'<audio src=\"file/{output_file.as_posix()}\" controls {autoplay}></audio>'\n if params['show_text']:\n string += f'\\n\\n{original_string}'\n\n shared.processing_message = \"*Is typing...*\"\n shared.args.no_stream = streaming_state # restore the streaming option to the previous value\n return string\n\n\ndef bot_prefix_modifier(string):\n \"\"\"\n This function is only applied in chat mode. 
It modifies\n the prefix text for the Bot and can be used to bias its\n behavior.\n \"\"\"\n\n return string\n\n\ndef ui():\n # Gradio elements\n with gr.Accordion(\"Silero TTS\"):\n with gr.Row():\n activate = gr.Checkbox(value=params['activate'], label='Activate TTS')\n autoplay = gr.Checkbox(value=params['autoplay'], label='Play TTS automatically')\n\n show_text = gr.Checkbox(value=params['show_text'], label='Show message text under audio player')\n voice = gr.Dropdown(value=params['speaker'], choices=voices_by_gender, label='TTS voice')\n with gr.Row():\n v_pitch = gr.Dropdown(value=params['voice_pitch'], choices=voice_pitches, label='Voice pitch')\n v_speed = gr.Dropdown(value=params['voice_speed'], choices=voice_speeds, label='Voice speed')\n\n with gr.Row():\n convert = gr.Button('Permanently replace audios with the message texts')\n convert_cancel = gr.Button('Cancel', visible=False)\n convert_confirm = gr.Button('Confirm (cannot be undone)', variant=\"stop\", visible=False)\n\n # Convert history with confirmation\n convert_arr = [convert_confirm, convert, convert_cancel]\n convert.click(lambda: [gr.update(visible=True), gr.update(visible=False), gr.update(visible=True)], None, convert_arr)\n convert_confirm.click(lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, convert_arr)\n convert_confirm.click(remove_tts_from_history, [shared.gradio[k] for k in ['name1', 'name2', 'Chat mode']], shared.gradio['display'])\n convert_confirm.click(lambda: chat.save_history(timestamp=False), [], [], show_progress=False)\n convert_cancel.click(lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, convert_arr)\n\n # Toggle message text in history\n show_text.change(lambda x: params.update({\"show_text\": x}), show_text, None)\n show_text.change(toggle_text_in_history, [shared.gradio[k] for k in ['name1', 'name2', 'Chat mode']], shared.gradio['display'])\n show_text.change(lambda: chat.save_history(timestamp=False), [], [], show_progress=False)\n\n # Event functions to update the parameters in the backend\n activate.change(lambda x: params.update({\"activate\": x}), activate, None)\n autoplay.change(lambda x: params.update({\"autoplay\": x}), autoplay, None)\n voice.change(lambda x: params.update({\"speaker\": x}), voice, None)\n v_pitch.change(lambda x: params.update({\"voice_pitch\": x}), v_pitch, None)\n v_speed.change(lambda x: params.update({\"voice_speed\": x}), v_speed, None)\n", "path": "extensions/silero_tts/script.py"}], "after_files": [{"content": "import time\nfrom pathlib import Path\n\nimport gradio as gr\nimport torch\nfrom extensions.silero_tts import tts_preprocessor\nfrom modules import chat, shared\nfrom modules.html_generator import chat_html_wrapper\n\ntorch._C._jit_set_profiling_mode(False)\n\n\nparams = {\n 'activate': True,\n 'speaker': 'en_56',\n 'language': 'en',\n 'model_id': 'v3_en',\n 'sample_rate': 48000,\n 'device': 'cpu',\n 'show_text': False,\n 'autoplay': True,\n 'voice_pitch': 'medium',\n 'voice_speed': 'medium',\n 'local_cache_path': '' # User can override the default cache path to something other via settings.json\n}\n\ncurrent_params = params.copy()\nvoices_by_gender = ['en_99', 'en_45', 'en_18', 'en_117', 'en_49', 'en_51', 'en_68', 'en_0', 'en_26', 'en_56', 'en_74', 'en_5', 'en_38', 'en_53', 'en_21', 'en_37', 'en_107', 'en_10', 'en_82', 'en_16', 'en_41', 'en_12', 'en_67', 'en_61', 'en_14', 'en_11', 'en_39', 'en_52', 'en_24', 'en_97', 'en_28', 'en_72', 'en_94', 'en_36', 'en_4', 
'en_43', 'en_88', 'en_25', 'en_65', 'en_6', 'en_44', 'en_75', 'en_91', 'en_60', 'en_109', 'en_85', 'en_101', 'en_108', 'en_50', 'en_96', 'en_64', 'en_92', 'en_76', 'en_33', 'en_116', 'en_48', 'en_98', 'en_86', 'en_62', 'en_54', 'en_95', 'en_55', 'en_111', 'en_3', 'en_83', 'en_8', 'en_47', 'en_59', 'en_1', 'en_2', 'en_7', 'en_9', 'en_13', 'en_15', 'en_17', 'en_19', 'en_20', 'en_22', 'en_23', 'en_27', 'en_29', 'en_30', 'en_31', 'en_32', 'en_34', 'en_35', 'en_40', 'en_42', 'en_46', 'en_57', 'en_58', 'en_63', 'en_66', 'en_69', 'en_70', 'en_71', 'en_73', 'en_77', 'en_78', 'en_79', 'en_80', 'en_81', 'en_84', 'en_87', 'en_89', 'en_90', 'en_93', 'en_100', 'en_102', 'en_103', 'en_104', 'en_105', 'en_106', 'en_110', 'en_112', 'en_113', 'en_114', 'en_115']\nvoice_pitches = ['x-low', 'low', 'medium', 'high', 'x-high']\nvoice_speeds = ['x-slow', 'slow', 'medium', 'fast', 'x-fast']\nstreaming_state = shared.args.no_stream # remember if chat streaming was enabled\n\n# Used for making text xml compatible, needed for voice pitch and speed control\ntable = str.maketrans({\n \"<\": \"&lt;\",\n \">\": \"&gt;\",\n \"&\": \"&amp;\",\n \"'\": \"&apos;\",\n '\"': \"&quot;\",\n})\n\n\ndef xmlesc(txt):\n return txt.translate(table)\n\n\ndef load_model():\n torch_cache_path = torch.hub.get_dir() if params['local_cache_path'] == '' else params['local_cache_path']\n model_path = torch_cache_path + \"/snakers4_silero-models_master/src/silero/model/\" + params['model_id'] + \".pt\"\n if Path(model_path).is_file():\n print(f'\\nUsing Silero TTS cached checkpoint found at {torch_cache_path}')\n model, example_text = torch.hub.load(repo_or_dir=torch_cache_path + '/snakers4_silero-models_master/', model='silero_tts', language=params['language'], speaker=params['model_id'], source='local', path=model_path, force_reload=True)\n else:\n print(f'\\nSilero TTS cache not found at {torch_cache_path}. 
Attempting to download...')\n model, example_text = torch.hub.load(repo_or_dir='snakers4/silero-models', model='silero_tts', language=params['language'], speaker=params['model_id'])\n model.to(params['device'])\n return model\n\n\ndef remove_tts_from_history(name1, name2, mode):\n for i, entry in enumerate(shared.history['internal']):\n shared.history['visible'][i] = [shared.history['visible'][i][0], entry[1]]\n return chat_html_wrapper(shared.history['visible'], name1, name2, mode)\n\n\ndef toggle_text_in_history(name1, name2, mode):\n for i, entry in enumerate(shared.history['visible']):\n visible_reply = entry[1]\n if visible_reply.startswith('<audio'):\n if params['show_text']:\n reply = shared.history['internal'][i][1]\n shared.history['visible'][i] = [shared.history['visible'][i][0], f\"{visible_reply.split('</audio>')[0]}</audio>\\n\\n{reply}\"]\n else:\n shared.history['visible'][i] = [shared.history['visible'][i][0], f\"{visible_reply.split('</audio>')[0]}</audio>\"]\n return chat_html_wrapper(shared.history['visible'], name1, name2, mode)\n\n\ndef input_modifier(string):\n \"\"\"\n This function is applied to your text inputs before\n they are fed into the model.\n \"\"\"\n\n # Remove autoplay from the last reply\n if shared.is_chat() and len(shared.history['internal']) > 0:\n shared.history['visible'][-1] = [shared.history['visible'][-1][0], shared.history['visible'][-1][1].replace('controls autoplay>', 'controls>')]\n\n shared.processing_message = \"*Is recording a voice message...*\"\n shared.args.no_stream = True # Disable streaming cause otherwise the audio output will stutter and begin anew every time the message is being updated\n return string\n\n\ndef output_modifier(string):\n \"\"\"\n This function is applied to the model outputs.\n \"\"\"\n\n global model, current_params, streaming_state\n\n for i in params:\n if params[i] != current_params[i]:\n model = load_model()\n current_params = params.copy()\n break\n\n if not params['activate']:\n return string\n\n original_string = string\n string = tts_preprocessor.preprocess(string)\n\n if string == '':\n string = '*Empty reply, try regenerating*'\n else:\n output_file = Path(f'extensions/silero_tts/outputs/{shared.character}_{int(time.time())}.wav')\n prosody = '<prosody rate=\"{}\" pitch=\"{}\">'.format(params['voice_speed'], params['voice_pitch'])\n silero_input = f'<speak>{prosody}{xmlesc(string)}</prosody></speak>'\n model.save_wav(ssml_text=silero_input, speaker=params['speaker'], sample_rate=int(params['sample_rate']), audio_path=str(output_file))\n\n autoplay = 'autoplay' if params['autoplay'] else ''\n string = f'<audio src=\"file/{output_file.as_posix()}\" controls {autoplay}></audio>'\n if params['show_text']:\n string += f'\\n\\n{original_string}'\n\n shared.processing_message = \"*Is typing...*\"\n shared.args.no_stream = streaming_state # restore the streaming option to the previous value\n return string\n\n\ndef bot_prefix_modifier(string):\n \"\"\"\n This function is only applied in chat mode. 
It modifies\n the prefix text for the Bot and can be used to bias its\n behavior.\n \"\"\"\n\n return string\n\n\ndef setup():\n global model\n model = load_model()\n\n\ndef ui():\n # Gradio elements\n with gr.Accordion(\"Silero TTS\"):\n with gr.Row():\n activate = gr.Checkbox(value=params['activate'], label='Activate TTS')\n autoplay = gr.Checkbox(value=params['autoplay'], label='Play TTS automatically')\n\n show_text = gr.Checkbox(value=params['show_text'], label='Show message text under audio player')\n voice = gr.Dropdown(value=params['speaker'], choices=voices_by_gender, label='TTS voice')\n with gr.Row():\n v_pitch = gr.Dropdown(value=params['voice_pitch'], choices=voice_pitches, label='Voice pitch')\n v_speed = gr.Dropdown(value=params['voice_speed'], choices=voice_speeds, label='Voice speed')\n\n with gr.Row():\n convert = gr.Button('Permanently replace audios with the message texts')\n convert_cancel = gr.Button('Cancel', visible=False)\n convert_confirm = gr.Button('Confirm (cannot be undone)', variant=\"stop\", visible=False)\n\n # Convert history with confirmation\n convert_arr = [convert_confirm, convert, convert_cancel]\n convert.click(lambda: [gr.update(visible=True), gr.update(visible=False), gr.update(visible=True)], None, convert_arr)\n convert_confirm.click(lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, convert_arr)\n convert_confirm.click(remove_tts_from_history, [shared.gradio[k] for k in ['name1', 'name2', 'Chat mode']], shared.gradio['display'])\n convert_confirm.click(lambda: chat.save_history(timestamp=False), [], [], show_progress=False)\n convert_cancel.click(lambda: [gr.update(visible=False), gr.update(visible=True), gr.update(visible=False)], None, convert_arr)\n\n # Toggle message text in history\n show_text.change(lambda x: params.update({\"show_text\": x}), show_text, None)\n show_text.change(toggle_text_in_history, [shared.gradio[k] for k in ['name1', 'name2', 'Chat mode']], shared.gradio['display'])\n show_text.change(lambda: chat.save_history(timestamp=False), [], [], show_progress=False)\n\n # Event functions to update the parameters in the backend\n activate.change(lambda x: params.update({\"activate\": x}), activate, None)\n autoplay.change(lambda x: params.update({\"autoplay\": x}), autoplay, None)\n voice.change(lambda x: params.update({\"speaker\": x}), voice, None)\n v_pitch.change(lambda x: params.update({\"voice_pitch\": x}), v_pitch, None)\n v_speed.change(lambda x: params.update({\"voice_speed\": x}), v_speed, None)\n", "path": "extensions/silero_tts/script.py"}]}
3,495
519
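The fix captured in the record above amounts to an offline-first load: probe torch's hub cache for the already-downloaded checkpoint and only fall back to a network download when it is missing. A minimal standalone sketch of that pattern follows; the keyword arguments mirror the record's patched `load_model`, `torch` is assumed to be installed, and the function name and default arguments here are illustrative rather than part of the extension.

```python
from pathlib import Path

import torch


def load_silero_tts(language="en", model_id="v3_en", local_cache_path=""):
    # Prefer an explicit cache override, otherwise use torch's own hub cache directory.
    torch_cache_path = local_cache_path or torch.hub.get_dir()
    local_repo = Path(torch_cache_path) / "snakers4_silero-models_master"
    checkpoint = local_repo / "src" / "silero" / "model" / f"{model_id}.pt"

    if checkpoint.is_file():
        # Cached checkpoint present: load from disk, no internet connection required.
        model, _ = torch.hub.load(
            repo_or_dir=str(local_repo),
            model="silero_tts",
            language=language,
            speaker=model_id,
            source="local",
            path=str(checkpoint),
            force_reload=True,
        )
    else:
        # First run: download from GitHub, which populates the cache for next time.
        model, _ = torch.hub.load(
            repo_or_dir="snakers4/silero-models",
            model="silero_tts",
            language=language,
            speaker=model_id,
        )
    return model
```

Keeping the cache probe ahead of the network call is what lets the extension start without connectivity once the checkpoint has been fetched a single time.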
gh_patches_debug_11605
rasdani/github-patches
git_diff
fidals__shopelectro-342
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- docker-compose.yml:93: Resurrect coala eslint. The puzzle `322-dbccf9a1` from #322 has to be resolved: https://github.com/fidals/shopelectro/blob/c827fa74e382579bf43fa368be3549cd108f6050/docker/docker-compose.yml#L93-L93 The puzzle was created by Artemiy on 08-Jun-18. Estimate: 0 minutes, If you have any technical questions, don't ask me, submit new tickets instead. The task will be "done" when the problem is fixed and the text of the puzzle is _removed_ from the source code. Here is more about [PDD](http://www.yegor256.com/2009/03/04/pdd.html) and [about me](http://www.yegor256.com/2017/04/05/pdd-in-action.html). --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `shopelectro/views/catalog.py` Content: ``` 1 from functools import partial 2 3 from django.conf import settings 4 from django.core.paginator import Paginator 5 from django.http import Http404, HttpResponse, HttpResponseBadRequest, HttpResponseForbidden 6 from django.shortcuts import render, get_object_or_404 7 from django.views.decorators.http import require_POST 8 from django_user_agents.utils import get_user_agent 9 10 from catalog.views import catalog 11 from images.models import Image 12 from pages import views as pages_views 13 14 from shopelectro import config 15 from shopelectro import models 16 from shopelectro.views.helpers import set_csrf_cookie 17 18 PRODUCTS_ON_PAGE_PC = 48 19 PRODUCTS_ON_PAGE_MOB = 12 20 21 22 def get_products_count(request): 23 """Calculate max products list size from request. List size depends on device type.""" 24 mobile_view = get_user_agent(request).is_mobile 25 return PRODUCTS_ON_PAGE_MOB if mobile_view else PRODUCTS_ON_PAGE_PC 26 27 28 # CATALOG VIEWS 29 class CategoryTree(catalog.CategoryTree): 30 category_model = models.Category 31 32 33 @set_csrf_cookie 34 class ProductPage(catalog.ProductPage): 35 pk_url_kwarg = None 36 slug_url_kwarg = 'product_vendor_code' 37 slug_field = 'vendor_code' 38 39 queryset = ( 40 models.Product.objects 41 .filter(category__isnull=False) 42 .prefetch_related('product_feedbacks', 'page__images') 43 .select_related('page') 44 ) 45 46 def get_context_data(self, **kwargs): 47 context = super(ProductPage, self).get_context_data(**kwargs) 48 49 group_tags_pairs = ( 50 models.Tag.objects 51 .filter(products=self.object) 52 .get_group_tags_pairs() 53 ) 54 55 return { 56 **context, 57 'price_bounds': config.PRICE_BOUNDS, 58 'group_tags_pairs': group_tags_pairs 59 } 60 61 62 # SHOPELECTRO-SPECIFIC VIEWS 63 @set_csrf_cookie 64 class IndexPage(pages_views.CustomPageView): 65 66 def get_context_data(self, **kwargs): 67 """Extended method. 
Add product's images to context.""" 68 context = super(IndexPage, self).get_context_data(**kwargs) 69 mobile_view = get_user_agent(self.request).is_mobile 70 71 top_products = ( 72 models.Product.objects 73 .filter(id__in=settings.TOP_PRODUCTS) 74 .prefetch_related('category') 75 .select_related('page') 76 ) 77 78 images = Image.objects.get_main_images_by_pages( 79 models.ProductPage.objects.filter( 80 shopelectro_product__in=top_products 81 ) 82 ) 83 84 categories = models.Category.objects.get_root_categories_by_products( 85 top_products) 86 87 prepared_top_products = [] 88 if not mobile_view: 89 prepared_top_products = [ 90 (product, images.get(product.page), categories.get(product)) 91 for product in top_products 92 ] 93 94 return { 95 **context, 96 'category_tile': config.MAIN_PAGE_TILE, 97 'prepared_top_products': prepared_top_products, 98 } 99 100 101 def merge_products_and_images(products): 102 images = Image.objects.get_main_images_by_pages( 103 models.ProductPage.objects.filter(shopelectro_product__in=products) 104 ) 105 106 return [ 107 (product, images.get(product.page)) 108 for product in products 109 ] 110 111 112 @set_csrf_cookie 113 class CategoryPage(catalog.CategoryPage): 114 115 def get_context_data(self, **kwargs): 116 """Add sorting options and view_types in context.""" 117 context = super().get_context_data(**kwargs) 118 products_on_page = int(self.request.GET.get( 119 'step', get_products_count(self.request), 120 )) 121 page_number = int(self.request.GET.get('page', 1)) 122 view_type = self.request.session.get('view_type', 'tile') 123 sorting = int(self.kwargs.get('sorting', 0)) 124 sorting_option = config.category_sorting(sorting) 125 category = context['category'] 126 if ( 127 page_number < 1 or 128 products_on_page not in settings.CATEGORY_STEP_MULTIPLIERS 129 ): 130 raise Http404('Page does not exist.') 131 132 all_products = ( 133 models.Product.objects 134 .prefetch_related('page__images') 135 .select_related('page') 136 .get_by_category(category, ordering=(sorting_option, )) 137 ) 138 139 group_tags_pairs = ( 140 models.Tag.objects 141 .filter(products__in=all_products) 142 .get_group_tags_pairs() 143 ) 144 145 tags = self.kwargs.get('tags') 146 147 tag_titles = '' 148 if tags: 149 slugs = models.Tag.parse_url_tags(tags) 150 tags = models.Tag.objects.filter(slug__in=slugs) 151 152 all_products = ( 153 all_products 154 .filter(tags__in=tags) 155 # Use distinct because filtering by QuerySet tags, 156 # that related with products by many-to-many relation. 
157 .distinct(sorting_option.lstrip('-')) 158 ) 159 160 tag_titles = models.serialize_tags_to_title(tags) 161 162 def template_context(page, tag_titles, tags): 163 return { 164 'page': page, 165 'tag_titles': tag_titles, 166 'tags': tags, 167 } 168 169 page = context['page'] 170 page.get_template_render_context = partial( 171 template_context, page, tag_titles, tags) 172 173 paginated_page = Paginator(all_products, products_on_page).page(page_number) 174 total_products = all_products.count() 175 products = paginated_page.object_list 176 if not products: 177 raise Http404('Page without products does not exist.') 178 179 return { 180 **context, 181 'product_image_pairs': merge_products_and_images(products), 182 'group_tags_pairs': group_tags_pairs, 183 'total_products': total_products, 184 'products_count': (page_number - 1) * products_on_page + products.count(), 185 'paginated_page': paginated_page, 186 'sorting_options': config.category_sorting(), 187 'limits': settings.CATEGORY_STEP_MULTIPLIERS, 188 'sort': sorting, 189 'tags': tags, 190 'view_type': view_type, 191 'skip_canonical': bool(tags), 192 } 193 194 195 def load_more(request, category_slug, offset=0, limit=0, sorting=0, tags=None): 196 """ 197 Load more products of a given category. 198 199 :param sorting: preferred sorting index from CATEGORY_SORTING tuple 200 :param request: HttpRequest object 201 :param category_slug: Slug for a given category 202 :param offset: used for slicing QuerySet. 203 :return: products list in html format 204 """ 205 products_on_page = limit or get_products_count(request) 206 offset = int(offset) 207 if offset < 0: 208 return HttpResponseBadRequest('The offset is wrong. An offset should be greater than or equal to 0.') 209 if products_on_page not in settings.CATEGORY_STEP_MULTIPLIERS: 210 return HttpResponseBadRequest( 211 'The limit number is wrong. List of available numbers:' 212 f' {", ".join(map(str, settings.CATEGORY_STEP_MULTIPLIERS))}' 213 ) 214 # increment page number because: 215 # 11 // 12 = 0, 0 // 12 = 0 but it should be the first page 216 # 12 // 12 = 1, 23 // 12 = 1, but it should be the second page 217 page_number = (offset // products_on_page) + 1 218 category = get_object_or_404(models.CategoryPage, slug=category_slug).model 219 sorting_option = config.category_sorting(int(sorting)) 220 221 all_products = ( 222 models.Product.objects 223 .prefetch_related('page__images') 224 .select_related('page') 225 .get_by_category(category, ordering=(sorting_option,)) 226 ) 227 228 if tags: 229 tag_entities = models.Tag.objects.filter( 230 slug__in=models.Tag.parse_url_tags(tags) 231 ) 232 233 all_products = ( 234 all_products 235 .filter(tags__in=tag_entities) 236 # Use distinct because filtering by QuerySet tags, 237 # that related with products by many-to-many relation. 
238 .distinct(sorting_option.lstrip('-')) 239 ) 240 241 paginated_page = Paginator(all_products, products_on_page).page(page_number) 242 products = paginated_page.object_list 243 view = request.session.get('view_type', 'tile') 244 245 return render(request, 'catalog/category_products.html', { 246 'product_image_pairs': merge_products_and_images(products), 247 'paginated_page': paginated_page, 248 'view_type': view, 249 'prods': products_on_page, 250 }) 251 252 253 @require_POST 254 def save_feedback(request): 255 def get_keys_from_post(*args): 256 return {arg: request.POST.get(arg, '') for arg in args} 257 258 product_id = request.POST.get('id') 259 product = models.Product.objects.filter(id=product_id).first() 260 if not (product_id and product): 261 return HttpResponse(status=422) 262 263 fields = ['rating', 'name', 'dignities', 'limitations', 'general'] 264 feedback_data = get_keys_from_post(*fields) 265 266 models.ProductFeedback.objects.create(product=product, **feedback_data) 267 return HttpResponse('ok') 268 269 270 @require_POST 271 def delete_feedback(request): 272 if not request.user.is_authenticated: 273 return HttpResponseForbidden('Not today, sly guy...') 274 275 feedback_id = request.POST.get('id') 276 feedback = models.ProductFeedback.objects.filter(id=feedback_id).first() 277 if not (feedback_id and feedback): 278 return HttpResponse(status=422) 279 280 feedback.delete() 281 return HttpResponse('Feedback with id={} was deleted.'.format(feedback_id)) 282 283 284 class ProductsWithoutImages(catalog.ProductsWithoutImages): 285 model = models.Product 286 287 288 class ProductsWithoutText(catalog.ProductsWithoutText): 289 model = models.Product 290 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/shopelectro/views/catalog.py b/shopelectro/views/catalog.py --- a/shopelectro/views/catalog.py +++ b/shopelectro/views/catalog.py @@ -205,7 +205,9 @@ products_on_page = limit or get_products_count(request) offset = int(offset) if offset < 0: - return HttpResponseBadRequest('The offset is wrong. An offset should be greater than or equal to 0.') + return HttpResponseBadRequest( + 'The offset is wrong. An offset should be greater than or equal to 0.' + ) if products_on_page not in settings.CATEGORY_STEP_MULTIPLIERS: return HttpResponseBadRequest( 'The limit number is wrong. List of available numbers:'
{"golden_diff": "diff --git a/shopelectro/views/catalog.py b/shopelectro/views/catalog.py\n--- a/shopelectro/views/catalog.py\n+++ b/shopelectro/views/catalog.py\n@@ -205,7 +205,9 @@\n products_on_page = limit or get_products_count(request)\n offset = int(offset)\n if offset < 0:\n- return HttpResponseBadRequest('The offset is wrong. An offset should be greater than or equal to 0.')\n+ return HttpResponseBadRequest(\n+ 'The offset is wrong. An offset should be greater than or equal to 0.'\n+ )\n if products_on_page not in settings.CATEGORY_STEP_MULTIPLIERS:\n return HttpResponseBadRequest(\n 'The limit number is wrong. List of available numbers:'\n", "issue": "docker-compose.yml:93: Resurrect coala eslint.\nThe puzzle `322-dbccf9a1` from #322 has to be resolved:\n\nhttps://github.com/fidals/shopelectro/blob/c827fa74e382579bf43fa368be3549cd108f6050/docker/docker-compose.yml#L93-L93\n\nThe puzzle was created by Artemiy on 08-Jun-18. \n\nEstimate: 0 minutes, \n\nIf you have any technical questions, don't ask me, submit new tickets instead. The task will be \"done\" when the problem is fixed and the text of the puzzle is _removed_ from the source code. Here is more about [PDD](http://www.yegor256.com/2009/03/04/pdd.html) and [about me](http://www.yegor256.com/2017/04/05/pdd-in-action.html).\n", "before_files": [{"content": "from functools import partial\n\nfrom django.conf import settings\nfrom django.core.paginator import Paginator\nfrom django.http import Http404, HttpResponse, HttpResponseBadRequest, HttpResponseForbidden\nfrom django.shortcuts import render, get_object_or_404\nfrom django.views.decorators.http import require_POST\nfrom django_user_agents.utils import get_user_agent\n\nfrom catalog.views import catalog\nfrom images.models import Image\nfrom pages import views as pages_views\n\nfrom shopelectro import config\nfrom shopelectro import models\nfrom shopelectro.views.helpers import set_csrf_cookie\n\nPRODUCTS_ON_PAGE_PC = 48\nPRODUCTS_ON_PAGE_MOB = 12\n\n\ndef get_products_count(request):\n \"\"\"Calculate max products list size from request. List size depends on device type.\"\"\"\n mobile_view = get_user_agent(request).is_mobile\n return PRODUCTS_ON_PAGE_MOB if mobile_view else PRODUCTS_ON_PAGE_PC\n\n\n# CATALOG VIEWS\nclass CategoryTree(catalog.CategoryTree):\n category_model = models.Category\n\n\n@set_csrf_cookie\nclass ProductPage(catalog.ProductPage):\n pk_url_kwarg = None\n slug_url_kwarg = 'product_vendor_code'\n slug_field = 'vendor_code'\n\n queryset = (\n models.Product.objects\n .filter(category__isnull=False)\n .prefetch_related('product_feedbacks', 'page__images')\n .select_related('page')\n )\n\n def get_context_data(self, **kwargs):\n context = super(ProductPage, self).get_context_data(**kwargs)\n\n group_tags_pairs = (\n models.Tag.objects\n .filter(products=self.object)\n .get_group_tags_pairs()\n )\n\n return {\n **context,\n 'price_bounds': config.PRICE_BOUNDS,\n 'group_tags_pairs': group_tags_pairs\n }\n\n\n# SHOPELECTRO-SPECIFIC VIEWS\n@set_csrf_cookie\nclass IndexPage(pages_views.CustomPageView):\n\n def get_context_data(self, **kwargs):\n \"\"\"Extended method. 
Add product's images to context.\"\"\"\n context = super(IndexPage, self).get_context_data(**kwargs)\n mobile_view = get_user_agent(self.request).is_mobile\n\n top_products = (\n models.Product.objects\n .filter(id__in=settings.TOP_PRODUCTS)\n .prefetch_related('category')\n .select_related('page')\n )\n\n images = Image.objects.get_main_images_by_pages(\n models.ProductPage.objects.filter(\n shopelectro_product__in=top_products\n )\n )\n\n categories = models.Category.objects.get_root_categories_by_products(\n top_products)\n\n prepared_top_products = []\n if not mobile_view:\n prepared_top_products = [\n (product, images.get(product.page), categories.get(product))\n for product in top_products\n ]\n\n return {\n **context,\n 'category_tile': config.MAIN_PAGE_TILE,\n 'prepared_top_products': prepared_top_products,\n }\n\n\ndef merge_products_and_images(products):\n images = Image.objects.get_main_images_by_pages(\n models.ProductPage.objects.filter(shopelectro_product__in=products)\n )\n\n return [\n (product, images.get(product.page))\n for product in products\n ]\n\n\n@set_csrf_cookie\nclass CategoryPage(catalog.CategoryPage):\n\n def get_context_data(self, **kwargs):\n \"\"\"Add sorting options and view_types in context.\"\"\"\n context = super().get_context_data(**kwargs)\n products_on_page = int(self.request.GET.get(\n 'step', get_products_count(self.request),\n ))\n page_number = int(self.request.GET.get('page', 1))\n view_type = self.request.session.get('view_type', 'tile')\n sorting = int(self.kwargs.get('sorting', 0))\n sorting_option = config.category_sorting(sorting)\n category = context['category']\n if (\n page_number < 1 or\n products_on_page not in settings.CATEGORY_STEP_MULTIPLIERS\n ):\n raise Http404('Page does not exist.')\n\n all_products = (\n models.Product.objects\n .prefetch_related('page__images')\n .select_related('page')\n .get_by_category(category, ordering=(sorting_option, ))\n )\n\n group_tags_pairs = (\n models.Tag.objects\n .filter(products__in=all_products)\n .get_group_tags_pairs()\n )\n\n tags = self.kwargs.get('tags')\n\n tag_titles = ''\n if tags:\n slugs = models.Tag.parse_url_tags(tags)\n tags = models.Tag.objects.filter(slug__in=slugs)\n\n all_products = (\n all_products\n .filter(tags__in=tags)\n # Use distinct because filtering by QuerySet tags,\n # that related with products by many-to-many relation.\n .distinct(sorting_option.lstrip('-'))\n )\n\n tag_titles = models.serialize_tags_to_title(tags)\n\n def template_context(page, tag_titles, tags):\n return {\n 'page': page,\n 'tag_titles': tag_titles,\n 'tags': tags,\n }\n\n page = context['page']\n page.get_template_render_context = partial(\n template_context, page, tag_titles, tags)\n\n paginated_page = Paginator(all_products, products_on_page).page(page_number)\n total_products = all_products.count()\n products = paginated_page.object_list\n if not products:\n raise Http404('Page without products does not exist.')\n\n return {\n **context,\n 'product_image_pairs': merge_products_and_images(products),\n 'group_tags_pairs': group_tags_pairs,\n 'total_products': total_products,\n 'products_count': (page_number - 1) * products_on_page + products.count(),\n 'paginated_page': paginated_page,\n 'sorting_options': config.category_sorting(),\n 'limits': settings.CATEGORY_STEP_MULTIPLIERS,\n 'sort': sorting,\n 'tags': tags,\n 'view_type': view_type,\n 'skip_canonical': bool(tags),\n }\n\n\ndef load_more(request, category_slug, offset=0, limit=0, sorting=0, tags=None):\n \"\"\"\n Load more products of a 
given category.\n\n :param sorting: preferred sorting index from CATEGORY_SORTING tuple\n :param request: HttpRequest object\n :param category_slug: Slug for a given category\n :param offset: used for slicing QuerySet.\n :return: products list in html format\n \"\"\"\n products_on_page = limit or get_products_count(request)\n offset = int(offset)\n if offset < 0:\n return HttpResponseBadRequest('The offset is wrong. An offset should be greater than or equal to 0.')\n if products_on_page not in settings.CATEGORY_STEP_MULTIPLIERS:\n return HttpResponseBadRequest(\n 'The limit number is wrong. List of available numbers:'\n f' {\", \".join(map(str, settings.CATEGORY_STEP_MULTIPLIERS))}'\n )\n # increment page number because:\n # 11 // 12 = 0, 0 // 12 = 0 but it should be the first page\n # 12 // 12 = 1, 23 // 12 = 1, but it should be the second page\n page_number = (offset // products_on_page) + 1\n category = get_object_or_404(models.CategoryPage, slug=category_slug).model\n sorting_option = config.category_sorting(int(sorting))\n\n all_products = (\n models.Product.objects\n .prefetch_related('page__images')\n .select_related('page')\n .get_by_category(category, ordering=(sorting_option,))\n )\n\n if tags:\n tag_entities = models.Tag.objects.filter(\n slug__in=models.Tag.parse_url_tags(tags)\n )\n\n all_products = (\n all_products\n .filter(tags__in=tag_entities)\n # Use distinct because filtering by QuerySet tags,\n # that related with products by many-to-many relation.\n .distinct(sorting_option.lstrip('-'))\n )\n\n paginated_page = Paginator(all_products, products_on_page).page(page_number)\n products = paginated_page.object_list\n view = request.session.get('view_type', 'tile')\n\n return render(request, 'catalog/category_products.html', {\n 'product_image_pairs': merge_products_and_images(products),\n 'paginated_page': paginated_page,\n 'view_type': view,\n 'prods': products_on_page,\n })\n\n\n@require_POST\ndef save_feedback(request):\n def get_keys_from_post(*args):\n return {arg: request.POST.get(arg, '') for arg in args}\n\n product_id = request.POST.get('id')\n product = models.Product.objects.filter(id=product_id).first()\n if not (product_id and product):\n return HttpResponse(status=422)\n\n fields = ['rating', 'name', 'dignities', 'limitations', 'general']\n feedback_data = get_keys_from_post(*fields)\n\n models.ProductFeedback.objects.create(product=product, **feedback_data)\n return HttpResponse('ok')\n\n\n@require_POST\ndef delete_feedback(request):\n if not request.user.is_authenticated:\n return HttpResponseForbidden('Not today, sly guy...')\n\n feedback_id = request.POST.get('id')\n feedback = models.ProductFeedback.objects.filter(id=feedback_id).first()\n if not (feedback_id and feedback):\n return HttpResponse(status=422)\n\n feedback.delete()\n return HttpResponse('Feedback with id={} was deleted.'.format(feedback_id))\n\n\nclass ProductsWithoutImages(catalog.ProductsWithoutImages):\n model = models.Product\n\n\nclass ProductsWithoutText(catalog.ProductsWithoutText):\n model = models.Product\n", "path": "shopelectro/views/catalog.py"}], "after_files": [{"content": "from functools import partial\n\nfrom django.conf import settings\nfrom django.core.paginator import Paginator\nfrom django.http import Http404, HttpResponse, HttpResponseBadRequest, HttpResponseForbidden\nfrom django.shortcuts import render, get_object_or_404\nfrom django.views.decorators.http import require_POST\nfrom django_user_agents.utils import get_user_agent\n\nfrom catalog.views import catalog\nfrom 
images.models import Image\nfrom pages import views as pages_views\n\nfrom shopelectro import config\nfrom shopelectro import models\nfrom shopelectro.views.helpers import set_csrf_cookie\n\nPRODUCTS_ON_PAGE_PC = 48\nPRODUCTS_ON_PAGE_MOB = 12\n\n\ndef get_products_count(request):\n \"\"\"Calculate max products list size from request. List size depends on device type.\"\"\"\n mobile_view = get_user_agent(request).is_mobile\n return PRODUCTS_ON_PAGE_MOB if mobile_view else PRODUCTS_ON_PAGE_PC\n\n\n# CATALOG VIEWS\nclass CategoryTree(catalog.CategoryTree):\n category_model = models.Category\n\n\n@set_csrf_cookie\nclass ProductPage(catalog.ProductPage):\n pk_url_kwarg = None\n slug_url_kwarg = 'product_vendor_code'\n slug_field = 'vendor_code'\n\n queryset = (\n models.Product.objects\n .filter(category__isnull=False)\n .prefetch_related('product_feedbacks', 'page__images')\n .select_related('page')\n )\n\n def get_context_data(self, **kwargs):\n context = super(ProductPage, self).get_context_data(**kwargs)\n\n group_tags_pairs = (\n models.Tag.objects\n .filter(products=self.object)\n .get_group_tags_pairs()\n )\n\n return {\n **context,\n 'price_bounds': config.PRICE_BOUNDS,\n 'group_tags_pairs': group_tags_pairs\n }\n\n\n# SHOPELECTRO-SPECIFIC VIEWS\n@set_csrf_cookie\nclass IndexPage(pages_views.CustomPageView):\n\n def get_context_data(self, **kwargs):\n \"\"\"Extended method. Add product's images to context.\"\"\"\n context = super(IndexPage, self).get_context_data(**kwargs)\n mobile_view = get_user_agent(self.request).is_mobile\n\n top_products = (\n models.Product.objects\n .filter(id__in=settings.TOP_PRODUCTS)\n .prefetch_related('category')\n .select_related('page')\n )\n\n images = Image.objects.get_main_images_by_pages(\n models.ProductPage.objects.filter(\n shopelectro_product__in=top_products\n )\n )\n\n categories = models.Category.objects.get_root_categories_by_products(\n top_products)\n\n prepared_top_products = []\n if not mobile_view:\n prepared_top_products = [\n (product, images.get(product.page), categories.get(product))\n for product in top_products\n ]\n\n return {\n **context,\n 'category_tile': config.MAIN_PAGE_TILE,\n 'prepared_top_products': prepared_top_products,\n }\n\n\ndef merge_products_and_images(products):\n images = Image.objects.get_main_images_by_pages(\n models.ProductPage.objects.filter(shopelectro_product__in=products)\n )\n\n return [\n (product, images.get(product.page))\n for product in products\n ]\n\n\n@set_csrf_cookie\nclass CategoryPage(catalog.CategoryPage):\n\n def get_context_data(self, **kwargs):\n \"\"\"Add sorting options and view_types in context.\"\"\"\n context = super().get_context_data(**kwargs)\n products_on_page = int(self.request.GET.get(\n 'step', get_products_count(self.request),\n ))\n page_number = int(self.request.GET.get('page', 1))\n view_type = self.request.session.get('view_type', 'tile')\n sorting = int(self.kwargs.get('sorting', 0))\n sorting_option = config.category_sorting(sorting)\n category = context['category']\n if (\n page_number < 1 or\n products_on_page not in settings.CATEGORY_STEP_MULTIPLIERS\n ):\n raise Http404('Page does not exist.')\n\n all_products = (\n models.Product.objects\n .prefetch_related('page__images')\n .select_related('page')\n .get_by_category(category, ordering=(sorting_option, ))\n )\n\n group_tags_pairs = (\n models.Tag.objects\n .filter(products__in=all_products)\n .get_group_tags_pairs()\n )\n\n tags = self.kwargs.get('tags')\n\n tag_titles = ''\n if tags:\n slugs = 
models.Tag.parse_url_tags(tags)\n tags = models.Tag.objects.filter(slug__in=slugs)\n\n all_products = (\n all_products\n .filter(tags__in=tags)\n # Use distinct because filtering by QuerySet tags,\n # that related with products by many-to-many relation.\n .distinct(sorting_option.lstrip('-'))\n )\n\n tag_titles = models.serialize_tags_to_title(tags)\n\n def template_context(page, tag_titles, tags):\n return {\n 'page': page,\n 'tag_titles': tag_titles,\n 'tags': tags,\n }\n\n page = context['page']\n page.get_template_render_context = partial(\n template_context, page, tag_titles, tags)\n\n paginated_page = Paginator(all_products, products_on_page).page(page_number)\n total_products = all_products.count()\n products = paginated_page.object_list\n if not products:\n raise Http404('Page without products does not exist.')\n\n return {\n **context,\n 'product_image_pairs': merge_products_and_images(products),\n 'group_tags_pairs': group_tags_pairs,\n 'total_products': total_products,\n 'products_count': (page_number - 1) * products_on_page + products.count(),\n 'paginated_page': paginated_page,\n 'sorting_options': config.category_sorting(),\n 'limits': settings.CATEGORY_STEP_MULTIPLIERS,\n 'sort': sorting,\n 'tags': tags,\n 'view_type': view_type,\n 'skip_canonical': bool(tags),\n }\n\n\ndef load_more(request, category_slug, offset=0, limit=0, sorting=0, tags=None):\n \"\"\"\n Load more products of a given category.\n\n :param sorting: preferred sorting index from CATEGORY_SORTING tuple\n :param request: HttpRequest object\n :param category_slug: Slug for a given category\n :param offset: used for slicing QuerySet.\n :return: products list in html format\n \"\"\"\n products_on_page = limit or get_products_count(request)\n offset = int(offset)\n if offset < 0:\n return HttpResponseBadRequest(\n 'The offset is wrong. An offset should be greater than or equal to 0.'\n )\n if products_on_page not in settings.CATEGORY_STEP_MULTIPLIERS:\n return HttpResponseBadRequest(\n 'The limit number is wrong. 
List of available numbers:'\n f' {\", \".join(map(str, settings.CATEGORY_STEP_MULTIPLIERS))}'\n )\n # increment page number because:\n # 11 // 12 = 0, 0 // 12 = 0 but it should be the first page\n # 12 // 12 = 1, 23 // 12 = 1, but it should be the second page\n page_number = (offset // products_on_page) + 1\n category = get_object_or_404(models.CategoryPage, slug=category_slug).model\n sorting_option = config.category_sorting(int(sorting))\n\n all_products = (\n models.Product.objects\n .prefetch_related('page__images')\n .select_related('page')\n .get_by_category(category, ordering=(sorting_option,))\n )\n\n if tags:\n tag_entities = models.Tag.objects.filter(\n slug__in=models.Tag.parse_url_tags(tags)\n )\n\n all_products = (\n all_products\n .filter(tags__in=tag_entities)\n # Use distinct because filtering by QuerySet tags,\n # that related with products by many-to-many relation.\n .distinct(sorting_option.lstrip('-'))\n )\n\n paginated_page = Paginator(all_products, products_on_page).page(page_number)\n products = paginated_page.object_list\n view = request.session.get('view_type', 'tile')\n\n return render(request, 'catalog/category_products.html', {\n 'product_image_pairs': merge_products_and_images(products),\n 'paginated_page': paginated_page,\n 'view_type': view,\n 'prods': products_on_page,\n })\n\n\n@require_POST\ndef save_feedback(request):\n def get_keys_from_post(*args):\n return {arg: request.POST.get(arg, '') for arg in args}\n\n product_id = request.POST.get('id')\n product = models.Product.objects.filter(id=product_id).first()\n if not (product_id and product):\n return HttpResponse(status=422)\n\n fields = ['rating', 'name', 'dignities', 'limitations', 'general']\n feedback_data = get_keys_from_post(*fields)\n\n models.ProductFeedback.objects.create(product=product, **feedback_data)\n return HttpResponse('ok')\n\n\n@require_POST\ndef delete_feedback(request):\n if not request.user.is_authenticated:\n return HttpResponseForbidden('Not today, sly guy...')\n\n feedback_id = request.POST.get('id')\n feedback = models.ProductFeedback.objects.filter(id=feedback_id).first()\n if not (feedback_id and feedback):\n return HttpResponse(status=422)\n\n feedback.delete()\n return HttpResponse('Feedback with id={} was deleted.'.format(feedback_id))\n\n\nclass ProductsWithoutImages(catalog.ProductsWithoutImages):\n model = models.Product\n\n\nclass ProductsWithoutText(catalog.ProductsWithoutText):\n model = models.Product\n", "path": "shopelectro/views/catalog.py"}]}
3,332
165
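The golden diff in the row just above is purely cosmetic: it re-wraps one over-long `HttpResponseBadRequest(...)` call so the lint check the issue resurrects can pass. For context, a small self-contained sketch of the early-return validation guard that call belongs to is below; it assumes a configured Django project, and `CATEGORY_STEP_MULTIPLIERS` is a stand-in for the project's settings value.

```python
from django.http import HttpResponseBadRequest

# Stand-in for settings.CATEGORY_STEP_MULTIPLIERS from the shopelectro project.
CATEGORY_STEP_MULTIPLIERS = [12, 24, 48, 96]


def validate_paging(offset, products_on_page):
    """Return an error response for bad paging params, or None when they are valid."""
    if int(offset) < 0:
        return HttpResponseBadRequest(
            'The offset is wrong. An offset should be greater than or equal to 0.'
        )
    if products_on_page not in CATEGORY_STEP_MULTIPLIERS:
        return HttpResponseBadRequest(
            'The limit number is wrong. List of available numbers:'
            f' {", ".join(map(str, CATEGORY_STEP_MULTIPLIERS))}'
        )
    return None
```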
gh_patches_debug_316
rasdani/github-patches
git_diff
microsoft__ptvsd-806
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- listen(0) in create_server() does not allow client to connect to linux server ## Environment data - PTVSD version: 4.1.1 - OS and version: linux kernel 4.14.46 - Python version (& distribution if applicable, e.g. Anaconda): 2.7.13, 3.5.3 - Using VS Code or Visual Studio: N/A ## Actual behavior server on linux never accepts connection, i.e. `client, _ = sock.accept()` in `connect()` in socket.py never returns. This is due to the `listen(0)` call in `create_server()`. This was changed from `listen(1)` in 322f6946. Although `listen(0)` does work correctly on mac, it does not on linux. ## Expected behavior the incoming connection to be accepted ## Steps to reproduce: run: ```python ptvsd.enable_attach(address=('0.0.0.0', 9876), redirect_output=True) ptvsd.wait_for_attach() ``` then from the command line, see that `telnet localhost 9876` hangs instead of connecting. some background history is at https://bugs.python.org/issue8498 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ptvsd/socket.py` Content: ``` 1 # Copyright (c) Microsoft Corporation. All rights reserved. 2 # Licensed under the MIT License. See LICENSE in the project root 3 # for license information. 4 5 from __future__ import absolute_import 6 7 from collections import namedtuple 8 import contextlib 9 import errno 10 import socket 11 try: 12 from urllib.parse import urlparse 13 except ImportError: 14 from urlparse import urlparse 15 16 17 try: 18 ConnectionError # noqa 19 BrokenPipeError # noqa 20 ConnectionResetError # noqa 21 except NameError: 22 class BrokenPipeError(Exception): 23 # EPIPE and ESHUTDOWN 24 pass 25 26 class ConnectionResetError(Exception): 27 # ECONNRESET 28 pass 29 30 31 NOT_CONNECTED = ( 32 errno.ENOTCONN, 33 errno.EBADF, 34 ) 35 36 CLOSED = ( 37 errno.EPIPE, 38 errno.ESHUTDOWN, 39 errno.ECONNRESET, 40 # Windows 41 10038, # "An operation was attempted on something that is not a socket" 42 10058, 43 ) 44 45 EOF = NOT_CONNECTED + CLOSED 46 47 48 @contextlib.contextmanager 49 def convert_eof(): 50 """A context manager to convert some socket errors into EOFError.""" 51 try: 52 yield 53 except ConnectionResetError: 54 raise EOFError 55 except BrokenPipeError: 56 raise EOFError 57 except OSError as exc: 58 if exc.errno in EOF: 59 raise EOFError 60 raise 61 62 63 class TimeoutError(socket.timeout): 64 """A socket timeout happened.""" 65 66 67 def is_socket(sock): 68 """Return True if the object can be used as a socket.""" 69 return isinstance(sock, socket.socket) 70 71 72 def create_server(host, port): 73 """Return a local server socket listening on the given port.""" 74 if host is None: 75 host = 'localhost' 76 server = _new_sock() 77 server.bind((host, port)) 78 server.listen(0) 79 return server 80 81 82 def create_client(): 83 """Return a client socket that may be connected to a remote address.""" 84 return _new_sock() 85 86 87 def _new_sock(): 88 sock = socket.socket(socket.AF_INET, 89 socket.SOCK_STREAM, 90 socket.IPPROTO_TCP) 91 sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) 92 return sock 93 94 95 @contextlib.contextmanager 96 def ignored_errno(*ignored): 97 """A context manager that ignores the given errnos.""" 98 try: 99 yield 100 except OSError as exc: 101 if exc.errno not in ignored: 102 raise 103 104 105 class KeepAlive(namedtuple('KeepAlive', 'interval idle maxfails')): 106 """TCP 
keep-alive settings.""" 107 108 INTERVAL = 3 # seconds 109 IDLE = 1 # seconds after idle 110 MAX_FAILS = 5 111 112 @classmethod 113 def from_raw(cls, raw): 114 """Return the corresponding KeepAlive.""" 115 if raw is None: 116 return None 117 elif isinstance(raw, cls): 118 return raw 119 elif isinstance(raw, (str, int, float)): 120 return cls(raw) 121 else: 122 try: 123 raw = dict(raw) 124 except TypeError: 125 return cls(*raw) 126 else: 127 return cls(**raw) 128 129 def __new__(cls, interval=None, idle=None, maxfails=None): 130 self = super(KeepAlive, cls).__new__( 131 cls, 132 float(interval) if interval or interval == 0 else cls.INTERVAL, 133 float(idle) if idle or idle == 0 else cls.IDLE, 134 float(maxfails) if maxfails or maxfails == 0 else cls.MAX_FAILS, 135 ) 136 return self 137 138 def apply(self, sock): 139 """Set the keepalive values on the socket.""" 140 sock.setsockopt(socket.SOL_SOCKET, 141 socket.SO_KEEPALIVE, 142 1) 143 interval = self.interval 144 idle = self.idle 145 maxfails = self.maxfails 146 try: 147 if interval > 0: 148 sock.setsockopt(socket.IPPROTO_TCP, 149 socket.TCP_KEEPINTVL, 150 interval) 151 if idle > 0: 152 sock.setsockopt(socket.IPPROTO_TCP, 153 socket.TCP_KEEPIDLE, 154 idle) 155 if maxfails >= 0: 156 sock.setsockopt(socket.IPPROTO_TCP, 157 socket.TCP_KEEPCNT, 158 maxfails) 159 except AttributeError: 160 # mostly linux-only 161 pass 162 163 164 def connect(sock, addr, keepalive=None): 165 """Return the client socket for the next connection.""" 166 if addr is None: 167 if keepalive is None or keepalive is True: 168 keepalive = KeepAlive() 169 elif keepalive: 170 keepalive = KeepAlive.from_raw(keepalive) 171 client, _ = sock.accept() 172 if keepalive: 173 keepalive.apply(client) 174 return client 175 else: 176 if keepalive: 177 raise NotImplementedError 178 sock.connect(addr) 179 return sock 180 181 182 def shut_down(sock, how=socket.SHUT_RDWR, ignored=NOT_CONNECTED): 183 """Shut down the given socket.""" 184 with ignored_errno(*ignored or ()): 185 sock.shutdown(how) 186 187 188 def close_socket(sock): 189 """Shutdown and close the socket.""" 190 try: 191 shut_down(sock) 192 except Exception: 193 # TODO: Log errors? 
194 pass 195 sock.close() 196 197 198 class Address(namedtuple('Address', 'host port')): 199 """An IP address to use for sockets.""" 200 201 @classmethod 202 def from_raw(cls, raw, defaultport=None): 203 """Return an address corresponding to the given data.""" 204 if isinstance(raw, cls): 205 return raw 206 elif isinstance(raw, int): 207 return cls(None, raw) 208 elif isinstance(raw, str): 209 if raw == '': 210 return cls('', defaultport) 211 parsed = urlparse(raw) 212 if not parsed.netloc: 213 if parsed.scheme: 214 raise ValueError('invalid address {!r}'.format(raw)) 215 return cls.from_raw('x://' + raw, defaultport=defaultport) 216 return cls( 217 parsed.hostname or '', 218 parsed.port if parsed.port else defaultport, 219 ) 220 elif not raw: 221 return cls(None, defaultport) 222 else: 223 try: 224 kwargs = dict(**raw) 225 except TypeError: 226 return cls(*raw) 227 else: 228 kwargs.setdefault('host', None) 229 kwargs.setdefault('port', defaultport) 230 return cls(**kwargs) 231 232 @classmethod 233 def as_server(cls, host, port): 234 """Return an address to use as a server address.""" 235 return cls(host, port, isserver=True) 236 237 @classmethod 238 def as_client(cls, host, port): 239 """Return an address to use as a server address.""" 240 return cls(host, port, isserver=False) 241 242 def __new__(cls, host, port, **kwargs): 243 if host == '*': 244 host = '' 245 isserver = kwargs.pop('isserver', None) 246 if isserver is None: 247 isserver = (host is None or host == '') 248 else: 249 isserver = bool(isserver) 250 if host is None: 251 host = 'localhost' 252 self = super(Address, cls).__new__( 253 cls, 254 str(host), 255 int(port) if port is not None else None, 256 **kwargs 257 ) 258 self._isserver = isserver 259 return self 260 261 def __init__(self, *args, **kwargs): 262 if self.port is None: 263 raise TypeError('missing port') 264 if self.port <= 0 or self.port > 65535: 265 raise ValueError('port must be positive int < 65535') 266 267 def __repr__(self): 268 orig = super(Address, self).__repr__() 269 return '{}, isserver={})'.format(orig[:-1], self._isserver) 270 271 def __eq__(self, other): 272 if not super(Address, self).__eq__(other): 273 return False 274 try: 275 other = self.from_raw(other) 276 except Exception: 277 return False 278 return self._isserver == other._isserver 279 280 @property 281 def isserver(self): 282 return self._isserver 283 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ptvsd/socket.py b/ptvsd/socket.py --- a/ptvsd/socket.py +++ b/ptvsd/socket.py @@ -75,7 +75,7 @@ host = 'localhost' server = _new_sock() server.bind((host, port)) - server.listen(0) + server.listen(1) return server
{"golden_diff": "diff --git a/ptvsd/socket.py b/ptvsd/socket.py\n--- a/ptvsd/socket.py\n+++ b/ptvsd/socket.py\n@@ -75,7 +75,7 @@\n host = 'localhost'\n server = _new_sock()\n server.bind((host, port))\n- server.listen(0)\n+ server.listen(1)\n return server\n", "issue": "listen(0) in create_server() does not allow client to connect to linux server\n## Environment data\r\n\r\n- PTVSD version: 4.1.1\r\n- OS and version: linux kernel 4.14.46\r\n- Python version (& distribution if applicable, e.g. Anaconda): 2.7.13, 3.5.3\r\n- Using VS Code or Visual Studio: N/A\r\n\r\n## Actual behavior\r\nserver on linux never accepts connection, i.e. `client, _ = sock.accept()` in `connect()` in socket.py never returns. This is due to the `listen(0)` call in `create_server()`. This was changed from `listen(1)` in 322f6946. Although `listen(0)` does work correctly on mac, it does not on linux.\r\n\r\n## Expected behavior\r\nthe incoming connection to be accepted\r\n\r\n## Steps to reproduce:\r\nrun:\r\n```python\r\nptvsd.enable_attach(address=('0.0.0.0', 9876), redirect_output=True)\r\nptvsd.wait_for_attach()\r\n```\r\nthen from the command line, see that `telnet localhost 9876` hangs instead of connecting. some background history is at https://bugs.python.org/issue8498\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See LICENSE in the project root\n# for license information.\n\nfrom __future__ import absolute_import\n\nfrom collections import namedtuple\nimport contextlib\nimport errno\nimport socket\ntry:\n from urllib.parse import urlparse\nexcept ImportError:\n from urlparse import urlparse\n\n\ntry:\n ConnectionError # noqa\n BrokenPipeError # noqa\n ConnectionResetError # noqa\nexcept NameError:\n class BrokenPipeError(Exception):\n # EPIPE and ESHUTDOWN\n pass\n\n class ConnectionResetError(Exception):\n # ECONNRESET\n pass\n\n\nNOT_CONNECTED = (\n errno.ENOTCONN,\n errno.EBADF,\n)\n\nCLOSED = (\n errno.EPIPE,\n errno.ESHUTDOWN,\n errno.ECONNRESET,\n # Windows\n 10038, # \"An operation was attempted on something that is not a socket\"\n 10058,\n)\n\nEOF = NOT_CONNECTED + CLOSED\n\n\[email protected]\ndef convert_eof():\n \"\"\"A context manager to convert some socket errors into EOFError.\"\"\"\n try:\n yield\n except ConnectionResetError:\n raise EOFError\n except BrokenPipeError:\n raise EOFError\n except OSError as exc:\n if exc.errno in EOF:\n raise EOFError\n raise\n\n\nclass TimeoutError(socket.timeout):\n \"\"\"A socket timeout happened.\"\"\"\n\n\ndef is_socket(sock):\n \"\"\"Return True if the object can be used as a socket.\"\"\"\n return isinstance(sock, socket.socket)\n\n\ndef create_server(host, port):\n \"\"\"Return a local server socket listening on the given port.\"\"\"\n if host is None:\n host = 'localhost'\n server = _new_sock()\n server.bind((host, port))\n server.listen(0)\n return server\n\n\ndef create_client():\n \"\"\"Return a client socket that may be connected to a remote address.\"\"\"\n return _new_sock()\n\n\ndef _new_sock():\n sock = socket.socket(socket.AF_INET,\n socket.SOCK_STREAM,\n socket.IPPROTO_TCP)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n return sock\n\n\[email protected]\ndef ignored_errno(*ignored):\n \"\"\"A context manager that ignores the given errnos.\"\"\"\n try:\n yield\n except OSError as exc:\n if exc.errno not in ignored:\n raise\n\n\nclass KeepAlive(namedtuple('KeepAlive', 'interval idle maxfails')):\n \"\"\"TCP keep-alive 
settings.\"\"\"\n\n INTERVAL = 3 # seconds\n IDLE = 1 # seconds after idle\n MAX_FAILS = 5\n\n @classmethod\n def from_raw(cls, raw):\n \"\"\"Return the corresponding KeepAlive.\"\"\"\n if raw is None:\n return None\n elif isinstance(raw, cls):\n return raw\n elif isinstance(raw, (str, int, float)):\n return cls(raw)\n else:\n try:\n raw = dict(raw)\n except TypeError:\n return cls(*raw)\n else:\n return cls(**raw)\n\n def __new__(cls, interval=None, idle=None, maxfails=None):\n self = super(KeepAlive, cls).__new__(\n cls,\n float(interval) if interval or interval == 0 else cls.INTERVAL,\n float(idle) if idle or idle == 0 else cls.IDLE,\n float(maxfails) if maxfails or maxfails == 0 else cls.MAX_FAILS,\n )\n return self\n\n def apply(self, sock):\n \"\"\"Set the keepalive values on the socket.\"\"\"\n sock.setsockopt(socket.SOL_SOCKET,\n socket.SO_KEEPALIVE,\n 1)\n interval = self.interval\n idle = self.idle\n maxfails = self.maxfails\n try:\n if interval > 0:\n sock.setsockopt(socket.IPPROTO_TCP,\n socket.TCP_KEEPINTVL,\n interval)\n if idle > 0:\n sock.setsockopt(socket.IPPROTO_TCP,\n socket.TCP_KEEPIDLE,\n idle)\n if maxfails >= 0:\n sock.setsockopt(socket.IPPROTO_TCP,\n socket.TCP_KEEPCNT,\n maxfails)\n except AttributeError:\n # mostly linux-only\n pass\n\n\ndef connect(sock, addr, keepalive=None):\n \"\"\"Return the client socket for the next connection.\"\"\"\n if addr is None:\n if keepalive is None or keepalive is True:\n keepalive = KeepAlive()\n elif keepalive:\n keepalive = KeepAlive.from_raw(keepalive)\n client, _ = sock.accept()\n if keepalive:\n keepalive.apply(client)\n return client\n else:\n if keepalive:\n raise NotImplementedError\n sock.connect(addr)\n return sock\n\n\ndef shut_down(sock, how=socket.SHUT_RDWR, ignored=NOT_CONNECTED):\n \"\"\"Shut down the given socket.\"\"\"\n with ignored_errno(*ignored or ()):\n sock.shutdown(how)\n\n\ndef close_socket(sock):\n \"\"\"Shutdown and close the socket.\"\"\"\n try:\n shut_down(sock)\n except Exception:\n # TODO: Log errors?\n pass\n sock.close()\n\n\nclass Address(namedtuple('Address', 'host port')):\n \"\"\"An IP address to use for sockets.\"\"\"\n\n @classmethod\n def from_raw(cls, raw, defaultport=None):\n \"\"\"Return an address corresponding to the given data.\"\"\"\n if isinstance(raw, cls):\n return raw\n elif isinstance(raw, int):\n return cls(None, raw)\n elif isinstance(raw, str):\n if raw == '':\n return cls('', defaultport)\n parsed = urlparse(raw)\n if not parsed.netloc:\n if parsed.scheme:\n raise ValueError('invalid address {!r}'.format(raw))\n return cls.from_raw('x://' + raw, defaultport=defaultport)\n return cls(\n parsed.hostname or '',\n parsed.port if parsed.port else defaultport,\n )\n elif not raw:\n return cls(None, defaultport)\n else:\n try:\n kwargs = dict(**raw)\n except TypeError:\n return cls(*raw)\n else:\n kwargs.setdefault('host', None)\n kwargs.setdefault('port', defaultport)\n return cls(**kwargs)\n\n @classmethod\n def as_server(cls, host, port):\n \"\"\"Return an address to use as a server address.\"\"\"\n return cls(host, port, isserver=True)\n\n @classmethod\n def as_client(cls, host, port):\n \"\"\"Return an address to use as a server address.\"\"\"\n return cls(host, port, isserver=False)\n\n def __new__(cls, host, port, **kwargs):\n if host == '*':\n host = ''\n isserver = kwargs.pop('isserver', None)\n if isserver is None:\n isserver = (host is None or host == '')\n else:\n isserver = bool(isserver)\n if host is None:\n host = 'localhost'\n self = super(Address, cls).__new__(\n 
cls,\n str(host),\n int(port) if port is not None else None,\n **kwargs\n )\n self._isserver = isserver\n return self\n\n def __init__(self, *args, **kwargs):\n if self.port is None:\n raise TypeError('missing port')\n if self.port <= 0 or self.port > 65535:\n raise ValueError('port must be positive int < 65535')\n\n def __repr__(self):\n orig = super(Address, self).__repr__()\n return '{}, isserver={})'.format(orig[:-1], self._isserver)\n\n def __eq__(self, other):\n if not super(Address, self).__eq__(other):\n return False\n try:\n other = self.from_raw(other)\n except Exception:\n return False\n return self._isserver == other._isserver\n\n @property\n def isserver(self):\n return self._isserver\n", "path": "ptvsd/socket.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See LICENSE in the project root\n# for license information.\n\nfrom __future__ import absolute_import\n\nfrom collections import namedtuple\nimport contextlib\nimport errno\nimport socket\ntry:\n from urllib.parse import urlparse\nexcept ImportError:\n from urlparse import urlparse\n\n\ntry:\n ConnectionError # noqa\n BrokenPipeError # noqa\n ConnectionResetError # noqa\nexcept NameError:\n class BrokenPipeError(Exception):\n # EPIPE and ESHUTDOWN\n pass\n\n class ConnectionResetError(Exception):\n # ECONNRESET\n pass\n\n\nNOT_CONNECTED = (\n errno.ENOTCONN,\n errno.EBADF,\n)\n\nCLOSED = (\n errno.EPIPE,\n errno.ESHUTDOWN,\n errno.ECONNRESET,\n # Windows\n 10038, # \"An operation was attempted on something that is not a socket\"\n 10058,\n)\n\nEOF = NOT_CONNECTED + CLOSED\n\n\[email protected]\ndef convert_eof():\n \"\"\"A context manager to convert some socket errors into EOFError.\"\"\"\n try:\n yield\n except ConnectionResetError:\n raise EOFError\n except BrokenPipeError:\n raise EOFError\n except OSError as exc:\n if exc.errno in EOF:\n raise EOFError\n raise\n\n\nclass TimeoutError(socket.timeout):\n \"\"\"A socket timeout happened.\"\"\"\n\n\ndef is_socket(sock):\n \"\"\"Return True if the object can be used as a socket.\"\"\"\n return isinstance(sock, socket.socket)\n\n\ndef create_server(host, port):\n \"\"\"Return a local server socket listening on the given port.\"\"\"\n if host is None:\n host = 'localhost'\n server = _new_sock()\n server.bind((host, port))\n server.listen(1)\n return server\n\n\ndef create_client():\n \"\"\"Return a client socket that may be connected to a remote address.\"\"\"\n return _new_sock()\n\n\ndef _new_sock():\n sock = socket.socket(socket.AF_INET,\n socket.SOCK_STREAM,\n socket.IPPROTO_TCP)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n return sock\n\n\[email protected]\ndef ignored_errno(*ignored):\n \"\"\"A context manager that ignores the given errnos.\"\"\"\n try:\n yield\n except OSError as exc:\n if exc.errno not in ignored:\n raise\n\n\nclass KeepAlive(namedtuple('KeepAlive', 'interval idle maxfails')):\n \"\"\"TCP keep-alive settings.\"\"\"\n\n INTERVAL = 3 # seconds\n IDLE = 1 # seconds after idle\n MAX_FAILS = 5\n\n @classmethod\n def from_raw(cls, raw):\n \"\"\"Return the corresponding KeepAlive.\"\"\"\n if raw is None:\n return None\n elif isinstance(raw, cls):\n return raw\n elif isinstance(raw, (str, int, float)):\n return cls(raw)\n else:\n try:\n raw = dict(raw)\n except TypeError:\n return cls(*raw)\n else:\n return cls(**raw)\n\n def __new__(cls, interval=None, idle=None, maxfails=None):\n self = super(KeepAlive, cls).__new__(\n cls,\n float(interval) if interval or 
interval == 0 else cls.INTERVAL,\n float(idle) if idle or idle == 0 else cls.IDLE,\n float(maxfails) if maxfails or maxfails == 0 else cls.MAX_FAILS,\n )\n return self\n\n def apply(self, sock):\n \"\"\"Set the keepalive values on the socket.\"\"\"\n sock.setsockopt(socket.SOL_SOCKET,\n socket.SO_KEEPALIVE,\n 1)\n interval = self.interval\n idle = self.idle\n maxfails = self.maxfails\n try:\n if interval > 0:\n sock.setsockopt(socket.IPPROTO_TCP,\n socket.TCP_KEEPINTVL,\n interval)\n if idle > 0:\n sock.setsockopt(socket.IPPROTO_TCP,\n socket.TCP_KEEPIDLE,\n idle)\n if maxfails >= 0:\n sock.setsockopt(socket.IPPROTO_TCP,\n socket.TCP_KEEPCNT,\n maxfails)\n except AttributeError:\n # mostly linux-only\n pass\n\n\ndef connect(sock, addr, keepalive=None):\n \"\"\"Return the client socket for the next connection.\"\"\"\n if addr is None:\n if keepalive is None or keepalive is True:\n keepalive = KeepAlive()\n elif keepalive:\n keepalive = KeepAlive.from_raw(keepalive)\n client, _ = sock.accept()\n if keepalive:\n keepalive.apply(client)\n return client\n else:\n if keepalive:\n raise NotImplementedError\n sock.connect(addr)\n return sock\n\n\ndef shut_down(sock, how=socket.SHUT_RDWR, ignored=NOT_CONNECTED):\n \"\"\"Shut down the given socket.\"\"\"\n with ignored_errno(*ignored or ()):\n sock.shutdown(how)\n\n\ndef close_socket(sock):\n \"\"\"Shutdown and close the socket.\"\"\"\n try:\n shut_down(sock)\n except Exception:\n # TODO: Log errors?\n pass\n sock.close()\n\n\nclass Address(namedtuple('Address', 'host port')):\n \"\"\"An IP address to use for sockets.\"\"\"\n\n @classmethod\n def from_raw(cls, raw, defaultport=None):\n \"\"\"Return an address corresponding to the given data.\"\"\"\n if isinstance(raw, cls):\n return raw\n elif isinstance(raw, int):\n return cls(None, raw)\n elif isinstance(raw, str):\n if raw == '':\n return cls('', defaultport)\n parsed = urlparse(raw)\n if not parsed.netloc:\n if parsed.scheme:\n raise ValueError('invalid address {!r}'.format(raw))\n return cls.from_raw('x://' + raw, defaultport=defaultport)\n return cls(\n parsed.hostname or '',\n parsed.port if parsed.port else defaultport,\n )\n elif not raw:\n return cls(None, defaultport)\n else:\n try:\n kwargs = dict(**raw)\n except TypeError:\n return cls(*raw)\n else:\n kwargs.setdefault('host', None)\n kwargs.setdefault('port', defaultport)\n return cls(**kwargs)\n\n @classmethod\n def as_server(cls, host, port):\n \"\"\"Return an address to use as a server address.\"\"\"\n return cls(host, port, isserver=True)\n\n @classmethod\n def as_client(cls, host, port):\n \"\"\"Return an address to use as a server address.\"\"\"\n return cls(host, port, isserver=False)\n\n def __new__(cls, host, port, **kwargs):\n if host == '*':\n host = ''\n isserver = kwargs.pop('isserver', None)\n if isserver is None:\n isserver = (host is None or host == '')\n else:\n isserver = bool(isserver)\n if host is None:\n host = 'localhost'\n self = super(Address, cls).__new__(\n cls,\n str(host),\n int(port) if port is not None else None,\n **kwargs\n )\n self._isserver = isserver\n return self\n\n def __init__(self, *args, **kwargs):\n if self.port is None:\n raise TypeError('missing port')\n if self.port <= 0 or self.port > 65535:\n raise ValueError('port must be positive int < 65535')\n\n def __repr__(self):\n orig = super(Address, self).__repr__()\n return '{}, isserver={})'.format(orig[:-1], self._isserver)\n\n def __eq__(self, other):\n if not super(Address, self).__eq__(other):\n return False\n try:\n other = 
self.from_raw(other)\n except Exception:\n return False\n return self._isserver == other._isserver\n\n @property\n def isserver(self):\n return self._isserver\n", "path": "ptvsd/socket.py"}]}
num_tokens: 3,006
num_tokens_diff: 87
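For context on the `listen(0)` fix captured in the record above, the following is a minimal illustrative sketch, not part of the dataset record itself. It assumes only the standard library `socket` module, and the host and port values are placeholders. The backlog of 1 matters because, as the issue text notes, a backlog of 0 is interpreted differently across platforms and on Linux can leave the incoming client unaccepted.

```python
import socket

def create_server(host="localhost", port=9876):
    # Mirrors the shape of create_server() in the patched ptvsd/socket.py.
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)
    server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server.bind((host, port))
    server.listen(1)  # backlog >= 1 so a queued connection can be accepted on Linux
    return server

server = create_server()
client, _ = server.accept()  # blocks until a client such as `telnet localhost 9876` connects
```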
problem_id: gh_patches_debug_20286
source: rasdani/github-patches
task_type: git_diff
in_source_id: mlflow__mlflow-10751
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Refactor for `subprocess.run` ### Summary ```diff diff --git a/mlflow/models/wheeled_model.py b/mlflow/models/wheeled_model.py index 0830208fe8..f64bf4194c 100644 --- a/mlflow/models/wheeled_model.py +++ b/mlflow/models/wheeled_model.py @@ -211,14 +211,23 @@ class WheeledModel: os.makedirs(dst_path) pip_wheel_options = MLFLOW_WHEELED_MODEL_PIP_DOWNLOAD_OPTIONS.get() - - download_command = ( - f"{sys.executable} -m pip wheel {pip_wheel_options} --wheel-dir={dst_path} -r" - f"{pip_requirements_path} --no-cache-dir" - ) - try: - subprocess.run([download_command], check=True, shell=True, capture_output=True) + subprocess.run( + [ + sys.executable, + "-m", + "pip", + "wheel", + pip_wheel_options, + "--wheel-dir", + dst_path, + "-r", + pip_requirements_path, + "--no-cache-dir", + ], + check=True, + capture_output=True, + ) except subprocess.CalledProcessError as e: raise MlflowException( f"An error occurred while downloading the dependency wheels: {e.stderr}" ``` ### Notes - Make sure to open a PR from a **non-master** branch. - Sign off the commit using the `-s` flag when making a commit: ```sh git commit -s -m "..." # ^^ make sure to use this ``` - Include `#{issue_number}` (e.g. `#123`) in the PR description when opening a PR. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mlflow/models/wheeled_model.py` Content: ``` 1 import os 2 import platform 3 import shutil 4 import subprocess 5 import sys 6 7 import yaml 8 9 import mlflow 10 from mlflow import MlflowClient 11 from mlflow.environment_variables import MLFLOW_WHEELED_MODEL_PIP_DOWNLOAD_OPTIONS 12 from mlflow.exceptions import MlflowException 13 from mlflow.protos.databricks_pb2 import BAD_REQUEST 14 from mlflow.pyfunc.model import MLMODEL_FILE_NAME, Model 15 from mlflow.store.artifact.utils.models import _parse_model_uri, get_model_name_and_version 16 from mlflow.tracking.artifact_utils import _download_artifact_from_uri 17 from mlflow.utils.annotations import experimental 18 from mlflow.utils.environment import ( 19 _REQUIREMENTS_FILE_NAME, 20 _get_pip_deps, 21 _mlflow_additional_pip_env, 22 _overwrite_pip_deps, 23 ) 24 from mlflow.utils.model_utils import _validate_and_prepare_target_save_path 25 from mlflow.utils.uri import get_databricks_profile_uri_from_artifact_uri 26 27 _WHEELS_FOLDER_NAME = "wheels" 28 _ORIGINAL_REQ_FILE_NAME = "original_requirements.txt" 29 _PLATFORM = "platform" 30 31 32 @experimental 33 class WheeledModel: 34 """ 35 Helper class to create a model with added dependency wheels from an existing registered model. 36 The `wheeled` model contains all the model dependencies as wheels stored as model artifacts. 37 .. note:: 38 This utility only operates on a model that has been registered to the Model Registry. 39 """ 40 41 def __init__(self, model_uri): 42 self._model_uri = model_uri 43 databricks_profile_uri = ( 44 get_databricks_profile_uri_from_artifact_uri(model_uri) or mlflow.get_registry_uri() 45 ) 46 client = MlflowClient(registry_uri=databricks_profile_uri) 47 self._model_name, _ = get_model_name_and_version(client, model_uri) 48 49 @classmethod 50 def log_model(cls, model_uri, registered_model_name=None): 51 """ 52 Logs a registered model as an MLflow artifact for the current run. This only operates on 53 a model which has been registered to the Model Registry. Given a registered model_uri ( 54 e.g. 
models:/<model_name>/<model_version>), this utility re-logs the model along with all 55 the required model libraries back to the Model Registry. The required model libraries are 56 stored along with the model as model artifacts. In addition, supporting files to the 57 model (e.g. conda.yaml, requirements.txt) are modified to use the added libraries. 58 59 By default, this utility creates a new model version under the same registered model 60 specified by ``model_uri``. This behavior can be overridden by specifying the 61 ``registered_model_name`` argument. 62 63 :param model_uri: A registered model uri in the Model Registry of the form 64 models:/<model_name>/<model_version/stage/latest> 65 :param registered_model_name: The new model version (model with its libraries) is 66 registered under the inputted registered_model_name. If None, 67 a new version is logged to the existing model in the Model 68 Registry. 69 70 .. code-block:: python 71 :caption: Example 72 73 # Given a model uri, log the wheeled model 74 with mlflow.start_run(): 75 WheeledModel.log_model(model_uri) 76 """ 77 parsed_uri = _parse_model_uri(model_uri) 78 return Model.log( 79 artifact_path=None, 80 flavor=WheeledModel(model_uri), 81 registered_model_name=registered_model_name or parsed_uri.name, 82 ) 83 84 def save_model(self, path, mlflow_model=None): 85 """ 86 Given an existing registered model, saves the model along with it's dependencies stored as 87 wheels to a path on the local file system. 88 89 This does not modify existing model behavior or existing model flavors. It simply downloads 90 the model dependencies as wheels and modifies the requirements.txt and conda.yaml file to 91 point to the downloaded wheels. 92 93 The download_command defaults to downloading only binary packages using the 94 `--only-binary=:all:` option. This behavior can be overridden using an environment 95 variable `MLFLOW_WHEELED_MODEL_PIP_DOWNLOAD_OPTIONS`, which will allows setting 96 different options such as `--prefer-binary`, `--no-binary`, etc. 97 :param path: Local path where the model is to be saved. 98 :param mlflow_model: The new :py:mod:`mlflow.models.Model` metadata file to store the 99 updated model metadata. 
100 """ 101 from mlflow.pyfunc import ENV, FLAVOR_NAME, _extract_conda_env 102 103 path = os.path.abspath(path) 104 _validate_and_prepare_target_save_path(path) 105 106 local_model_path = _download_artifact_from_uri(self._model_uri, output_path=path) 107 108 wheels_dir = os.path.join(local_model_path, _WHEELS_FOLDER_NAME) 109 pip_requirements_path = os.path.join(local_model_path, _REQUIREMENTS_FILE_NAME) 110 model_metadata_path = os.path.join(local_model_path, MLMODEL_FILE_NAME) 111 112 model_metadata = Model.load(model_metadata_path) 113 114 # Check if the model file has `wheels` set to True 115 if model_metadata.__dict__.get(_WHEELS_FOLDER_NAME, None) is not None: 116 raise MlflowException("Model libraries are already added", BAD_REQUEST) 117 118 conda_env = _extract_conda_env(model_metadata.flavors.get(FLAVOR_NAME, {}).get(ENV, None)) 119 conda_env_path = os.path.join(local_model_path, conda_env) 120 if conda_env is None and not os.path.isfile(pip_requirements_path): 121 raise MlflowException( 122 "Cannot add libraries for model with no logged dependencies.", BAD_REQUEST 123 ) 124 125 if not os.path.isfile(pip_requirements_path): 126 self._create_pip_requirement(conda_env_path, pip_requirements_path) 127 128 WheeledModel._download_wheels( 129 pip_requirements_path=pip_requirements_path, dst_path=wheels_dir 130 ) 131 132 # Keep a copy of the original requirement.txt 133 shutil.copy2(pip_requirements_path, os.path.join(local_model_path, _ORIGINAL_REQ_FILE_NAME)) 134 135 # Update requirements.txt with wheels 136 pip_deps = self._overwrite_pip_requirements_with_wheels( 137 pip_requirements_path=pip_requirements_path, wheels_dir=wheels_dir 138 ) 139 140 # Update conda.yaml with wheels 141 self._update_conda_env(pip_deps, conda_env_path) 142 143 # Update MLModel File 144 mlflow_model = self._update_mlflow_model( 145 original_model_metadata=model_metadata, mlflow_model=mlflow_model 146 ) 147 mlflow_model.save(model_metadata_path) 148 return mlflow_model 149 150 def _update_conda_env(self, new_pip_deps, conda_env_path): 151 """ 152 Updates the list pip packages in the conda.yaml file to the list of wheels in the wheels 153 directory. 154 { 155 "name": "env", 156 "channels": [...], 157 "dependencies": [ 158 ..., 159 "pip", 160 {"pip": [...]}, <- Overwrite this with list of wheels 161 ], 162 } 163 :param new_pip_deps: List of pip dependencies as wheels 164 :param conda_env_path: Path to conda.yaml file in the model directory 165 """ 166 with open(conda_env_path) as f: 167 conda_env = yaml.safe_load(f) 168 169 new_conda_env = _overwrite_pip_deps(conda_env, new_pip_deps) 170 171 with open(conda_env_path, "w") as out: 172 yaml.safe_dump(new_conda_env, stream=out, default_flow_style=False) 173 174 def _update_mlflow_model(self, original_model_metadata, mlflow_model): 175 """ 176 Modifies the MLModel file to reflect updated information such as the run_id, 177 utc_time_created. Additionally, this also adds `wheels` to the MLModel file to indicate that 178 this is a `wheeled` model. 179 :param original_model_file_path: The model metadata stored in the original MLmodel file. 
180 :param mlflow_model: :py:mod:`mlflow.models.Model` configuration of the newly created 181 wheeled model 182 """ 183 184 run_id = mlflow.tracking.fluent._get_or_start_run().info.run_id 185 if mlflow_model is None: 186 mlflow_model = Model(run_id=run_id) 187 188 original_model_metadata.__dict__.update( 189 {k: v for k, v in mlflow_model.__dict__.items() if v} 190 ) 191 mlflow_model.__dict__.update(original_model_metadata.__dict__) 192 mlflow_model.artifact_path = WheeledModel.get_wheel_artifact_path( 193 mlflow_model.artifact_path 194 ) 195 196 mlflow_model.wheels = {_PLATFORM: platform.platform()} 197 return mlflow_model 198 199 @classmethod 200 def _download_wheels(cls, pip_requirements_path, dst_path): 201 """ 202 Downloads all the wheels of the dependencies specified in the requirements.txt file. 203 The pip wheel download_command defaults to downloading only binary packages using 204 the `--only-binary=:all:` option. This behavior can be overridden using an 205 environment variable `MLFLOW_WHEELED_MODEL_PIP_DOWNLOAD_OPTIONS`, which will allows 206 setting different options such as `--prefer-binary`, `--no-binary`, etc. 207 :param pip_requirements_path: Path to requirements.txt in the model directory 208 :param dst_path: Path to the directory where the wheels are to be downloaded 209 """ 210 if not os.path.exists(dst_path): 211 os.makedirs(dst_path) 212 213 pip_wheel_options = MLFLOW_WHEELED_MODEL_PIP_DOWNLOAD_OPTIONS.get() 214 215 download_command = ( 216 f"{sys.executable} -m pip wheel {pip_wheel_options} --wheel-dir={dst_path} -r" 217 f"{pip_requirements_path} --no-cache-dir" 218 ) 219 220 try: 221 subprocess.run([download_command], check=True, shell=True, capture_output=True) 222 except subprocess.CalledProcessError as e: 223 raise MlflowException( 224 f"An error occurred while downloading the dependency wheels: {e.stderr}" 225 ) 226 227 def _overwrite_pip_requirements_with_wheels(self, pip_requirements_path, wheels_dir): 228 """ 229 Overwrites the requirements.txt with the wheels of the required dependencies. 230 :param pip_requirements_path: Path to requirements.txt in the model directory 231 :param wheels_dir: Path to directory where wheels are stored 232 """ 233 wheels = [] 234 with open(pip_requirements_path, "w") as wheels_requirements: 235 for wheel_file in os.listdir(wheels_dir): 236 if wheel_file.endswith(".whl"): 237 complete_wheel_file = os.path.join(_WHEELS_FOLDER_NAME, wheel_file) 238 wheels.append(complete_wheel_file) 239 wheels_requirements.write(complete_wheel_file + "\n") 240 return wheels 241 242 def _create_pip_requirement(self, conda_env_path, pip_requirements_path): 243 """ 244 This method creates a requirements.txt file for the model dependencies if the file does not 245 already exist. If uses the pip dependencies found in the conda.yaml env file. 246 :param conda_env_path: Path to conda.yaml env file which contains the required pip 247 dependencies 248 :param pip_requirements_path: Path where the new requirements.txt will be created. 249 """ 250 with open(conda_env_path) as f: 251 conda_env = yaml.safe_load(f) 252 pip_deps = _get_pip_deps(conda_env) 253 _mlflow_additional_pip_env(pip_deps, pip_requirements_path) 254 255 @classmethod 256 def get_wheel_artifact_path(cls, original_artifact_path): 257 return original_artifact_path + "_" + _WHEELS_FOLDER_NAME 258 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mlflow/models/wheeled_model.py b/mlflow/models/wheeled_model.py --- a/mlflow/models/wheeled_model.py +++ b/mlflow/models/wheeled_model.py @@ -212,13 +212,23 @@ pip_wheel_options = MLFLOW_WHEELED_MODEL_PIP_DOWNLOAD_OPTIONS.get() - download_command = ( - f"{sys.executable} -m pip wheel {pip_wheel_options} --wheel-dir={dst_path} -r" - f"{pip_requirements_path} --no-cache-dir" - ) - try: - subprocess.run([download_command], check=True, shell=True, capture_output=True) + subprocess.run( + [ + sys.executable, + "-m", + "pip", + "wheel", + pip_wheel_options, + "--wheel-dir", + dst_path, + "-r", + pip_requirements_path, + "--no-cache-dir", + ], + check=True, + capture_output=True, + ) except subprocess.CalledProcessError as e: raise MlflowException( f"An error occurred while downloading the dependency wheels: {e.stderr}"
{"golden_diff": "diff --git a/mlflow/models/wheeled_model.py b/mlflow/models/wheeled_model.py\n--- a/mlflow/models/wheeled_model.py\n+++ b/mlflow/models/wheeled_model.py\n@@ -212,13 +212,23 @@\n \n pip_wheel_options = MLFLOW_WHEELED_MODEL_PIP_DOWNLOAD_OPTIONS.get()\n \n- download_command = (\n- f\"{sys.executable} -m pip wheel {pip_wheel_options} --wheel-dir={dst_path} -r\"\n- f\"{pip_requirements_path} --no-cache-dir\"\n- )\n-\n try:\n- subprocess.run([download_command], check=True, shell=True, capture_output=True)\n+ subprocess.run(\n+ [\n+ sys.executable,\n+ \"-m\",\n+ \"pip\",\n+ \"wheel\",\n+ pip_wheel_options,\n+ \"--wheel-dir\",\n+ dst_path,\n+ \"-r\",\n+ pip_requirements_path,\n+ \"--no-cache-dir\",\n+ ],\n+ check=True,\n+ capture_output=True,\n+ )\n except subprocess.CalledProcessError as e:\n raise MlflowException(\n f\"An error occurred while downloading the dependency wheels: {e.stderr}\"\n", "issue": "Refactor for `subprocess.run`\n### Summary\n\n```diff\r\ndiff --git a/mlflow/models/wheeled_model.py b/mlflow/models/wheeled_model.py\r\nindex 0830208fe8..f64bf4194c 100644\r\n--- a/mlflow/models/wheeled_model.py\r\n+++ b/mlflow/models/wheeled_model.py\r\n@@ -211,14 +211,23 @@ class WheeledModel:\r\n os.makedirs(dst_path)\r\n \r\n pip_wheel_options = MLFLOW_WHEELED_MODEL_PIP_DOWNLOAD_OPTIONS.get()\r\n-\r\n- download_command = (\r\n- f\"{sys.executable} -m pip wheel {pip_wheel_options} --wheel-dir={dst_path} -r\"\r\n- f\"{pip_requirements_path} --no-cache-dir\"\r\n- )\r\n-\r\n try:\r\n- subprocess.run([download_command], check=True, shell=True, capture_output=True)\r\n+ subprocess.run(\r\n+ [\r\n+ sys.executable,\r\n+ \"-m\",\r\n+ \"pip\",\r\n+ \"wheel\",\r\n+ pip_wheel_options,\r\n+ \"--wheel-dir\",\r\n+ dst_path,\r\n+ \"-r\",\r\n+ pip_requirements_path,\r\n+ \"--no-cache-dir\",\r\n+ ],\r\n+ check=True,\r\n+ capture_output=True,\r\n+ )\r\n except subprocess.CalledProcessError as e:\r\n raise MlflowException(\r\n f\"An error occurred while downloading the dependency wheels: {e.stderr}\"\r\n```\n\n### Notes\n\n- Make sure to open a PR from a **non-master** branch.\r\n- Sign off the commit using the `-s` flag when making a commit:\r\n\r\n ```sh\r\n git commit -s -m \"...\"\r\n # ^^ make sure to use this\r\n ```\r\n\r\n- Include `#{issue_number}` (e.g. 
`#123`) in the PR description when opening a PR.\r\n\n", "before_files": [{"content": "import os\nimport platform\nimport shutil\nimport subprocess\nimport sys\n\nimport yaml\n\nimport mlflow\nfrom mlflow import MlflowClient\nfrom mlflow.environment_variables import MLFLOW_WHEELED_MODEL_PIP_DOWNLOAD_OPTIONS\nfrom mlflow.exceptions import MlflowException\nfrom mlflow.protos.databricks_pb2 import BAD_REQUEST\nfrom mlflow.pyfunc.model import MLMODEL_FILE_NAME, Model\nfrom mlflow.store.artifact.utils.models import _parse_model_uri, get_model_name_and_version\nfrom mlflow.tracking.artifact_utils import _download_artifact_from_uri\nfrom mlflow.utils.annotations import experimental\nfrom mlflow.utils.environment import (\n _REQUIREMENTS_FILE_NAME,\n _get_pip_deps,\n _mlflow_additional_pip_env,\n _overwrite_pip_deps,\n)\nfrom mlflow.utils.model_utils import _validate_and_prepare_target_save_path\nfrom mlflow.utils.uri import get_databricks_profile_uri_from_artifact_uri\n\n_WHEELS_FOLDER_NAME = \"wheels\"\n_ORIGINAL_REQ_FILE_NAME = \"original_requirements.txt\"\n_PLATFORM = \"platform\"\n\n\n@experimental\nclass WheeledModel:\n \"\"\"\n Helper class to create a model with added dependency wheels from an existing registered model.\n The `wheeled` model contains all the model dependencies as wheels stored as model artifacts.\n .. note::\n This utility only operates on a model that has been registered to the Model Registry.\n \"\"\"\n\n def __init__(self, model_uri):\n self._model_uri = model_uri\n databricks_profile_uri = (\n get_databricks_profile_uri_from_artifact_uri(model_uri) or mlflow.get_registry_uri()\n )\n client = MlflowClient(registry_uri=databricks_profile_uri)\n self._model_name, _ = get_model_name_and_version(client, model_uri)\n\n @classmethod\n def log_model(cls, model_uri, registered_model_name=None):\n \"\"\"\n Logs a registered model as an MLflow artifact for the current run. This only operates on\n a model which has been registered to the Model Registry. Given a registered model_uri (\n e.g. models:/<model_name>/<model_version>), this utility re-logs the model along with all\n the required model libraries back to the Model Registry. The required model libraries are\n stored along with the model as model artifacts. In addition, supporting files to the\n model (e.g. conda.yaml, requirements.txt) are modified to use the added libraries.\n\n By default, this utility creates a new model version under the same registered model\n specified by ``model_uri``. This behavior can be overridden by specifying the\n ``registered_model_name`` argument.\n\n :param model_uri: A registered model uri in the Model Registry of the form\n models:/<model_name>/<model_version/stage/latest>\n :param registered_model_name: The new model version (model with its libraries) is\n registered under the inputted registered_model_name. If None,\n a new version is logged to the existing model in the Model\n Registry.\n\n .. 
code-block:: python\n :caption: Example\n\n # Given a model uri, log the wheeled model\n with mlflow.start_run():\n WheeledModel.log_model(model_uri)\n \"\"\"\n parsed_uri = _parse_model_uri(model_uri)\n return Model.log(\n artifact_path=None,\n flavor=WheeledModel(model_uri),\n registered_model_name=registered_model_name or parsed_uri.name,\n )\n\n def save_model(self, path, mlflow_model=None):\n \"\"\"\n Given an existing registered model, saves the model along with it's dependencies stored as\n wheels to a path on the local file system.\n\n This does not modify existing model behavior or existing model flavors. It simply downloads\n the model dependencies as wheels and modifies the requirements.txt and conda.yaml file to\n point to the downloaded wheels.\n\n The download_command defaults to downloading only binary packages using the\n `--only-binary=:all:` option. This behavior can be overridden using an environment\n variable `MLFLOW_WHEELED_MODEL_PIP_DOWNLOAD_OPTIONS`, which will allows setting\n different options such as `--prefer-binary`, `--no-binary`, etc.\n :param path: Local path where the model is to be saved.\n :param mlflow_model: The new :py:mod:`mlflow.models.Model` metadata file to store the\n updated model metadata.\n \"\"\"\n from mlflow.pyfunc import ENV, FLAVOR_NAME, _extract_conda_env\n\n path = os.path.abspath(path)\n _validate_and_prepare_target_save_path(path)\n\n local_model_path = _download_artifact_from_uri(self._model_uri, output_path=path)\n\n wheels_dir = os.path.join(local_model_path, _WHEELS_FOLDER_NAME)\n pip_requirements_path = os.path.join(local_model_path, _REQUIREMENTS_FILE_NAME)\n model_metadata_path = os.path.join(local_model_path, MLMODEL_FILE_NAME)\n\n model_metadata = Model.load(model_metadata_path)\n\n # Check if the model file has `wheels` set to True\n if model_metadata.__dict__.get(_WHEELS_FOLDER_NAME, None) is not None:\n raise MlflowException(\"Model libraries are already added\", BAD_REQUEST)\n\n conda_env = _extract_conda_env(model_metadata.flavors.get(FLAVOR_NAME, {}).get(ENV, None))\n conda_env_path = os.path.join(local_model_path, conda_env)\n if conda_env is None and not os.path.isfile(pip_requirements_path):\n raise MlflowException(\n \"Cannot add libraries for model with no logged dependencies.\", BAD_REQUEST\n )\n\n if not os.path.isfile(pip_requirements_path):\n self._create_pip_requirement(conda_env_path, pip_requirements_path)\n\n WheeledModel._download_wheels(\n pip_requirements_path=pip_requirements_path, dst_path=wheels_dir\n )\n\n # Keep a copy of the original requirement.txt\n shutil.copy2(pip_requirements_path, os.path.join(local_model_path, _ORIGINAL_REQ_FILE_NAME))\n\n # Update requirements.txt with wheels\n pip_deps = self._overwrite_pip_requirements_with_wheels(\n pip_requirements_path=pip_requirements_path, wheels_dir=wheels_dir\n )\n\n # Update conda.yaml with wheels\n self._update_conda_env(pip_deps, conda_env_path)\n\n # Update MLModel File\n mlflow_model = self._update_mlflow_model(\n original_model_metadata=model_metadata, mlflow_model=mlflow_model\n )\n mlflow_model.save(model_metadata_path)\n return mlflow_model\n\n def _update_conda_env(self, new_pip_deps, conda_env_path):\n \"\"\"\n Updates the list pip packages in the conda.yaml file to the list of wheels in the wheels\n directory.\n {\n \"name\": \"env\",\n \"channels\": [...],\n \"dependencies\": [\n ...,\n \"pip\",\n {\"pip\": [...]}, <- Overwrite this with list of wheels\n ],\n }\n :param new_pip_deps: List of pip dependencies as wheels\n :param 
conda_env_path: Path to conda.yaml file in the model directory\n \"\"\"\n with open(conda_env_path) as f:\n conda_env = yaml.safe_load(f)\n\n new_conda_env = _overwrite_pip_deps(conda_env, new_pip_deps)\n\n with open(conda_env_path, \"w\") as out:\n yaml.safe_dump(new_conda_env, stream=out, default_flow_style=False)\n\n def _update_mlflow_model(self, original_model_metadata, mlflow_model):\n \"\"\"\n Modifies the MLModel file to reflect updated information such as the run_id,\n utc_time_created. Additionally, this also adds `wheels` to the MLModel file to indicate that\n this is a `wheeled` model.\n :param original_model_file_path: The model metadata stored in the original MLmodel file.\n :param mlflow_model: :py:mod:`mlflow.models.Model` configuration of the newly created\n wheeled model\n \"\"\"\n\n run_id = mlflow.tracking.fluent._get_or_start_run().info.run_id\n if mlflow_model is None:\n mlflow_model = Model(run_id=run_id)\n\n original_model_metadata.__dict__.update(\n {k: v for k, v in mlflow_model.__dict__.items() if v}\n )\n mlflow_model.__dict__.update(original_model_metadata.__dict__)\n mlflow_model.artifact_path = WheeledModel.get_wheel_artifact_path(\n mlflow_model.artifact_path\n )\n\n mlflow_model.wheels = {_PLATFORM: platform.platform()}\n return mlflow_model\n\n @classmethod\n def _download_wheels(cls, pip_requirements_path, dst_path):\n \"\"\"\n Downloads all the wheels of the dependencies specified in the requirements.txt file.\n The pip wheel download_command defaults to downloading only binary packages using\n the `--only-binary=:all:` option. This behavior can be overridden using an\n environment variable `MLFLOW_WHEELED_MODEL_PIP_DOWNLOAD_OPTIONS`, which will allows\n setting different options such as `--prefer-binary`, `--no-binary`, etc.\n :param pip_requirements_path: Path to requirements.txt in the model directory\n :param dst_path: Path to the directory where the wheels are to be downloaded\n \"\"\"\n if not os.path.exists(dst_path):\n os.makedirs(dst_path)\n\n pip_wheel_options = MLFLOW_WHEELED_MODEL_PIP_DOWNLOAD_OPTIONS.get()\n\n download_command = (\n f\"{sys.executable} -m pip wheel {pip_wheel_options} --wheel-dir={dst_path} -r\"\n f\"{pip_requirements_path} --no-cache-dir\"\n )\n\n try:\n subprocess.run([download_command], check=True, shell=True, capture_output=True)\n except subprocess.CalledProcessError as e:\n raise MlflowException(\n f\"An error occurred while downloading the dependency wheels: {e.stderr}\"\n )\n\n def _overwrite_pip_requirements_with_wheels(self, pip_requirements_path, wheels_dir):\n \"\"\"\n Overwrites the requirements.txt with the wheels of the required dependencies.\n :param pip_requirements_path: Path to requirements.txt in the model directory\n :param wheels_dir: Path to directory where wheels are stored\n \"\"\"\n wheels = []\n with open(pip_requirements_path, \"w\") as wheels_requirements:\n for wheel_file in os.listdir(wheels_dir):\n if wheel_file.endswith(\".whl\"):\n complete_wheel_file = os.path.join(_WHEELS_FOLDER_NAME, wheel_file)\n wheels.append(complete_wheel_file)\n wheels_requirements.write(complete_wheel_file + \"\\n\")\n return wheels\n\n def _create_pip_requirement(self, conda_env_path, pip_requirements_path):\n \"\"\"\n This method creates a requirements.txt file for the model dependencies if the file does not\n already exist. 
If uses the pip dependencies found in the conda.yaml env file.\n :param conda_env_path: Path to conda.yaml env file which contains the required pip\n dependencies\n :param pip_requirements_path: Path where the new requirements.txt will be created.\n \"\"\"\n with open(conda_env_path) as f:\n conda_env = yaml.safe_load(f)\n pip_deps = _get_pip_deps(conda_env)\n _mlflow_additional_pip_env(pip_deps, pip_requirements_path)\n\n @classmethod\n def get_wheel_artifact_path(cls, original_artifact_path):\n return original_artifact_path + \"_\" + _WHEELS_FOLDER_NAME\n", "path": "mlflow/models/wheeled_model.py"}], "after_files": [{"content": "import os\nimport platform\nimport shutil\nimport subprocess\nimport sys\n\nimport yaml\n\nimport mlflow\nfrom mlflow import MlflowClient\nfrom mlflow.environment_variables import MLFLOW_WHEELED_MODEL_PIP_DOWNLOAD_OPTIONS\nfrom mlflow.exceptions import MlflowException\nfrom mlflow.protos.databricks_pb2 import BAD_REQUEST\nfrom mlflow.pyfunc.model import MLMODEL_FILE_NAME, Model\nfrom mlflow.store.artifact.utils.models import _parse_model_uri, get_model_name_and_version\nfrom mlflow.tracking.artifact_utils import _download_artifact_from_uri\nfrom mlflow.utils.annotations import experimental\nfrom mlflow.utils.environment import (\n _REQUIREMENTS_FILE_NAME,\n _get_pip_deps,\n _mlflow_additional_pip_env,\n _overwrite_pip_deps,\n)\nfrom mlflow.utils.model_utils import _validate_and_prepare_target_save_path\nfrom mlflow.utils.uri import get_databricks_profile_uri_from_artifact_uri\n\n_WHEELS_FOLDER_NAME = \"wheels\"\n_ORIGINAL_REQ_FILE_NAME = \"original_requirements.txt\"\n_PLATFORM = \"platform\"\n\n\n@experimental\nclass WheeledModel:\n \"\"\"\n Helper class to create a model with added dependency wheels from an existing registered model.\n The `wheeled` model contains all the model dependencies as wheels stored as model artifacts.\n .. note::\n This utility only operates on a model that has been registered to the Model Registry.\n \"\"\"\n\n def __init__(self, model_uri):\n self._model_uri = model_uri\n databricks_profile_uri = (\n get_databricks_profile_uri_from_artifact_uri(model_uri) or mlflow.get_registry_uri()\n )\n client = MlflowClient(registry_uri=databricks_profile_uri)\n self._model_name, _ = get_model_name_and_version(client, model_uri)\n\n @classmethod\n def log_model(cls, model_uri, registered_model_name=None):\n \"\"\"\n Logs a registered model as an MLflow artifact for the current run. This only operates on\n a model which has been registered to the Model Registry. Given a registered model_uri (\n e.g. models:/<model_name>/<model_version>), this utility re-logs the model along with all\n the required model libraries back to the Model Registry. The required model libraries are\n stored along with the model as model artifacts. In addition, supporting files to the\n model (e.g. conda.yaml, requirements.txt) are modified to use the added libraries.\n\n By default, this utility creates a new model version under the same registered model\n specified by ``model_uri``. This behavior can be overridden by specifying the\n ``registered_model_name`` argument.\n\n :param model_uri: A registered model uri in the Model Registry of the form\n models:/<model_name>/<model_version/stage/latest>\n :param registered_model_name: The new model version (model with its libraries) is\n registered under the inputted registered_model_name. If None,\n a new version is logged to the existing model in the Model\n Registry.\n\n .. 
code-block:: python\n :caption: Example\n\n # Given a model uri, log the wheeled model\n with mlflow.start_run():\n WheeledModel.log_model(model_uri)\n \"\"\"\n parsed_uri = _parse_model_uri(model_uri)\n return Model.log(\n artifact_path=None,\n flavor=WheeledModel(model_uri),\n registered_model_name=registered_model_name or parsed_uri.name,\n )\n\n def save_model(self, path, mlflow_model=None):\n \"\"\"\n Given an existing registered model, saves the model along with it's dependencies stored as\n wheels to a path on the local file system.\n\n This does not modify existing model behavior or existing model flavors. It simply downloads\n the model dependencies as wheels and modifies the requirements.txt and conda.yaml file to\n point to the downloaded wheels.\n\n The download_command defaults to downloading only binary packages using the\n `--only-binary=:all:` option. This behavior can be overridden using an environment\n variable `MLFLOW_WHEELED_MODEL_PIP_DOWNLOAD_OPTIONS`, which will allows setting\n different options such as `--prefer-binary`, `--no-binary`, etc.\n :param path: Local path where the model is to be saved.\n :param mlflow_model: The new :py:mod:`mlflow.models.Model` metadata file to store the\n updated model metadata.\n \"\"\"\n from mlflow.pyfunc import ENV, FLAVOR_NAME, _extract_conda_env\n\n path = os.path.abspath(path)\n _validate_and_prepare_target_save_path(path)\n\n local_model_path = _download_artifact_from_uri(self._model_uri, output_path=path)\n\n wheels_dir = os.path.join(local_model_path, _WHEELS_FOLDER_NAME)\n pip_requirements_path = os.path.join(local_model_path, _REQUIREMENTS_FILE_NAME)\n model_metadata_path = os.path.join(local_model_path, MLMODEL_FILE_NAME)\n\n model_metadata = Model.load(model_metadata_path)\n\n # Check if the model file has `wheels` set to True\n if model_metadata.__dict__.get(_WHEELS_FOLDER_NAME, None) is not None:\n raise MlflowException(\"Model libraries are already added\", BAD_REQUEST)\n\n conda_env = _extract_conda_env(model_metadata.flavors.get(FLAVOR_NAME, {}).get(ENV, None))\n conda_env_path = os.path.join(local_model_path, conda_env)\n if conda_env is None and not os.path.isfile(pip_requirements_path):\n raise MlflowException(\n \"Cannot add libraries for model with no logged dependencies.\", BAD_REQUEST\n )\n\n if not os.path.isfile(pip_requirements_path):\n self._create_pip_requirement(conda_env_path, pip_requirements_path)\n\n WheeledModel._download_wheels(\n pip_requirements_path=pip_requirements_path, dst_path=wheels_dir\n )\n\n # Keep a copy of the original requirement.txt\n shutil.copy2(pip_requirements_path, os.path.join(local_model_path, _ORIGINAL_REQ_FILE_NAME))\n\n # Update requirements.txt with wheels\n pip_deps = self._overwrite_pip_requirements_with_wheels(\n pip_requirements_path=pip_requirements_path, wheels_dir=wheels_dir\n )\n\n # Update conda.yaml with wheels\n self._update_conda_env(pip_deps, conda_env_path)\n\n # Update MLModel File\n mlflow_model = self._update_mlflow_model(\n original_model_metadata=model_metadata, mlflow_model=mlflow_model\n )\n mlflow_model.save(model_metadata_path)\n return mlflow_model\n\n def _update_conda_env(self, new_pip_deps, conda_env_path):\n \"\"\"\n Updates the list pip packages in the conda.yaml file to the list of wheels in the wheels\n directory.\n {\n \"name\": \"env\",\n \"channels\": [...],\n \"dependencies\": [\n ...,\n \"pip\",\n {\"pip\": [...]}, <- Overwrite this with list of wheels\n ],\n }\n :param new_pip_deps: List of pip dependencies as wheels\n :param 
conda_env_path: Path to conda.yaml file in the model directory\n \"\"\"\n with open(conda_env_path) as f:\n conda_env = yaml.safe_load(f)\n\n new_conda_env = _overwrite_pip_deps(conda_env, new_pip_deps)\n\n with open(conda_env_path, \"w\") as out:\n yaml.safe_dump(new_conda_env, stream=out, default_flow_style=False)\n\n def _update_mlflow_model(self, original_model_metadata, mlflow_model):\n \"\"\"\n Modifies the MLModel file to reflect updated information such as the run_id,\n utc_time_created. Additionally, this also adds `wheels` to the MLModel file to indicate that\n this is a `wheeled` model.\n :param original_model_file_path: The model metadata stored in the original MLmodel file.\n :param mlflow_model: :py:mod:`mlflow.models.Model` configuration of the newly created\n wheeled model\n \"\"\"\n\n run_id = mlflow.tracking.fluent._get_or_start_run().info.run_id\n if mlflow_model is None:\n mlflow_model = Model(run_id=run_id)\n\n original_model_metadata.__dict__.update(\n {k: v for k, v in mlflow_model.__dict__.items() if v}\n )\n mlflow_model.__dict__.update(original_model_metadata.__dict__)\n mlflow_model.artifact_path = WheeledModel.get_wheel_artifact_path(\n mlflow_model.artifact_path\n )\n\n mlflow_model.wheels = {_PLATFORM: platform.platform()}\n return mlflow_model\n\n @classmethod\n def _download_wheels(cls, pip_requirements_path, dst_path):\n \"\"\"\n Downloads all the wheels of the dependencies specified in the requirements.txt file.\n The pip wheel download_command defaults to downloading only binary packages using\n the `--only-binary=:all:` option. This behavior can be overridden using an\n environment variable `MLFLOW_WHEELED_MODEL_PIP_DOWNLOAD_OPTIONS`, which will allows\n setting different options such as `--prefer-binary`, `--no-binary`, etc.\n :param pip_requirements_path: Path to requirements.txt in the model directory\n :param dst_path: Path to the directory where the wheels are to be downloaded\n \"\"\"\n if not os.path.exists(dst_path):\n os.makedirs(dst_path)\n\n pip_wheel_options = MLFLOW_WHEELED_MODEL_PIP_DOWNLOAD_OPTIONS.get()\n\n try:\n subprocess.run(\n [\n sys.executable,\n \"-m\",\n \"pip\",\n \"wheel\",\n pip_wheel_options,\n \"--wheel-dir\",\n dst_path,\n \"-r\",\n pip_requirements_path,\n \"--no-cache-dir\",\n ],\n check=True,\n capture_output=True,\n )\n except subprocess.CalledProcessError as e:\n raise MlflowException(\n f\"An error occurred while downloading the dependency wheels: {e.stderr}\"\n )\n\n def _overwrite_pip_requirements_with_wheels(self, pip_requirements_path, wheels_dir):\n \"\"\"\n Overwrites the requirements.txt with the wheels of the required dependencies.\n :param pip_requirements_path: Path to requirements.txt in the model directory\n :param wheels_dir: Path to directory where wheels are stored\n \"\"\"\n wheels = []\n with open(pip_requirements_path, \"w\") as wheels_requirements:\n for wheel_file in os.listdir(wheels_dir):\n if wheel_file.endswith(\".whl\"):\n complete_wheel_file = os.path.join(_WHEELS_FOLDER_NAME, wheel_file)\n wheels.append(complete_wheel_file)\n wheels_requirements.write(complete_wheel_file + \"\\n\")\n return wheels\n\n def _create_pip_requirement(self, conda_env_path, pip_requirements_path):\n \"\"\"\n This method creates a requirements.txt file for the model dependencies if the file does not\n already exist. 
If uses the pip dependencies found in the conda.yaml env file.\n :param conda_env_path: Path to conda.yaml env file which contains the required pip\n dependencies\n :param pip_requirements_path: Path where the new requirements.txt will be created.\n \"\"\"\n with open(conda_env_path) as f:\n conda_env = yaml.safe_load(f)\n pip_deps = _get_pip_deps(conda_env)\n _mlflow_additional_pip_env(pip_deps, pip_requirements_path)\n\n @classmethod\n def get_wheel_artifact_path(cls, original_artifact_path):\n return original_artifact_path + \"_\" + _WHEELS_FOLDER_NAME\n", "path": "mlflow/models/wheeled_model.py"}]}
num_tokens: 3,833
num_tokens_diff: 265
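As a usage note on the `subprocess.run` refactor recorded above: passing an argument list without `shell=True` avoids shell re-parsing of paths and options. The sketch below is illustrative only; the `pip --version` command is a stand-in, not the mlflow wheel-download call.

```python
import subprocess
import sys

result = subprocess.run(
    [sys.executable, "-m", "pip", "--version"],  # argument list, no shell quoting needed
    check=True,           # raise CalledProcessError on a non-zero exit code
    capture_output=True,  # collect stdout/stderr, as the mlflow patch does
)
print(result.stdout.decode().strip())
```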
problem_id: gh_patches_debug_12296
source: rasdani/github-patches
task_type: git_diff
in_source_id: fedora-infra__bodhi-2359
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Cannot run database migrations on the 3.7 branch I built a beta out of the ```HEAD``` of the ```3.7``` branch, and the migrations fail to run: ``` [root@bodhi-backend01 bowlofeggs][STG]# /usr/bin/alembic -c /etc/bodhi/alembic.ini upgrade head INFO [alembic.runtime.migration] Context impl PostgresqlImpl. INFO [alembic.runtime.migration] Will assume transactional DDL. INFO [alembic.env] Emitting SQL to allow for global DDL locking with BDR /usr/lib/python2.7/site-packages/alembic/util/messaging.py:69: UserWarning: Revision be25565a1211 referenced from be25565a1211 -> 59c0f5fbc1b2 (head), Add a greenwave_unsatisfied_requirements column to the updates table. is not present warnings.warn(msg) Traceback (most recent call last): File "/usr/bin/alembic", line 12, in <module> sys.exit(load_entry_point('alembic', 'console_scripts', 'alembic')()) File "/usr/lib/python2.7/site-packages/alembic/config.py", line 479, in main CommandLine(prog=prog).main(argv=argv) File "/usr/lib/python2.7/site-packages/alembic/config.py", line 473, in main self.run_cmd(cfg, options) File "/usr/lib/python2.7/site-packages/alembic/config.py", line 456, in run_cmd **dict((k, getattr(options, k, None)) for k in kwarg) File "/usr/lib/python2.7/site-packages/alembic/command.py", line 254, in upgrade script.run_env() File "/usr/lib/python2.7/site-packages/alembic/script/base.py", line 425, in run_env util.load_python_file(self.dir, 'env.py') File "/usr/lib/python2.7/site-packages/alembic/util/pyfiles.py", line 81, in load_python_file module = load_module_py(module_id, path) File "/usr/lib/python2.7/site-packages/alembic/util/compat.py", line 141, in load_module_py mod = imp.load_source(module_id, path, fp) File "/usr/lib/python2.7/site-packages/bodhi/server/migrations/env.py", line 112, in <module> run_migrations_online() File "/usr/lib/python2.7/site-packages/bodhi/server/migrations/env.py", line 104, in run_migrations_online context.run_migrations() File "<string>", line 8, in run_migrations File "/usr/lib/python2.7/site-packages/alembic/runtime/environment.py", line 836, in run_migrations self.get_context().run_migrations(**kw) File "/usr/lib/python2.7/site-packages/alembic/runtime/migration.py", line 321, in run_migrations for step in self._migrations_fn(heads, self): File "/usr/lib/python2.7/site-packages/alembic/command.py", line 243, in upgrade return script._upgrade_revs(revision, rev) File "/usr/lib/python2.7/site-packages/alembic/script/base.py", line 334, in _upgrade_revs revs = list(revs) File "/usr/lib/python2.7/site-packages/alembic/script/revision.py", line 645, in _iterate_revisions requested_lowers = self.get_revisions(lower) File "/usr/lib/python2.7/site-packages/alembic/script/revision.py", line 299, in get_revisions return sum([self.get_revisions(id_elem) for id_elem in id_], ()) File "/usr/lib/python2.7/site-packages/alembic/script/revision.py", line 301, in get_revisions resolved_id, branch_label = self._resolve_revision_number(id_) File "/usr/lib/python2.7/site-packages/alembic/script/revision.py", line 437, in _resolve_revision_number self._revision_map File "/usr/lib/python2.7/site-packages/alembic/util/langhelpers.py", line 239, in __get__ obj.__dict__[self.__name__] = result = self.fget(obj) File "/usr/lib/python2.7/site-packages/alembic/script/revision.py", line 152, in _revision_map down_revision = map_[downrev] KeyError: 'be25565a1211' ``` It sounds like there's a migration on 
```develop``` that is not on the ```3.7``` branch, and when I cherry-picked the migration back to ```3.7``` it now references a migration that does not exist. To fix this, I'll need to shuffle the order of the migrations. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `bodhi/server/migrations/versions/59c0f5fbc1b2_add_a_greenwave_unsatisfied_.py` Content: ``` 1 # Copyright (c) 2018 Red Hat, Inc. 2 # 3 # This file is part of Bodhi. 4 # 5 # This program is free software; you can redistribute it and/or 6 # modify it under the terms of the GNU General Public License 7 # as published by the Free Software Foundation; either version 2 8 # of the License, or (at your option) any later version. 9 # 10 # This program is distributed in the hope that it will be useful, 11 # but WITHOUT ANY WARRANTY; without even the implied warranty of 12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 # GNU General Public License for more details. 14 # 15 # You should have received a copy of the GNU General Public License 16 # along with this program; if not, write to the Free Software 17 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 18 """ 19 Add a greenwave_unsatisfied_requirements column to the updates table. 20 21 Revision ID: 59c0f5fbc1b2 22 Revises: be25565a1211 23 Create Date: 2018-05-01 15:37:07.346034 24 """ 25 from alembic import op 26 import sqlalchemy as sa 27 28 29 # revision identifiers, used by Alembic. 30 revision = '59c0f5fbc1b2' 31 down_revision = 'be25565a1211' 32 33 34 def upgrade(): 35 """Add a greenwave_unsatisfied_requirements to the updates table.""" 36 op.add_column('updates', 37 sa.Column('greenwave_unsatisfied_requirements', sa.UnicodeText(), nullable=True)) 38 39 40 def downgrade(): 41 """Drop the greenwave_unsatisfied_requirements from the updates table.""" 42 op.drop_column('updates', 'greenwave_unsatisfied_requirements') 43 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/bodhi/server/migrations/versions/59c0f5fbc1b2_add_a_greenwave_unsatisfied_.py b/bodhi/server/migrations/versions/59c0f5fbc1b2_add_a_greenwave_unsatisfied_.py --- a/bodhi/server/migrations/versions/59c0f5fbc1b2_add_a_greenwave_unsatisfied_.py +++ b/bodhi/server/migrations/versions/59c0f5fbc1b2_add_a_greenwave_unsatisfied_.py @@ -19,7 +19,7 @@ Add a greenwave_unsatisfied_requirements column to the updates table. Revision ID: 59c0f5fbc1b2 -Revises: be25565a1211 +Revises: c21dd18b161a Create Date: 2018-05-01 15:37:07.346034 """ from alembic import op @@ -28,7 +28,7 @@ # revision identifiers, used by Alembic. revision = '59c0f5fbc1b2' -down_revision = 'be25565a1211' +down_revision = 'c21dd18b161a' def upgrade():
{"golden_diff": "diff --git a/bodhi/server/migrations/versions/59c0f5fbc1b2_add_a_greenwave_unsatisfied_.py b/bodhi/server/migrations/versions/59c0f5fbc1b2_add_a_greenwave_unsatisfied_.py\n--- a/bodhi/server/migrations/versions/59c0f5fbc1b2_add_a_greenwave_unsatisfied_.py\n+++ b/bodhi/server/migrations/versions/59c0f5fbc1b2_add_a_greenwave_unsatisfied_.py\n@@ -19,7 +19,7 @@\n Add a greenwave_unsatisfied_requirements column to the updates table.\n \n Revision ID: 59c0f5fbc1b2\n-Revises: be25565a1211\n+Revises: c21dd18b161a\n Create Date: 2018-05-01 15:37:07.346034\n \"\"\"\n from alembic import op\n@@ -28,7 +28,7 @@\n \n # revision identifiers, used by Alembic.\n revision = '59c0f5fbc1b2'\n-down_revision = 'be25565a1211'\n+down_revision = 'c21dd18b161a'\n \n \n def upgrade():\n", "issue": "Cannot run database migrations on the 3.7 branch\nI built a beta out of the ```HEAD``` of the ```3.7``` branch, and the migrations fail to run:\r\n\r\n```\r\n[root@bodhi-backend01 bowlofeggs][STG]# /usr/bin/alembic -c /etc/bodhi/alembic.ini upgrade head\r\nINFO [alembic.runtime.migration] Context impl PostgresqlImpl.\r\nINFO [alembic.runtime.migration] Will assume transactional DDL.\r\nINFO [alembic.env] Emitting SQL to allow for global DDL locking with BDR\r\n/usr/lib/python2.7/site-packages/alembic/util/messaging.py:69: UserWarning: Revision be25565a1211 referenced from be25565a1211 -> 59c0f5fbc1b2 (head), Add a greenwave_unsatisfied_requirements column to the updates table. is not present\r\n warnings.warn(msg)\r\nTraceback (most recent call last):\r\n File \"/usr/bin/alembic\", line 12, in <module>\r\n sys.exit(load_entry_point('alembic', 'console_scripts', 'alembic')())\r\n File \"/usr/lib/python2.7/site-packages/alembic/config.py\", line 479, in main\r\n CommandLine(prog=prog).main(argv=argv)\r\n File \"/usr/lib/python2.7/site-packages/alembic/config.py\", line 473, in main\r\n self.run_cmd(cfg, options)\r\n File \"/usr/lib/python2.7/site-packages/alembic/config.py\", line 456, in run_cmd\r\n **dict((k, getattr(options, k, None)) for k in kwarg)\r\n File \"/usr/lib/python2.7/site-packages/alembic/command.py\", line 254, in upgrade\r\n script.run_env()\r\n File \"/usr/lib/python2.7/site-packages/alembic/script/base.py\", line 425, in run_env\r\n util.load_python_file(self.dir, 'env.py')\r\n File \"/usr/lib/python2.7/site-packages/alembic/util/pyfiles.py\", line 81, in load_python_file\r\n module = load_module_py(module_id, path)\r\n File \"/usr/lib/python2.7/site-packages/alembic/util/compat.py\", line 141, in load_module_py\r\n mod = imp.load_source(module_id, path, fp)\r\n File \"/usr/lib/python2.7/site-packages/bodhi/server/migrations/env.py\", line 112, in <module>\r\n run_migrations_online()\r\n File \"/usr/lib/python2.7/site-packages/bodhi/server/migrations/env.py\", line 104, in run_migrations_online\r\n context.run_migrations()\r\n File \"<string>\", line 8, in run_migrations\r\n File \"/usr/lib/python2.7/site-packages/alembic/runtime/environment.py\", line 836, in run_migrations\r\n self.get_context().run_migrations(**kw)\r\n File \"/usr/lib/python2.7/site-packages/alembic/runtime/migration.py\", line 321, in run_migrations\r\n for step in self._migrations_fn(heads, self):\r\n File \"/usr/lib/python2.7/site-packages/alembic/command.py\", line 243, in upgrade\r\n return script._upgrade_revs(revision, rev)\r\n File \"/usr/lib/python2.7/site-packages/alembic/script/base.py\", line 334, in _upgrade_revs\r\n revs = list(revs)\r\n File 
\"/usr/lib/python2.7/site-packages/alembic/script/revision.py\", line 645, in _iterate_revisions\r\n requested_lowers = self.get_revisions(lower)\r\n File \"/usr/lib/python2.7/site-packages/alembic/script/revision.py\", line 299, in get_revisions\r\n return sum([self.get_revisions(id_elem) for id_elem in id_], ())\r\n File \"/usr/lib/python2.7/site-packages/alembic/script/revision.py\", line 301, in get_revisions\r\n resolved_id, branch_label = self._resolve_revision_number(id_)\r\n File \"/usr/lib/python2.7/site-packages/alembic/script/revision.py\", line 437, in _resolve_revision_number\r\n self._revision_map\r\n File \"/usr/lib/python2.7/site-packages/alembic/util/langhelpers.py\", line 239, in __get__\r\n obj.__dict__[self.__name__] = result = self.fget(obj)\r\n File \"/usr/lib/python2.7/site-packages/alembic/script/revision.py\", line 152, in _revision_map\r\n down_revision = map_[downrev]\r\nKeyError: 'be25565a1211'\r\n```\r\n\r\nIt sounds like there's a migration on ```develop``` that is not on the ```3.7``` branch, and when I cherry-picked the migration back to ```3.7``` it now references a migration that does not exist. To fix this, I'll need to shuffle the order of the migrations.\n", "before_files": [{"content": "# Copyright (c) 2018 Red Hat, Inc.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\"\"\"\nAdd a greenwave_unsatisfied_requirements column to the updates table.\n\nRevision ID: 59c0f5fbc1b2\nRevises: be25565a1211\nCreate Date: 2018-05-01 15:37:07.346034\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '59c0f5fbc1b2'\ndown_revision = 'be25565a1211'\n\n\ndef upgrade():\n \"\"\"Add a greenwave_unsatisfied_requirements to the updates table.\"\"\"\n op.add_column('updates',\n sa.Column('greenwave_unsatisfied_requirements', sa.UnicodeText(), nullable=True))\n\n\ndef downgrade():\n \"\"\"Drop the greenwave_unsatisfied_requirements from the updates table.\"\"\"\n op.drop_column('updates', 'greenwave_unsatisfied_requirements')\n", "path": "bodhi/server/migrations/versions/59c0f5fbc1b2_add_a_greenwave_unsatisfied_.py"}], "after_files": [{"content": "# Copyright (c) 2018 Red Hat, Inc.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\"\"\"\nAdd a greenwave_unsatisfied_requirements column to the updates table.\n\nRevision ID: 59c0f5fbc1b2\nRevises: c21dd18b161a\nCreate Date: 2018-05-01 15:37:07.346034\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '59c0f5fbc1b2'\ndown_revision = 'c21dd18b161a'\n\n\ndef upgrade():\n \"\"\"Add a greenwave_unsatisfied_requirements to the updates table.\"\"\"\n op.add_column('updates',\n sa.Column('greenwave_unsatisfied_requirements', sa.UnicodeText(), nullable=True))\n\n\ndef downgrade():\n \"\"\"Drop the greenwave_unsatisfied_requirements from the updates table.\"\"\"\n op.drop_column('updates', 'greenwave_unsatisfied_requirements')\n", "path": "bodhi/server/migrations/versions/59c0f5fbc1b2_add_a_greenwave_unsatisfied_.py"}]}
1,915
318
gh_patches_debug_39319
rasdani/github-patches
git_diff
PaddlePaddle__PaddleSeg-266
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- LabelMe标注好图片之后,使用labelme2seg无法生成真值图片 我使用LabelMe标注好图片之后,使用labelme2seg生成真值图片,运行labelme2seg的时候报错,报错信息如下: class_names: ('_background_', 'steel') Saved class_names: D:\2\class_names.txt Generating dataset from: D:\2\2020_5_15_13_19_46_995.json Traceback (most recent call last): File "D:\PaddleSeg\pdseg\tools\labelme2seg.py", line 90, in <module> main(args) File "D:\PaddleSeg\pdseg\tools\labelme2seg.py", line 78, in main if lbl.min() >= 0 and lbl.max() <= 255: AttributeError: 'tuple' object has no attribute 'min' --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pdseg/tools/labelme2seg.py` Content: ``` 1 # coding: utf8 2 # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve. 3 # 4 # Licensed under the Apache License, Version 2.0 (the "License"); 5 # you may not use this file except in compliance with the License. 6 # You may obtain a copy of the License at 7 # 8 # http://www.apache.org/licenses/LICENSE-2.0 9 # 10 # Unless required by applicable law or agreed to in writing, software 11 # distributed under the License is distributed on an "AS IS" BASIS, 12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 # See the License for the specific language governing permissions and 14 # limitations under the License. 15 16 from __future__ import print_function 17 18 import argparse 19 import glob 20 import json 21 import os 22 import os.path as osp 23 24 import numpy as np 25 import PIL.Image 26 import labelme 27 28 from gray2pseudo_color import get_color_map_list 29 30 31 def parse_args(): 32 parser = argparse.ArgumentParser( 33 formatter_class=argparse.ArgumentDefaultsHelpFormatter) 34 parser.add_argument('input_dir', help='input annotated directory') 35 return parser.parse_args() 36 37 38 def main(args): 39 output_dir = osp.join(args.input_dir, 'annotations') 40 if not osp.exists(output_dir): 41 os.makedirs(output_dir) 42 print('Creating annotations directory:', output_dir) 43 44 # get the all class names for the given dataset 45 class_names = ['_background_'] 46 for label_file in glob.glob(osp.join(args.input_dir, '*.json')): 47 with open(label_file) as f: 48 data = json.load(f) 49 for shape in data['shapes']: 50 label = shape['label'] 51 cls_name = label 52 if not cls_name in class_names: 53 class_names.append(cls_name) 54 55 class_name_to_id = {} 56 for i, class_name in enumerate(class_names): 57 class_id = i # starts with 0 58 class_name_to_id[class_name] = class_id 59 if class_id == 0: 60 assert class_name == '_background_' 61 class_names = tuple(class_names) 62 print('class_names:', class_names) 63 64 out_class_names_file = osp.join(args.input_dir, 'class_names.txt') 65 with open(out_class_names_file, 'w') as f: 66 f.writelines('\n'.join(class_names)) 67 print('Saved class_names:', out_class_names_file) 68 69 color_map = get_color_map_list(256) 70 71 for label_file in glob.glob(osp.join(args.input_dir, '*.json')): 72 print('Generating dataset from:', label_file) 73 with open(label_file) as f: 74 base = osp.splitext(osp.basename(label_file))[0] 75 out_png_file = osp.join(output_dir, base + '.png') 76 77 data = json.load(f) 78 79 img_file = osp.join(osp.dirname(label_file), data['imagePath']) 80 img = np.asarray(PIL.Image.open(img_file)) 81 82 lbl = labelme.utils.shapes_to_label( 83 img_shape=img.shape, 84 shapes=data['shapes'], 85 
label_name_to_value=class_name_to_id, 86 ) 87 88 if osp.splitext(out_png_file)[1] != '.png': 89 out_png_file += '.png' 90 # Assume label ranges [0, 255] for uint8, 91 if lbl.min() >= 0 and lbl.max() <= 255: 92 lbl_pil = PIL.Image.fromarray(lbl.astype(np.uint8), mode='P') 93 lbl_pil.putpalette(color_map) 94 lbl_pil.save(out_png_file) 95 else: 96 raise ValueError( 97 '[%s] Cannot save the pixel-wise class label as PNG. ' 98 'Please consider using the .npy format.' % out_png_file) 99 100 101 if __name__ == '__main__': 102 args = parse_args() 103 main(args) 104 ``` Path: `pdseg/tools/jingling2seg.py` Content: ``` 1 # coding: utf8 2 # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve. 3 # 4 # Licensed under the Apache License, Version 2.0 (the "License"); 5 # you may not use this file except in compliance with the License. 6 # You may obtain a copy of the License at 7 # 8 # http://www.apache.org/licenses/LICENSE-2.0 9 # 10 # Unless required by applicable law or agreed to in writing, software 11 # distributed under the License is distributed on an "AS IS" BASIS, 12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 # See the License for the specific language governing permissions and 14 # limitations under the License. 15 16 from __future__ import print_function 17 18 import argparse 19 import glob 20 import json 21 import os 22 import os.path as osp 23 24 import numpy as np 25 import PIL.Image 26 import labelme 27 28 from gray2pseudo_color import get_color_map_list 29 30 31 def parse_args(): 32 parser = argparse.ArgumentParser( 33 formatter_class=argparse.ArgumentDefaultsHelpFormatter) 34 parser.add_argument('input_dir', help='input annotated directory') 35 return parser.parse_args() 36 37 38 def main(args): 39 output_dir = osp.join(args.input_dir, 'annotations') 40 if not osp.exists(output_dir): 41 os.makedirs(output_dir) 42 print('Creating annotations directory:', output_dir) 43 44 # get the all class names for the given dataset 45 class_names = ['_background_'] 46 for label_file in glob.glob(osp.join(args.input_dir, '*.json')): 47 with open(label_file) as f: 48 data = json.load(f) 49 if data['outputs']: 50 for output in data['outputs']['object']: 51 name = output['name'] 52 cls_name = name 53 if not cls_name in class_names: 54 class_names.append(cls_name) 55 56 class_name_to_id = {} 57 for i, class_name in enumerate(class_names): 58 class_id = i # starts with 0 59 class_name_to_id[class_name] = class_id 60 if class_id == 0: 61 assert class_name == '_background_' 62 class_names = tuple(class_names) 63 print('class_names:', class_names) 64 65 out_class_names_file = osp.join(args.input_dir, 'class_names.txt') 66 with open(out_class_names_file, 'w') as f: 67 f.writelines('\n'.join(class_names)) 68 print('Saved class_names:', out_class_names_file) 69 70 color_map = get_color_map_list(256) 71 72 for label_file in glob.glob(osp.join(args.input_dir, '*.json')): 73 print('Generating dataset from:', label_file) 74 with open(label_file) as f: 75 base = osp.splitext(osp.basename(label_file))[0] 76 out_png_file = osp.join(output_dir, base + '.png') 77 78 data = json.load(f) 79 80 data_shapes = [] 81 if data['outputs']: 82 for output in data['outputs']['object']: 83 if 'polygon' in output.keys(): 84 polygon = output['polygon'] 85 name = output['name'] 86 87 # convert jingling format to labelme format 88 points = [] 89 for i in range(1, int(len(polygon) / 2) + 1): 90 points.append( 91 [polygon['x' + str(i)], polygon['y' + str(i)]]) 92 shape = { 93 'label': name, 94 
'points': points, 95 'shape_type': 'polygon' 96 } 97 data_shapes.append(shape) 98 99 if 'size' not in data: 100 continue 101 data_size = data['size'] 102 img_shape = (data_size['height'], data_size['width'], 103 data_size['depth']) 104 105 lbl = labelme.utils.shapes_to_label( 106 img_shape=img_shape, 107 shapes=data_shapes, 108 label_name_to_value=class_name_to_id, 109 ) 110 111 if osp.splitext(out_png_file)[1] != '.png': 112 out_png_file += '.png' 113 # Assume label ranges [0, 255] for uint8, 114 if lbl.min() >= 0 and lbl.max() <= 255: 115 lbl_pil = PIL.Image.fromarray(lbl.astype(np.uint8), mode='P') 116 lbl_pil.putpalette(color_map) 117 lbl_pil.save(out_png_file) 118 else: 119 raise ValueError( 120 '[%s] Cannot save the pixel-wise class label as PNG. ' 121 'Please consider using the .npy format.' % out_png_file) 122 123 124 if __name__ == '__main__': 125 args = parse_args() 126 main(args) 127 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pdseg/tools/jingling2seg.py b/pdseg/tools/jingling2seg.py --- a/pdseg/tools/jingling2seg.py +++ b/pdseg/tools/jingling2seg.py @@ -20,12 +20,11 @@ import json import os import os.path as osp - import numpy as np import PIL.Image -import labelme from gray2pseudo_color import get_color_map_list +from labelme2seg import shape2label def parse_args(): @@ -102,10 +101,10 @@ img_shape = (data_size['height'], data_size['width'], data_size['depth']) - lbl = labelme.utils.shapes_to_label( - img_shape=img_shape, + lbl = shape2label( + img_size=img_shape, shapes=data_shapes, - label_name_to_value=class_name_to_id, + class_name_mapping=class_name_to_id, ) if osp.splitext(out_png_file)[1] != '.png': diff --git a/pdseg/tools/labelme2seg.py b/pdseg/tools/labelme2seg.py --- a/pdseg/tools/labelme2seg.py +++ b/pdseg/tools/labelme2seg.py @@ -17,13 +17,14 @@ import argparse import glob +import math import json import os import os.path as osp - import numpy as np import PIL.Image -import labelme +import PIL.ImageDraw +import cv2 from gray2pseudo_color import get_color_map_list @@ -77,12 +78,12 @@ data = json.load(f) img_file = osp.join(osp.dirname(label_file), data['imagePath']) - img = np.asarray(PIL.Image.open(img_file)) + img = np.asarray(cv2.imread(img_file)) - lbl = labelme.utils.shapes_to_label( - img_shape=img.shape, + lbl = shape2label( + img_size=img.shape, shapes=data['shapes'], - label_name_to_value=class_name_to_id, + class_name_mapping=class_name_to_id, ) if osp.splitext(out_png_file)[1] != '.png': @@ -98,6 +99,27 @@ 'Please consider using the .npy format.' % out_png_file) +def shape2mask(img_size, points): + label_mask = PIL.Image.fromarray(np.zeros(img_size[:2], dtype=np.uint8)) + image_draw = PIL.ImageDraw.Draw(label_mask) + points_list = [tuple(point) for point in points] + assert len(points_list) > 2, 'Polygon must have points more than 2' + image_draw.polygon(xy=points_list, outline=1, fill=1) + return np.array(label_mask, dtype=bool) + + +def shape2label(img_size, shapes, class_name_mapping): + label = np.zeros(img_size[:2], dtype=np.int32) + for shape in shapes: + points = shape['points'] + class_name = shape['label'] + shape_type = shape.get('shape_type', None) + class_id = class_name_mapping[class_name] + label_mask = shape2mask(img_size[:2], points) + label[label_mask] = class_id + return label + + if __name__ == '__main__': args = parse_args() main(args)
{"golden_diff": "diff --git a/pdseg/tools/jingling2seg.py b/pdseg/tools/jingling2seg.py\n--- a/pdseg/tools/jingling2seg.py\n+++ b/pdseg/tools/jingling2seg.py\n@@ -20,12 +20,11 @@\n import json\n import os\n import os.path as osp\n-\n import numpy as np\n import PIL.Image\n-import labelme\n \n from gray2pseudo_color import get_color_map_list\n+from labelme2seg import shape2label\n \n \n def parse_args():\n@@ -102,10 +101,10 @@\n img_shape = (data_size['height'], data_size['width'],\n data_size['depth'])\n \n- lbl = labelme.utils.shapes_to_label(\n- img_shape=img_shape,\n+ lbl = shape2label(\n+ img_size=img_shape,\n shapes=data_shapes,\n- label_name_to_value=class_name_to_id,\n+ class_name_mapping=class_name_to_id,\n )\n \n if osp.splitext(out_png_file)[1] != '.png':\ndiff --git a/pdseg/tools/labelme2seg.py b/pdseg/tools/labelme2seg.py\n--- a/pdseg/tools/labelme2seg.py\n+++ b/pdseg/tools/labelme2seg.py\n@@ -17,13 +17,14 @@\n \n import argparse\n import glob\n+import math\n import json\n import os\n import os.path as osp\n-\n import numpy as np\n import PIL.Image\n-import labelme\n+import PIL.ImageDraw\n+import cv2\n \n from gray2pseudo_color import get_color_map_list\n \n@@ -77,12 +78,12 @@\n data = json.load(f)\n \n img_file = osp.join(osp.dirname(label_file), data['imagePath'])\n- img = np.asarray(PIL.Image.open(img_file))\n+ img = np.asarray(cv2.imread(img_file))\n \n- lbl = labelme.utils.shapes_to_label(\n- img_shape=img.shape,\n+ lbl = shape2label(\n+ img_size=img.shape,\n shapes=data['shapes'],\n- label_name_to_value=class_name_to_id,\n+ class_name_mapping=class_name_to_id,\n )\n \n if osp.splitext(out_png_file)[1] != '.png':\n@@ -98,6 +99,27 @@\n 'Please consider using the .npy format.' % out_png_file)\n \n \n+def shape2mask(img_size, points):\n+ label_mask = PIL.Image.fromarray(np.zeros(img_size[:2], dtype=np.uint8))\n+ image_draw = PIL.ImageDraw.Draw(label_mask)\n+ points_list = [tuple(point) for point in points]\n+ assert len(points_list) > 2, 'Polygon must have points more than 2'\n+ image_draw.polygon(xy=points_list, outline=1, fill=1)\n+ return np.array(label_mask, dtype=bool)\n+\n+\n+def shape2label(img_size, shapes, class_name_mapping):\n+ label = np.zeros(img_size[:2], dtype=np.int32)\n+ for shape in shapes:\n+ points = shape['points']\n+ class_name = shape['label']\n+ shape_type = shape.get('shape_type', None)\n+ class_id = class_name_mapping[class_name]\n+ label_mask = shape2mask(img_size[:2], points)\n+ label[label_mask] = class_id\n+ return label\n+\n+\n if __name__ == '__main__':\n args = parse_args()\n main(args)\n", "issue": "LabelMe\u6807\u6ce8\u597d\u56fe\u7247\u4e4b\u540e\uff0c\u4f7f\u7528labelme2seg\u65e0\u6cd5\u751f\u6210\u771f\u503c\u56fe\u7247\n\u6211\u4f7f\u7528LabelMe\u6807\u6ce8\u597d\u56fe\u7247\u4e4b\u540e\uff0c\u4f7f\u7528labelme2seg\u751f\u6210\u771f\u503c\u56fe\u7247\uff0c\u8fd0\u884clabelme2seg\u7684\u65f6\u5019\u62a5\u9519\uff0c\u62a5\u9519\u4fe1\u606f\u5982\u4e0b\uff1a\r\nclass_names: ('_background_', 'steel')\r\nSaved class_names: D:\\2\\class_names.txt\r\nGenerating dataset from: D:\\2\\2020_5_15_13_19_46_995.json\r\nTraceback (most recent call last):\r\n File \"D:\\PaddleSeg\\pdseg\\tools\\labelme2seg.py\", line 90, in <module>\r\n main(args)\r\n File \"D:\\PaddleSeg\\pdseg\\tools\\labelme2seg.py\", line 78, in main\r\n if lbl.min() >= 0 and lbl.max() <= 255:\r\nAttributeError: 'tuple' object has no attribute 'min'\n", "before_files": [{"content": "# coding: utf8\n# Copyright (c) 2019 PaddlePaddle Authors. 
All Rights Reserve.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport argparse\nimport glob\nimport json\nimport os\nimport os.path as osp\n\nimport numpy as np\nimport PIL.Image\nimport labelme\n\nfrom gray2pseudo_color import get_color_map_list\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('input_dir', help='input annotated directory')\n return parser.parse_args()\n\n\ndef main(args):\n output_dir = osp.join(args.input_dir, 'annotations')\n if not osp.exists(output_dir):\n os.makedirs(output_dir)\n print('Creating annotations directory:', output_dir)\n\n # get the all class names for the given dataset\n class_names = ['_background_']\n for label_file in glob.glob(osp.join(args.input_dir, '*.json')):\n with open(label_file) as f:\n data = json.load(f)\n for shape in data['shapes']:\n label = shape['label']\n cls_name = label\n if not cls_name in class_names:\n class_names.append(cls_name)\n\n class_name_to_id = {}\n for i, class_name in enumerate(class_names):\n class_id = i # starts with 0\n class_name_to_id[class_name] = class_id\n if class_id == 0:\n assert class_name == '_background_'\n class_names = tuple(class_names)\n print('class_names:', class_names)\n\n out_class_names_file = osp.join(args.input_dir, 'class_names.txt')\n with open(out_class_names_file, 'w') as f:\n f.writelines('\\n'.join(class_names))\n print('Saved class_names:', out_class_names_file)\n\n color_map = get_color_map_list(256)\n\n for label_file in glob.glob(osp.join(args.input_dir, '*.json')):\n print('Generating dataset from:', label_file)\n with open(label_file) as f:\n base = osp.splitext(osp.basename(label_file))[0]\n out_png_file = osp.join(output_dir, base + '.png')\n\n data = json.load(f)\n\n img_file = osp.join(osp.dirname(label_file), data['imagePath'])\n img = np.asarray(PIL.Image.open(img_file))\n\n lbl = labelme.utils.shapes_to_label(\n img_shape=img.shape,\n shapes=data['shapes'],\n label_name_to_value=class_name_to_id,\n )\n\n if osp.splitext(out_png_file)[1] != '.png':\n out_png_file += '.png'\n # Assume label ranges [0, 255] for uint8,\n if lbl.min() >= 0 and lbl.max() <= 255:\n lbl_pil = PIL.Image.fromarray(lbl.astype(np.uint8), mode='P')\n lbl_pil.putpalette(color_map)\n lbl_pil.save(out_png_file)\n else:\n raise ValueError(\n '[%s] Cannot save the pixel-wise class label as PNG. '\n 'Please consider using the .npy format.' % out_png_file)\n\n\nif __name__ == '__main__':\n args = parse_args()\n main(args)\n", "path": "pdseg/tools/labelme2seg.py"}, {"content": "# coding: utf8\n# Copyright (c) 2019 PaddlePaddle Authors. 
All Rights Reserve.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport argparse\nimport glob\nimport json\nimport os\nimport os.path as osp\n\nimport numpy as np\nimport PIL.Image\nimport labelme\n\nfrom gray2pseudo_color import get_color_map_list\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('input_dir', help='input annotated directory')\n return parser.parse_args()\n\n\ndef main(args):\n output_dir = osp.join(args.input_dir, 'annotations')\n if not osp.exists(output_dir):\n os.makedirs(output_dir)\n print('Creating annotations directory:', output_dir)\n\n # get the all class names for the given dataset\n class_names = ['_background_']\n for label_file in glob.glob(osp.join(args.input_dir, '*.json')):\n with open(label_file) as f:\n data = json.load(f)\n if data['outputs']:\n for output in data['outputs']['object']:\n name = output['name']\n cls_name = name\n if not cls_name in class_names:\n class_names.append(cls_name)\n\n class_name_to_id = {}\n for i, class_name in enumerate(class_names):\n class_id = i # starts with 0\n class_name_to_id[class_name] = class_id\n if class_id == 0:\n assert class_name == '_background_'\n class_names = tuple(class_names)\n print('class_names:', class_names)\n\n out_class_names_file = osp.join(args.input_dir, 'class_names.txt')\n with open(out_class_names_file, 'w') as f:\n f.writelines('\\n'.join(class_names))\n print('Saved class_names:', out_class_names_file)\n\n color_map = get_color_map_list(256)\n\n for label_file in glob.glob(osp.join(args.input_dir, '*.json')):\n print('Generating dataset from:', label_file)\n with open(label_file) as f:\n base = osp.splitext(osp.basename(label_file))[0]\n out_png_file = osp.join(output_dir, base + '.png')\n\n data = json.load(f)\n\n data_shapes = []\n if data['outputs']:\n for output in data['outputs']['object']:\n if 'polygon' in output.keys():\n polygon = output['polygon']\n name = output['name']\n\n # convert jingling format to labelme format\n points = []\n for i in range(1, int(len(polygon) / 2) + 1):\n points.append(\n [polygon['x' + str(i)], polygon['y' + str(i)]])\n shape = {\n 'label': name,\n 'points': points,\n 'shape_type': 'polygon'\n }\n data_shapes.append(shape)\n\n if 'size' not in data:\n continue\n data_size = data['size']\n img_shape = (data_size['height'], data_size['width'],\n data_size['depth'])\n\n lbl = labelme.utils.shapes_to_label(\n img_shape=img_shape,\n shapes=data_shapes,\n label_name_to_value=class_name_to_id,\n )\n\n if osp.splitext(out_png_file)[1] != '.png':\n out_png_file += '.png'\n # Assume label ranges [0, 255] for uint8,\n if lbl.min() >= 0 and lbl.max() <= 255:\n lbl_pil = PIL.Image.fromarray(lbl.astype(np.uint8), mode='P')\n lbl_pil.putpalette(color_map)\n lbl_pil.save(out_png_file)\n else:\n raise ValueError(\n '[%s] Cannot save the pixel-wise class label as PNG. '\n 'Please consider using the .npy format.' 
% out_png_file)\n\n\nif __name__ == '__main__':\n args = parse_args()\n main(args)\n", "path": "pdseg/tools/jingling2seg.py"}], "after_files": [{"content": "# coding: utf8\n# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport argparse\nimport glob\nimport math\nimport json\nimport os\nimport os.path as osp\nimport numpy as np\nimport PIL.Image\nimport PIL.ImageDraw\nimport cv2\n\nfrom gray2pseudo_color import get_color_map_list\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('input_dir', help='input annotated directory')\n return parser.parse_args()\n\n\ndef main(args):\n output_dir = osp.join(args.input_dir, 'annotations')\n if not osp.exists(output_dir):\n os.makedirs(output_dir)\n print('Creating annotations directory:', output_dir)\n\n # get the all class names for the given dataset\n class_names = ['_background_']\n for label_file in glob.glob(osp.join(args.input_dir, '*.json')):\n with open(label_file) as f:\n data = json.load(f)\n for shape in data['shapes']:\n label = shape['label']\n cls_name = label\n if not cls_name in class_names:\n class_names.append(cls_name)\n\n class_name_to_id = {}\n for i, class_name in enumerate(class_names):\n class_id = i # starts with 0\n class_name_to_id[class_name] = class_id\n if class_id == 0:\n assert class_name == '_background_'\n class_names = tuple(class_names)\n print('class_names:', class_names)\n\n out_class_names_file = osp.join(args.input_dir, 'class_names.txt')\n with open(out_class_names_file, 'w') as f:\n f.writelines('\\n'.join(class_names))\n print('Saved class_names:', out_class_names_file)\n\n color_map = get_color_map_list(256)\n\n for label_file in glob.glob(osp.join(args.input_dir, '*.json')):\n print('Generating dataset from:', label_file)\n with open(label_file) as f:\n base = osp.splitext(osp.basename(label_file))[0]\n out_png_file = osp.join(output_dir, base + '.png')\n\n data = json.load(f)\n\n img_file = osp.join(osp.dirname(label_file), data['imagePath'])\n img = np.asarray(cv2.imread(img_file))\n\n lbl = shape2label(\n img_size=img.shape,\n shapes=data['shapes'],\n class_name_mapping=class_name_to_id,\n )\n\n if osp.splitext(out_png_file)[1] != '.png':\n out_png_file += '.png'\n # Assume label ranges [0, 255] for uint8,\n if lbl.min() >= 0 and lbl.max() <= 255:\n lbl_pil = PIL.Image.fromarray(lbl.astype(np.uint8), mode='P')\n lbl_pil.putpalette(color_map)\n lbl_pil.save(out_png_file)\n else:\n raise ValueError(\n '[%s] Cannot save the pixel-wise class label as PNG. '\n 'Please consider using the .npy format.' 
% out_png_file)\n\n\ndef shape2mask(img_size, points):\n label_mask = PIL.Image.fromarray(np.zeros(img_size[:2], dtype=np.uint8))\n image_draw = PIL.ImageDraw.Draw(label_mask)\n points_list = [tuple(point) for point in points]\n assert len(points_list) > 2, 'Polygon must have points more than 2'\n image_draw.polygon(xy=points_list, outline=1, fill=1)\n return np.array(label_mask, dtype=bool)\n\n\ndef shape2label(img_size, shapes, class_name_mapping):\n label = np.zeros(img_size[:2], dtype=np.int32)\n for shape in shapes:\n points = shape['points']\n class_name = shape['label']\n shape_type = shape.get('shape_type', None)\n class_id = class_name_mapping[class_name]\n label_mask = shape2mask(img_size[:2], points)\n label[label_mask] = class_id\n return label\n\n\nif __name__ == '__main__':\n args = parse_args()\n main(args)\n", "path": "pdseg/tools/labelme2seg.py"}, {"content": "# coding: utf8\n# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserve.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport argparse\nimport glob\nimport json\nimport os\nimport os.path as osp\nimport numpy as np\nimport PIL.Image\n\nfrom gray2pseudo_color import get_color_map_list\nfrom labelme2seg import shape2label\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('input_dir', help='input annotated directory')\n return parser.parse_args()\n\n\ndef main(args):\n output_dir = osp.join(args.input_dir, 'annotations')\n if not osp.exists(output_dir):\n os.makedirs(output_dir)\n print('Creating annotations directory:', output_dir)\n\n # get the all class names for the given dataset\n class_names = ['_background_']\n for label_file in glob.glob(osp.join(args.input_dir, '*.json')):\n with open(label_file) as f:\n data = json.load(f)\n if data['outputs']:\n for output in data['outputs']['object']:\n name = output['name']\n cls_name = name\n if not cls_name in class_names:\n class_names.append(cls_name)\n\n class_name_to_id = {}\n for i, class_name in enumerate(class_names):\n class_id = i # starts with 0\n class_name_to_id[class_name] = class_id\n if class_id == 0:\n assert class_name == '_background_'\n class_names = tuple(class_names)\n print('class_names:', class_names)\n\n out_class_names_file = osp.join(args.input_dir, 'class_names.txt')\n with open(out_class_names_file, 'w') as f:\n f.writelines('\\n'.join(class_names))\n print('Saved class_names:', out_class_names_file)\n\n color_map = get_color_map_list(256)\n\n for label_file in glob.glob(osp.join(args.input_dir, '*.json')):\n print('Generating dataset from:', label_file)\n with open(label_file) as f:\n base = osp.splitext(osp.basename(label_file))[0]\n out_png_file = osp.join(output_dir, base + '.png')\n\n data = json.load(f)\n\n data_shapes = []\n if data['outputs']:\n for output in data['outputs']['object']:\n if 'polygon' in output.keys():\n polygon = output['polygon']\n name = output['name']\n\n # convert jingling 
format to labelme format\n points = []\n for i in range(1, int(len(polygon) / 2) + 1):\n points.append(\n [polygon['x' + str(i)], polygon['y' + str(i)]])\n shape = {\n 'label': name,\n 'points': points,\n 'shape_type': 'polygon'\n }\n data_shapes.append(shape)\n\n if 'size' not in data:\n continue\n data_size = data['size']\n img_shape = (data_size['height'], data_size['width'],\n data_size['depth'])\n\n lbl = shape2label(\n img_size=img_shape,\n shapes=data_shapes,\n class_name_mapping=class_name_to_id,\n )\n\n if osp.splitext(out_png_file)[1] != '.png':\n out_png_file += '.png'\n # Assume label ranges [0, 255] for uint8,\n if lbl.min() >= 0 and lbl.max() <= 255:\n lbl_pil = PIL.Image.fromarray(lbl.astype(np.uint8), mode='P')\n lbl_pil.putpalette(color_map)\n lbl_pil.save(out_png_file)\n else:\n raise ValueError(\n '[%s] Cannot save the pixel-wise class label as PNG. '\n 'Please consider using the .npy format.' % out_png_file)\n\n\nif __name__ == '__main__':\n args = parse_args()\n main(args)\n", "path": "pdseg/tools/jingling2seg.py"}]}
2,757
773
gh_patches_debug_33088
rasdani/github-patches
git_diff
mathesar-foundation__mathesar-317
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- User should be able to configure multiple databases in settings **Problem** <!-- Please provide a clear and concise description of the problem that this feature request is designed to solve.--> Currently, the user can only configure one Mathesar database in the settings. They should be able to configure as many databases to connect to Mathesar as they want. **Proposed solution** <!-- A clear and concise description of your proposed solution or feature. --> The user should be able to configure multiple databases in the `.env` file. **Additional context** <!-- Add any other context or screenshots about the feature request here.--> We might want to use `python-decouple`'s [built in CSV helper](https://github.com/henriquebastos/python-decouple/#built-in-csv-helper) for this. Ideally, the user would be able to associate the database key with the connection information directly using a tuple or something like that. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `config/settings.py` Content: ``` 1 """ 2 Django settings for config project. 3 4 Generated by 'django-admin startproject' using Django 3.1.7. 5 6 For more information on this file, see 7 https://docs.djangoproject.com/en/3.1/topics/settings/ 8 9 For the full list of settings and their values, see 10 https://docs.djangoproject.com/en/3.1/ref/settings/ 11 """ 12 13 import os 14 from pathlib import Path 15 16 from decouple import Csv, config as decouple_config 17 from dj_database_url import parse as db_url 18 19 # Build paths inside the project like this: BASE_DIR / 'subdir'. 20 BASE_DIR = Path(__file__).resolve().parent.parent 21 22 # Application definition 23 24 INSTALLED_APPS = [ 25 "django.contrib.admin", 26 "django.contrib.auth", 27 "django.contrib.contenttypes", 28 "django.contrib.sessions", 29 "django.contrib.messages", 30 "django.contrib.staticfiles", 31 "rest_framework", 32 "django_filters", 33 "django_property_filter", 34 "mathesar", 35 ] 36 37 MIDDLEWARE = [ 38 "django.middleware.security.SecurityMiddleware", 39 "django.contrib.sessions.middleware.SessionMiddleware", 40 "django.middleware.common.CommonMiddleware", 41 "django.middleware.csrf.CsrfViewMiddleware", 42 "django.contrib.auth.middleware.AuthenticationMiddleware", 43 "django.contrib.messages.middleware.MessageMiddleware", 44 "django.middleware.clickjacking.XFrameOptionsMiddleware", 45 ] 46 47 ROOT_URLCONF = "config.urls" 48 49 TEMPLATES = [ 50 { 51 "BACKEND": "django.template.backends.django.DjangoTemplates", 52 "DIRS": [], 53 "APP_DIRS": True, 54 "OPTIONS": { 55 "context_processors": [ 56 "config.context_processors.get_settings", 57 "django.template.context_processors.debug", 58 "django.template.context_processors.request", 59 "django.contrib.auth.context_processors.auth", 60 "django.contrib.messages.context_processors.messages", 61 ], 62 }, 63 }, 64 ] 65 66 WSGI_APPLICATION = "config.wsgi.application" 67 68 # Database 69 # https://docs.djangoproject.com/en/3.1/ref/settings/#databases 70 71 # TODO: Add to documentation that database keys should not be than 128 characters. 
72 DATABASES = { 73 decouple_config('DJANGO_DATABASE_KEY'): decouple_config('DJANGO_DATABASE_URL', cast=db_url), 74 decouple_config('MATHESAR_DATABASE_KEY'): decouple_config('MATHESAR_DATABASE_URL', cast=db_url) 75 } 76 77 # pytest-django will create a new database named 'test_{DATABASES[table_db]['NAME']}' 78 # and use it for our API tests if we don't specify DATABASES[table_db]['TEST']['NAME'] 79 if decouple_config('TEST', default=False, cast=bool): 80 DATABASES[decouple_config('MATHESAR_DATABASE_KEY')]['TEST'] = { 81 'NAME': DATABASES[decouple_config('MATHESAR_DATABASE_KEY')]['NAME'] 82 } 83 84 85 # Quick-start development settings - unsuitable for production 86 # See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/ 87 88 # SECURITY WARNING: keep the secret key used in production secret! 89 SECRET_KEY = decouple_config('SECRET_KEY') 90 91 # SECURITY WARNING: don't run with debug turned on in production! 92 DEBUG = decouple_config('DEBUG', default=False, cast=bool) 93 94 ALLOWED_HOSTS = decouple_config('ALLOWED_HOSTS', cast=Csv()) 95 96 # Password validation 97 # https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators 98 99 AUTH_PASSWORD_VALIDATORS = [ 100 { 101 "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator", 102 }, 103 { 104 "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator", 105 }, 106 { 107 "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator", 108 }, 109 { 110 "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator", 111 }, 112 ] 113 114 115 # Internationalization 116 # https://docs.djangoproject.com/en/3.1/topics/i18n/ 117 118 LANGUAGE_CODE = "en-us" 119 120 TIME_ZONE = "UTC" 121 122 USE_I18N = True 123 124 USE_L10N = True 125 126 USE_TZ = True 127 128 129 # Static files (CSS, JavaScript, Images) 130 # https://docs.djangoproject.com/en/3.1/howto/static-files/ 131 132 STATIC_URL = "/static/" 133 134 CLIENT_DEV_URL = "http://localhost:3000" 135 136 137 # Media files (uploaded by the user) 138 139 MEDIA_ROOT = os.path.join(BASE_DIR, '.media/') 140 141 MEDIA_URL = "/media/" 142 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/config/settings.py b/config/settings.py --- a/config/settings.py +++ b/config/settings.py @@ -16,6 +16,16 @@ from decouple import Csv, config as decouple_config from dj_database_url import parse as db_url + +# We use a 'tuple' with pipes as delimiters as decople naively splits the global +# variables on commas when casting to Csv() +def pipe_delim(pipe_string): + # Remove opening and closing brackets + pipe_string = pipe_string[1:-1] + # Split on pipe delim + return pipe_string.split("|") + + # Build paths inside the project like this: BASE_DIR / 'subdir'. BASE_DIR = Path(__file__).resolve().parent.parent @@ -69,17 +79,20 @@ # https://docs.djangoproject.com/en/3.1/ref/settings/#databases # TODO: Add to documentation that database keys should not be than 128 characters. + +# MATHESAR_DATABASES should be of the form '({db_name}|{db_url}), ({db_name}|{db_url})' +# See pipe_delim above for why we use pipes as delimiters DATABASES = { - decouple_config('DJANGO_DATABASE_KEY'): decouple_config('DJANGO_DATABASE_URL', cast=db_url), - decouple_config('MATHESAR_DATABASE_KEY'): decouple_config('MATHESAR_DATABASE_URL', cast=db_url) + db_key: db_url(url_string) + for db_key, url_string in decouple_config('MATHESAR_DATABASES', cast=Csv(pipe_delim)) } +DATABASES[decouple_config('DJANGO_DATABASE_KEY')] = decouple_config('DJANGO_DATABASE_URL', cast=db_url) # pytest-django will create a new database named 'test_{DATABASES[table_db]['NAME']}' # and use it for our API tests if we don't specify DATABASES[table_db]['TEST']['NAME'] if decouple_config('TEST', default=False, cast=bool): - DATABASES[decouple_config('MATHESAR_DATABASE_KEY')]['TEST'] = { - 'NAME': DATABASES[decouple_config('MATHESAR_DATABASE_KEY')]['NAME'] - } + for db_key, _ in decouple_config('MATHESAR_DATABASES', cast=Csv(pipe_delim)): + DATABASES[db_key]['TEST'] = {'NAME': DATABASES[db_key]['NAME']} # Quick-start development settings - unsuitable for production
{"golden_diff": "diff --git a/config/settings.py b/config/settings.py\n--- a/config/settings.py\n+++ b/config/settings.py\n@@ -16,6 +16,16 @@\n from decouple import Csv, config as decouple_config\n from dj_database_url import parse as db_url\n \n+\n+# We use a 'tuple' with pipes as delimiters as decople naively splits the global\n+# variables on commas when casting to Csv()\n+def pipe_delim(pipe_string):\n+ # Remove opening and closing brackets\n+ pipe_string = pipe_string[1:-1]\n+ # Split on pipe delim\n+ return pipe_string.split(\"|\")\n+\n+\n # Build paths inside the project like this: BASE_DIR / 'subdir'.\n BASE_DIR = Path(__file__).resolve().parent.parent\n \n@@ -69,17 +79,20 @@\n # https://docs.djangoproject.com/en/3.1/ref/settings/#databases\n \n # TODO: Add to documentation that database keys should not be than 128 characters.\n+\n+# MATHESAR_DATABASES should be of the form '({db_name}|{db_url}), ({db_name}|{db_url})'\n+# See pipe_delim above for why we use pipes as delimiters\n DATABASES = {\n- decouple_config('DJANGO_DATABASE_KEY'): decouple_config('DJANGO_DATABASE_URL', cast=db_url),\n- decouple_config('MATHESAR_DATABASE_KEY'): decouple_config('MATHESAR_DATABASE_URL', cast=db_url)\n+ db_key: db_url(url_string)\n+ for db_key, url_string in decouple_config('MATHESAR_DATABASES', cast=Csv(pipe_delim))\n }\n+DATABASES[decouple_config('DJANGO_DATABASE_KEY')] = decouple_config('DJANGO_DATABASE_URL', cast=db_url)\n \n # pytest-django will create a new database named 'test_{DATABASES[table_db]['NAME']}'\n # and use it for our API tests if we don't specify DATABASES[table_db]['TEST']['NAME']\n if decouple_config('TEST', default=False, cast=bool):\n- DATABASES[decouple_config('MATHESAR_DATABASE_KEY')]['TEST'] = {\n- 'NAME': DATABASES[decouple_config('MATHESAR_DATABASE_KEY')]['NAME']\n- }\n+ for db_key, _ in decouple_config('MATHESAR_DATABASES', cast=Csv(pipe_delim)):\n+ DATABASES[db_key]['TEST'] = {'NAME': DATABASES[db_key]['NAME']}\n \n \n # Quick-start development settings - unsuitable for production\n", "issue": "User should be able to configure multiple databases in settings\n**Problem**\r\n<!-- Please provide a clear and concise description of the problem that this feature request is designed to solve.-->\r\nCurrently, the user can only configure one Mathesar database in the settings. They should be able to configure as many databases to connect to Mathesar as they want.\r\n\r\n**Proposed solution**\r\n<!-- A clear and concise description of your proposed solution or feature. 
-->\r\nThe user should be able to configure multiple databases in the `.env` file.\r\n\r\n**Additional context**\r\n<!-- Add any other context or screenshots about the feature request here.-->\r\nWe might want to use `python-decouple`'s [built in CSV helper](https://github.com/henriquebastos/python-decouple/#built-in-csv-helper) for this.\r\n\r\nIdeally, the user would be able to associate the database key with the connection information directly using a tuple or something like that.\n", "before_files": [{"content": "\"\"\"\nDjango settings for config project.\n\nGenerated by 'django-admin startproject' using Django 3.1.7.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/3.1/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/3.1/ref/settings/\n\"\"\"\n\nimport os\nfrom pathlib import Path\n\nfrom decouple import Csv, config as decouple_config\nfrom dj_database_url import parse as db_url\n\n# Build paths inside the project like this: BASE_DIR / 'subdir'.\nBASE_DIR = Path(__file__).resolve().parent.parent\n\n# Application definition\n\nINSTALLED_APPS = [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"rest_framework\",\n \"django_filters\",\n \"django_property_filter\",\n \"mathesar\",\n]\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\nROOT_URLCONF = \"config.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"config.context_processors.get_settings\",\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ],\n },\n },\n]\n\nWSGI_APPLICATION = \"config.wsgi.application\"\n\n# Database\n# https://docs.djangoproject.com/en/3.1/ref/settings/#databases\n\n# TODO: Add to documentation that database keys should not be than 128 characters.\nDATABASES = {\n decouple_config('DJANGO_DATABASE_KEY'): decouple_config('DJANGO_DATABASE_URL', cast=db_url),\n decouple_config('MATHESAR_DATABASE_KEY'): decouple_config('MATHESAR_DATABASE_URL', cast=db_url)\n}\n\n# pytest-django will create a new database named 'test_{DATABASES[table_db]['NAME']}'\n# and use it for our API tests if we don't specify DATABASES[table_db]['TEST']['NAME']\nif decouple_config('TEST', default=False, cast=bool):\n DATABASES[decouple_config('MATHESAR_DATABASE_KEY')]['TEST'] = {\n 'NAME': DATABASES[decouple_config('MATHESAR_DATABASE_KEY')]['NAME']\n }\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = decouple_config('SECRET_KEY')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = decouple_config('DEBUG', default=False, cast=bool)\n\nALLOWED_HOSTS = 
decouple_config('ALLOWED_HOSTS', cast=Csv())\n\n# Password validation\n# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.1/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.1/howto/static-files/\n\nSTATIC_URL = \"/static/\"\n\nCLIENT_DEV_URL = \"http://localhost:3000\"\n\n\n# Media files (uploaded by the user)\n\nMEDIA_ROOT = os.path.join(BASE_DIR, '.media/')\n\nMEDIA_URL = \"/media/\"\n", "path": "config/settings.py"}], "after_files": [{"content": "\"\"\"\nDjango settings for config project.\n\nGenerated by 'django-admin startproject' using Django 3.1.7.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/3.1/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/3.1/ref/settings/\n\"\"\"\n\nimport os\nfrom pathlib import Path\n\nfrom decouple import Csv, config as decouple_config\nfrom dj_database_url import parse as db_url\n\n\n# We use a 'tuple' with pipes as delimiters as decople naively splits the global\n# variables on commas when casting to Csv()\ndef pipe_delim(pipe_string):\n # Remove opening and closing brackets\n pipe_string = pipe_string[1:-1]\n # Split on pipe delim\n return pipe_string.split(\"|\")\n\n\n# Build paths inside the project like this: BASE_DIR / 'subdir'.\nBASE_DIR = Path(__file__).resolve().parent.parent\n\n# Application definition\n\nINSTALLED_APPS = [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"rest_framework\",\n \"django_filters\",\n \"django_property_filter\",\n \"mathesar\",\n]\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\nROOT_URLCONF = \"config.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"config.context_processors.get_settings\",\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ],\n },\n },\n]\n\nWSGI_APPLICATION = \"config.wsgi.application\"\n\n# Database\n# https://docs.djangoproject.com/en/3.1/ref/settings/#databases\n\n# TODO: Add to documentation that database keys should not be than 128 characters.\n\n# MATHESAR_DATABASES should be of the form '({db_name}|{db_url}), ({db_name}|{db_url})'\n# See pipe_delim above for why we use 
pipes as delimiters\nDATABASES = {\n db_key: db_url(url_string)\n for db_key, url_string in decouple_config('MATHESAR_DATABASES', cast=Csv(pipe_delim))\n}\nDATABASES[decouple_config('DJANGO_DATABASE_KEY')] = decouple_config('DJANGO_DATABASE_URL', cast=db_url)\n\n# pytest-django will create a new database named 'test_{DATABASES[table_db]['NAME']}'\n# and use it for our API tests if we don't specify DATABASES[table_db]['TEST']['NAME']\nif decouple_config('TEST', default=False, cast=bool):\n for db_key, _ in decouple_config('MATHESAR_DATABASES', cast=Csv(pipe_delim)):\n DATABASES[db_key]['TEST'] = {'NAME': DATABASES[db_key]['NAME']}\n\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = decouple_config('SECRET_KEY')\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = decouple_config('DEBUG', default=False, cast=bool)\n\nALLOWED_HOSTS = decouple_config('ALLOWED_HOSTS', cast=Csv())\n\n# Password validation\n# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.1/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.1/howto/static-files/\n\nSTATIC_URL = \"/static/\"\n\nCLIENT_DEV_URL = \"http://localhost:3000\"\n\n\n# Media files (uploaded by the user)\n\nMEDIA_ROOT = os.path.join(BASE_DIR, '.media/')\n\nMEDIA_URL = \"/media/\"\n", "path": "config/settings.py"}]}
1,712
545
gh_patches_debug_17060
rasdani/github-patches
git_diff
pyca__cryptography-7654
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Build failed on OpenBSD 7.2 OpenBSD 7.2 upgraded LibreSSL to 3.6.0 (LIBRESSL_VERSION_NUMBER 0x3060000fL) that has OPENSSL_cleanup function. Please update version check in src/_cffi_src/openssl/crypto.py to: ``` #if CRYPTOGRAPHY_IS_LIBRESSL && LIBRESSL_VERSION_NUMBER < 0x3060000fL static const long Cryptography_HAS_OPENSSL_CLEANUP = 0; void (*OPENSSL_cleanup)(void) = NULL; #else static const long Cryptography_HAS_OPENSSL_CLEANUP = 1; #endif ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/_cffi_src/openssl/crypto.py` Content: ``` 1 # This file is dual licensed under the terms of the Apache License, Version 2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository 3 # for complete details. 4 5 6 INCLUDES = """ 7 #include <openssl/crypto.h> 8 """ 9 10 TYPES = """ 11 static const long Cryptography_HAS_MEM_FUNCTIONS; 12 static const long Cryptography_HAS_OPENSSL_CLEANUP; 13 14 static const int SSLEAY_VERSION; 15 static const int SSLEAY_CFLAGS; 16 static const int SSLEAY_PLATFORM; 17 static const int SSLEAY_DIR; 18 static const int SSLEAY_BUILT_ON; 19 static const int OPENSSL_VERSION; 20 static const int OPENSSL_CFLAGS; 21 static const int OPENSSL_BUILT_ON; 22 static const int OPENSSL_PLATFORM; 23 static const int OPENSSL_DIR; 24 """ 25 26 FUNCTIONS = """ 27 void OPENSSL_cleanup(void); 28 29 /* SSLeay was removed in 1.1.0 */ 30 unsigned long SSLeay(void); 31 const char *SSLeay_version(int); 32 /* these functions were added to replace the SSLeay functions in 1.1.0 */ 33 unsigned long OpenSSL_version_num(void); 34 const char *OpenSSL_version(int); 35 36 void *OPENSSL_malloc(size_t); 37 void OPENSSL_free(void *); 38 39 40 /* Signature is significantly different in LibreSSL, so expose via different 41 symbol name */ 42 int Cryptography_CRYPTO_set_mem_functions( 43 void *(*)(size_t, const char *, int), 44 void *(*)(void *, size_t, const char *, int), 45 void (*)(void *, const char *, int)); 46 47 void *Cryptography_malloc_wrapper(size_t, const char *, int); 48 void *Cryptography_realloc_wrapper(void *, size_t, const char *, int); 49 void Cryptography_free_wrapper(void *, const char *, int); 50 """ 51 52 CUSTOMIZATIONS = """ 53 /* In 1.1.0 SSLeay has finally been retired. We bidirectionally define the 54 values so you can use either one. 
This is so we can use the new function 55 names no matter what OpenSSL we're running on, but users on older pyOpenSSL 56 releases won't see issues if they're running OpenSSL 1.1.0 */ 57 #if !defined(SSLEAY_VERSION) 58 # define SSLeay OpenSSL_version_num 59 # define SSLeay_version OpenSSL_version 60 # define SSLEAY_VERSION_NUMBER OPENSSL_VERSION_NUMBER 61 # define SSLEAY_VERSION OPENSSL_VERSION 62 # define SSLEAY_CFLAGS OPENSSL_CFLAGS 63 # define SSLEAY_BUILT_ON OPENSSL_BUILT_ON 64 # define SSLEAY_PLATFORM OPENSSL_PLATFORM 65 # define SSLEAY_DIR OPENSSL_DIR 66 #endif 67 #if !defined(OPENSSL_VERSION) 68 # define OpenSSL_version_num SSLeay 69 # define OpenSSL_version SSLeay_version 70 # define OPENSSL_VERSION SSLEAY_VERSION 71 # define OPENSSL_CFLAGS SSLEAY_CFLAGS 72 # define OPENSSL_BUILT_ON SSLEAY_BUILT_ON 73 # define OPENSSL_PLATFORM SSLEAY_PLATFORM 74 # define OPENSSL_DIR SSLEAY_DIR 75 #endif 76 77 #if CRYPTOGRAPHY_IS_LIBRESSL 78 static const long Cryptography_HAS_OPENSSL_CLEANUP = 0; 79 void (*OPENSSL_cleanup)(void) = NULL; 80 #else 81 static const long Cryptography_HAS_OPENSSL_CLEANUP = 1; 82 #endif 83 84 #if CRYPTOGRAPHY_IS_LIBRESSL || CRYPTOGRAPHY_IS_BORINGSSL 85 static const long Cryptography_HAS_MEM_FUNCTIONS = 0; 86 int (*Cryptography_CRYPTO_set_mem_functions)( 87 void *(*)(size_t, const char *, int), 88 void *(*)(void *, size_t, const char *, int), 89 void (*)(void *, const char *, int)) = NULL; 90 91 #else 92 static const long Cryptography_HAS_MEM_FUNCTIONS = 1; 93 94 int Cryptography_CRYPTO_set_mem_functions( 95 void *(*m)(size_t, const char *, int), 96 void *(*r)(void *, size_t, const char *, int), 97 void (*f)(void *, const char *, int) 98 ) { 99 return CRYPTO_set_mem_functions(m, r, f); 100 } 101 #endif 102 103 void *Cryptography_malloc_wrapper(size_t size, const char *path, int line) { 104 return malloc(size); 105 } 106 107 void *Cryptography_realloc_wrapper(void *ptr, size_t size, const char *path, 108 int line) { 109 return realloc(ptr, size); 110 } 111 112 void Cryptography_free_wrapper(void *ptr, const char *path, int line) { 113 free(ptr); 114 } 115 """ 116 ``` Path: `src/_cffi_src/openssl/cryptography.py` Content: ``` 1 # This file is dual licensed under the terms of the Apache License, Version 2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository 3 # for complete details. 4 5 6 INCLUDES = """ 7 /* define our OpenSSL API compatibility level to 1.0.1. Any symbols older than 8 that will raise an error during compilation. We can raise this number again 9 after we drop 1.0.2 support in the distant future. */ 10 #define OPENSSL_API_COMPAT 0x10001000L 11 12 #if defined(_WIN32) 13 #define WIN32_LEAN_AND_MEAN 14 #include <windows.h> 15 #include <Wincrypt.h> 16 #include <Winsock2.h> 17 /* 18 undef some macros that are defined by wincrypt.h but are also types in 19 boringssl. openssl has worked around this but boring has not yet. 
see: 20 https://chromium.googlesource.com/chromium/src/+/refs/heads/main/base 21 /win/wincrypt_shim.h 22 */ 23 #undef X509_NAME 24 #undef X509_EXTENSIONS 25 #undef PKCS7_SIGNER_INFO 26 #endif 27 28 #include <openssl/opensslv.h> 29 30 31 #if defined(LIBRESSL_VERSION_NUMBER) 32 #define CRYPTOGRAPHY_IS_LIBRESSL 1 33 #else 34 #define CRYPTOGRAPHY_IS_LIBRESSL 0 35 #endif 36 37 #if defined(OPENSSL_IS_BORINGSSL) 38 #define CRYPTOGRAPHY_IS_BORINGSSL 1 39 #else 40 #define CRYPTOGRAPHY_IS_BORINGSSL 0 41 #endif 42 43 #if CRYPTOGRAPHY_IS_LIBRESSL 44 #define CRYPTOGRAPHY_LIBRESSL_LESS_THAN_350 \ 45 (LIBRESSL_VERSION_NUMBER < 0x3050000f) 46 47 #else 48 #define CRYPTOGRAPHY_LIBRESSL_LESS_THAN_350 (0) 49 #endif 50 51 #if OPENSSL_VERSION_NUMBER < 0x10101000 52 #error "pyca/cryptography MUST be linked with Openssl 1.1.1 or later" 53 #endif 54 55 #define CRYPTOGRAPHY_OPENSSL_111D_OR_GREATER \ 56 (OPENSSL_VERSION_NUMBER >= 0x10101040 && !CRYPTOGRAPHY_IS_LIBRESSL) 57 #define CRYPTOGRAPHY_OPENSSL_300_OR_GREATER \ 58 (OPENSSL_VERSION_NUMBER >= 0x30000000 && !CRYPTOGRAPHY_IS_LIBRESSL) 59 60 #define CRYPTOGRAPHY_OPENSSL_LESS_THAN_111B \ 61 (OPENSSL_VERSION_NUMBER < 0x10101020 || CRYPTOGRAPHY_IS_LIBRESSL) 62 #define CRYPTOGRAPHY_OPENSSL_LESS_THAN_111D \ 63 (OPENSSL_VERSION_NUMBER < 0x10101040 || CRYPTOGRAPHY_IS_LIBRESSL) 64 #define CRYPTOGRAPHY_OPENSSL_LESS_THAN_111E \ 65 (OPENSSL_VERSION_NUMBER < 0x10101050 || CRYPTOGRAPHY_IS_LIBRESSL) 66 #if (CRYPTOGRAPHY_OPENSSL_LESS_THAN_111D && !CRYPTOGRAPHY_IS_LIBRESSL && \ 67 !defined(OPENSSL_NO_ENGINE)) || defined(USE_OSRANDOM_RNG_FOR_TESTING) 68 #define CRYPTOGRAPHY_NEEDS_OSRANDOM_ENGINE 1 69 #else 70 #define CRYPTOGRAPHY_NEEDS_OSRANDOM_ENGINE 0 71 #endif 72 """ 73 74 TYPES = """ 75 static const int CRYPTOGRAPHY_OPENSSL_111D_OR_GREATER; 76 static const int CRYPTOGRAPHY_OPENSSL_300_OR_GREATER; 77 78 static const int CRYPTOGRAPHY_OPENSSL_LESS_THAN_111B; 79 static const int CRYPTOGRAPHY_OPENSSL_LESS_THAN_111E; 80 static const int CRYPTOGRAPHY_NEEDS_OSRANDOM_ENGINE; 81 82 static const int CRYPTOGRAPHY_LIBRESSL_LESS_THAN_350; 83 84 static const int CRYPTOGRAPHY_IS_LIBRESSL; 85 static const int CRYPTOGRAPHY_IS_BORINGSSL; 86 """ 87 88 FUNCTIONS = """ 89 """ 90 91 CUSTOMIZATIONS = """ 92 """ 93 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/_cffi_src/openssl/crypto.py b/src/_cffi_src/openssl/crypto.py --- a/src/_cffi_src/openssl/crypto.py +++ b/src/_cffi_src/openssl/crypto.py @@ -74,7 +74,7 @@ # define OPENSSL_DIR SSLEAY_DIR #endif -#if CRYPTOGRAPHY_IS_LIBRESSL +#if CRYPTOGRAPHY_LIBRESSL_LESS_THAN_360 static const long Cryptography_HAS_OPENSSL_CLEANUP = 0; void (*OPENSSL_cleanup)(void) = NULL; #else diff --git a/src/_cffi_src/openssl/cryptography.py b/src/_cffi_src/openssl/cryptography.py --- a/src/_cffi_src/openssl/cryptography.py +++ b/src/_cffi_src/openssl/cryptography.py @@ -43,9 +43,12 @@ #if CRYPTOGRAPHY_IS_LIBRESSL #define CRYPTOGRAPHY_LIBRESSL_LESS_THAN_350 \ (LIBRESSL_VERSION_NUMBER < 0x3050000f) +#define CRYPTOGRAPHY_LIBRESSL_LESS_THAN_360 \ + (LIBRESSL_VERSION_NUMBER < 0x3060000f) #else #define CRYPTOGRAPHY_LIBRESSL_LESS_THAN_350 (0) +#define CRYPTOGRAPHY_LIBRESSL_LESS_THAN_360 (0) #endif #if OPENSSL_VERSION_NUMBER < 0x10101000
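For reference, the hex thresholds used in the guard above can be decoded with a short Python check. The bit layout assumed here follows the OpenSSL-style `0xMNNFF00f` convention that the `LIBRESSL_VERSION_NUMBER` constants in this patch appear to use; the helper name is made up for illustration.

```python
# Decode an OpenSSL/LibreSSL-style version number into (major, minor, fix).
def libressl_version_tuple(version_number):
    major = (version_number >> 28) & 0xF
    minor = (version_number >> 20) & 0xFF
    fix = (version_number >> 12) & 0xFF
    return (major, minor, fix)

assert libressl_version_tuple(0x3050000f) == (3, 5, 0)  # threshold behind ..._LESS_THAN_350
assert libressl_version_tuple(0x3060000f) == (3, 6, 0)  # OpenBSD 7.2 ships LibreSSL 3.6.0
# 3.6.0 is not "less than 3.6.0", so with the new macro OPENSSL_cleanup is declared there.
assert not (0x3060000f < 0x3060000f)
```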
{"golden_diff": "diff --git a/src/_cffi_src/openssl/crypto.py b/src/_cffi_src/openssl/crypto.py\n--- a/src/_cffi_src/openssl/crypto.py\n+++ b/src/_cffi_src/openssl/crypto.py\n@@ -74,7 +74,7 @@\n # define OPENSSL_DIR SSLEAY_DIR\n #endif\n \n-#if CRYPTOGRAPHY_IS_LIBRESSL\n+#if CRYPTOGRAPHY_LIBRESSL_LESS_THAN_360\n static const long Cryptography_HAS_OPENSSL_CLEANUP = 0;\n void (*OPENSSL_cleanup)(void) = NULL;\n #else\ndiff --git a/src/_cffi_src/openssl/cryptography.py b/src/_cffi_src/openssl/cryptography.py\n--- a/src/_cffi_src/openssl/cryptography.py\n+++ b/src/_cffi_src/openssl/cryptography.py\n@@ -43,9 +43,12 @@\n #if CRYPTOGRAPHY_IS_LIBRESSL\n #define CRYPTOGRAPHY_LIBRESSL_LESS_THAN_350 \\\n (LIBRESSL_VERSION_NUMBER < 0x3050000f)\n+#define CRYPTOGRAPHY_LIBRESSL_LESS_THAN_360 \\\n+ (LIBRESSL_VERSION_NUMBER < 0x3060000f)\n \n #else\n #define CRYPTOGRAPHY_LIBRESSL_LESS_THAN_350 (0)\n+#define CRYPTOGRAPHY_LIBRESSL_LESS_THAN_360 (0)\n #endif\n \n #if OPENSSL_VERSION_NUMBER < 0x10101000\n", "issue": "Build failed on OpenBSD 7.2\nOpenBSD 7.2 upgraded LibreSSL to 3.6.0 (LIBRESSL_VERSION_NUMBER 0x3060000fL) that has OPENSSL_cleanup function.\r\n\r\nPlease update version check in src/_cffi_src/openssl/crypto.py to:\r\n```\r\n#if CRYPTOGRAPHY_IS_LIBRESSL && LIBRESSL_VERSION_NUMBER < 0x3060000fL\r\nstatic const long Cryptography_HAS_OPENSSL_CLEANUP = 0;\r\nvoid (*OPENSSL_cleanup)(void) = NULL;\r\n#else\r\nstatic const long Cryptography_HAS_OPENSSL_CLEANUP = 1;\r\n#endif\r\n```\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\n\nINCLUDES = \"\"\"\n#include <openssl/crypto.h>\n\"\"\"\n\nTYPES = \"\"\"\nstatic const long Cryptography_HAS_MEM_FUNCTIONS;\nstatic const long Cryptography_HAS_OPENSSL_CLEANUP;\n\nstatic const int SSLEAY_VERSION;\nstatic const int SSLEAY_CFLAGS;\nstatic const int SSLEAY_PLATFORM;\nstatic const int SSLEAY_DIR;\nstatic const int SSLEAY_BUILT_ON;\nstatic const int OPENSSL_VERSION;\nstatic const int OPENSSL_CFLAGS;\nstatic const int OPENSSL_BUILT_ON;\nstatic const int OPENSSL_PLATFORM;\nstatic const int OPENSSL_DIR;\n\"\"\"\n\nFUNCTIONS = \"\"\"\nvoid OPENSSL_cleanup(void);\n\n/* SSLeay was removed in 1.1.0 */\nunsigned long SSLeay(void);\nconst char *SSLeay_version(int);\n/* these functions were added to replace the SSLeay functions in 1.1.0 */\nunsigned long OpenSSL_version_num(void);\nconst char *OpenSSL_version(int);\n\nvoid *OPENSSL_malloc(size_t);\nvoid OPENSSL_free(void *);\n\n\n/* Signature is significantly different in LibreSSL, so expose via different\n symbol name */\nint Cryptography_CRYPTO_set_mem_functions(\n void *(*)(size_t, const char *, int),\n void *(*)(void *, size_t, const char *, int),\n void (*)(void *, const char *, int));\n\nvoid *Cryptography_malloc_wrapper(size_t, const char *, int);\nvoid *Cryptography_realloc_wrapper(void *, size_t, const char *, int);\nvoid Cryptography_free_wrapper(void *, const char *, int);\n\"\"\"\n\nCUSTOMIZATIONS = \"\"\"\n/* In 1.1.0 SSLeay has finally been retired. We bidirectionally define the\n values so you can use either one. 
This is so we can use the new function\n names no matter what OpenSSL we're running on, but users on older pyOpenSSL\n releases won't see issues if they're running OpenSSL 1.1.0 */\n#if !defined(SSLEAY_VERSION)\n# define SSLeay OpenSSL_version_num\n# define SSLeay_version OpenSSL_version\n# define SSLEAY_VERSION_NUMBER OPENSSL_VERSION_NUMBER\n# define SSLEAY_VERSION OPENSSL_VERSION\n# define SSLEAY_CFLAGS OPENSSL_CFLAGS\n# define SSLEAY_BUILT_ON OPENSSL_BUILT_ON\n# define SSLEAY_PLATFORM OPENSSL_PLATFORM\n# define SSLEAY_DIR OPENSSL_DIR\n#endif\n#if !defined(OPENSSL_VERSION)\n# define OpenSSL_version_num SSLeay\n# define OpenSSL_version SSLeay_version\n# define OPENSSL_VERSION SSLEAY_VERSION\n# define OPENSSL_CFLAGS SSLEAY_CFLAGS\n# define OPENSSL_BUILT_ON SSLEAY_BUILT_ON\n# define OPENSSL_PLATFORM SSLEAY_PLATFORM\n# define OPENSSL_DIR SSLEAY_DIR\n#endif\n\n#if CRYPTOGRAPHY_IS_LIBRESSL\nstatic const long Cryptography_HAS_OPENSSL_CLEANUP = 0;\nvoid (*OPENSSL_cleanup)(void) = NULL;\n#else\nstatic const long Cryptography_HAS_OPENSSL_CLEANUP = 1;\n#endif\n\n#if CRYPTOGRAPHY_IS_LIBRESSL || CRYPTOGRAPHY_IS_BORINGSSL\nstatic const long Cryptography_HAS_MEM_FUNCTIONS = 0;\nint (*Cryptography_CRYPTO_set_mem_functions)(\n void *(*)(size_t, const char *, int),\n void *(*)(void *, size_t, const char *, int),\n void (*)(void *, const char *, int)) = NULL;\n\n#else\nstatic const long Cryptography_HAS_MEM_FUNCTIONS = 1;\n\nint Cryptography_CRYPTO_set_mem_functions(\n void *(*m)(size_t, const char *, int),\n void *(*r)(void *, size_t, const char *, int),\n void (*f)(void *, const char *, int)\n) {\n return CRYPTO_set_mem_functions(m, r, f);\n}\n#endif\n\nvoid *Cryptography_malloc_wrapper(size_t size, const char *path, int line) {\n return malloc(size);\n}\n\nvoid *Cryptography_realloc_wrapper(void *ptr, size_t size, const char *path,\n int line) {\n return realloc(ptr, size);\n}\n\nvoid Cryptography_free_wrapper(void *ptr, const char *path, int line) {\n free(ptr);\n}\n\"\"\"\n", "path": "src/_cffi_src/openssl/crypto.py"}, {"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\n\nINCLUDES = \"\"\"\n/* define our OpenSSL API compatibility level to 1.0.1. Any symbols older than\n that will raise an error during compilation. We can raise this number again\n after we drop 1.0.2 support in the distant future. */\n#define OPENSSL_API_COMPAT 0x10001000L\n\n#if defined(_WIN32)\n#define WIN32_LEAN_AND_MEAN\n#include <windows.h>\n#include <Wincrypt.h>\n#include <Winsock2.h>\n/*\n undef some macros that are defined by wincrypt.h but are also types in\n boringssl. openssl has worked around this but boring has not yet. 
see:\n https://chromium.googlesource.com/chromium/src/+/refs/heads/main/base\n /win/wincrypt_shim.h\n*/\n#undef X509_NAME\n#undef X509_EXTENSIONS\n#undef PKCS7_SIGNER_INFO\n#endif\n\n#include <openssl/opensslv.h>\n\n\n#if defined(LIBRESSL_VERSION_NUMBER)\n#define CRYPTOGRAPHY_IS_LIBRESSL 1\n#else\n#define CRYPTOGRAPHY_IS_LIBRESSL 0\n#endif\n\n#if defined(OPENSSL_IS_BORINGSSL)\n#define CRYPTOGRAPHY_IS_BORINGSSL 1\n#else\n#define CRYPTOGRAPHY_IS_BORINGSSL 0\n#endif\n\n#if CRYPTOGRAPHY_IS_LIBRESSL\n#define CRYPTOGRAPHY_LIBRESSL_LESS_THAN_350 \\\n (LIBRESSL_VERSION_NUMBER < 0x3050000f)\n\n#else\n#define CRYPTOGRAPHY_LIBRESSL_LESS_THAN_350 (0)\n#endif\n\n#if OPENSSL_VERSION_NUMBER < 0x10101000\n #error \"pyca/cryptography MUST be linked with Openssl 1.1.1 or later\"\n#endif\n\n#define CRYPTOGRAPHY_OPENSSL_111D_OR_GREATER \\\n (OPENSSL_VERSION_NUMBER >= 0x10101040 && !CRYPTOGRAPHY_IS_LIBRESSL)\n#define CRYPTOGRAPHY_OPENSSL_300_OR_GREATER \\\n (OPENSSL_VERSION_NUMBER >= 0x30000000 && !CRYPTOGRAPHY_IS_LIBRESSL)\n\n#define CRYPTOGRAPHY_OPENSSL_LESS_THAN_111B \\\n (OPENSSL_VERSION_NUMBER < 0x10101020 || CRYPTOGRAPHY_IS_LIBRESSL)\n#define CRYPTOGRAPHY_OPENSSL_LESS_THAN_111D \\\n (OPENSSL_VERSION_NUMBER < 0x10101040 || CRYPTOGRAPHY_IS_LIBRESSL)\n#define CRYPTOGRAPHY_OPENSSL_LESS_THAN_111E \\\n (OPENSSL_VERSION_NUMBER < 0x10101050 || CRYPTOGRAPHY_IS_LIBRESSL)\n#if (CRYPTOGRAPHY_OPENSSL_LESS_THAN_111D && !CRYPTOGRAPHY_IS_LIBRESSL && \\\n !defined(OPENSSL_NO_ENGINE)) || defined(USE_OSRANDOM_RNG_FOR_TESTING)\n#define CRYPTOGRAPHY_NEEDS_OSRANDOM_ENGINE 1\n#else\n#define CRYPTOGRAPHY_NEEDS_OSRANDOM_ENGINE 0\n#endif\n\"\"\"\n\nTYPES = \"\"\"\nstatic const int CRYPTOGRAPHY_OPENSSL_111D_OR_GREATER;\nstatic const int CRYPTOGRAPHY_OPENSSL_300_OR_GREATER;\n\nstatic const int CRYPTOGRAPHY_OPENSSL_LESS_THAN_111B;\nstatic const int CRYPTOGRAPHY_OPENSSL_LESS_THAN_111E;\nstatic const int CRYPTOGRAPHY_NEEDS_OSRANDOM_ENGINE;\n\nstatic const int CRYPTOGRAPHY_LIBRESSL_LESS_THAN_350;\n\nstatic const int CRYPTOGRAPHY_IS_LIBRESSL;\nstatic const int CRYPTOGRAPHY_IS_BORINGSSL;\n\"\"\"\n\nFUNCTIONS = \"\"\"\n\"\"\"\n\nCUSTOMIZATIONS = \"\"\"\n\"\"\"\n", "path": "src/_cffi_src/openssl/cryptography.py"}], "after_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository\n# for complete details.\n\n\nINCLUDES = \"\"\"\n#include <openssl/crypto.h>\n\"\"\"\n\nTYPES = \"\"\"\nstatic const long Cryptography_HAS_MEM_FUNCTIONS;\nstatic const long Cryptography_HAS_OPENSSL_CLEANUP;\n\nstatic const int SSLEAY_VERSION;\nstatic const int SSLEAY_CFLAGS;\nstatic const int SSLEAY_PLATFORM;\nstatic const int SSLEAY_DIR;\nstatic const int SSLEAY_BUILT_ON;\nstatic const int OPENSSL_VERSION;\nstatic const int OPENSSL_CFLAGS;\nstatic const int OPENSSL_BUILT_ON;\nstatic const int OPENSSL_PLATFORM;\nstatic const int OPENSSL_DIR;\n\"\"\"\n\nFUNCTIONS = \"\"\"\nvoid OPENSSL_cleanup(void);\n\n/* SSLeay was removed in 1.1.0 */\nunsigned long SSLeay(void);\nconst char *SSLeay_version(int);\n/* these functions were added to replace the SSLeay functions in 1.1.0 */\nunsigned long OpenSSL_version_num(void);\nconst char *OpenSSL_version(int);\n\nvoid *OPENSSL_malloc(size_t);\nvoid OPENSSL_free(void *);\n\n\n/* Signature is significantly different in LibreSSL, so expose via different\n symbol name */\nint Cryptography_CRYPTO_set_mem_functions(\n void *(*)(size_t, const char *, int),\n void *(*)(void *, size_t, const char *, int),\n void (*)(void *, const char *, int));\n\nvoid *Cryptography_malloc_wrapper(size_t, const char *, int);\nvoid *Cryptography_realloc_wrapper(void *, size_t, const char *, int);\nvoid Cryptography_free_wrapper(void *, const char *, int);\n\"\"\"\n\nCUSTOMIZATIONS = \"\"\"\n/* In 1.1.0 SSLeay has finally been retired. We bidirectionally define the\n values so you can use either one. This is so we can use the new function\n names no matter what OpenSSL we're running on, but users on older pyOpenSSL\n releases won't see issues if they're running OpenSSL 1.1.0 */\n#if !defined(SSLEAY_VERSION)\n# define SSLeay OpenSSL_version_num\n# define SSLeay_version OpenSSL_version\n# define SSLEAY_VERSION_NUMBER OPENSSL_VERSION_NUMBER\n# define SSLEAY_VERSION OPENSSL_VERSION\n# define SSLEAY_CFLAGS OPENSSL_CFLAGS\n# define SSLEAY_BUILT_ON OPENSSL_BUILT_ON\n# define SSLEAY_PLATFORM OPENSSL_PLATFORM\n# define SSLEAY_DIR OPENSSL_DIR\n#endif\n#if !defined(OPENSSL_VERSION)\n# define OpenSSL_version_num SSLeay\n# define OpenSSL_version SSLeay_version\n# define OPENSSL_VERSION SSLEAY_VERSION\n# define OPENSSL_CFLAGS SSLEAY_CFLAGS\n# define OPENSSL_BUILT_ON SSLEAY_BUILT_ON\n# define OPENSSL_PLATFORM SSLEAY_PLATFORM\n# define OPENSSL_DIR SSLEAY_DIR\n#endif\n\n#if CRYPTOGRAPHY_LIBRESSL_LESS_THAN_360\nstatic const long Cryptography_HAS_OPENSSL_CLEANUP = 0;\nvoid (*OPENSSL_cleanup)(void) = NULL;\n#else\nstatic const long Cryptography_HAS_OPENSSL_CLEANUP = 1;\n#endif\n\n#if CRYPTOGRAPHY_IS_LIBRESSL || CRYPTOGRAPHY_IS_BORINGSSL\nstatic const long Cryptography_HAS_MEM_FUNCTIONS = 0;\nint (*Cryptography_CRYPTO_set_mem_functions)(\n void *(*)(size_t, const char *, int),\n void *(*)(void *, size_t, const char *, int),\n void (*)(void *, const char *, int)) = NULL;\n\n#else\nstatic const long Cryptography_HAS_MEM_FUNCTIONS = 1;\n\nint Cryptography_CRYPTO_set_mem_functions(\n void *(*m)(size_t, const char *, int),\n void *(*r)(void *, size_t, const char *, int),\n void (*f)(void *, const char *, int)\n) {\n return CRYPTO_set_mem_functions(m, r, f);\n}\n#endif\n\nvoid *Cryptography_malloc_wrapper(size_t size, const char *path, int line) {\n return malloc(size);\n}\n\nvoid *Cryptography_realloc_wrapper(void *ptr, size_t size, const char *path,\n int line) {\n return realloc(ptr, size);\n}\n\nvoid Cryptography_free_wrapper(void *ptr, const char 
*path, int line) {\n free(ptr);\n}\n\"\"\"\n", "path": "src/_cffi_src/openssl/crypto.py"}, {"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\n\nINCLUDES = \"\"\"\n/* define our OpenSSL API compatibility level to 1.0.1. Any symbols older than\n that will raise an error during compilation. We can raise this number again\n after we drop 1.0.2 support in the distant future. */\n#define OPENSSL_API_COMPAT 0x10001000L\n\n#if defined(_WIN32)\n#define WIN32_LEAN_AND_MEAN\n#include <windows.h>\n#include <Wincrypt.h>\n#include <Winsock2.h>\n/*\n undef some macros that are defined by wincrypt.h but are also types in\n boringssl. openssl has worked around this but boring has not yet. see:\n https://chromium.googlesource.com/chromium/src/+/refs/heads/main/base\n /win/wincrypt_shim.h\n*/\n#undef X509_NAME\n#undef X509_EXTENSIONS\n#undef PKCS7_SIGNER_INFO\n#endif\n\n#include <openssl/opensslv.h>\n\n\n#if defined(LIBRESSL_VERSION_NUMBER)\n#define CRYPTOGRAPHY_IS_LIBRESSL 1\n#else\n#define CRYPTOGRAPHY_IS_LIBRESSL 0\n#endif\n\n#if defined(OPENSSL_IS_BORINGSSL)\n#define CRYPTOGRAPHY_IS_BORINGSSL 1\n#else\n#define CRYPTOGRAPHY_IS_BORINGSSL 0\n#endif\n\n#if CRYPTOGRAPHY_IS_LIBRESSL\n#define CRYPTOGRAPHY_LIBRESSL_LESS_THAN_350 \\\n (LIBRESSL_VERSION_NUMBER < 0x3050000f)\n#define CRYPTOGRAPHY_LIBRESSL_LESS_THAN_360 \\\n (LIBRESSL_VERSION_NUMBER < 0x3060000f)\n\n#else\n#define CRYPTOGRAPHY_LIBRESSL_LESS_THAN_350 (0)\n#define CRYPTOGRAPHY_LIBRESSL_LESS_THAN_360 (0)\n#endif\n\n#if OPENSSL_VERSION_NUMBER < 0x10101000\n #error \"pyca/cryptography MUST be linked with Openssl 1.1.1 or later\"\n#endif\n\n#define CRYPTOGRAPHY_OPENSSL_111D_OR_GREATER \\\n (OPENSSL_VERSION_NUMBER >= 0x10101040 && !CRYPTOGRAPHY_IS_LIBRESSL)\n#define CRYPTOGRAPHY_OPENSSL_300_OR_GREATER \\\n (OPENSSL_VERSION_NUMBER >= 0x30000000 && !CRYPTOGRAPHY_IS_LIBRESSL)\n\n#define CRYPTOGRAPHY_OPENSSL_LESS_THAN_111B \\\n (OPENSSL_VERSION_NUMBER < 0x10101020 || CRYPTOGRAPHY_IS_LIBRESSL)\n#define CRYPTOGRAPHY_OPENSSL_LESS_THAN_111D \\\n (OPENSSL_VERSION_NUMBER < 0x10101040 || CRYPTOGRAPHY_IS_LIBRESSL)\n#define CRYPTOGRAPHY_OPENSSL_LESS_THAN_111E \\\n (OPENSSL_VERSION_NUMBER < 0x10101050 || CRYPTOGRAPHY_IS_LIBRESSL)\n#if (CRYPTOGRAPHY_OPENSSL_LESS_THAN_111D && !CRYPTOGRAPHY_IS_LIBRESSL && \\\n !defined(OPENSSL_NO_ENGINE)) || defined(USE_OSRANDOM_RNG_FOR_TESTING)\n#define CRYPTOGRAPHY_NEEDS_OSRANDOM_ENGINE 1\n#else\n#define CRYPTOGRAPHY_NEEDS_OSRANDOM_ENGINE 0\n#endif\n\"\"\"\n\nTYPES = \"\"\"\nstatic const int CRYPTOGRAPHY_OPENSSL_111D_OR_GREATER;\nstatic const int CRYPTOGRAPHY_OPENSSL_300_OR_GREATER;\n\nstatic const int CRYPTOGRAPHY_OPENSSL_LESS_THAN_111B;\nstatic const int CRYPTOGRAPHY_OPENSSL_LESS_THAN_111E;\nstatic const int CRYPTOGRAPHY_NEEDS_OSRANDOM_ENGINE;\n\nstatic const int CRYPTOGRAPHY_LIBRESSL_LESS_THAN_350;\n\nstatic const int CRYPTOGRAPHY_IS_LIBRESSL;\nstatic const int CRYPTOGRAPHY_IS_BORINGSSL;\n\"\"\"\n\nFUNCTIONS = \"\"\"\n\"\"\"\n\nCUSTOMIZATIONS = \"\"\"\n\"\"\"\n", "path": "src/_cffi_src/openssl/cryptography.py"}]}
2734
341
gh_patches_debug_33502
rasdani/github-patches
git_diff
rucio__rucio-3397
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Better handling of posix replica URLs Motivation ---------- With `rucio.rse.protocols.posix.Default` implementation, the replicas with "file" schema always have port `0` in them, eg `TEST1: file://:0/tmp/testfile` Modification ------------ Override `lfns2pfns()` in the posix protocol. I will submit a pull request for this shortly. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `lib/rucio/rse/protocols/posix.py` Content: ``` 1 # Copyright European Organization for Nuclear Research (CERN) 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # You may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # http://www.apache.org/licenses/LICENSE-2.0 7 # 8 # Authors: 9 # - Ralph Vigne, <[email protected]>, 2012-2014 10 # - Nicolo Magini, <[email protected]>, 2018 11 # 12 # PY3K COMPATIBLE 13 14 import os 15 import os.path 16 import shutil 17 from subprocess import call 18 19 from rucio.common import exception 20 from rucio.common.utils import adler32 21 from rucio.rse.protocols import protocol 22 23 24 class Default(protocol.RSEProtocol): 25 """ Implementing access to RSEs using the local filesystem.""" 26 27 def exists(self, pfn): 28 """ 29 Checks if the requested file is known by the referred RSE. 30 31 :param pfn: Physical file name 32 33 :returns: True if the file exists, False if it doesn't 34 35 :raises SourceNotFound: if the source file was not found on the referred storage. 36 """ 37 status = '' 38 try: 39 status = os.path.exists(self.pfn2path(pfn)) 40 except Exception as e: 41 raise exception.ServiceUnavailable(e) 42 return status 43 44 def connect(self): 45 """ 46 Establishes the actual connection to the referred RSE. 47 48 :param: credentials needed to establish a connection with the stroage. 49 50 :raises RSEAccessDenied: if no connection could be established. 51 """ 52 pass 53 54 def close(self): 55 """ Closes the connection to RSE.""" 56 pass 57 58 def get(self, pfn, dest, transfer_timeout=None): 59 """ Provides access to files stored inside connected the RSE. 60 61 :param pfn: Physical file name of requested file 62 :param dest: Name and path of the files when stored at the client 63 :param transfer_timeout Transfer timeout (in seconds) - dummy 64 65 :raises DestinationNotAccessible: if the destination storage was not accessible. 66 :raises ServiceUnavailable: if some generic error occured in the library. 67 :raises SourceNotFound: if the source file was not found on the referred storage. 68 """ 69 try: 70 shutil.copy(self.pfn2path(pfn), dest) 71 except IOError as e: 72 try: # To check if the error happend local or remote 73 with open(dest, 'wb'): 74 pass 75 call(['rm', '-rf', dest]) 76 except IOError as e: 77 if e.errno == 2: 78 raise exception.DestinationNotAccessible(e) 79 else: 80 raise exception.ServiceUnavailable(e) 81 if e.errno == 2: 82 raise exception.SourceNotFound(e) 83 else: 84 raise exception.ServiceUnavailable(e) 85 86 def put(self, source, target, source_dir=None, transfer_timeout=None): 87 """ 88 Allows to store files inside the referred RSE. 
89 90 :param source: path to the source file on the client file system 91 :param target: path to the destination file on the storage 92 :param source_dir: Path where the to be transferred files are stored in the local file system 93 :param transfer_timeout Transfer timeout (in seconds) - dummy 94 95 :raises DestinationNotAccessible: if the destination storage was not accessible. 96 :raises ServiceUnavailable: if some generic error occured in the library. 97 :raises SourceNotFound: if the source file was not found on the referred storage. 98 """ 99 target = self.pfn2path(target) 100 101 if source_dir: 102 sf = source_dir + '/' + source 103 else: 104 sf = source 105 try: 106 dirs = os.path.dirname(target) 107 if not os.path.exists(dirs): 108 os.makedirs(dirs) 109 shutil.copy(sf, target) 110 except IOError as e: 111 if e.errno == 2: 112 raise exception.SourceNotFound(e) 113 elif not self.exists(self.rse['prefix']): 114 path = '' 115 for p in self.rse['prefix'].split('/'): 116 path += p + '/' 117 os.mkdir(path) 118 shutil.copy(sf, self.pfn2path(target)) 119 else: 120 raise exception.DestinationNotAccessible(e) 121 122 def delete(self, pfn): 123 """ Deletes a file from the connected RSE. 124 125 :param pfn: pfn to the to be deleted file 126 127 :raises ServiceUnavailable: if some generic error occured in the library. 128 :raises SourceNotFound: if the source file was not found on the referred storage. 129 """ 130 try: 131 os.remove(self.pfn2path(pfn)) 132 except OSError as e: 133 if e.errno == 2: 134 raise exception.SourceNotFound(e) 135 136 def rename(self, pfn, new_pfn): 137 """ Allows to rename a file stored inside the connected RSE. 138 139 :param path: path to the current file on the storage 140 :param new_path: path to the new file on the storage 141 142 :raises DestinationNotAccessible: if the destination storage was not accessible. 143 :raises ServiceUnavailable: if some generic error occured in the library. 144 :raises SourceNotFound: if the source file was not found on the referred storage. 145 """ 146 path = self.pfn2path(pfn) 147 new_path = self.pfn2path(new_pfn) 148 try: 149 if not os.path.exists(os.path.dirname(new_path)): 150 os.makedirs(os.path.dirname(new_path)) 151 os.rename(path, new_path) 152 except IOError as e: 153 if e.errno == 2: 154 if self.exists(self.pfn2path(path)): 155 raise exception.SourceNotFound(e) 156 else: 157 raise exception.DestinationNotAccessible(e) 158 else: 159 raise exception.ServiceUnavailable(e) 160 161 def pfn2path(self, pfn): 162 tmp = list(self.parse_pfns(pfn).values())[0] 163 return '/'.join([tmp['prefix'], tmp['path'], tmp['name']]) 164 165 def stat(self, pfn): 166 """ Determines the file size in bytes and checksum (adler32) of the provided file. 167 168 :param pfn: The PFN the file. 169 170 :returns: a dict containing the keys filesize and adler32. 171 """ 172 path = self.pfn2path(pfn) 173 return {'filesize': os.stat(path)[os.path.stat.ST_SIZE], 'adler32': adler32(path)} 174 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/lib/rucio/rse/protocols/posix.py b/lib/rucio/rse/protocols/posix.py --- a/lib/rucio/rse/protocols/posix.py +++ b/lib/rucio/rse/protocols/posix.py @@ -8,6 +8,7 @@ # Authors: # - Ralph Vigne, <[email protected]>, 2012-2014 # - Nicolo Magini, <[email protected]>, 2018 +# - David Cameron, <[email protected]>, 2020 # # PY3K COMPATIBLE @@ -158,6 +159,43 @@ else: raise exception.ServiceUnavailable(e) + def lfns2pfns(self, lfns): + """ Returns fully qualified PFNs for the file referred by each lfn in + the lfns list. + + :param lfns: List of lfns. If lfn['path'] is present it is used as + the path to the file, otherwise the path is constructed + deterministically. + + :returns: Fully qualified PFNs. + """ + pfns = {} + prefix = self.attributes['prefix'] + + if not prefix.startswith('/'): + prefix = ''.join(['/', prefix]) + if not prefix.endswith('/'): + prefix = ''.join([prefix, '/']) + + lfns = [lfns] if isinstance(lfns, dict) else lfns + for lfn in lfns: + scope, name = str(lfn['scope']), lfn['name'] + if lfn.get('path'): + pfns['%s:%s' % (scope, name)] = ''.join([self.attributes['scheme'], + '://', + self.attributes['hostname'], + prefix, + lfn['path'] if not lfn['path'].startswith('/') else lfn['path'][1:] + ]) + else: + pfns['%s:%s' % (scope, name)] = ''.join([self.attributes['scheme'], + '://', + self.attributes['hostname'], + prefix, + self._get_path(scope=scope, name=name) + ]) + return pfns + def pfn2path(self, pfn): tmp = list(self.parse_pfns(pfn).values())[0] return '/'.join([tmp['prefix'], tmp['path'], tmp['name']])
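A minimal standalone sketch of the PFN construction that this patch introduces is shown below; the `attributes` values, the `_get_path` helper, and the example scope/name are placeholders chosen for illustration rather than values taken from a real Rucio configuration.

```python
# Sketch of the lfns2pfns() logic added by the patch, outside the protocol class.
attributes = {'scheme': 'file', 'hostname': '', 'prefix': '/tmp/rucio_rse/'}

def _get_path(scope, name):
    # Placeholder for the deterministic path helper used by the real protocol.
    return '%s/%s' % (scope, name)

def lfns2pfns(lfns, attributes=attributes):
    pfns = {}
    prefix = attributes['prefix']
    if not prefix.startswith('/'):
        prefix = '/' + prefix
    if not prefix.endswith('/'):
        prefix = prefix + '/'
    for lfn in ([lfns] if isinstance(lfns, dict) else lfns):
        scope, name = str(lfn['scope']), lfn['name']
        if lfn.get('path'):
            path = lfn['path'][1:] if lfn['path'].startswith('/') else lfn['path']
        else:
            path = _get_path(scope, name)
        pfns['%s:%s' % (scope, name)] = ''.join(
            [attributes['scheme'], '://', attributes['hostname'], prefix, path])
    return pfns

# With an empty hostname the PFN no longer carries the spurious ':0' port:
print(lfns2pfns({'scope': 'user.test', 'name': 'testfile'}))
# {'user.test:testfile': 'file:///tmp/rucio_rse/user.test/testfile'}
```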
{"golden_diff": "diff --git a/lib/rucio/rse/protocols/posix.py b/lib/rucio/rse/protocols/posix.py\n--- a/lib/rucio/rse/protocols/posix.py\n+++ b/lib/rucio/rse/protocols/posix.py\n@@ -8,6 +8,7 @@\n # Authors:\n # - Ralph Vigne, <[email protected]>, 2012-2014\n # - Nicolo Magini, <[email protected]>, 2018\n+# - David Cameron, <[email protected]>, 2020\n #\n # PY3K COMPATIBLE\n \n@@ -158,6 +159,43 @@\n else:\n raise exception.ServiceUnavailable(e)\n \n+ def lfns2pfns(self, lfns):\n+ \"\"\" Returns fully qualified PFNs for the file referred by each lfn in\n+ the lfns list.\n+\n+ :param lfns: List of lfns. If lfn['path'] is present it is used as\n+ the path to the file, otherwise the path is constructed\n+ deterministically.\n+\n+ :returns: Fully qualified PFNs.\n+ \"\"\"\n+ pfns = {}\n+ prefix = self.attributes['prefix']\n+\n+ if not prefix.startswith('/'):\n+ prefix = ''.join(['/', prefix])\n+ if not prefix.endswith('/'):\n+ prefix = ''.join([prefix, '/'])\n+\n+ lfns = [lfns] if isinstance(lfns, dict) else lfns\n+ for lfn in lfns:\n+ scope, name = str(lfn['scope']), lfn['name']\n+ if lfn.get('path'):\n+ pfns['%s:%s' % (scope, name)] = ''.join([self.attributes['scheme'],\n+ '://',\n+ self.attributes['hostname'],\n+ prefix,\n+ lfn['path'] if not lfn['path'].startswith('/') else lfn['path'][1:]\n+ ])\n+ else:\n+ pfns['%s:%s' % (scope, name)] = ''.join([self.attributes['scheme'],\n+ '://',\n+ self.attributes['hostname'],\n+ prefix,\n+ self._get_path(scope=scope, name=name)\n+ ])\n+ return pfns\n+\n def pfn2path(self, pfn):\n tmp = list(self.parse_pfns(pfn).values())[0]\n return '/'.join([tmp['prefix'], tmp['path'], tmp['name']])\n", "issue": "Better handling of posix replica URLs\nMotivation\r\n----------\r\n\r\nWith `rucio.rse.protocols.posix.Default` implementation, the replicas with \"file\" schema always have port `0` in them, eg\r\n\r\n`TEST1: file://:0/tmp/testfile`\r\n\r\nModification\r\n------------\r\n\r\nOverride `lfns2pfns()` in the posix protocol.\r\n\r\nI will submit a pull request for this shortly.\r\n\n", "before_files": [{"content": "# Copyright European Organization for Nuclear Research (CERN)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# You may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Authors:\n# - Ralph Vigne, <[email protected]>, 2012-2014\n# - Nicolo Magini, <[email protected]>, 2018\n#\n# PY3K COMPATIBLE\n\nimport os\nimport os.path\nimport shutil\nfrom subprocess import call\n\nfrom rucio.common import exception\nfrom rucio.common.utils import adler32\nfrom rucio.rse.protocols import protocol\n\n\nclass Default(protocol.RSEProtocol):\n \"\"\" Implementing access to RSEs using the local filesystem.\"\"\"\n\n def exists(self, pfn):\n \"\"\"\n Checks if the requested file is known by the referred RSE.\n\n :param pfn: Physical file name\n\n :returns: True if the file exists, False if it doesn't\n\n :raises SourceNotFound: if the source file was not found on the referred storage.\n \"\"\"\n status = ''\n try:\n status = os.path.exists(self.pfn2path(pfn))\n except Exception as e:\n raise exception.ServiceUnavailable(e)\n return status\n\n def connect(self):\n \"\"\"\n Establishes the actual connection to the referred RSE.\n\n :param: credentials needed to establish a connection with the stroage.\n\n :raises RSEAccessDenied: if no connection could be established.\n \"\"\"\n pass\n\n def close(self):\n \"\"\" Closes the connection to 
RSE.\"\"\"\n pass\n\n def get(self, pfn, dest, transfer_timeout=None):\n \"\"\" Provides access to files stored inside connected the RSE.\n\n :param pfn: Physical file name of requested file\n :param dest: Name and path of the files when stored at the client\n :param transfer_timeout Transfer timeout (in seconds) - dummy\n\n :raises DestinationNotAccessible: if the destination storage was not accessible.\n :raises ServiceUnavailable: if some generic error occured in the library.\n :raises SourceNotFound: if the source file was not found on the referred storage.\n \"\"\"\n try:\n shutil.copy(self.pfn2path(pfn), dest)\n except IOError as e:\n try: # To check if the error happend local or remote\n with open(dest, 'wb'):\n pass\n call(['rm', '-rf', dest])\n except IOError as e:\n if e.errno == 2:\n raise exception.DestinationNotAccessible(e)\n else:\n raise exception.ServiceUnavailable(e)\n if e.errno == 2:\n raise exception.SourceNotFound(e)\n else:\n raise exception.ServiceUnavailable(e)\n\n def put(self, source, target, source_dir=None, transfer_timeout=None):\n \"\"\"\n Allows to store files inside the referred RSE.\n\n :param source: path to the source file on the client file system\n :param target: path to the destination file on the storage\n :param source_dir: Path where the to be transferred files are stored in the local file system\n :param transfer_timeout Transfer timeout (in seconds) - dummy\n\n :raises DestinationNotAccessible: if the destination storage was not accessible.\n :raises ServiceUnavailable: if some generic error occured in the library.\n :raises SourceNotFound: if the source file was not found on the referred storage.\n \"\"\"\n target = self.pfn2path(target)\n\n if source_dir:\n sf = source_dir + '/' + source\n else:\n sf = source\n try:\n dirs = os.path.dirname(target)\n if not os.path.exists(dirs):\n os.makedirs(dirs)\n shutil.copy(sf, target)\n except IOError as e:\n if e.errno == 2:\n raise exception.SourceNotFound(e)\n elif not self.exists(self.rse['prefix']):\n path = ''\n for p in self.rse['prefix'].split('/'):\n path += p + '/'\n os.mkdir(path)\n shutil.copy(sf, self.pfn2path(target))\n else:\n raise exception.DestinationNotAccessible(e)\n\n def delete(self, pfn):\n \"\"\" Deletes a file from the connected RSE.\n\n :param pfn: pfn to the to be deleted file\n\n :raises ServiceUnavailable: if some generic error occured in the library.\n :raises SourceNotFound: if the source file was not found on the referred storage.\n \"\"\"\n try:\n os.remove(self.pfn2path(pfn))\n except OSError as e:\n if e.errno == 2:\n raise exception.SourceNotFound(e)\n\n def rename(self, pfn, new_pfn):\n \"\"\" Allows to rename a file stored inside the connected RSE.\n\n :param path: path to the current file on the storage\n :param new_path: path to the new file on the storage\n\n :raises DestinationNotAccessible: if the destination storage was not accessible.\n :raises ServiceUnavailable: if some generic error occured in the library.\n :raises SourceNotFound: if the source file was not found on the referred storage.\n \"\"\"\n path = self.pfn2path(pfn)\n new_path = self.pfn2path(new_pfn)\n try:\n if not os.path.exists(os.path.dirname(new_path)):\n os.makedirs(os.path.dirname(new_path))\n os.rename(path, new_path)\n except IOError as e:\n if e.errno == 2:\n if self.exists(self.pfn2path(path)):\n raise exception.SourceNotFound(e)\n else:\n raise exception.DestinationNotAccessible(e)\n else:\n raise exception.ServiceUnavailable(e)\n\n def pfn2path(self, pfn):\n tmp = 
list(self.parse_pfns(pfn).values())[0]\n return '/'.join([tmp['prefix'], tmp['path'], tmp['name']])\n\n def stat(self, pfn):\n \"\"\" Determines the file size in bytes and checksum (adler32) of the provided file.\n\n :param pfn: The PFN the file.\n\n :returns: a dict containing the keys filesize and adler32.\n \"\"\"\n path = self.pfn2path(pfn)\n return {'filesize': os.stat(path)[os.path.stat.ST_SIZE], 'adler32': adler32(path)}\n", "path": "lib/rucio/rse/protocols/posix.py"}], "after_files": [{"content": "# Copyright European Organization for Nuclear Research (CERN)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# You may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Authors:\n# - Ralph Vigne, <[email protected]>, 2012-2014\n# - Nicolo Magini, <[email protected]>, 2018\n# - David Cameron, <[email protected]>, 2020\n#\n# PY3K COMPATIBLE\n\nimport os\nimport os.path\nimport shutil\nfrom subprocess import call\n\nfrom rucio.common import exception\nfrom rucio.common.utils import adler32\nfrom rucio.rse.protocols import protocol\n\n\nclass Default(protocol.RSEProtocol):\n \"\"\" Implementing access to RSEs using the local filesystem.\"\"\"\n\n def exists(self, pfn):\n \"\"\"\n Checks if the requested file is known by the referred RSE.\n\n :param pfn: Physical file name\n\n :returns: True if the file exists, False if it doesn't\n\n :raises SourceNotFound: if the source file was not found on the referred storage.\n \"\"\"\n status = ''\n try:\n status = os.path.exists(self.pfn2path(pfn))\n except Exception as e:\n raise exception.ServiceUnavailable(e)\n return status\n\n def connect(self):\n \"\"\"\n Establishes the actual connection to the referred RSE.\n\n :param: credentials needed to establish a connection with the stroage.\n\n :raises RSEAccessDenied: if no connection could be established.\n \"\"\"\n pass\n\n def close(self):\n \"\"\" Closes the connection to RSE.\"\"\"\n pass\n\n def get(self, pfn, dest, transfer_timeout=None):\n \"\"\" Provides access to files stored inside connected the RSE.\n\n :param pfn: Physical file name of requested file\n :param dest: Name and path of the files when stored at the client\n :param transfer_timeout Transfer timeout (in seconds) - dummy\n\n :raises DestinationNotAccessible: if the destination storage was not accessible.\n :raises ServiceUnavailable: if some generic error occured in the library.\n :raises SourceNotFound: if the source file was not found on the referred storage.\n \"\"\"\n try:\n shutil.copy(self.pfn2path(pfn), dest)\n except IOError as e:\n try: # To check if the error happend local or remote\n with open(dest, 'wb'):\n pass\n call(['rm', '-rf', dest])\n except IOError as e:\n if e.errno == 2:\n raise exception.DestinationNotAccessible(e)\n else:\n raise exception.ServiceUnavailable(e)\n if e.errno == 2:\n raise exception.SourceNotFound(e)\n else:\n raise exception.ServiceUnavailable(e)\n\n def put(self, source, target, source_dir=None, transfer_timeout=None):\n \"\"\"\n Allows to store files inside the referred RSE.\n\n :param source: path to the source file on the client file system\n :param target: path to the destination file on the storage\n :param source_dir: Path where the to be transferred files are stored in the local file system\n :param transfer_timeout Transfer timeout (in seconds) - dummy\n\n :raises DestinationNotAccessible: if the destination storage was not accessible.\n :raises 
ServiceUnavailable: if some generic error occured in the library.\n :raises SourceNotFound: if the source file was not found on the referred storage.\n \"\"\"\n target = self.pfn2path(target)\n\n if source_dir:\n sf = source_dir + '/' + source\n else:\n sf = source\n try:\n dirs = os.path.dirname(target)\n if not os.path.exists(dirs):\n os.makedirs(dirs)\n shutil.copy(sf, target)\n except IOError as e:\n if e.errno == 2:\n raise exception.SourceNotFound(e)\n elif not self.exists(self.rse['prefix']):\n path = ''\n for p in self.rse['prefix'].split('/'):\n path += p + '/'\n os.mkdir(path)\n shutil.copy(sf, self.pfn2path(target))\n else:\n raise exception.DestinationNotAccessible(e)\n\n def delete(self, pfn):\n \"\"\" Deletes a file from the connected RSE.\n\n :param pfn: pfn to the to be deleted file\n\n :raises ServiceUnavailable: if some generic error occured in the library.\n :raises SourceNotFound: if the source file was not found on the referred storage.\n \"\"\"\n try:\n os.remove(self.pfn2path(pfn))\n except OSError as e:\n if e.errno == 2:\n raise exception.SourceNotFound(e)\n\n def rename(self, pfn, new_pfn):\n \"\"\" Allows to rename a file stored inside the connected RSE.\n\n :param path: path to the current file on the storage\n :param new_path: path to the new file on the storage\n\n :raises DestinationNotAccessible: if the destination storage was not accessible.\n :raises ServiceUnavailable: if some generic error occured in the library.\n :raises SourceNotFound: if the source file was not found on the referred storage.\n \"\"\"\n path = self.pfn2path(pfn)\n new_path = self.pfn2path(new_pfn)\n try:\n if not os.path.exists(os.path.dirname(new_path)):\n os.makedirs(os.path.dirname(new_path))\n os.rename(path, new_path)\n except IOError as e:\n if e.errno == 2:\n if self.exists(self.pfn2path(path)):\n raise exception.SourceNotFound(e)\n else:\n raise exception.DestinationNotAccessible(e)\n else:\n raise exception.ServiceUnavailable(e)\n\n def lfns2pfns(self, lfns):\n \"\"\" Returns fully qualified PFNs for the file referred by each lfn in\n the lfns list.\n\n :param lfns: List of lfns. If lfn['path'] is present it is used as\n the path to the file, otherwise the path is constructed\n deterministically.\n\n :returns: Fully qualified PFNs.\n \"\"\"\n pfns = {}\n prefix = self.attributes['prefix']\n\n if not prefix.startswith('/'):\n prefix = ''.join(['/', prefix])\n if not prefix.endswith('/'):\n prefix = ''.join([prefix, '/'])\n\n lfns = [lfns] if isinstance(lfns, dict) else lfns\n for lfn in lfns:\n scope, name = str(lfn['scope']), lfn['name']\n if lfn.get('path'):\n pfns['%s:%s' % (scope, name)] = ''.join([self.attributes['scheme'],\n '://',\n self.attributes['hostname'],\n prefix,\n lfn['path'] if not lfn['path'].startswith('/') else lfn['path'][1:]\n ])\n else:\n pfns['%s:%s' % (scope, name)] = ''.join([self.attributes['scheme'],\n '://',\n self.attributes['hostname'],\n prefix,\n self._get_path(scope=scope, name=name)\n ])\n return pfns\n\n def pfn2path(self, pfn):\n tmp = list(self.parse_pfns(pfn).values())[0]\n return '/'.join([tmp['prefix'], tmp['path'], tmp['name']])\n\n def stat(self, pfn):\n \"\"\" Determines the file size in bytes and checksum (adler32) of the provided file.\n\n :param pfn: The PFN the file.\n\n :returns: a dict containing the keys filesize and adler32.\n \"\"\"\n path = self.pfn2path(pfn)\n return {'filesize': os.stat(path)[os.path.stat.ST_SIZE], 'adler32': adler32(path)}\n", "path": "lib/rucio/rse/protocols/posix.py"}]}
2167
559
gh_patches_debug_6430
rasdani/github-patches
git_diff
horovod__horovod-1342
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- terminate called after throwing an instance of 'gloo::EnforceNotMet' ifa != nullptr. Unable to find address for: eth0 hvd.init() throws following error ``` >>> hvd.init() terminate called after throwing an instance of 'gloo::EnforceNotMet' what(): [enforce fail at /tmp/pip-install-3dvzdqhs/horovod/third_party/gloo/gloo/transport/tcp/device.cc:88] ifa != nullptr. Unable to find address for: eth0 Aborted ``` What I understand is that gloo is hardcoded to check for eth0 and in my case (Centos 7.6) I have em1 and em2. I came acrros solution to export the below env variables but i am still seeing the issue ``` export GLOO_SOCKET_IFNAME=em2 export NCCL_SOCKET_IFNAME=em2 ``` How can I get pass this? **Environment:** Framework: Keras Tensorflow Version 1.13.1 Keras Version 2.2.4-tf Horovod version: horovod==0.17.0.post1 Python version: 3.6 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `horovod/run/gloo_run.py` Content: ``` 1 # Copyright 2019 Uber Technologies, Inc. All Rights Reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 # ============================================================================== 15 16 import os 17 import collections 18 import threading 19 import signal 20 21 from horovod.run.rendezvous.http_server import RendezvousServer 22 from horovod.run.common.util import env as env_util, safe_shell_exec 23 from horovod.run.util import threads 24 from psutil import net_if_addrs 25 from socket import AF_INET 26 27 try: 28 from shlex import quote 29 except ImportError: 30 from pipes import quote 31 32 33 class HostInfo: 34 def __init__(self, host_item): 35 hostname, slots = host_item.strip().split(':') 36 self.hostname = hostname 37 self.slots = int(slots) 38 39 40 class SlotInfo: 41 def __init__(self, hostname, rank, local_rank, cross_rank, size): 42 self.hostname = hostname 43 self.rank = rank 44 self.size = size 45 self.local_rank = local_rank 46 self.local_size = None 47 self.cross_rank = cross_rank 48 self.cross_size = None 49 50 51 def _allocate(hosts, np): 52 """ 53 Find the allocation of processes on hosts, this function will try to 54 allocate as many as possible processes on the same host to leverage 55 local network. 56 :param hosts: list of addresses and number of processes on each host. 57 For example, 58 'worker-0:2,worker-1:2' 59 '10.11.11.11:4,10.11.11.12,4' 60 :type hosts: string 61 :param np: total number of processes to be allocated 62 :type np: int 63 :return: a list of the allocation of process on hosts in a AllocInfo object. 
64 Members in the object include: hostname, rank, local_rank, cross_rank, 65 total_size, local_size, cross_size 66 :rtype: list[dict()] 67 """ 68 69 host_list = [] 70 # split the host string to host list 71 for host_item in hosts.split(','): 72 host_list.append(HostInfo(host_item)) 73 74 rank = 0 75 alloc_list = [] 76 77 # key: local_rank; value: cross_size for this local_rank 78 local_sizes = collections.defaultdict(int) 79 # key: cross_rank; value: local_size for this cross_rank 80 cross_sizes = collections.defaultdict(int) 81 82 # allocate processes into slots 83 for host_idx, host_info in enumerate(host_list): 84 for local_rank in range(host_info.slots): 85 if rank == np: 86 break 87 cross_rank = host_idx 88 alloc_list.append( 89 SlotInfo( 90 host_info.hostname, 91 rank, 92 local_rank, 93 cross_rank, 94 np)) 95 cross_sizes[local_rank] += 1 96 local_sizes[cross_rank] += 1 97 rank += 1 98 99 if rank < np: 100 raise ValueError("Process number should not be larger than " 101 "total available slots.") 102 103 # Fill in the local_size and cross_size because we can only know these number after 104 # allocation is done. 105 for alloc_item in alloc_list: 106 alloc_item.local_size = local_sizes[alloc_item.cross_rank] 107 alloc_item.cross_size = cross_sizes[alloc_item.local_rank] 108 109 return alloc_list 110 111 112 def _launch_jobs(settings, host_alloc_plan, remote_host_names, _run_command): 113 """ 114 executes the jobs defined by run command on hosts. 115 :param hosts_alloc: list of dict indicating the allocating info. 116 For example, 117 [{'Hostname':'worker-0', 'Rank': 0, 'Local_rank': 0, 'Cross_rank':0, 118 'Size':2, 'Local_size':1, 'Cross_size':2}, 119 {'Hostname':'worker-1', 'Rank': 1, 'Local_rank': 0, 'Cross_rank':1, 120 'Size':2, 'Local_size':1, 'Cross_size':2} 121 ] 122 :type hosts_alloc: list(dict) 123 :param remote_host_names: names that are resolved to one of the addresses 124 of remote hosts interfaces. 125 :type remote_host_names: set 126 :param _run_command: command to execute 127 :type _run_command: string 128 :return: 129 :rtype: 130 """ 131 132 def _exec_command(_command, _index, event_): 133 if settings.verbose: 134 print(_command) 135 try: 136 exit_code = safe_shell_exec.execute(_command, index=_index, event=event_) 137 if exit_code != 0: 138 print('Process {idx} exit with status code {ec}.'.format(idx=_index, ec=exit_code)) 139 except Exception as e: 140 print('Exception happened during safe_shell_exec, exception ' 141 'message: {message}'.format(message=e)) 142 return 0 143 144 ssh_port_arg = '-p {ssh_port}'.format(ssh_port=settings.ssh_port) if settings.ssh_port else '' 145 146 # Create a event for communication between threads 147 event = threading.Event() 148 149 def set_event_on_sigterm(signum, frame): 150 event.set() 151 152 signal.signal(signal.SIGINT, set_event_on_sigterm) 153 signal.signal(signal.SIGTERM, set_event_on_sigterm) 154 155 args_list = [] 156 for alloc_info in host_alloc_plan: 157 # generate env for rendezvous 158 horovod_rendez_env = 'HOROVOD_RANK={rank} HOROVOD_SIZE={size} ' \ 159 'HOROVOD_LOCAL_RANK={local_rank} HOROVOD_LOCAL_SIZE={local_size} ' \ 160 'HOROVOD_CROSS_RANK={cross_rank} HOROVOD_CROSS_SIZE={cross_size} ' \ 161 .format(rank=alloc_info.rank, size=alloc_info.size, 162 local_rank=alloc_info.local_rank, local_size=alloc_info.local_size, 163 cross_rank=alloc_info.cross_rank, cross_size=alloc_info.cross_size) 164 165 host_name = alloc_info.hostname 166 167 env = os.environ.copy() 168 # TODO: Workaround for over-buffered outputs. 
Investigate how mpirun avoids this problem. 169 env['PYTHONUNBUFFERED'] = '1' 170 local_command = '{horovod_env} {env} {run_command}' .format( 171 horovod_env=horovod_rendez_env, 172 env=' '.join(['%s=%s' % (key, quote(value)) for key, value in env.items() 173 if env_util.is_exportable(key)]), 174 run_command=_run_command) 175 176 if host_name not in remote_host_names: 177 command = local_command 178 else: 179 command = 'ssh -o StrictHostKeyChecking=no {host} {ssh_port_arg} ' \ 180 '{local_command}'.format( 181 host=host_name, 182 ssh_port_arg=ssh_port_arg, 183 local_command=quote('cd {pwd} >& /dev/null ; {local_command}' 184 .format(pwd=os.getcwd(), local_command=local_command)) 185 ) 186 args_list.append([command, alloc_info.rank, event]) 187 188 # Each thread will use ssh command to launch the job on each remote host. If an 189 # error occurs in one thread, entire process will be terminated. Otherwise, 190 # threads will keep running and ssh session. In case, the main thread receives 191 # a SIGINT, the event will be set and the spawned threads will kill their 192 # corresponding middleman processes and thus the jobs will be killed as 193 # well. 194 threads.execute_function_multithreaded(_exec_command, 195 args_list, 196 block_until_all_done=True) 197 198 199 def gloo_run(settings, remote_host_names, common_intfs): 200 # allocate processes into slots 201 host_alloc_plan = _allocate(settings.hosts, settings.num_proc) 202 203 # create global rendezvous server 204 global_rendezv = RendezvousServer(settings.verbose) 205 # Start rendezvous server and get port that it is listening 206 global_rendezv_port = global_rendezv.start_server(host_alloc_plan) 207 208 # get the server IPv4 address 209 iface = list(common_intfs)[0] 210 server_ip = None 211 for addr in net_if_addrs()[iface]: 212 if addr.family == AF_INET: 213 server_ip = addr.address 214 215 if not server_ip: 216 raise RuntimeError( 217 'Cannot find an IPv4 address of the common interface.') 218 219 run_command = ( 220 'HOROVOD_GLOO_RENDEZVOUS_ADDR={addr} ' 221 'HOROVOD_GLOO_RENDEZVOUS_PORT={port} ' 222 'HOROVOD_CONTROLLER=gloo ' 223 'HOROVOD_CPU_OPERATIONS=gloo ' 224 'HOROVOD_IFACE={iface} ' 225 'NCCL_SOCKET_IFNAME={common_intfs} ' 226 '{command}' # expect a lot of environment variables 227 .format(addr=server_ip, 228 port=global_rendezv_port, 229 iface=iface, # TODO: add multiple ifaces in future 230 common_intfs=','.join(common_intfs), 231 command=' '.join(quote(par) for par in settings.command))) 232 233 _launch_jobs(settings, host_alloc_plan, remote_host_names, run_command) 234 return 235 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/horovod/run/gloo_run.py b/horovod/run/gloo_run.py --- a/horovod/run/gloo_run.py +++ b/horovod/run/gloo_run.py @@ -221,7 +221,7 @@ 'HOROVOD_GLOO_RENDEZVOUS_PORT={port} ' 'HOROVOD_CONTROLLER=gloo ' 'HOROVOD_CPU_OPERATIONS=gloo ' - 'HOROVOD_IFACE={iface} ' + 'HOROVOD_GLOO_IFACE={iface} ' 'NCCL_SOCKET_IFNAME={common_intfs} ' '{command}' # expect a lot of environment variables .format(addr=server_ip,
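A minimal sketch of the rendezvous environment block after the rename above: the interface name `em2` comes from the issue report, while the address, port, and function name are placeholder values used only for illustration.

```python
# Build the environment prefix that gloo_run prepends to the launched command.
def gloo_env_prefix(addr, port, iface, common_intfs):
    return ('HOROVOD_GLOO_RENDEZVOUS_ADDR={addr} '
            'HOROVOD_GLOO_RENDEZVOUS_PORT={port} '
            'HOROVOD_CONTROLLER=gloo '
            'HOROVOD_CPU_OPERATIONS=gloo '
            'HOROVOD_GLOO_IFACE={iface} '  # was HOROVOD_IFACE before the patch
            'NCCL_SOCKET_IFNAME={common_intfs} '
            .format(addr=addr, port=port, iface=iface,
                    common_intfs=','.join(common_intfs)))

print(gloo_env_prefix('10.0.0.1', 12345, 'em2', ['em2']))
```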
{"golden_diff": "diff --git a/horovod/run/gloo_run.py b/horovod/run/gloo_run.py\n--- a/horovod/run/gloo_run.py\n+++ b/horovod/run/gloo_run.py\n@@ -221,7 +221,7 @@\n 'HOROVOD_GLOO_RENDEZVOUS_PORT={port} '\n 'HOROVOD_CONTROLLER=gloo '\n 'HOROVOD_CPU_OPERATIONS=gloo '\n- 'HOROVOD_IFACE={iface} '\n+ 'HOROVOD_GLOO_IFACE={iface} '\n 'NCCL_SOCKET_IFNAME={common_intfs} '\n '{command}' # expect a lot of environment variables\n .format(addr=server_ip,\n", "issue": "terminate called after throwing an instance of 'gloo::EnforceNotMet' ifa != nullptr. Unable to find address for: eth0\nhvd.init() throws following error\r\n```\r\n>>> hvd.init()\r\nterminate called after throwing an instance of 'gloo::EnforceNotMet'\r\n what(): [enforce fail at /tmp/pip-install-3dvzdqhs/horovod/third_party/gloo/gloo/transport/tcp/device.cc:88] ifa != nullptr. Unable to find address for: eth0\r\nAborted\r\n```\r\nWhat I understand is that gloo is hardcoded to check for eth0 and in my case (Centos 7.6) I have em1 and em2. I came acrros solution to export the below env variables but i am still seeing the issue\r\n```\r\nexport GLOO_SOCKET_IFNAME=em2\r\nexport NCCL_SOCKET_IFNAME=em2\r\n```\r\nHow can I get pass this?\r\n\r\n**Environment:**\r\nFramework: Keras\r\nTensorflow Version 1.13.1\r\nKeras Version 2.2.4-tf\r\nHorovod version: horovod==0.17.0.post1\r\nPython version: 3.6\r\n\n", "before_files": [{"content": "# Copyright 2019 Uber Technologies, Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport os\nimport collections\nimport threading\nimport signal\n\nfrom horovod.run.rendezvous.http_server import RendezvousServer\nfrom horovod.run.common.util import env as env_util, safe_shell_exec\nfrom horovod.run.util import threads\nfrom psutil import net_if_addrs\nfrom socket import AF_INET\n\ntry:\n from shlex import quote\nexcept ImportError:\n from pipes import quote\n\n\nclass HostInfo:\n def __init__(self, host_item):\n hostname, slots = host_item.strip().split(':')\n self.hostname = hostname\n self.slots = int(slots)\n\n\nclass SlotInfo:\n def __init__(self, hostname, rank, local_rank, cross_rank, size):\n self.hostname = hostname\n self.rank = rank\n self.size = size\n self.local_rank = local_rank\n self.local_size = None\n self.cross_rank = cross_rank\n self.cross_size = None\n\n\ndef _allocate(hosts, np):\n \"\"\"\n Find the allocation of processes on hosts, this function will try to\n allocate as many as possible processes on the same host to leverage\n local network.\n :param hosts: list of addresses and number of processes on each host.\n For example,\n 'worker-0:2,worker-1:2'\n '10.11.11.11:4,10.11.11.12,4'\n :type hosts: string\n :param np: total number of processes to be allocated\n :type np: int\n :return: a list of the allocation of process on hosts in a AllocInfo object.\n Members in the object include: hostname, rank, local_rank, cross_rank,\n total_size, local_size, cross_size\n :rtype: 
list[dict()]\n \"\"\"\n\n host_list = []\n # split the host string to host list\n for host_item in hosts.split(','):\n host_list.append(HostInfo(host_item))\n\n rank = 0\n alloc_list = []\n\n # key: local_rank; value: cross_size for this local_rank\n local_sizes = collections.defaultdict(int)\n # key: cross_rank; value: local_size for this cross_rank\n cross_sizes = collections.defaultdict(int)\n\n # allocate processes into slots\n for host_idx, host_info in enumerate(host_list):\n for local_rank in range(host_info.slots):\n if rank == np:\n break\n cross_rank = host_idx\n alloc_list.append(\n SlotInfo(\n host_info.hostname,\n rank,\n local_rank,\n cross_rank,\n np))\n cross_sizes[local_rank] += 1\n local_sizes[cross_rank] += 1\n rank += 1\n\n if rank < np:\n raise ValueError(\"Process number should not be larger than \"\n \"total available slots.\")\n\n # Fill in the local_size and cross_size because we can only know these number after\n # allocation is done.\n for alloc_item in alloc_list:\n alloc_item.local_size = local_sizes[alloc_item.cross_rank]\n alloc_item.cross_size = cross_sizes[alloc_item.local_rank]\n\n return alloc_list\n\n\ndef _launch_jobs(settings, host_alloc_plan, remote_host_names, _run_command):\n \"\"\"\n executes the jobs defined by run command on hosts.\n :param hosts_alloc: list of dict indicating the allocating info.\n For example,\n [{'Hostname':'worker-0', 'Rank': 0, 'Local_rank': 0, 'Cross_rank':0,\n 'Size':2, 'Local_size':1, 'Cross_size':2},\n {'Hostname':'worker-1', 'Rank': 1, 'Local_rank': 0, 'Cross_rank':1,\n 'Size':2, 'Local_size':1, 'Cross_size':2}\n ]\n :type hosts_alloc: list(dict)\n :param remote_host_names: names that are resolved to one of the addresses\n of remote hosts interfaces.\n :type remote_host_names: set\n :param _run_command: command to execute\n :type _run_command: string\n :return:\n :rtype:\n \"\"\"\n\n def _exec_command(_command, _index, event_):\n if settings.verbose:\n print(_command)\n try:\n exit_code = safe_shell_exec.execute(_command, index=_index, event=event_)\n if exit_code != 0:\n print('Process {idx} exit with status code {ec}.'.format(idx=_index, ec=exit_code))\n except Exception as e:\n print('Exception happened during safe_shell_exec, exception '\n 'message: {message}'.format(message=e))\n return 0\n\n ssh_port_arg = '-p {ssh_port}'.format(ssh_port=settings.ssh_port) if settings.ssh_port else ''\n\n # Create a event for communication between threads\n event = threading.Event()\n\n def set_event_on_sigterm(signum, frame):\n event.set()\n\n signal.signal(signal.SIGINT, set_event_on_sigterm)\n signal.signal(signal.SIGTERM, set_event_on_sigterm)\n\n args_list = []\n for alloc_info in host_alloc_plan:\n # generate env for rendezvous\n horovod_rendez_env = 'HOROVOD_RANK={rank} HOROVOD_SIZE={size} ' \\\n 'HOROVOD_LOCAL_RANK={local_rank} HOROVOD_LOCAL_SIZE={local_size} ' \\\n 'HOROVOD_CROSS_RANK={cross_rank} HOROVOD_CROSS_SIZE={cross_size} ' \\\n .format(rank=alloc_info.rank, size=alloc_info.size,\n local_rank=alloc_info.local_rank, local_size=alloc_info.local_size,\n cross_rank=alloc_info.cross_rank, cross_size=alloc_info.cross_size)\n\n host_name = alloc_info.hostname\n\n env = os.environ.copy()\n # TODO: Workaround for over-buffered outputs. 
Investigate how mpirun avoids this problem.\n env['PYTHONUNBUFFERED'] = '1'\n local_command = '{horovod_env} {env} {run_command}' .format(\n horovod_env=horovod_rendez_env,\n env=' '.join(['%s=%s' % (key, quote(value)) for key, value in env.items()\n if env_util.is_exportable(key)]),\n run_command=_run_command)\n\n if host_name not in remote_host_names:\n command = local_command\n else:\n command = 'ssh -o StrictHostKeyChecking=no {host} {ssh_port_arg} ' \\\n '{local_command}'.format(\n host=host_name,\n ssh_port_arg=ssh_port_arg,\n local_command=quote('cd {pwd} >& /dev/null ; {local_command}'\n .format(pwd=os.getcwd(), local_command=local_command))\n )\n args_list.append([command, alloc_info.rank, event])\n\n # Each thread will use ssh command to launch the job on each remote host. If an\n # error occurs in one thread, entire process will be terminated. Otherwise,\n # threads will keep running and ssh session. In case, the main thread receives\n # a SIGINT, the event will be set and the spawned threads will kill their\n # corresponding middleman processes and thus the jobs will be killed as\n # well.\n threads.execute_function_multithreaded(_exec_command,\n args_list,\n block_until_all_done=True)\n\n\ndef gloo_run(settings, remote_host_names, common_intfs):\n # allocate processes into slots\n host_alloc_plan = _allocate(settings.hosts, settings.num_proc)\n\n # create global rendezvous server\n global_rendezv = RendezvousServer(settings.verbose)\n # Start rendezvous server and get port that it is listening\n global_rendezv_port = global_rendezv.start_server(host_alloc_plan)\n\n # get the server IPv4 address\n iface = list(common_intfs)[0]\n server_ip = None\n for addr in net_if_addrs()[iface]:\n if addr.family == AF_INET:\n server_ip = addr.address\n\n if not server_ip:\n raise RuntimeError(\n 'Cannot find an IPv4 address of the common interface.')\n\n run_command = (\n 'HOROVOD_GLOO_RENDEZVOUS_ADDR={addr} '\n 'HOROVOD_GLOO_RENDEZVOUS_PORT={port} '\n 'HOROVOD_CONTROLLER=gloo '\n 'HOROVOD_CPU_OPERATIONS=gloo '\n 'HOROVOD_IFACE={iface} '\n 'NCCL_SOCKET_IFNAME={common_intfs} '\n '{command}' # expect a lot of environment variables\n .format(addr=server_ip,\n port=global_rendezv_port,\n iface=iface, # TODO: add multiple ifaces in future\n common_intfs=','.join(common_intfs),\n command=' '.join(quote(par) for par in settings.command)))\n\n _launch_jobs(settings, host_alloc_plan, remote_host_names, run_command)\n return\n", "path": "horovod/run/gloo_run.py"}], "after_files": [{"content": "# Copyright 2019 Uber Technologies, Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport os\nimport collections\nimport threading\nimport signal\n\nfrom horovod.run.rendezvous.http_server import RendezvousServer\nfrom horovod.run.common.util import env as env_util, safe_shell_exec\nfrom horovod.run.util import threads\nfrom psutil import net_if_addrs\nfrom socket import AF_INET\n\ntry:\n from shlex import quote\nexcept ImportError:\n from pipes import quote\n\n\nclass HostInfo:\n def __init__(self, host_item):\n hostname, slots = host_item.strip().split(':')\n self.hostname = hostname\n self.slots = int(slots)\n\n\nclass SlotInfo:\n def __init__(self, hostname, rank, local_rank, cross_rank, size):\n self.hostname = hostname\n self.rank = rank\n self.size = size\n self.local_rank = local_rank\n self.local_size = None\n self.cross_rank = cross_rank\n self.cross_size = None\n\n\ndef _allocate(hosts, np):\n \"\"\"\n Find the allocation of processes on hosts, this function will try to\n allocate as many as possible processes on the same host to leverage\n local network.\n :param hosts: list of addresses and number of processes on each host.\n For example,\n 'worker-0:2,worker-1:2'\n '10.11.11.11:4,10.11.11.12,4'\n :type hosts: string\n :param np: total number of processes to be allocated\n :type np: int\n :return: a list of the allocation of process on hosts in a AllocInfo object.\n Members in the object include: hostname, rank, local_rank, cross_rank,\n total_size, local_size, cross_size\n :rtype: list[dict()]\n \"\"\"\n\n host_list = []\n # split the host string to host list\n for host_item in hosts.split(','):\n host_list.append(HostInfo(host_item))\n\n rank = 0\n alloc_list = []\n\n # key: local_rank; value: cross_size for this local_rank\n local_sizes = collections.defaultdict(int)\n # key: cross_rank; value: local_size for this cross_rank\n cross_sizes = collections.defaultdict(int)\n\n # allocate processes into slots\n for host_idx, host_info in enumerate(host_list):\n for local_rank in range(host_info.slots):\n if rank == np:\n break\n cross_rank = host_idx\n alloc_list.append(\n SlotInfo(\n host_info.hostname,\n rank,\n local_rank,\n cross_rank,\n np))\n cross_sizes[local_rank] += 1\n local_sizes[cross_rank] += 1\n rank += 1\n\n if rank < np:\n raise ValueError(\"Process number should not be larger than \"\n \"total available slots.\")\n\n # Fill in the local_size and cross_size because we can only know these number after\n # allocation is done.\n for alloc_item in alloc_list:\n alloc_item.local_size = local_sizes[alloc_item.cross_rank]\n alloc_item.cross_size = cross_sizes[alloc_item.local_rank]\n\n return alloc_list\n\n\ndef _launch_jobs(settings, host_alloc_plan, remote_host_names, _run_command):\n \"\"\"\n executes the jobs defined by run command on hosts.\n :param hosts_alloc: list of dict indicating the allocating info.\n For example,\n [{'Hostname':'worker-0', 'Rank': 0, 'Local_rank': 0, 'Cross_rank':0,\n 
'Size':2, 'Local_size':1, 'Cross_size':2},\n {'Hostname':'worker-1', 'Rank': 1, 'Local_rank': 0, 'Cross_rank':1,\n 'Size':2, 'Local_size':1, 'Cross_size':2}\n ]\n :type hosts_alloc: list(dict)\n :param remote_host_names: names that are resolved to one of the addresses\n of remote hosts interfaces.\n :type remote_host_names: set\n :param _run_command: command to execute\n :type _run_command: string\n :return:\n :rtype:\n \"\"\"\n\n def _exec_command(_command, _index, event_):\n if settings.verbose:\n print(_command)\n try:\n exit_code = safe_shell_exec.execute(_command, index=_index, event=event_)\n if exit_code != 0:\n print('Process {idx} exit with status code {ec}.'.format(idx=_index, ec=exit_code))\n except Exception as e:\n print('Exception happened during safe_shell_exec, exception '\n 'message: {message}'.format(message=e))\n return 0\n\n ssh_port_arg = '-p {ssh_port}'.format(ssh_port=settings.ssh_port) if settings.ssh_port else ''\n\n # Create a event for communication between threads\n event = threading.Event()\n\n def set_event_on_sigterm(signum, frame):\n event.set()\n\n signal.signal(signal.SIGINT, set_event_on_sigterm)\n signal.signal(signal.SIGTERM, set_event_on_sigterm)\n\n args_list = []\n for alloc_info in host_alloc_plan:\n # generate env for rendezvous\n horovod_rendez_env = 'HOROVOD_RANK={rank} HOROVOD_SIZE={size} ' \\\n 'HOROVOD_LOCAL_RANK={local_rank} HOROVOD_LOCAL_SIZE={local_size} ' \\\n 'HOROVOD_CROSS_RANK={cross_rank} HOROVOD_CROSS_SIZE={cross_size} ' \\\n .format(rank=alloc_info.rank, size=alloc_info.size,\n local_rank=alloc_info.local_rank, local_size=alloc_info.local_size,\n cross_rank=alloc_info.cross_rank, cross_size=alloc_info.cross_size)\n\n host_name = alloc_info.hostname\n\n env = os.environ.copy()\n # TODO: Workaround for over-buffered outputs. Investigate how mpirun avoids this problem.\n env['PYTHONUNBUFFERED'] = '1'\n local_command = '{horovod_env} {env} {run_command}' .format(\n horovod_env=horovod_rendez_env,\n env=' '.join(['%s=%s' % (key, quote(value)) for key, value in env.items()\n if env_util.is_exportable(key)]),\n run_command=_run_command)\n\n if host_name not in remote_host_names:\n command = local_command\n else:\n command = 'ssh -o StrictHostKeyChecking=no {host} {ssh_port_arg} ' \\\n '{local_command}'.format(\n host=host_name,\n ssh_port_arg=ssh_port_arg,\n local_command=quote('cd {pwd} >& /dev/null ; {local_command}'\n .format(pwd=os.getcwd(), local_command=local_command))\n )\n args_list.append([command, alloc_info.rank, event])\n\n # Each thread will use ssh command to launch the job on each remote host. If an\n # error occurs in one thread, entire process will be terminated. Otherwise,\n # threads will keep running and ssh session. 
In case, the main thread receives\n # a SIGINT, the event will be set and the spawned threads will kill their\n # corresponding middleman processes and thus the jobs will be killed as\n # well.\n threads.execute_function_multithreaded(_exec_command,\n args_list,\n block_until_all_done=True)\n\n\ndef gloo_run(settings, remote_host_names, common_intfs):\n # allocate processes into slots\n host_alloc_plan = _allocate(settings.hosts, settings.num_proc)\n\n # create global rendezvous server\n global_rendezv = RendezvousServer(settings.verbose)\n # Start rendezvous server and get port that it is listening\n global_rendezv_port = global_rendezv.start_server(host_alloc_plan)\n\n # get the server IPv4 address\n iface = list(common_intfs)[0]\n server_ip = None\n for addr in net_if_addrs()[iface]:\n if addr.family == AF_INET:\n server_ip = addr.address\n\n if not server_ip:\n raise RuntimeError(\n 'Cannot find an IPv4 address of the common interface.')\n\n run_command = (\n 'HOROVOD_GLOO_RENDEZVOUS_ADDR={addr} '\n 'HOROVOD_GLOO_RENDEZVOUS_PORT={port} '\n 'HOROVOD_CONTROLLER=gloo '\n 'HOROVOD_CPU_OPERATIONS=gloo '\n 'HOROVOD_GLOO_IFACE={iface} '\n 'NCCL_SOCKET_IFNAME={common_intfs} '\n '{command}' # expect a lot of environment variables\n .format(addr=server_ip,\n port=global_rendezv_port,\n iface=iface, # TODO: add multiple ifaces in future\n common_intfs=','.join(common_intfs),\n command=' '.join(quote(par) for par in settings.command)))\n\n _launch_jobs(settings, host_alloc_plan, remote_host_names, run_command)\n return\n", "path": "horovod/run/gloo_run.py"}]}
3,234
167
gh_patches_debug_29111
rasdani/github-patches
git_diff
pwndbg__pwndbg-365
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Broken `entry` command ### Description The `entry` command pass arguments differently then the `run` command. ### Steps to reproduce ``` [dc@dc:pwndbg|dev *$%]$ gdb python Loaded 113 commands. Type pwndbg [filter] for a list. Reading symbols from python...(no debugging symbols found)...done. pwndbg> set exception-verbose on Set whether to print a full stacktracefor exceptions raised in Pwndbg commands to True pwndbg> run -c "print(1); print(2)" Starting program: /usr/bin/python -c "print(1); print(2)" [Thread debugging using libthread_db enabled] Using host libthread_db library "/usr/lib/libthread_db.so.1". 1 2 [Inferior 1 (process 20590) exited normally] pwndbg> entry -c "print(1); print(2)" ('-c', 'print(1); print(2)') Running '%s' run -c print(1); print(2) /bin/bash: -c: line 0: syntax error near unexpected token `(' /bin/bash: -c: line 0: `exec /usr/bin/python -c print(1); print(2)' Traceback (most recent call last): File "/home/dc/installed/pwndbg/pwndbg/commands/__init__.py", line 100, in __call__ return self.function(*args, **kwargs) File "/home/dc/installed/pwndbg/pwndbg/commands/__init__.py", line 181, in _OnlyWithFile return function(*a, **kw) File "/home/dc/installed/pwndbg/pwndbg/commands/start.py", line 72, in entry gdb.execute(run, from_tty=False) gdb.error: During startup program exited with code 1. If that is an issue, you can report it on https://github.com/pwndbg/pwndbg/issues (Please don't forget to search if it hasn't been reported before) PS: Pull requests are welcome ``` ### My version ``` pwndbg> version Gdb: GNU gdb (GDB) 8.0.1 Python: 3.6.3 (default, Oct 24 2017, 14:48:20) [GCC 7.2.0] Pwndbg: 1.0.0 build: 5811010 ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pwndbg/commands/start.py` Content: ``` 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 """ 4 Launches the target process after setting a breakpoint at a convenient 5 entry point. 6 """ 7 from __future__ import absolute_import 8 from __future__ import division 9 from __future__ import print_function 10 from __future__ import unicode_literals 11 12 import gdb 13 14 import pwndbg.commands 15 import pwndbg.elf 16 import pwndbg.events 17 import pwndbg.symbol 18 19 break_on_first_instruction = False 20 21 22 @pwndbg.events.start 23 def on_start(): 24 global break_on_first_instruction 25 if break_on_first_instruction: 26 spec = "*%#x" % (int(pwndbg.elf.entry())) 27 gdb.Breakpoint(spec, temporary=True) 28 break_on_first_instruction = False 29 30 31 @pwndbg.commands.Command 32 def start(*a): 33 """ 34 Set a breakpoint at a convenient location in the binary, 35 generally 'main', 'init', or the entry point. 36 """ 37 run = 'run ' + ' '.join(a) 38 39 symbols = ["main", 40 "_main", 41 "start", 42 "_start", 43 "init", 44 "_init"] 45 46 for symbol in symbols: 47 address = pwndbg.symbol.address(symbol) 48 49 if not address: 50 continue 51 52 b = gdb.Breakpoint(symbol, temporary=True) 53 gdb.execute(run, from_tty=False, to_string=True) 54 return 55 56 # Try a breakpoint at the binary entry 57 entry(*a) 58 59 60 @pwndbg.commands.Command 61 @pwndbg.commands.OnlyWithFile 62 def entry(*a): 63 """ 64 Set a breakpoint at the first instruction executed in 65 the target binary. 
66 """ 67 global break_on_first_instruction 68 break_on_first_instruction = True 69 run = 'run ' + ' '.join(a) 70 gdb.execute(run, from_tty=False) 71 ``` Path: `pwndbg/prompt.py` Content: ``` 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 from __future__ import absolute_import 4 from __future__ import division 5 from __future__ import print_function 6 from __future__ import unicode_literals 7 8 import gdb 9 10 import pwndbg.events 11 import pwndbg.memoize 12 13 hint_msg = 'Loaded %i commands. Type pwndbg [filter] for a list.' % len(pwndbg.commands.Command.commands) 14 15 print(pwndbg.color.red(hint_msg)) 16 cur = (gdb.selected_inferior(), gdb.selected_thread()) 17 18 19 def prompt_hook(*a): 20 global cur 21 new = (gdb.selected_inferior(), gdb.selected_thread()) 22 23 if cur != new: 24 pwndbg.events.after_reload(start=False) 25 cur = new 26 27 if pwndbg.proc.alive and pwndbg.proc.thread_is_stopped: 28 prompt_hook_on_stop(*a) 29 30 31 @pwndbg.memoize.reset_on_stop 32 def prompt_hook_on_stop(*a): 33 pwndbg.commands.context.context() 34 35 36 gdb.prompt_hook = prompt_hook 37 ``` Path: `pwndbg/dt.py` Content: ``` 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 """ 4 Prints structures in a manner similar to Windbg's "dt" command. 5 """ 6 from __future__ import absolute_import 7 from __future__ import division 8 from __future__ import print_function 9 from __future__ import unicode_literals 10 11 import glob 12 import os 13 import re 14 import subprocess 15 import tempfile 16 17 import gdb 18 19 import pwndbg.memory 20 import pwndbg.typeinfo 21 22 23 def get_type(v): 24 t = v.type 25 while not t.name: 26 if t.code == gdb.TYPE_CODE_PTR: 27 t = t.target() 28 return t.name 29 30 def get_typename(t): 31 return(str(t)) 32 33 def get_arrsize(f): 34 t = f.type 35 if t.code != gdb.TYPE_CODE_ARRAY: 36 return 0 37 t2 = t.target() 38 s = t2.sizeof 39 return int(t.sizeof / t2.sizeof) 40 41 def get_field_by_name(obj, field): 42 # Dereference once 43 if obj.type.code == gdb.TYPE_CODE_PTR: 44 obj = obj.dereference() 45 for f in re.split('(->|\.|\[\d+\])', field): 46 if not f: continue 47 if f == '->': 48 obj = obj.dereference() 49 elif f == '.': 50 pass 51 elif f.startswith('['): 52 n = int(f.strip('[]')) 53 obj = obj.cast(obj.dereference().type.pointer()) 54 obj += n 55 obj = obj.dereference() 56 else: 57 obj = obj[f] 58 return obj 59 60 def happy(typename): 61 prefix = '' 62 if 'unsigned' in typename: 63 prefix = 'u' 64 typename = typename.replace('unsigned ', '') 65 return prefix + { 66 'char': 'char', 67 'short int': 'short', 68 'long int': 'long', 69 'int': 'int', 70 'long long': 'longlong', 71 'float': 'float', 72 'double': 'double' 73 }[typename] 74 75 def dt(name='', addr=None, obj = None): 76 """ 77 Dump out a structure type Windbg style. 78 """ 79 # Return value is a list of strings.of 80 # We concatenate at the end. 81 rv = [] 82 83 if obj and not name: 84 t = obj.type 85 while t.code == (gdb.TYPE_CODE_PTR): 86 t = t.target() 87 obj = obj.dereference() 88 name = str(t) 89 90 # Lookup the type name specified by the user 91 else: 92 t = pwndbg.typeinfo.load(name) 93 94 # If it's not a struct (e.g. int or char*), bail 95 if t.code not in (gdb.TYPE_CODE_STRUCT, gdb.TYPE_CODE_TYPEDEF, gdb.TYPE_CODE_UNION): 96 raise Exception("Not a structure: %s" % t) 97 98 # If an address was specified, create a Value of the 99 # specified type at that address. 
100 if addr is not None: 101 obj = pwndbg.memory.poi(t, addr) 102 103 # Header, optionally include the name 104 header = name 105 if obj: header = "%s @ %s" % (header, hex(int(obj.address))) 106 rv.append(header) 107 108 if t.strip_typedefs().code == gdb.TYPE_CODE_ARRAY: 109 return "Arrays not supported yet" 110 if t.strip_typedefs().code not in (gdb.TYPE_CODE_STRUCT, gdb.TYPE_CODE_UNION): 111 t = {name: obj or gdb.Value(0).cast(t)} 112 113 for name, field in t.items(): 114 # Offset into the parent structure 115 o = getattr(field, 'bitpos', 0)/8 116 extra = str(field.type) 117 ftype = field.type.strip_typedefs() 118 119 if obj and obj.type.strip_typedefs().code in (gdb.TYPE_CODE_STRUCT, gdb.TYPE_CODE_UNION): 120 v = obj[name] 121 122 if ftype.code == gdb.TYPE_CODE_INT: 123 v = hex(int(v)) 124 if ftype.code in (gdb.TYPE_CODE_PTR, gdb.TYPE_CODE_ARRAY) \ 125 and ftype.target() == pwndbg.typeinfo.uchar: 126 data = pwndbg.memory.read(v.address, ftype.sizeof) 127 v = ' '.join('%02x' % b for b in data) 128 129 extra = v 130 131 # Adjust trailing lines in 'extra' to line up 132 # This is necessary when there are nested structures. 133 # Ideally we'd expand recursively if the type is complex. 134 extra_lines = [] 135 for i, line in enumerate(str(extra).splitlines()): 136 if i == 0: extra_lines.append(line) 137 else: extra_lines.append(35*' ' + line) 138 extra = '\n'.join(extra_lines) 139 140 line = " +0x%04x %-20s : %s" % (o, name, extra) 141 rv.append(line) 142 143 return ('\n'.join(rv)) 144 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pwndbg/commands/start.py b/pwndbg/commands/start.py --- a/pwndbg/commands/start.py +++ b/pwndbg/commands/start.py @@ -9,6 +9,8 @@ from __future__ import print_function from __future__ import unicode_literals +import shlex + import gdb import pwndbg.commands @@ -66,5 +68,5 @@ """ global break_on_first_instruction break_on_first_instruction = True - run = 'run ' + ' '.join(a) + run = 'run ' + ' '.join(map(shlex.quote, a)) gdb.execute(run, from_tty=False) diff --git a/pwndbg/dt.py b/pwndbg/dt.py --- a/pwndbg/dt.py +++ b/pwndbg/dt.py @@ -112,7 +112,8 @@ for name, field in t.items(): # Offset into the parent structure - o = getattr(field, 'bitpos', 0)/8 + o = getattr(field, 'bitpos', 0) // 8 + b = getattr(field, 'bitpos', 0) % 8 extra = str(field.type) ftype = field.type.strip_typedefs() @@ -137,7 +138,9 @@ else: extra_lines.append(35*' ' + line) extra = '\n'.join(extra_lines) - line = " +0x%04x %-20s : %s" % (o, name, extra) + bitpos = '' if not b else ('.%i' % b) + + line = " +0x%04x%s %-20s : %s" % (o, bitpos, name, extra) rv.append(line) return ('\n'.join(rv)) diff --git a/pwndbg/prompt.py b/pwndbg/prompt.py --- a/pwndbg/prompt.py +++ b/pwndbg/prompt.py @@ -33,4 +33,5 @@ pwndbg.commands.context.context() + gdb.prompt_hook = prompt_hook
{"golden_diff": "diff --git a/pwndbg/commands/start.py b/pwndbg/commands/start.py\n--- a/pwndbg/commands/start.py\n+++ b/pwndbg/commands/start.py\n@@ -9,6 +9,8 @@\n from __future__ import print_function\n from __future__ import unicode_literals\n \n+import shlex\n+\n import gdb\n \n import pwndbg.commands\n@@ -66,5 +68,5 @@\n \"\"\"\n global break_on_first_instruction\n break_on_first_instruction = True\n- run = 'run ' + ' '.join(a)\n+ run = 'run ' + ' '.join(map(shlex.quote, a))\n gdb.execute(run, from_tty=False)\ndiff --git a/pwndbg/dt.py b/pwndbg/dt.py\n--- a/pwndbg/dt.py\n+++ b/pwndbg/dt.py\n@@ -112,7 +112,8 @@\n \n for name, field in t.items():\n # Offset into the parent structure\n- o = getattr(field, 'bitpos', 0)/8\n+ o = getattr(field, 'bitpos', 0) // 8\n+ b = getattr(field, 'bitpos', 0) % 8\n extra = str(field.type)\n ftype = field.type.strip_typedefs()\n \n@@ -137,7 +138,9 @@\n else: extra_lines.append(35*' ' + line)\n extra = '\\n'.join(extra_lines)\n \n- line = \" +0x%04x %-20s : %s\" % (o, name, extra)\n+ bitpos = '' if not b else ('.%i' % b)\n+\n+ line = \" +0x%04x%s %-20s : %s\" % (o, bitpos, name, extra)\n rv.append(line)\n \n return ('\\n'.join(rv))\ndiff --git a/pwndbg/prompt.py b/pwndbg/prompt.py\n--- a/pwndbg/prompt.py\n+++ b/pwndbg/prompt.py\n@@ -33,4 +33,5 @@\n pwndbg.commands.context.context()\n \n \n+\n gdb.prompt_hook = prompt_hook\n", "issue": "Broken `entry` command\n### Description\r\n\r\nThe `entry` command pass arguments differently then the `run` command.\r\n\r\n### Steps to reproduce\r\n\r\n```\r\n[dc@dc:pwndbg|dev *$%]$ gdb python\r\nLoaded 113 commands. Type pwndbg [filter] for a list.\r\nReading symbols from python...(no debugging symbols found)...done.\r\npwndbg> set exception-verbose on\r\nSet whether to print a full stacktracefor exceptions raised in Pwndbg commands to True\r\npwndbg> run -c \"print(1); print(2)\"\r\nStarting program: /usr/bin/python -c \"print(1); print(2)\"\r\n[Thread debugging using libthread_db enabled]\r\nUsing host libthread_db library \"/usr/lib/libthread_db.so.1\".\r\n1\r\n2\r\n[Inferior 1 (process 20590) exited normally]\r\npwndbg> entry -c \"print(1); print(2)\"\r\n('-c', 'print(1); print(2)')\r\nRunning '%s' run -c print(1); print(2)\r\n/bin/bash: -c: line 0: syntax error near unexpected token `('\r\n/bin/bash: -c: line 0: `exec /usr/bin/python -c print(1); print(2)'\r\nTraceback (most recent call last):\r\n File \"/home/dc/installed/pwndbg/pwndbg/commands/__init__.py\", line 100, in __call__\r\n return self.function(*args, **kwargs)\r\n File \"/home/dc/installed/pwndbg/pwndbg/commands/__init__.py\", line 181, in _OnlyWithFile\r\n return function(*a, **kw)\r\n File \"/home/dc/installed/pwndbg/pwndbg/commands/start.py\", line 72, in entry\r\n gdb.execute(run, from_tty=False)\r\ngdb.error: During startup program exited with code 1.\r\n\r\nIf that is an issue, you can report it on https://github.com/pwndbg/pwndbg/issues\r\n(Please don't forget to search if it hasn't been reported before)\r\nPS: Pull requests are welcome\r\n```\r\n\r\n### My version\r\n\r\n```\r\npwndbg> version\r\nGdb: GNU gdb (GDB) 8.0.1\r\nPython: 3.6.3 (default, Oct 24 2017, 14:48:20) [GCC 7.2.0]\r\nPwndbg: 1.0.0 build: 5811010\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nLaunches the target process after setting a breakpoint at a convenient\nentry point.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import 
unicode_literals\n\nimport gdb\n\nimport pwndbg.commands\nimport pwndbg.elf\nimport pwndbg.events\nimport pwndbg.symbol\n\nbreak_on_first_instruction = False\n\n\[email protected]\ndef on_start():\n global break_on_first_instruction\n if break_on_first_instruction:\n spec = \"*%#x\" % (int(pwndbg.elf.entry()))\n gdb.Breakpoint(spec, temporary=True)\n break_on_first_instruction = False\n\n\[email protected]\ndef start(*a):\n \"\"\"\n Set a breakpoint at a convenient location in the binary,\n generally 'main', 'init', or the entry point.\n \"\"\"\n run = 'run ' + ' '.join(a)\n\n symbols = [\"main\",\n \"_main\",\n \"start\",\n \"_start\",\n \"init\",\n \"_init\"]\n\n for symbol in symbols:\n address = pwndbg.symbol.address(symbol)\n\n if not address:\n continue\n\n b = gdb.Breakpoint(symbol, temporary=True)\n gdb.execute(run, from_tty=False, to_string=True)\n return\n\n # Try a breakpoint at the binary entry\n entry(*a)\n\n\[email protected]\[email protected]\ndef entry(*a):\n \"\"\"\n Set a breakpoint at the first instruction executed in\n the target binary.\n \"\"\"\n global break_on_first_instruction\n break_on_first_instruction = True\n run = 'run ' + ' '.join(a)\n gdb.execute(run, from_tty=False)\n", "path": "pwndbg/commands/start.py"}, {"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport gdb\n\nimport pwndbg.events\nimport pwndbg.memoize\n\nhint_msg = 'Loaded %i commands. Type pwndbg [filter] for a list.' % len(pwndbg.commands.Command.commands)\n\nprint(pwndbg.color.red(hint_msg))\ncur = (gdb.selected_inferior(), gdb.selected_thread())\n\n\ndef prompt_hook(*a):\n global cur\n new = (gdb.selected_inferior(), gdb.selected_thread())\n\n if cur != new:\n pwndbg.events.after_reload(start=False)\n cur = new\n\n if pwndbg.proc.alive and pwndbg.proc.thread_is_stopped:\n prompt_hook_on_stop(*a)\n\n\[email protected]_on_stop\ndef prompt_hook_on_stop(*a):\n pwndbg.commands.context.context()\n\n\ngdb.prompt_hook = prompt_hook\n", "path": "pwndbg/prompt.py"}, {"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nPrints structures in a manner similar to Windbg's \"dt\" command.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport glob\nimport os\nimport re\nimport subprocess\nimport tempfile\n\nimport gdb\n\nimport pwndbg.memory\nimport pwndbg.typeinfo\n\n\ndef get_type(v):\n t = v.type\n while not t.name:\n if t.code == gdb.TYPE_CODE_PTR:\n t = t.target()\n return t.name\n\ndef get_typename(t):\n return(str(t))\n\ndef get_arrsize(f):\n t = f.type\n if t.code != gdb.TYPE_CODE_ARRAY:\n return 0\n t2 = t.target()\n s = t2.sizeof\n return int(t.sizeof / t2.sizeof)\n\ndef get_field_by_name(obj, field):\n # Dereference once\n if obj.type.code == gdb.TYPE_CODE_PTR:\n obj = obj.dereference()\n for f in re.split('(->|\\.|\\[\\d+\\])', field):\n if not f: continue\n if f == '->':\n obj = obj.dereference()\n elif f == '.':\n pass\n elif f.startswith('['):\n n = int(f.strip('[]'))\n obj = obj.cast(obj.dereference().type.pointer())\n obj += n\n obj = obj.dereference()\n else:\n obj = obj[f]\n return obj\n\ndef happy(typename):\n prefix = ''\n if 'unsigned' in typename:\n prefix = 'u'\n typename = typename.replace('unsigned ', '')\n return prefix + {\n 'char': 'char',\n 'short int': 'short',\n 'long int': 'long',\n 
'int': 'int',\n 'long long': 'longlong',\n 'float': 'float',\n 'double': 'double'\n }[typename]\n\ndef dt(name='', addr=None, obj = None):\n \"\"\"\n Dump out a structure type Windbg style.\n \"\"\"\n # Return value is a list of strings.of\n # We concatenate at the end.\n rv = []\n\n if obj and not name:\n t = obj.type\n while t.code == (gdb.TYPE_CODE_PTR):\n t = t.target()\n obj = obj.dereference()\n name = str(t)\n\n # Lookup the type name specified by the user\n else:\n t = pwndbg.typeinfo.load(name)\n\n # If it's not a struct (e.g. int or char*), bail\n if t.code not in (gdb.TYPE_CODE_STRUCT, gdb.TYPE_CODE_TYPEDEF, gdb.TYPE_CODE_UNION):\n raise Exception(\"Not a structure: %s\" % t)\n\n # If an address was specified, create a Value of the\n # specified type at that address.\n if addr is not None:\n obj = pwndbg.memory.poi(t, addr)\n\n # Header, optionally include the name\n header = name\n if obj: header = \"%s @ %s\" % (header, hex(int(obj.address)))\n rv.append(header)\n\n if t.strip_typedefs().code == gdb.TYPE_CODE_ARRAY:\n return \"Arrays not supported yet\"\n if t.strip_typedefs().code not in (gdb.TYPE_CODE_STRUCT, gdb.TYPE_CODE_UNION):\n t = {name: obj or gdb.Value(0).cast(t)}\n\n for name, field in t.items():\n # Offset into the parent structure\n o = getattr(field, 'bitpos', 0)/8\n extra = str(field.type)\n ftype = field.type.strip_typedefs()\n\n if obj and obj.type.strip_typedefs().code in (gdb.TYPE_CODE_STRUCT, gdb.TYPE_CODE_UNION):\n v = obj[name]\n\n if ftype.code == gdb.TYPE_CODE_INT:\n v = hex(int(v))\n if ftype.code in (gdb.TYPE_CODE_PTR, gdb.TYPE_CODE_ARRAY) \\\n and ftype.target() == pwndbg.typeinfo.uchar:\n data = pwndbg.memory.read(v.address, ftype.sizeof)\n v = ' '.join('%02x' % b for b in data)\n\n extra = v\n\n # Adjust trailing lines in 'extra' to line up\n # This is necessary when there are nested structures.\n # Ideally we'd expand recursively if the type is complex.\n extra_lines = []\n for i, line in enumerate(str(extra).splitlines()):\n if i == 0: extra_lines.append(line)\n else: extra_lines.append(35*' ' + line)\n extra = '\\n'.join(extra_lines)\n\n line = \" +0x%04x %-20s : %s\" % (o, name, extra)\n rv.append(line)\n\n return ('\\n'.join(rv))\n", "path": "pwndbg/dt.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nLaunches the target process after setting a breakpoint at a convenient\nentry point.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport shlex\n\nimport gdb\n\nimport pwndbg.commands\nimport pwndbg.elf\nimport pwndbg.events\nimport pwndbg.symbol\n\nbreak_on_first_instruction = False\n\n\[email protected]\ndef on_start():\n global break_on_first_instruction\n if break_on_first_instruction:\n spec = \"*%#x\" % (int(pwndbg.elf.entry()))\n gdb.Breakpoint(spec, temporary=True)\n break_on_first_instruction = False\n\n\[email protected]\ndef start(*a):\n \"\"\"\n Set a breakpoint at a convenient location in the binary,\n generally 'main', 'init', or the entry point.\n \"\"\"\n run = 'run ' + ' '.join(a)\n\n symbols = [\"main\",\n \"_main\",\n \"start\",\n \"_start\",\n \"init\",\n \"_init\"]\n\n for symbol in symbols:\n address = pwndbg.symbol.address(symbol)\n\n if not address:\n continue\n\n b = gdb.Breakpoint(symbol, temporary=True)\n gdb.execute(run, from_tty=False, to_string=True)\n return\n\n # Try a breakpoint at the binary entry\n entry(*a)\n\n\[email protected]\[email protected]\ndef 
entry(*a):\n \"\"\"\n Set a breakpoint at the first instruction executed in\n the target binary.\n \"\"\"\n global break_on_first_instruction\n break_on_first_instruction = True\n run = 'run ' + ' '.join(map(shlex.quote, a))\n gdb.execute(run, from_tty=False)\n", "path": "pwndbg/commands/start.py"}, {"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport gdb\n\nimport pwndbg.events\nimport pwndbg.memoize\n\nhint_msg = 'Loaded %i commands. Type pwndbg [filter] for a list.' % len(pwndbg.commands.Command.commands)\n\nprint(pwndbg.color.red(hint_msg))\ncur = (gdb.selected_inferior(), gdb.selected_thread())\n\n\ndef prompt_hook(*a):\n global cur\n new = (gdb.selected_inferior(), gdb.selected_thread())\n\n if cur != new:\n pwndbg.events.after_reload(start=False)\n cur = new\n\n if pwndbg.proc.alive and pwndbg.proc.thread_is_stopped:\n prompt_hook_on_stop(*a)\n\n\[email protected]_on_stop\ndef prompt_hook_on_stop(*a):\n pwndbg.commands.context.context()\n\n\n\ngdb.prompt_hook = prompt_hook\n", "path": "pwndbg/prompt.py"}, {"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nPrints structures in a manner similar to Windbg's \"dt\" command.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport glob\nimport os\nimport re\nimport subprocess\nimport tempfile\n\nimport gdb\n\nimport pwndbg.memory\nimport pwndbg.typeinfo\n\n\ndef get_type(v):\n t = v.type\n while not t.name:\n if t.code == gdb.TYPE_CODE_PTR:\n t = t.target()\n return t.name\n\ndef get_typename(t):\n return(str(t))\n\ndef get_arrsize(f):\n t = f.type\n if t.code != gdb.TYPE_CODE_ARRAY:\n return 0\n t2 = t.target()\n s = t2.sizeof\n return int(t.sizeof / t2.sizeof)\n\ndef get_field_by_name(obj, field):\n # Dereference once\n if obj.type.code == gdb.TYPE_CODE_PTR:\n obj = obj.dereference()\n for f in re.split('(->|\\.|\\[\\d+\\])', field):\n if not f: continue\n if f == '->':\n obj = obj.dereference()\n elif f == '.':\n pass\n elif f.startswith('['):\n n = int(f.strip('[]'))\n obj = obj.cast(obj.dereference().type.pointer())\n obj += n\n obj = obj.dereference()\n else:\n obj = obj[f]\n return obj\n\ndef happy(typename):\n prefix = ''\n if 'unsigned' in typename:\n prefix = 'u'\n typename = typename.replace('unsigned ', '')\n return prefix + {\n 'char': 'char',\n 'short int': 'short',\n 'long int': 'long',\n 'int': 'int',\n 'long long': 'longlong',\n 'float': 'float',\n 'double': 'double'\n }[typename]\n\ndef dt(name='', addr=None, obj = None):\n \"\"\"\n Dump out a structure type Windbg style.\n \"\"\"\n # Return value is a list of strings.of\n # We concatenate at the end.\n rv = []\n\n if obj and not name:\n t = obj.type\n while t.code == (gdb.TYPE_CODE_PTR):\n t = t.target()\n obj = obj.dereference()\n name = str(t)\n\n # Lookup the type name specified by the user\n else:\n t = pwndbg.typeinfo.load(name)\n\n # If it's not a struct (e.g. 
int or char*), bail\n if t.code not in (gdb.TYPE_CODE_STRUCT, gdb.TYPE_CODE_TYPEDEF, gdb.TYPE_CODE_UNION):\n raise Exception(\"Not a structure: %s\" % t)\n\n # If an address was specified, create a Value of the\n # specified type at that address.\n if addr is not None:\n obj = pwndbg.memory.poi(t, addr)\n\n # Header, optionally include the name\n header = name\n if obj: header = \"%s @ %s\" % (header, hex(int(obj.address)))\n rv.append(header)\n\n if t.strip_typedefs().code == gdb.TYPE_CODE_ARRAY:\n return \"Arrays not supported yet\"\n if t.strip_typedefs().code not in (gdb.TYPE_CODE_STRUCT, gdb.TYPE_CODE_UNION):\n t = {name: obj or gdb.Value(0).cast(t)}\n\n for name, field in t.items():\n # Offset into the parent structure\n o = getattr(field, 'bitpos', 0) // 8\n b = getattr(field, 'bitpos', 0) % 8\n extra = str(field.type)\n ftype = field.type.strip_typedefs()\n\n if obj and obj.type.strip_typedefs().code in (gdb.TYPE_CODE_STRUCT, gdb.TYPE_CODE_UNION):\n v = obj[name]\n\n if ftype.code == gdb.TYPE_CODE_INT:\n v = hex(int(v))\n if ftype.code in (gdb.TYPE_CODE_PTR, gdb.TYPE_CODE_ARRAY) \\\n and ftype.target() == pwndbg.typeinfo.uchar:\n data = pwndbg.memory.read(v.address, ftype.sizeof)\n v = ' '.join('%02x' % b for b in data)\n\n extra = v\n\n # Adjust trailing lines in 'extra' to line up\n # This is necessary when there are nested structures.\n # Ideally we'd expand recursively if the type is complex.\n extra_lines = []\n for i, line in enumerate(str(extra).splitlines()):\n if i == 0: extra_lines.append(line)\n else: extra_lines.append(35*' ' + line)\n extra = '\\n'.join(extra_lines)\n\n bitpos = '' if not b else ('.%i' % b)\n\n line = \" +0x%04x%s %-20s : %s\" % (o, bitpos, name, extra)\n rv.append(line)\n\n return ('\\n'.join(rv))\n", "path": "pwndbg/dt.py"}]}
3,092
494
gh_patches_debug_3923
rasdani/github-patches
git_diff
deepset-ai__haystack-6173
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Create a script for 2.0 API Reference docs --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `docs/pydoc/renderers.py` Content: ``` 1 import os 2 import sys 3 import io 4 import dataclasses 5 import typing as t 6 import base64 7 import warnings 8 from pathlib import Path 9 10 import requests 11 import docspec 12 from pydoc_markdown.interfaces import Context, Renderer 13 from pydoc_markdown.contrib.renderers.markdown import MarkdownRenderer 14 15 16 README_FRONTMATTER = """--- 17 title: {title} 18 excerpt: {excerpt} 19 category: {category} 20 slug: {slug} 21 parentDoc: {parent_doc} 22 order: {order} 23 hidden: false 24 --- 25 26 """ 27 28 29 def create_headers(version: str): 30 # Utility function to create Readme.io headers. 31 # We assume the README_API_KEY env var is set since we check outside 32 # to show clearer error messages. 33 README_API_KEY = os.getenv("README_API_KEY") 34 token = base64.b64encode(f"{README_API_KEY}:".encode()).decode() 35 return {"authorization": f"Basic {token}", "x-readme-version": version} 36 37 38 @dataclasses.dataclass 39 class ReadmeRenderer(Renderer): 40 """ 41 This custom Renderer is heavily based on the `MarkdownRenderer`, 42 it just prepends a front matter so that the output can be published 43 directly to readme.io. 44 """ 45 46 # These settings will be used in the front matter output 47 title: str 48 category_slug: str 49 excerpt: str 50 slug: str 51 order: int 52 parent_doc_slug: str = "" 53 # Docs categories fetched from Readme.io 54 categories: t.Dict[str, str] = dataclasses.field(init=False) 55 # This exposes a special `markdown` settings value that can be used to pass 56 # parameters to the underlying `MarkdownRenderer` 57 markdown: MarkdownRenderer = dataclasses.field(default_factory=MarkdownRenderer) 58 59 def init(self, context: Context) -> None: 60 self.markdown.init(context) 61 self.version = self._doc_version() 62 self.categories = self._readme_categories(self.version) 63 64 def _doc_version(self) -> str: 65 """ 66 Returns the docs version. 67 """ 68 root = Path(__file__).absolute().parent.parent.parent 69 full_version = (root / "VERSION.txt").read_text() 70 major, minor = full_version.split(".")[:2] 71 if "rc0" in full_version: 72 return f"v{major}.{minor}-unstable" 73 return f"v{major}.{minor}" 74 75 def _readme_categories(self, version: str) -> t.Dict[str, str]: 76 """ 77 Fetch the categories of the given version from Readme.io. 78 README_API_KEY env var must be set to correctly get the categories. 79 Returns dictionary containing all the categories slugs and their ids. 80 """ 81 README_API_KEY = os.getenv("README_API_KEY") 82 if not README_API_KEY: 83 warnings.warn("README_API_KEY env var is not set, using a placeholder category ID") 84 return {"haystack-classes": "ID"} 85 86 headers = create_headers(version) 87 88 res = requests.get("https://dash.readme.com/api/v1/categories", headers=headers, timeout=60) 89 90 if not res.ok: 91 sys.exit(f"Error requesting {version} categories") 92 93 return {c["slug"]: c["id"] for c in res.json()} 94 95 def _doc_id(self, doc_slug: str, version: str) -> str: 96 """ 97 Fetch the doc id of the given doc slug and version from Readme.io. 98 README_API_KEY env var must be set to correctly get the id. 99 If doc_slug is an empty string return an empty string. 
100 """ 101 if not doc_slug: 102 # Not all docs have a parent doc, in case we get no slug 103 # we just return an empty string. 104 return "" 105 106 README_API_KEY = os.getenv("README_API_KEY") 107 if not README_API_KEY: 108 warnings.warn("README_API_KEY env var is not set, using a placeholder doc ID") 109 return "fake-doc-id" 110 111 headers = create_headers(version) 112 res = requests.get(f"https://dash.readme.com/api/v1/docs/{doc_slug}", headers=headers, timeout=60) 113 if not res.ok: 114 sys.exit(f"Error requesting {doc_slug} doc for version {version}") 115 116 return res.json()["id"] 117 118 def render(self, modules: t.List[docspec.Module]) -> None: 119 if self.markdown.filename is None: 120 sys.stdout.write(self._frontmatter()) 121 self.markdown.render_single_page(sys.stdout, modules) 122 else: 123 with io.open(self.markdown.filename, "w", encoding=self.markdown.encoding) as fp: 124 fp.write(self._frontmatter()) 125 self.markdown.render_single_page(t.cast(t.TextIO, fp), modules) 126 127 def _frontmatter(self) -> str: 128 return README_FRONTMATTER.format( 129 title=self.title, 130 category=self.categories[self.category_slug], 131 parent_doc=self._doc_id(self.parent_doc_slug, self.version), 132 excerpt=self.excerpt, 133 slug=self.slug, 134 order=self.order, 135 ) 136 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/docs/pydoc/renderers.py b/docs/pydoc/renderers.py --- a/docs/pydoc/renderers.py +++ b/docs/pydoc/renderers.py @@ -133,3 +133,16 @@ slug=self.slug, order=self.order, ) + + [email protected] +class ReadmePreviewRenderer(ReadmeRenderer): + """ + This custom Renderer behaves just like the ReadmeRenderer but renders docs with the hardcoded version 2.0 to generate correct category ids. + """ + + def _doc_version(self) -> str: + """ + Returns the hardcoded docs version 2.0. + """ + return "v2.0"
{"golden_diff": "diff --git a/docs/pydoc/renderers.py b/docs/pydoc/renderers.py\n--- a/docs/pydoc/renderers.py\n+++ b/docs/pydoc/renderers.py\n@@ -133,3 +133,16 @@\n slug=self.slug,\n order=self.order,\n )\n+\n+\[email protected]\n+class ReadmePreviewRenderer(ReadmeRenderer):\n+ \"\"\"\n+ This custom Renderer behaves just like the ReadmeRenderer but renders docs with the hardcoded version 2.0 to generate correct category ids.\n+ \"\"\"\n+\n+ def _doc_version(self) -> str:\n+ \"\"\"\n+ Returns the hardcoded docs version 2.0.\n+ \"\"\"\n+ return \"v2.0\"\n", "issue": "Create a script for 2.0 API Reference docs\n\n", "before_files": [{"content": "import os\nimport sys\nimport io\nimport dataclasses\nimport typing as t\nimport base64\nimport warnings\nfrom pathlib import Path\n\nimport requests\nimport docspec\nfrom pydoc_markdown.interfaces import Context, Renderer\nfrom pydoc_markdown.contrib.renderers.markdown import MarkdownRenderer\n\n\nREADME_FRONTMATTER = \"\"\"---\ntitle: {title}\nexcerpt: {excerpt}\ncategory: {category}\nslug: {slug}\nparentDoc: {parent_doc}\norder: {order}\nhidden: false\n---\n\n\"\"\"\n\n\ndef create_headers(version: str):\n # Utility function to create Readme.io headers.\n # We assume the README_API_KEY env var is set since we check outside\n # to show clearer error messages.\n README_API_KEY = os.getenv(\"README_API_KEY\")\n token = base64.b64encode(f\"{README_API_KEY}:\".encode()).decode()\n return {\"authorization\": f\"Basic {token}\", \"x-readme-version\": version}\n\n\[email protected]\nclass ReadmeRenderer(Renderer):\n \"\"\"\n This custom Renderer is heavily based on the `MarkdownRenderer`,\n it just prepends a front matter so that the output can be published\n directly to readme.io.\n \"\"\"\n\n # These settings will be used in the front matter output\n title: str\n category_slug: str\n excerpt: str\n slug: str\n order: int\n parent_doc_slug: str = \"\"\n # Docs categories fetched from Readme.io\n categories: t.Dict[str, str] = dataclasses.field(init=False)\n # This exposes a special `markdown` settings value that can be used to pass\n # parameters to the underlying `MarkdownRenderer`\n markdown: MarkdownRenderer = dataclasses.field(default_factory=MarkdownRenderer)\n\n def init(self, context: Context) -> None:\n self.markdown.init(context)\n self.version = self._doc_version()\n self.categories = self._readme_categories(self.version)\n\n def _doc_version(self) -> str:\n \"\"\"\n Returns the docs version.\n \"\"\"\n root = Path(__file__).absolute().parent.parent.parent\n full_version = (root / \"VERSION.txt\").read_text()\n major, minor = full_version.split(\".\")[:2]\n if \"rc0\" in full_version:\n return f\"v{major}.{minor}-unstable\"\n return f\"v{major}.{minor}\"\n\n def _readme_categories(self, version: str) -> t.Dict[str, str]:\n \"\"\"\n Fetch the categories of the given version from Readme.io.\n README_API_KEY env var must be set to correctly get the categories.\n Returns dictionary containing all the categories slugs and their ids.\n \"\"\"\n README_API_KEY = os.getenv(\"README_API_KEY\")\n if not README_API_KEY:\n warnings.warn(\"README_API_KEY env var is not set, using a placeholder category ID\")\n return {\"haystack-classes\": \"ID\"}\n\n headers = create_headers(version)\n\n res = requests.get(\"https://dash.readme.com/api/v1/categories\", headers=headers, timeout=60)\n\n if not res.ok:\n sys.exit(f\"Error requesting {version} categories\")\n\n return {c[\"slug\"]: c[\"id\"] for c in res.json()}\n\n def _doc_id(self, doc_slug: str, version: 
str) -> str:\n \"\"\"\n Fetch the doc id of the given doc slug and version from Readme.io.\n README_API_KEY env var must be set to correctly get the id.\n If doc_slug is an empty string return an empty string.\n \"\"\"\n if not doc_slug:\n # Not all docs have a parent doc, in case we get no slug\n # we just return an empty string.\n return \"\"\n\n README_API_KEY = os.getenv(\"README_API_KEY\")\n if not README_API_KEY:\n warnings.warn(\"README_API_KEY env var is not set, using a placeholder doc ID\")\n return \"fake-doc-id\"\n\n headers = create_headers(version)\n res = requests.get(f\"https://dash.readme.com/api/v1/docs/{doc_slug}\", headers=headers, timeout=60)\n if not res.ok:\n sys.exit(f\"Error requesting {doc_slug} doc for version {version}\")\n\n return res.json()[\"id\"]\n\n def render(self, modules: t.List[docspec.Module]) -> None:\n if self.markdown.filename is None:\n sys.stdout.write(self._frontmatter())\n self.markdown.render_single_page(sys.stdout, modules)\n else:\n with io.open(self.markdown.filename, \"w\", encoding=self.markdown.encoding) as fp:\n fp.write(self._frontmatter())\n self.markdown.render_single_page(t.cast(t.TextIO, fp), modules)\n\n def _frontmatter(self) -> str:\n return README_FRONTMATTER.format(\n title=self.title,\n category=self.categories[self.category_slug],\n parent_doc=self._doc_id(self.parent_doc_slug, self.version),\n excerpt=self.excerpt,\n slug=self.slug,\n order=self.order,\n )\n", "path": "docs/pydoc/renderers.py"}], "after_files": [{"content": "import os\nimport sys\nimport io\nimport dataclasses\nimport typing as t\nimport base64\nimport warnings\nfrom pathlib import Path\n\nimport requests\nimport docspec\nfrom pydoc_markdown.interfaces import Context, Renderer\nfrom pydoc_markdown.contrib.renderers.markdown import MarkdownRenderer\n\n\nREADME_FRONTMATTER = \"\"\"---\ntitle: {title}\nexcerpt: {excerpt}\ncategory: {category}\nslug: {slug}\nparentDoc: {parent_doc}\norder: {order}\nhidden: false\n---\n\n\"\"\"\n\n\ndef create_headers(version: str):\n # Utility function to create Readme.io headers.\n # We assume the README_API_KEY env var is set since we check outside\n # to show clearer error messages.\n README_API_KEY = os.getenv(\"README_API_KEY\")\n token = base64.b64encode(f\"{README_API_KEY}:\".encode()).decode()\n return {\"authorization\": f\"Basic {token}\", \"x-readme-version\": version}\n\n\[email protected]\nclass ReadmeRenderer(Renderer):\n \"\"\"\n This custom Renderer is heavily based on the `MarkdownRenderer`,\n it just prepends a front matter so that the output can be published\n directly to readme.io.\n \"\"\"\n\n # These settings will be used in the front matter output\n title: str\n category_slug: str\n excerpt: str\n slug: str\n order: int\n parent_doc_slug: str = \"\"\n # Docs categories fetched from Readme.io\n categories: t.Dict[str, str] = dataclasses.field(init=False)\n # This exposes a special `markdown` settings value that can be used to pass\n # parameters to the underlying `MarkdownRenderer`\n markdown: MarkdownRenderer = dataclasses.field(default_factory=MarkdownRenderer)\n\n def init(self, context: Context) -> None:\n self.markdown.init(context)\n self.version = self._doc_version()\n self.categories = self._readme_categories(self.version)\n\n def _doc_version(self) -> str:\n \"\"\"\n Returns the docs version.\n \"\"\"\n root = Path(__file__).absolute().parent.parent.parent\n full_version = (root / \"VERSION.txt\").read_text()\n major, minor = full_version.split(\".\")[:2]\n if \"rc0\" in full_version:\n return 
f\"v{major}.{minor}-unstable\"\n return f\"v{major}.{minor}\"\n\n def _readme_categories(self, version: str) -> t.Dict[str, str]:\n \"\"\"\n Fetch the categories of the given version from Readme.io.\n README_API_KEY env var must be set to correctly get the categories.\n Returns dictionary containing all the categories slugs and their ids.\n \"\"\"\n README_API_KEY = os.getenv(\"README_API_KEY\")\n if not README_API_KEY:\n warnings.warn(\"README_API_KEY env var is not set, using a placeholder category ID\")\n return {\"haystack-classes\": \"ID\"}\n\n headers = create_headers(version)\n\n res = requests.get(\"https://dash.readme.com/api/v1/categories\", headers=headers, timeout=60)\n\n if not res.ok:\n sys.exit(f\"Error requesting {version} categories\")\n\n return {c[\"slug\"]: c[\"id\"] for c in res.json()}\n\n def _doc_id(self, doc_slug: str, version: str) -> str:\n \"\"\"\n Fetch the doc id of the given doc slug and version from Readme.io.\n README_API_KEY env var must be set to correctly get the id.\n If doc_slug is an empty string return an empty string.\n \"\"\"\n if not doc_slug:\n # Not all docs have a parent doc, in case we get no slug\n # we just return an empty string.\n return \"\"\n\n README_API_KEY = os.getenv(\"README_API_KEY\")\n if not README_API_KEY:\n warnings.warn(\"README_API_KEY env var is not set, using a placeholder doc ID\")\n return \"fake-doc-id\"\n\n headers = create_headers(version)\n res = requests.get(f\"https://dash.readme.com/api/v1/docs/{doc_slug}\", headers=headers, timeout=60)\n if not res.ok:\n sys.exit(f\"Error requesting {doc_slug} doc for version {version}\")\n\n return res.json()[\"id\"]\n\n def render(self, modules: t.List[docspec.Module]) -> None:\n if self.markdown.filename is None:\n sys.stdout.write(self._frontmatter())\n self.markdown.render_single_page(sys.stdout, modules)\n else:\n with io.open(self.markdown.filename, \"w\", encoding=self.markdown.encoding) as fp:\n fp.write(self._frontmatter())\n self.markdown.render_single_page(t.cast(t.TextIO, fp), modules)\n\n def _frontmatter(self) -> str:\n return README_FRONTMATTER.format(\n title=self.title,\n category=self.categories[self.category_slug],\n parent_doc=self._doc_id(self.parent_doc_slug, self.version),\n excerpt=self.excerpt,\n slug=self.slug,\n order=self.order,\n )\n\n\[email protected]\nclass ReadmePreviewRenderer(ReadmeRenderer):\n \"\"\"\n This custom Renderer behaves just like the ReadmeRenderer but renders docs with the hardcoded version 2.0 to generate correct category ids.\n \"\"\"\n\n def _doc_version(self) -> str:\n \"\"\"\n Returns the hardcoded docs version 2.0.\n \"\"\"\n return \"v2.0\"\n", "path": "docs/pydoc/renderers.py"}]}
1,650
157
gh_patches_debug_31872
rasdani/github-patches
git_diff
CTFd__CTFd-2316
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- HTML_SANITIZATION controllable from config panel We should maybe make HTML_SANITIZATION controlable from the admin panel so that Admins can decide how they want to deal with HTML. Of course this maybe could be done another way, it's just the general idea about the configuration value. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `CTFd/admin/__init__.py` Content: ``` 1 import csv # noqa: I001 2 import datetime 3 import os 4 from io import StringIO 5 6 from flask import Blueprint, abort 7 from flask import current_app as app 8 from flask import ( 9 jsonify, 10 redirect, 11 render_template, 12 render_template_string, 13 request, 14 send_file, 15 url_for, 16 ) 17 18 admin = Blueprint("admin", __name__) 19 20 # isort:imports-firstparty 21 from CTFd.admin import challenges # noqa: F401,I001 22 from CTFd.admin import notifications # noqa: F401,I001 23 from CTFd.admin import pages # noqa: F401,I001 24 from CTFd.admin import scoreboard # noqa: F401,I001 25 from CTFd.admin import statistics # noqa: F401,I001 26 from CTFd.admin import submissions # noqa: F401,I001 27 from CTFd.admin import teams # noqa: F401,I001 28 from CTFd.admin import users # noqa: F401,I001 29 from CTFd.cache import ( 30 cache, 31 clear_challenges, 32 clear_config, 33 clear_pages, 34 clear_standings, 35 ) 36 from CTFd.models import ( 37 Awards, 38 Challenges, 39 Configs, 40 Notifications, 41 Pages, 42 Solves, 43 Submissions, 44 Teams, 45 Tracking, 46 Unlocks, 47 Users, 48 db, 49 ) 50 from CTFd.utils import config as ctf_config 51 from CTFd.utils import get_config, set_config 52 from CTFd.utils.csv import dump_csv, load_challenges_csv, load_teams_csv, load_users_csv 53 from CTFd.utils.decorators import admins_only 54 from CTFd.utils.exports import background_import_ctf 55 from CTFd.utils.exports import export_ctf as export_ctf_util 56 from CTFd.utils.security.auth import logout_user 57 from CTFd.utils.uploads import delete_file 58 from CTFd.utils.user import is_admin 59 60 61 @admin.route("/admin", methods=["GET"]) 62 def view(): 63 if is_admin(): 64 return redirect(url_for("admin.statistics")) 65 return redirect(url_for("auth.login")) 66 67 68 @admin.route("/admin/plugins/<plugin>", methods=["GET", "POST"]) 69 @admins_only 70 def plugin(plugin): 71 if request.method == "GET": 72 plugins_path = os.path.join(app.root_path, "plugins") 73 74 config_html_plugins = [ 75 name 76 for name in os.listdir(plugins_path) 77 if os.path.isfile(os.path.join(plugins_path, name, "config.html")) 78 ] 79 80 if plugin in config_html_plugins: 81 config_html = open( 82 os.path.join(app.root_path, "plugins", plugin, "config.html") 83 ).read() 84 return render_template_string(config_html) 85 abort(404) 86 elif request.method == "POST": 87 for k, v in request.form.items(): 88 if k == "nonce": 89 continue 90 set_config(k, v) 91 with app.app_context(): 92 clear_config() 93 return "1" 94 95 96 @admin.route("/admin/import", methods=["GET", "POST"]) 97 @admins_only 98 def import_ctf(): 99 if request.method == "GET": 100 start_time = cache.get("import_start_time") 101 end_time = cache.get("import_end_time") 102 import_status = cache.get("import_status") 103 import_error = cache.get("import_error") 104 return render_template( 105 "admin/import.html", 106 start_time=start_time, 107 end_time=end_time, 108 import_status=import_status, 109 
import_error=import_error, 110 ) 111 elif request.method == "POST": 112 backup = request.files["backup"] 113 background_import_ctf(backup) 114 return redirect(url_for("admin.import_ctf")) 115 116 117 @admin.route("/admin/export", methods=["GET", "POST"]) 118 @admins_only 119 def export_ctf(): 120 backup = export_ctf_util() 121 ctf_name = ctf_config.ctf_name() 122 day = datetime.datetime.now().strftime("%Y-%m-%d_%T") 123 full_name = u"{}.{}.zip".format(ctf_name, day) 124 return send_file( 125 backup, cache_timeout=-1, as_attachment=True, attachment_filename=full_name 126 ) 127 128 129 @admin.route("/admin/import/csv", methods=["POST"]) 130 @admins_only 131 def import_csv(): 132 csv_type = request.form["csv_type"] 133 # Try really hard to load data in properly no matter what nonsense Excel gave you 134 raw = request.files["csv_file"].stream.read() 135 try: 136 csvdata = raw.decode("utf-8-sig") 137 except UnicodeDecodeError: 138 try: 139 csvdata = raw.decode("cp1252") 140 except UnicodeDecodeError: 141 csvdata = raw.decode("latin-1") 142 csvfile = StringIO(csvdata) 143 144 loaders = { 145 "challenges": load_challenges_csv, 146 "users": load_users_csv, 147 "teams": load_teams_csv, 148 } 149 150 loader = loaders[csv_type] 151 reader = csv.DictReader(csvfile) 152 success = loader(reader) 153 if success is True: 154 return redirect(url_for("admin.config")) 155 else: 156 return jsonify(success), 500 157 158 159 @admin.route("/admin/export/csv") 160 @admins_only 161 def export_csv(): 162 table = request.args.get("table") 163 164 output = dump_csv(name=table) 165 166 return send_file( 167 output, 168 as_attachment=True, 169 cache_timeout=-1, 170 attachment_filename="{name}-{table}.csv".format( 171 name=ctf_config.ctf_name(), table=table 172 ), 173 ) 174 175 176 @admin.route("/admin/config", methods=["GET", "POST"]) 177 @admins_only 178 def config(): 179 # Clear the config cache so that we don't get stale values 180 clear_config() 181 182 configs = Configs.query.all() 183 configs = {c.key: get_config(c.key) for c in configs} 184 185 themes = ctf_config.get_themes() 186 187 # Remove current theme but ignore failure 188 try: 189 themes.remove(get_config("ctf_theme")) 190 except ValueError: 191 pass 192 193 return render_template("admin/config.html", themes=themes, **configs) 194 195 196 @admin.route("/admin/reset", methods=["GET", "POST"]) 197 @admins_only 198 def reset(): 199 if request.method == "POST": 200 require_setup = False 201 logout = False 202 next_url = url_for("admin.statistics") 203 204 data = request.form 205 206 if data.get("pages"): 207 _pages = Pages.query.all() 208 for p in _pages: 209 for f in p.files: 210 delete_file(file_id=f.id) 211 212 Pages.query.delete() 213 214 if data.get("notifications"): 215 Notifications.query.delete() 216 217 if data.get("challenges"): 218 _challenges = Challenges.query.all() 219 for c in _challenges: 220 for f in c.files: 221 delete_file(file_id=f.id) 222 Challenges.query.delete() 223 224 if data.get("accounts"): 225 Users.query.delete() 226 Teams.query.delete() 227 require_setup = True 228 logout = True 229 230 if data.get("submissions"): 231 Solves.query.delete() 232 Submissions.query.delete() 233 Awards.query.delete() 234 Unlocks.query.delete() 235 Tracking.query.delete() 236 237 if require_setup: 238 set_config("setup", False) 239 cache.clear() 240 logout_user() 241 next_url = url_for("views.setup") 242 243 db.session.commit() 244 245 clear_pages() 246 clear_standings() 247 clear_challenges() 248 clear_config() 249 250 if logout is True: 251 
cache.clear() 252 logout_user() 253 254 db.session.close() 255 return redirect(next_url) 256 257 return render_template("admin/reset.html") 258 ``` Path: `CTFd/utils/config/pages.py` Content: ``` 1 from flask import current_app 2 3 from CTFd.cache import cache 4 from CTFd.models import Pages, db 5 from CTFd.utils import get_config, markdown 6 from CTFd.utils.dates import isoformat, unix_time_to_utc 7 from CTFd.utils.formatters import safe_format 8 from CTFd.utils.security.sanitize import sanitize_html 9 10 11 def format_variables(content): 12 ctf_name = get_config("ctf_name") 13 ctf_description = get_config("ctf_description") 14 ctf_start = get_config("start") 15 if ctf_start: 16 ctf_start = isoformat(unix_time_to_utc(int(ctf_start))) 17 18 ctf_end = get_config("end") 19 if ctf_end: 20 ctf_end = isoformat(unix_time_to_utc(int(ctf_end))) 21 22 ctf_freeze = get_config("freeze") 23 if ctf_freeze: 24 ctf_freeze = isoformat(unix_time_to_utc(int(ctf_freeze))) 25 26 content = safe_format( 27 content, 28 ctf_name=ctf_name, 29 ctf_description=ctf_description, 30 ctf_start=ctf_start, 31 ctf_end=ctf_end, 32 ctf_freeze=ctf_freeze, 33 ) 34 return content 35 36 37 def build_html(html, sanitize=False): 38 html = format_variables(html) 39 if current_app.config["HTML_SANITIZATION"] is True or sanitize is True: 40 html = sanitize_html(html) 41 return html 42 43 44 def build_markdown(md, sanitize=False): 45 html = markdown(md) 46 html = format_variables(html) 47 if current_app.config["HTML_SANITIZATION"] is True or sanitize is True: 48 html = sanitize_html(html) 49 return html 50 51 52 @cache.memoize() 53 def get_pages(): 54 db_pages = Pages.query.filter( 55 Pages.route != "index", Pages.draft.isnot(True), Pages.hidden.isnot(True) 56 ).all() 57 return db_pages 58 59 60 @cache.memoize() 61 def get_page(route): 62 page = db.session.execute( 63 Pages.__table__.select() 64 .where(Pages.route == route) 65 .where(Pages.draft.isnot(True)) 66 ).fetchone() 67 if page: 68 # Convert the row into a transient ORM object so this change isn't commited accidentally 69 p = Pages(**page) 70 return p 71 return None 72 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/CTFd/admin/__init__.py b/CTFd/admin/__init__.py --- a/CTFd/admin/__init__.py +++ b/CTFd/admin/__init__.py @@ -48,7 +48,7 @@ db, ) from CTFd.utils import config as ctf_config -from CTFd.utils import get_config, set_config +from CTFd.utils import get_app_config, get_config, set_config from CTFd.utils.csv import dump_csv, load_challenges_csv, load_teams_csv, load_users_csv from CTFd.utils.decorators import admins_only from CTFd.utils.exports import background_import_ctf @@ -190,7 +190,14 @@ except ValueError: pass - return render_template("admin/config.html", themes=themes, **configs) + force_html_sanitization = get_app_config("HTML_SANITIZATION") + + return render_template( + "admin/config.html", + themes=themes, + **configs, + force_html_sanitization=force_html_sanitization + ) @admin.route("/admin/reset", methods=["GET", "POST"]) diff --git a/CTFd/utils/config/pages.py b/CTFd/utils/config/pages.py --- a/CTFd/utils/config/pages.py +++ b/CTFd/utils/config/pages.py @@ -36,7 +36,11 @@ def build_html(html, sanitize=False): html = format_variables(html) - if current_app.config["HTML_SANITIZATION"] is True or sanitize is True: + if ( + current_app.config["HTML_SANITIZATION"] is True + or bool(get_config("html_sanitization")) is True + or sanitize is True + ): html = sanitize_html(html) return html @@ -44,7 +48,11 @@ def build_markdown(md, sanitize=False): html = markdown(md) html = format_variables(html) - if current_app.config["HTML_SANITIZATION"] is True or sanitize is True: + if ( + current_app.config["HTML_SANITIZATION"] is True + or bool(get_config("html_sanitization")) is True + or sanitize is True + ): html = sanitize_html(html) return html
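The condition that diff adds to build_html and build_markdown can be read in isolation: the app-level HTML_SANITIZATION setting stays a deploy-time override for operators, while the new "html_sanitization" database key is the toggle exposed to admins. The sketch below only restates that combined check for clarity; the helper name is invented, and the imports are the CTFd utilities already visible in the files above.

```python
# Illustrative restatement of the check added in the diff above (not part of the patch itself).
from flask import current_app

from CTFd.utils import get_config
from CTFd.utils.security.sanitize import sanitize_html


def maybe_sanitize(html: str, sanitize: bool = False) -> str:
    # App config = hard operator override; DB config = admin-panel toggle; `sanitize` = per-call request.
    if (
        current_app.config["HTML_SANITIZATION"] is True
        or bool(get_config("html_sanitization")) is True
        or sanitize is True
    ):
        return sanitize_html(html)
    return html
```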
{"golden_diff": "diff --git a/CTFd/admin/__init__.py b/CTFd/admin/__init__.py\n--- a/CTFd/admin/__init__.py\n+++ b/CTFd/admin/__init__.py\n@@ -48,7 +48,7 @@\n db,\n )\n from CTFd.utils import config as ctf_config\n-from CTFd.utils import get_config, set_config\n+from CTFd.utils import get_app_config, get_config, set_config\n from CTFd.utils.csv import dump_csv, load_challenges_csv, load_teams_csv, load_users_csv\n from CTFd.utils.decorators import admins_only\n from CTFd.utils.exports import background_import_ctf\n@@ -190,7 +190,14 @@\n except ValueError:\n pass\n \n- return render_template(\"admin/config.html\", themes=themes, **configs)\n+ force_html_sanitization = get_app_config(\"HTML_SANITIZATION\")\n+\n+ return render_template(\n+ \"admin/config.html\",\n+ themes=themes,\n+ **configs,\n+ force_html_sanitization=force_html_sanitization\n+ )\n \n \n @admin.route(\"/admin/reset\", methods=[\"GET\", \"POST\"])\ndiff --git a/CTFd/utils/config/pages.py b/CTFd/utils/config/pages.py\n--- a/CTFd/utils/config/pages.py\n+++ b/CTFd/utils/config/pages.py\n@@ -36,7 +36,11 @@\n \n def build_html(html, sanitize=False):\n html = format_variables(html)\n- if current_app.config[\"HTML_SANITIZATION\"] is True or sanitize is True:\n+ if (\n+ current_app.config[\"HTML_SANITIZATION\"] is True\n+ or bool(get_config(\"html_sanitization\")) is True\n+ or sanitize is True\n+ ):\n html = sanitize_html(html)\n return html\n \n@@ -44,7 +48,11 @@\n def build_markdown(md, sanitize=False):\n html = markdown(md)\n html = format_variables(html)\n- if current_app.config[\"HTML_SANITIZATION\"] is True or sanitize is True:\n+ if (\n+ current_app.config[\"HTML_SANITIZATION\"] is True\n+ or bool(get_config(\"html_sanitization\")) is True\n+ or sanitize is True\n+ ):\n html = sanitize_html(html)\n return html\n", "issue": "HTML_SANITIZATION controllable from config panel\nWe should maybe make HTML_SANITIZATION controlable from the admin panel so that Admins can decide how they want to deal with HTML. \r\n\r\nOf course this maybe could be done another way, it's just the general idea about the configuration value. 
\n", "before_files": [{"content": "import csv # noqa: I001\nimport datetime\nimport os\nfrom io import StringIO\n\nfrom flask import Blueprint, abort\nfrom flask import current_app as app\nfrom flask import (\n jsonify,\n redirect,\n render_template,\n render_template_string,\n request,\n send_file,\n url_for,\n)\n\nadmin = Blueprint(\"admin\", __name__)\n\n# isort:imports-firstparty\nfrom CTFd.admin import challenges # noqa: F401,I001\nfrom CTFd.admin import notifications # noqa: F401,I001\nfrom CTFd.admin import pages # noqa: F401,I001\nfrom CTFd.admin import scoreboard # noqa: F401,I001\nfrom CTFd.admin import statistics # noqa: F401,I001\nfrom CTFd.admin import submissions # noqa: F401,I001\nfrom CTFd.admin import teams # noqa: F401,I001\nfrom CTFd.admin import users # noqa: F401,I001\nfrom CTFd.cache import (\n cache,\n clear_challenges,\n clear_config,\n clear_pages,\n clear_standings,\n)\nfrom CTFd.models import (\n Awards,\n Challenges,\n Configs,\n Notifications,\n Pages,\n Solves,\n Submissions,\n Teams,\n Tracking,\n Unlocks,\n Users,\n db,\n)\nfrom CTFd.utils import config as ctf_config\nfrom CTFd.utils import get_config, set_config\nfrom CTFd.utils.csv import dump_csv, load_challenges_csv, load_teams_csv, load_users_csv\nfrom CTFd.utils.decorators import admins_only\nfrom CTFd.utils.exports import background_import_ctf\nfrom CTFd.utils.exports import export_ctf as export_ctf_util\nfrom CTFd.utils.security.auth import logout_user\nfrom CTFd.utils.uploads import delete_file\nfrom CTFd.utils.user import is_admin\n\n\[email protected](\"/admin\", methods=[\"GET\"])\ndef view():\n if is_admin():\n return redirect(url_for(\"admin.statistics\"))\n return redirect(url_for(\"auth.login\"))\n\n\[email protected](\"/admin/plugins/<plugin>\", methods=[\"GET\", \"POST\"])\n@admins_only\ndef plugin(plugin):\n if request.method == \"GET\":\n plugins_path = os.path.join(app.root_path, \"plugins\")\n\n config_html_plugins = [\n name\n for name in os.listdir(plugins_path)\n if os.path.isfile(os.path.join(plugins_path, name, \"config.html\"))\n ]\n\n if plugin in config_html_plugins:\n config_html = open(\n os.path.join(app.root_path, \"plugins\", plugin, \"config.html\")\n ).read()\n return render_template_string(config_html)\n abort(404)\n elif request.method == \"POST\":\n for k, v in request.form.items():\n if k == \"nonce\":\n continue\n set_config(k, v)\n with app.app_context():\n clear_config()\n return \"1\"\n\n\[email protected](\"/admin/import\", methods=[\"GET\", \"POST\"])\n@admins_only\ndef import_ctf():\n if request.method == \"GET\":\n start_time = cache.get(\"import_start_time\")\n end_time = cache.get(\"import_end_time\")\n import_status = cache.get(\"import_status\")\n import_error = cache.get(\"import_error\")\n return render_template(\n \"admin/import.html\",\n start_time=start_time,\n end_time=end_time,\n import_status=import_status,\n import_error=import_error,\n )\n elif request.method == \"POST\":\n backup = request.files[\"backup\"]\n background_import_ctf(backup)\n return redirect(url_for(\"admin.import_ctf\"))\n\n\[email protected](\"/admin/export\", methods=[\"GET\", \"POST\"])\n@admins_only\ndef export_ctf():\n backup = export_ctf_util()\n ctf_name = ctf_config.ctf_name()\n day = datetime.datetime.now().strftime(\"%Y-%m-%d_%T\")\n full_name = u\"{}.{}.zip\".format(ctf_name, day)\n return send_file(\n backup, cache_timeout=-1, as_attachment=True, attachment_filename=full_name\n )\n\n\[email protected](\"/admin/import/csv\", methods=[\"POST\"])\n@admins_only\ndef 
import_csv():\n csv_type = request.form[\"csv_type\"]\n # Try really hard to load data in properly no matter what nonsense Excel gave you\n raw = request.files[\"csv_file\"].stream.read()\n try:\n csvdata = raw.decode(\"utf-8-sig\")\n except UnicodeDecodeError:\n try:\n csvdata = raw.decode(\"cp1252\")\n except UnicodeDecodeError:\n csvdata = raw.decode(\"latin-1\")\n csvfile = StringIO(csvdata)\n\n loaders = {\n \"challenges\": load_challenges_csv,\n \"users\": load_users_csv,\n \"teams\": load_teams_csv,\n }\n\n loader = loaders[csv_type]\n reader = csv.DictReader(csvfile)\n success = loader(reader)\n if success is True:\n return redirect(url_for(\"admin.config\"))\n else:\n return jsonify(success), 500\n\n\[email protected](\"/admin/export/csv\")\n@admins_only\ndef export_csv():\n table = request.args.get(\"table\")\n\n output = dump_csv(name=table)\n\n return send_file(\n output,\n as_attachment=True,\n cache_timeout=-1,\n attachment_filename=\"{name}-{table}.csv\".format(\n name=ctf_config.ctf_name(), table=table\n ),\n )\n\n\[email protected](\"/admin/config\", methods=[\"GET\", \"POST\"])\n@admins_only\ndef config():\n # Clear the config cache so that we don't get stale values\n clear_config()\n\n configs = Configs.query.all()\n configs = {c.key: get_config(c.key) for c in configs}\n\n themes = ctf_config.get_themes()\n\n # Remove current theme but ignore failure\n try:\n themes.remove(get_config(\"ctf_theme\"))\n except ValueError:\n pass\n\n return render_template(\"admin/config.html\", themes=themes, **configs)\n\n\[email protected](\"/admin/reset\", methods=[\"GET\", \"POST\"])\n@admins_only\ndef reset():\n if request.method == \"POST\":\n require_setup = False\n logout = False\n next_url = url_for(\"admin.statistics\")\n\n data = request.form\n\n if data.get(\"pages\"):\n _pages = Pages.query.all()\n for p in _pages:\n for f in p.files:\n delete_file(file_id=f.id)\n\n Pages.query.delete()\n\n if data.get(\"notifications\"):\n Notifications.query.delete()\n\n if data.get(\"challenges\"):\n _challenges = Challenges.query.all()\n for c in _challenges:\n for f in c.files:\n delete_file(file_id=f.id)\n Challenges.query.delete()\n\n if data.get(\"accounts\"):\n Users.query.delete()\n Teams.query.delete()\n require_setup = True\n logout = True\n\n if data.get(\"submissions\"):\n Solves.query.delete()\n Submissions.query.delete()\n Awards.query.delete()\n Unlocks.query.delete()\n Tracking.query.delete()\n\n if require_setup:\n set_config(\"setup\", False)\n cache.clear()\n logout_user()\n next_url = url_for(\"views.setup\")\n\n db.session.commit()\n\n clear_pages()\n clear_standings()\n clear_challenges()\n clear_config()\n\n if logout is True:\n cache.clear()\n logout_user()\n\n db.session.close()\n return redirect(next_url)\n\n return render_template(\"admin/reset.html\")\n", "path": "CTFd/admin/__init__.py"}, {"content": "from flask import current_app\n\nfrom CTFd.cache import cache\nfrom CTFd.models import Pages, db\nfrom CTFd.utils import get_config, markdown\nfrom CTFd.utils.dates import isoformat, unix_time_to_utc\nfrom CTFd.utils.formatters import safe_format\nfrom CTFd.utils.security.sanitize import sanitize_html\n\n\ndef format_variables(content):\n ctf_name = get_config(\"ctf_name\")\n ctf_description = get_config(\"ctf_description\")\n ctf_start = get_config(\"start\")\n if ctf_start:\n ctf_start = isoformat(unix_time_to_utc(int(ctf_start)))\n\n ctf_end = get_config(\"end\")\n if ctf_end:\n ctf_end = isoformat(unix_time_to_utc(int(ctf_end)))\n\n ctf_freeze = 
get_config(\"freeze\")\n if ctf_freeze:\n ctf_freeze = isoformat(unix_time_to_utc(int(ctf_freeze)))\n\n content = safe_format(\n content,\n ctf_name=ctf_name,\n ctf_description=ctf_description,\n ctf_start=ctf_start,\n ctf_end=ctf_end,\n ctf_freeze=ctf_freeze,\n )\n return content\n\n\ndef build_html(html, sanitize=False):\n html = format_variables(html)\n if current_app.config[\"HTML_SANITIZATION\"] is True or sanitize is True:\n html = sanitize_html(html)\n return html\n\n\ndef build_markdown(md, sanitize=False):\n html = markdown(md)\n html = format_variables(html)\n if current_app.config[\"HTML_SANITIZATION\"] is True or sanitize is True:\n html = sanitize_html(html)\n return html\n\n\[email protected]()\ndef get_pages():\n db_pages = Pages.query.filter(\n Pages.route != \"index\", Pages.draft.isnot(True), Pages.hidden.isnot(True)\n ).all()\n return db_pages\n\n\[email protected]()\ndef get_page(route):\n page = db.session.execute(\n Pages.__table__.select()\n .where(Pages.route == route)\n .where(Pages.draft.isnot(True))\n ).fetchone()\n if page:\n # Convert the row into a transient ORM object so this change isn't commited accidentally\n p = Pages(**page)\n return p\n return None\n", "path": "CTFd/utils/config/pages.py"}], "after_files": [{"content": "import csv # noqa: I001\nimport datetime\nimport os\nfrom io import StringIO\n\nfrom flask import Blueprint, abort\nfrom flask import current_app as app\nfrom flask import (\n jsonify,\n redirect,\n render_template,\n render_template_string,\n request,\n send_file,\n url_for,\n)\n\nadmin = Blueprint(\"admin\", __name__)\n\n# isort:imports-firstparty\nfrom CTFd.admin import challenges # noqa: F401,I001\nfrom CTFd.admin import notifications # noqa: F401,I001\nfrom CTFd.admin import pages # noqa: F401,I001\nfrom CTFd.admin import scoreboard # noqa: F401,I001\nfrom CTFd.admin import statistics # noqa: F401,I001\nfrom CTFd.admin import submissions # noqa: F401,I001\nfrom CTFd.admin import teams # noqa: F401,I001\nfrom CTFd.admin import users # noqa: F401,I001\nfrom CTFd.cache import (\n cache,\n clear_challenges,\n clear_config,\n clear_pages,\n clear_standings,\n)\nfrom CTFd.models import (\n Awards,\n Challenges,\n Configs,\n Notifications,\n Pages,\n Solves,\n Submissions,\n Teams,\n Tracking,\n Unlocks,\n Users,\n db,\n)\nfrom CTFd.utils import config as ctf_config\nfrom CTFd.utils import get_app_config, get_config, set_config\nfrom CTFd.utils.csv import dump_csv, load_challenges_csv, load_teams_csv, load_users_csv\nfrom CTFd.utils.decorators import admins_only\nfrom CTFd.utils.exports import background_import_ctf\nfrom CTFd.utils.exports import export_ctf as export_ctf_util\nfrom CTFd.utils.security.auth import logout_user\nfrom CTFd.utils.uploads import delete_file\nfrom CTFd.utils.user import is_admin\n\n\[email protected](\"/admin\", methods=[\"GET\"])\ndef view():\n if is_admin():\n return redirect(url_for(\"admin.statistics\"))\n return redirect(url_for(\"auth.login\"))\n\n\[email protected](\"/admin/plugins/<plugin>\", methods=[\"GET\", \"POST\"])\n@admins_only\ndef plugin(plugin):\n if request.method == \"GET\":\n plugins_path = os.path.join(app.root_path, \"plugins\")\n\n config_html_plugins = [\n name\n for name in os.listdir(plugins_path)\n if os.path.isfile(os.path.join(plugins_path, name, \"config.html\"))\n ]\n\n if plugin in config_html_plugins:\n config_html = open(\n os.path.join(app.root_path, \"plugins\", plugin, \"config.html\")\n ).read()\n return render_template_string(config_html)\n abort(404)\n elif request.method 
== \"POST\":\n for k, v in request.form.items():\n if k == \"nonce\":\n continue\n set_config(k, v)\n with app.app_context():\n clear_config()\n return \"1\"\n\n\[email protected](\"/admin/import\", methods=[\"GET\", \"POST\"])\n@admins_only\ndef import_ctf():\n if request.method == \"GET\":\n start_time = cache.get(\"import_start_time\")\n end_time = cache.get(\"import_end_time\")\n import_status = cache.get(\"import_status\")\n import_error = cache.get(\"import_error\")\n return render_template(\n \"admin/import.html\",\n start_time=start_time,\n end_time=end_time,\n import_status=import_status,\n import_error=import_error,\n )\n elif request.method == \"POST\":\n backup = request.files[\"backup\"]\n background_import_ctf(backup)\n return redirect(url_for(\"admin.import_ctf\"))\n\n\[email protected](\"/admin/export\", methods=[\"GET\", \"POST\"])\n@admins_only\ndef export_ctf():\n backup = export_ctf_util()\n ctf_name = ctf_config.ctf_name()\n day = datetime.datetime.now().strftime(\"%Y-%m-%d_%T\")\n full_name = u\"{}.{}.zip\".format(ctf_name, day)\n return send_file(\n backup, cache_timeout=-1, as_attachment=True, attachment_filename=full_name\n )\n\n\[email protected](\"/admin/import/csv\", methods=[\"POST\"])\n@admins_only\ndef import_csv():\n csv_type = request.form[\"csv_type\"]\n # Try really hard to load data in properly no matter what nonsense Excel gave you\n raw = request.files[\"csv_file\"].stream.read()\n try:\n csvdata = raw.decode(\"utf-8-sig\")\n except UnicodeDecodeError:\n try:\n csvdata = raw.decode(\"cp1252\")\n except UnicodeDecodeError:\n csvdata = raw.decode(\"latin-1\")\n csvfile = StringIO(csvdata)\n\n loaders = {\n \"challenges\": load_challenges_csv,\n \"users\": load_users_csv,\n \"teams\": load_teams_csv,\n }\n\n loader = loaders[csv_type]\n reader = csv.DictReader(csvfile)\n success = loader(reader)\n if success is True:\n return redirect(url_for(\"admin.config\"))\n else:\n return jsonify(success), 500\n\n\[email protected](\"/admin/export/csv\")\n@admins_only\ndef export_csv():\n table = request.args.get(\"table\")\n\n output = dump_csv(name=table)\n\n return send_file(\n output,\n as_attachment=True,\n cache_timeout=-1,\n attachment_filename=\"{name}-{table}.csv\".format(\n name=ctf_config.ctf_name(), table=table\n ),\n )\n\n\[email protected](\"/admin/config\", methods=[\"GET\", \"POST\"])\n@admins_only\ndef config():\n # Clear the config cache so that we don't get stale values\n clear_config()\n\n configs = Configs.query.all()\n configs = {c.key: get_config(c.key) for c in configs}\n\n themes = ctf_config.get_themes()\n\n # Remove current theme but ignore failure\n try:\n themes.remove(get_config(\"ctf_theme\"))\n except ValueError:\n pass\n\n force_html_sanitization = get_app_config(\"HTML_SANITIZATION\")\n\n return render_template(\n \"admin/config.html\",\n themes=themes,\n **configs,\n force_html_sanitization=force_html_sanitization\n )\n\n\[email protected](\"/admin/reset\", methods=[\"GET\", \"POST\"])\n@admins_only\ndef reset():\n if request.method == \"POST\":\n require_setup = False\n logout = False\n next_url = url_for(\"admin.statistics\")\n\n data = request.form\n\n if data.get(\"pages\"):\n _pages = Pages.query.all()\n for p in _pages:\n for f in p.files:\n delete_file(file_id=f.id)\n\n Pages.query.delete()\n\n if data.get(\"notifications\"):\n Notifications.query.delete()\n\n if data.get(\"challenges\"):\n _challenges = Challenges.query.all()\n for c in _challenges:\n for f in c.files:\n delete_file(file_id=f.id)\n 
Challenges.query.delete()\n\n if data.get(\"accounts\"):\n Users.query.delete()\n Teams.query.delete()\n require_setup = True\n logout = True\n\n if data.get(\"submissions\"):\n Solves.query.delete()\n Submissions.query.delete()\n Awards.query.delete()\n Unlocks.query.delete()\n Tracking.query.delete()\n\n if require_setup:\n set_config(\"setup\", False)\n cache.clear()\n logout_user()\n next_url = url_for(\"views.setup\")\n\n db.session.commit()\n\n clear_pages()\n clear_standings()\n clear_challenges()\n clear_config()\n\n if logout is True:\n cache.clear()\n logout_user()\n\n db.session.close()\n return redirect(next_url)\n\n return render_template(\"admin/reset.html\")\n", "path": "CTFd/admin/__init__.py"}, {"content": "from flask import current_app\n\nfrom CTFd.cache import cache\nfrom CTFd.models import Pages, db\nfrom CTFd.utils import get_config, markdown\nfrom CTFd.utils.dates import isoformat, unix_time_to_utc\nfrom CTFd.utils.formatters import safe_format\nfrom CTFd.utils.security.sanitize import sanitize_html\n\n\ndef format_variables(content):\n ctf_name = get_config(\"ctf_name\")\n ctf_description = get_config(\"ctf_description\")\n ctf_start = get_config(\"start\")\n if ctf_start:\n ctf_start = isoformat(unix_time_to_utc(int(ctf_start)))\n\n ctf_end = get_config(\"end\")\n if ctf_end:\n ctf_end = isoformat(unix_time_to_utc(int(ctf_end)))\n\n ctf_freeze = get_config(\"freeze\")\n if ctf_freeze:\n ctf_freeze = isoformat(unix_time_to_utc(int(ctf_freeze)))\n\n content = safe_format(\n content,\n ctf_name=ctf_name,\n ctf_description=ctf_description,\n ctf_start=ctf_start,\n ctf_end=ctf_end,\n ctf_freeze=ctf_freeze,\n )\n return content\n\n\ndef build_html(html, sanitize=False):\n html = format_variables(html)\n if (\n current_app.config[\"HTML_SANITIZATION\"] is True\n or bool(get_config(\"html_sanitization\")) is True\n or sanitize is True\n ):\n html = sanitize_html(html)\n return html\n\n\ndef build_markdown(md, sanitize=False):\n html = markdown(md)\n html = format_variables(html)\n if (\n current_app.config[\"HTML_SANITIZATION\"] is True\n or bool(get_config(\"html_sanitization\")) is True\n or sanitize is True\n ):\n html = sanitize_html(html)\n return html\n\n\[email protected]()\ndef get_pages():\n db_pages = Pages.query.filter(\n Pages.route != \"index\", Pages.draft.isnot(True), Pages.hidden.isnot(True)\n ).all()\n return db_pages\n\n\[email protected]()\ndef get_page(route):\n page = db.session.execute(\n Pages.__table__.select()\n .where(Pages.route == route)\n .where(Pages.draft.isnot(True))\n ).fetchone()\n if page:\n # Convert the row into a transient ORM object so this change isn't commited accidentally\n p = Pages(**page)\n return p\n return None\n", "path": "CTFd/utils/config/pages.py"}]}
3,316
509
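Since "html_sanitization" is an ordinary config key, it can also be flipped programmatically with the same set_config/get_config helpers the admin blueprint above imports. A hypothetical snippet, assuming an active CTFd application context:

```python
# Hypothetical: toggle the admin-panel sanitization key from code (requires a CTFd app context).
from CTFd.utils import get_config, set_config

set_config("html_sanitization", True)   # turn sanitization on for page/markdown rendering
assert bool(get_config("html_sanitization")) is True
```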
gh_patches_debug_33737
rasdani/github-patches
git_diff
kserve__kserve-1720
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Alibi explainer example times out /kind bug **What steps did you take and what happened:** I am rolling out the Alibi Explainer example (as explained here https://github.com/kubeflow/kfserving/tree/v0.6.0/docs/samples/explanation/alibi/imagenet), although I have had to slightly adapt the InferenceService spec (see https://github.com/kubeflow/kfserving/issues/1707) as follows: ``` apiVersion: "serving.kubeflow.org/v1beta1" kind: "InferenceService" metadata: name: imagenet namespace: karl-schriek spec: predictor: tensorflow: storageUri: "gs://seldon-models/tfserving/imagenet/model" resources: requests: cpu: 0.1 memory: 5Gi limits: memory: 10Gi explainer: alibi: type: AnchorImages storageUri: "gs://seldon-models/tfserving/imagenet/explainer" config: batch_size: "1" # reduced from 25 in attempt to prevent timeout stop_on_first: "True" min_samples_start: "1" # added in attempt to prevent timeout resources: requests: cpu: 0.5 memory: 5Gi limits: memory: 10Gi ``` Predictor and explainer both roll out successfully. Requesting predictions work fine. Requesting an explaination on a single image results in an eventual timeout. The explainer Pod reports the following: ``` [I 210709 11:10:54 anchor_images:47] Calling explain on image of shape ((1, 299, 299, 3),) [I 210709 11:10:54 anchor_images:48] anchor image call with {'batch_size': 1, 'stop_on_first': True} ``` After about 20 minutes it eventually also says the following: ``` skimage.measure.label's indexing starts from 0. In future version it will start from 1. To disable this warning, explicitely set the `start_label` parameter to 1. [E 210709 11:30:11 web:1793] Uncaught exception POST /v1/models/imagenet:explain (127.0.0.1) HTTPServerRequest(protocol='http', host='imagenet-explainer-default.karl-schriek.svc.cluster.local', method='POST', uri='/v1/models/imagenet:explain', version='HTTP/1.1', remote_ip='127.0.0.1') Traceback (most recent call last): File "/usr/local/lib/python3.7/site-packages/tornado/web.py", line 1704, in _execute result = await result File "/kfserving/kfserving/handlers/http.py", line 105, in post response = await model(body, model_type=ModelType.EXPLAINER) File "/kfserving/kfserving/kfmodel.py", line 58, in __call__ else self.explain(request) File "/alibiexplainer/alibiexplainer/explainer.py", line 84, in explain explanation = self.wrapper.explain(request["instances"]) File "/alibiexplainer/alibiexplainer/anchor_images.py", line 49, in explain anchor_exp = self.anchors_image.explain(arr[0], **self.kwargs) File "/usr/local/lib/python3.7/site-packages/alibi/explainers/anchor_image.py", line 409, in explain return self.build_explanation(image, result, self.instance_label, params) File "/usr/local/lib/python3.7/site-packages/alibi/explainers/anchor_image.py", line 447, in build_explanation explanation = Explanation(meta=copy.deepcopy(self.meta), data=data) AttributeError: 'AnchorImage' object has no attribute 'meta' [E 210709 11:30:11 web:2243] 500 POST /v1/models/imagenet:explain (127.0.0.1) 1157265.30ms ``` The documentation states that the explanation may take a while, but surely it shouldn't time out with just a single image? This looks like a timeout, but I am not really sure if the `AttributeError: 'AnchorImage' object has no attribute 'meta'` suggests otherwise. **What did you expect to happen:** I would expect the request to succeed within a reasonable space of time. 
- Istio Version: 1.10.2 - Knative Version: 0.24 - KFServing Version: 0.6.0 - Kubeflow version: 1.3+ - Kubernetes version: 1.20 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `python/alibiexplainer/setup.py` Content: ``` 1 # Copyright 2019 kubeflow.org. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from setuptools import setup, find_packages 16 17 tests_require = [ 18 'pytest', 19 'pytest-tornasync', 20 'mypy' 21 ] 22 23 setup( 24 name='alibiexplainer', 25 version='0.6.0', 26 author_email='[email protected]', 27 license='../../LICENSE.txt', 28 url='https://github.com/kubeflow/kfserving/python/kfserving/alibiexplainer', 29 description='Model Explaination Server. \ 30 Not intended for use outside KFServing Frameworks Images', 31 long_description=open('README.md').read(), 32 python_requires='>=3.6', 33 packages=find_packages("alibiexplainer"), 34 install_requires=[ 35 "tensorflow==2.3.2", 36 "kfserving>=0.6.0", 37 "pandas>=0.24.2", 38 "nest_asyncio>=1.4.0", 39 "alibi==0.5.5", 40 "scikit-learn == 0.20.3", 41 "argparse>=1.4.0", 42 "requests>=2.22.0", 43 "joblib>=0.13.2", 44 "dill>=0.3.0", 45 "grpcio>=1.22.0", 46 "xgboost==1.0.2", 47 "shap==0.36.0", 48 "numpy<1.19.0" 49 ], 50 tests_require=tests_require, 51 extras_require={'test': tests_require} 52 ) 53 ``` Path: `docs/samples/explanation/alibi/imagenet/train_explainer.py` Content: ``` 1 from tensorflow.keras.applications.inception_v3 import InceptionV3 2 from alibi.explainers import AnchorImage 3 import dill 4 5 model = InceptionV3(weights='imagenet') 6 7 segmentation_fn = 'slic' 8 kwargs = {'n_segments': 15, 'compactness': 20, 'sigma': .5} 9 image_shape = (299, 299, 3) 10 explainer = AnchorImage(lambda x: model.predict(x), image_shape, segmentation_fn=segmentation_fn, 11 segmentation_kwargs=kwargs, 12 images_background=None) 13 14 15 explainer.predict_fn = None # Clear explainer predict_fn as its a lambda and will be reset when loaded 16 with open("explainer.dill", 'wb') as f: 17 dill.dump(explainer, f) 18 ``` Path: `python/alibiexplainer/alibiexplainer/anchor_text.py` Content: ``` 1 # Copyright 2019 kubeflow.org. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 
14 import kfserving 15 import logging 16 import numpy as np 17 import spacy 18 import alibi 19 from alibi.api.interfaces import Explanation 20 from alibi.utils.download import spacy_model 21 from alibi.utils.wrappers import ArgmaxTransformer 22 from alibiexplainer.explainer_wrapper import ExplainerWrapper 23 from typing import Callable, List, Optional 24 25 logging.basicConfig(level=kfserving.constants.KFSERVING_LOGLEVEL) 26 27 28 class AnchorText(ExplainerWrapper): 29 def __init__( 30 self, 31 predict_fn: Callable, 32 explainer: Optional[alibi.explainers.AnchorText], 33 spacy_language_model: str = "en_core_web_md", 34 **kwargs 35 ): 36 self.predict_fn = predict_fn 37 self.kwargs = kwargs 38 logging.info("Anchor Text args %s", self.kwargs) 39 if explainer is None: 40 logging.info("Loading Spacy Language model for %s", spacy_language_model) 41 spacy_model(model=spacy_language_model) 42 self.nlp = spacy.load(spacy_language_model) 43 logging.info("Language model loaded") 44 self.anchors_text = explainer 45 46 def explain(self, inputs: List) -> Explanation: 47 if self.anchors_text is None: 48 self.anchors_text = alibi.explainers.AnchorText(self.nlp, self.predict_fn) 49 50 # We assume the input has batch dimension but Alibi explainers presently assume no batch 51 input_words = inputs[0] 52 53 # check if predictor returns predicted class or prediction probabilities for each class 54 # if needed adjust predictor so it returns the predicted class 55 if np.argmax(self.predict_fn([input_words]).shape) == 0: 56 self.anchors_text.predictor = self.predict_fn 57 else: 58 self.anchors_text.predictor = ArgmaxTransformer(self.predict_fn) 59 60 anchor_exp = self.anchors_text.explain(input_words, **self.kwargs) 61 return anchor_exp 62 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/docs/samples/explanation/alibi/imagenet/train_explainer.py b/docs/samples/explanation/alibi/imagenet/train_explainer.py deleted file mode 100644 --- a/docs/samples/explanation/alibi/imagenet/train_explainer.py +++ /dev/null @@ -1,17 +0,0 @@ -from tensorflow.keras.applications.inception_v3 import InceptionV3 -from alibi.explainers import AnchorImage -import dill - -model = InceptionV3(weights='imagenet') - -segmentation_fn = 'slic' -kwargs = {'n_segments': 15, 'compactness': 20, 'sigma': .5} -image_shape = (299, 299, 3) -explainer = AnchorImage(lambda x: model.predict(x), image_shape, segmentation_fn=segmentation_fn, - segmentation_kwargs=kwargs, - images_background=None) - - -explainer.predict_fn = None # Clear explainer predict_fn as its a lambda and will be reset when loaded -with open("explainer.dill", 'wb') as f: - dill.dump(explainer, f) diff --git a/python/alibiexplainer/alibiexplainer/anchor_text.py b/python/alibiexplainer/alibiexplainer/anchor_text.py --- a/python/alibiexplainer/alibiexplainer/anchor_text.py +++ b/python/alibiexplainer/alibiexplainer/anchor_text.py @@ -45,7 +45,9 @@ def explain(self, inputs: List) -> Explanation: if self.anchors_text is None: - self.anchors_text = alibi.explainers.AnchorText(self.nlp, self.predict_fn) + self.anchors_text = alibi.explainers.AnchorText(predictor=self.predict_fn, + sampling_strategy='unknown', + nlp=self.nlp) # We assume the input has batch dimension but Alibi explainers presently assume no batch input_words = inputs[0] diff --git a/python/alibiexplainer/setup.py b/python/alibiexplainer/setup.py --- a/python/alibiexplainer/setup.py +++ b/python/alibiexplainer/setup.py @@ -36,7 +36,7 @@ "kfserving>=0.6.0", "pandas>=0.24.2", "nest_asyncio>=1.4.0", - "alibi==0.5.5", + "alibi==0.6.0", "scikit-learn == 0.20.3", "argparse>=1.4.0", "requests>=2.22.0", @@ -44,8 +44,9 @@ "dill>=0.3.0", "grpcio>=1.22.0", "xgboost==1.0.2", - "shap==0.36.0", - "numpy<1.19.0" + "shap==0.39.0", + "numpy<1.19.0", + 'spacy[lookups]>=2.0.0, <4.0.0' ], tests_require=tests_require, extras_require={'test': tests_require}
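Alongside the shap and spacy pin changes, the anchor_text.py hunk above migrates the AnchorText construction to the keyword-based API that accompanies the alibi 0.6.0 bump. A minimal sketch of the new call; `predict_fn` (a prediction callable) and `nlp` (a loaded spaCy model) are assumed to exist, and the strategy value mirrors the one chosen in the diff:

```python
# Sketch of the alibi >= 0.6 AnchorText construction used in the diff above.
import alibi

explainer = alibi.explainers.AnchorText(
    predictor=predict_fn,          # assumed: callable mapping a list of texts to model outputs
    sampling_strategy="unknown",   # perturb inputs by masking words with UNK tokens, as in the patch
    nlp=nlp,                       # assumed: spaCy model, e.g. spacy.load("en_core_web_md")
)
explanation = explainer.explain("some input text")
```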
{"golden_diff": "diff --git a/docs/samples/explanation/alibi/imagenet/train_explainer.py b/docs/samples/explanation/alibi/imagenet/train_explainer.py\ndeleted file mode 100644\n--- a/docs/samples/explanation/alibi/imagenet/train_explainer.py\n+++ /dev/null\n@@ -1,17 +0,0 @@\n-from tensorflow.keras.applications.inception_v3 import InceptionV3\n-from alibi.explainers import AnchorImage\n-import dill\n-\n-model = InceptionV3(weights='imagenet')\n-\n-segmentation_fn = 'slic'\n-kwargs = {'n_segments': 15, 'compactness': 20, 'sigma': .5}\n-image_shape = (299, 299, 3)\n-explainer = AnchorImage(lambda x: model.predict(x), image_shape, segmentation_fn=segmentation_fn,\n- segmentation_kwargs=kwargs,\n- images_background=None)\n-\n-\n-explainer.predict_fn = None # Clear explainer predict_fn as its a lambda and will be reset when loaded\n-with open(\"explainer.dill\", 'wb') as f:\n- dill.dump(explainer, f)\ndiff --git a/python/alibiexplainer/alibiexplainer/anchor_text.py b/python/alibiexplainer/alibiexplainer/anchor_text.py\n--- a/python/alibiexplainer/alibiexplainer/anchor_text.py\n+++ b/python/alibiexplainer/alibiexplainer/anchor_text.py\n@@ -45,7 +45,9 @@\n \n def explain(self, inputs: List) -> Explanation:\n if self.anchors_text is None:\n- self.anchors_text = alibi.explainers.AnchorText(self.nlp, self.predict_fn)\n+ self.anchors_text = alibi.explainers.AnchorText(predictor=self.predict_fn,\n+ sampling_strategy='unknown',\n+ nlp=self.nlp)\n \n # We assume the input has batch dimension but Alibi explainers presently assume no batch\n input_words = inputs[0]\ndiff --git a/python/alibiexplainer/setup.py b/python/alibiexplainer/setup.py\n--- a/python/alibiexplainer/setup.py\n+++ b/python/alibiexplainer/setup.py\n@@ -36,7 +36,7 @@\n \"kfserving>=0.6.0\",\n \"pandas>=0.24.2\",\n \"nest_asyncio>=1.4.0\",\n- \"alibi==0.5.5\",\n+ \"alibi==0.6.0\",\n \"scikit-learn == 0.20.3\",\n \"argparse>=1.4.0\",\n \"requests>=2.22.0\",\n@@ -44,8 +44,9 @@\n \"dill>=0.3.0\",\n \"grpcio>=1.22.0\",\n \"xgboost==1.0.2\",\n- \"shap==0.36.0\",\n- \"numpy<1.19.0\"\n+ \"shap==0.39.0\",\n+ \"numpy<1.19.0\",\n+ 'spacy[lookups]>=2.0.0, <4.0.0'\n ],\n tests_require=tests_require,\n extras_require={'test': tests_require}\n", "issue": "Alibi explainer example times out\n/kind bug\r\n\r\n**What steps did you take and what happened:**\r\n\r\nI am rolling out the Alibi Explainer example (as explained here https://github.com/kubeflow/kfserving/tree/v0.6.0/docs/samples/explanation/alibi/imagenet), although I have had to slightly adapt the InferenceService spec (see https://github.com/kubeflow/kfserving/issues/1707) as follows:\r\n\r\n```\r\napiVersion: \"serving.kubeflow.org/v1beta1\"\r\nkind: \"InferenceService\"\r\nmetadata:\r\n name: imagenet\r\n namespace: karl-schriek\r\nspec:\r\n predictor:\r\n tensorflow:\r\n storageUri: \"gs://seldon-models/tfserving/imagenet/model\"\r\n resources:\r\n requests:\r\n cpu: 0.1\r\n memory: 5Gi \r\n limits:\r\n memory: 10Gi\r\n explainer:\r\n alibi:\r\n type: AnchorImages\r\n storageUri: \"gs://seldon-models/tfserving/imagenet/explainer\"\r\n config:\r\n batch_size: \"1\" # reduced from 25 in attempt to prevent timeout\r\n stop_on_first: \"True\"\r\n min_samples_start: \"1\" # added in attempt to prevent timeout\r\n resources:\r\n requests:\r\n cpu: 0.5\r\n memory: 5Gi \r\n limits:\r\n memory: 10Gi \r\n```\r\n\r\nPredictor and explainer both roll out successfully. Requesting predictions work fine. Requesting an explaination on a single image results in an eventual timeout. 
The explainer Pod reports the following:\r\n\r\n```\r\n[I 210709 11:10:54 anchor_images:47] Calling explain on image of shape ((1, 299, 299, 3),)\r\n[I 210709 11:10:54 anchor_images:48] anchor image call with {'batch_size': 1, 'stop_on_first': True}\r\n```\r\n\r\nAfter about 20 minutes it eventually also says the following:\r\n\r\n```\r\nskimage.measure.label's indexing starts from 0. In future version it will start from 1. To disable this warning, explicitely set the `start_label` parameter to 1.\r\n[E 210709 11:30:11 web:1793] Uncaught exception POST /v1/models/imagenet:explain (127.0.0.1)\r\n HTTPServerRequest(protocol='http', host='imagenet-explainer-default.karl-schriek.svc.cluster.local', method='POST', uri='/v1/models/imagenet:explain', version='HTTP/1.1', remote_ip='127.0.0.1')\r\n Traceback (most recent call last):\r\n File \"/usr/local/lib/python3.7/site-packages/tornado/web.py\", line 1704, in _execute\r\n result = await result\r\n File \"/kfserving/kfserving/handlers/http.py\", line 105, in post\r\n response = await model(body, model_type=ModelType.EXPLAINER)\r\n File \"/kfserving/kfserving/kfmodel.py\", line 58, in __call__\r\n else self.explain(request)\r\n File \"/alibiexplainer/alibiexplainer/explainer.py\", line 84, in explain\r\n explanation = self.wrapper.explain(request[\"instances\"])\r\n File \"/alibiexplainer/alibiexplainer/anchor_images.py\", line 49, in explain\r\n anchor_exp = self.anchors_image.explain(arr[0], **self.kwargs)\r\n File \"/usr/local/lib/python3.7/site-packages/alibi/explainers/anchor_image.py\", line 409, in explain\r\n return self.build_explanation(image, result, self.instance_label, params)\r\n File \"/usr/local/lib/python3.7/site-packages/alibi/explainers/anchor_image.py\", line 447, in build_explanation\r\n explanation = Explanation(meta=copy.deepcopy(self.meta), data=data)\r\n AttributeError: 'AnchorImage' object has no attribute 'meta'\r\n[E 210709 11:30:11 web:2243] 500 POST /v1/models/imagenet:explain (127.0.0.1) 1157265.30ms\r\n```\r\n\r\nThe documentation states that the explanation may take a while, but surely it shouldn't time out with just a single image? This looks like a timeout, but I am not really sure if the `AttributeError: 'AnchorImage' object has no attribute 'meta'` suggests otherwise.\r\n\r\n**What did you expect to happen:**\r\n\r\nI would expect the request to succeed within a reasonable space of time.\r\n\r\n\r\n- Istio Version: 1.10.2\r\n- Knative Version: 0.24\r\n- KFServing Version: 0.6.0\r\n- Kubeflow version: 1.3+\r\n- Kubernetes version: 1.20\r\n\n", "before_files": [{"content": "# Copyright 2019 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'pytest',\n 'pytest-tornasync',\n 'mypy'\n]\n\nsetup(\n name='alibiexplainer',\n version='0.6.0',\n author_email='[email protected]',\n license='../../LICENSE.txt',\n url='https://github.com/kubeflow/kfserving/python/kfserving/alibiexplainer',\n description='Model Explaination Server. 
\\\n Not intended for use outside KFServing Frameworks Images',\n long_description=open('README.md').read(),\n python_requires='>=3.6',\n packages=find_packages(\"alibiexplainer\"),\n install_requires=[\n \"tensorflow==2.3.2\",\n \"kfserving>=0.6.0\",\n \"pandas>=0.24.2\",\n \"nest_asyncio>=1.4.0\",\n \"alibi==0.5.5\",\n \"scikit-learn == 0.20.3\",\n \"argparse>=1.4.0\",\n \"requests>=2.22.0\",\n \"joblib>=0.13.2\",\n \"dill>=0.3.0\",\n \"grpcio>=1.22.0\",\n \"xgboost==1.0.2\",\n \"shap==0.36.0\",\n \"numpy<1.19.0\"\n ],\n tests_require=tests_require,\n extras_require={'test': tests_require}\n)\n", "path": "python/alibiexplainer/setup.py"}, {"content": "from tensorflow.keras.applications.inception_v3 import InceptionV3\nfrom alibi.explainers import AnchorImage\nimport dill\n\nmodel = InceptionV3(weights='imagenet')\n\nsegmentation_fn = 'slic'\nkwargs = {'n_segments': 15, 'compactness': 20, 'sigma': .5}\nimage_shape = (299, 299, 3)\nexplainer = AnchorImage(lambda x: model.predict(x), image_shape, segmentation_fn=segmentation_fn,\n segmentation_kwargs=kwargs,\n images_background=None)\n\n\nexplainer.predict_fn = None # Clear explainer predict_fn as its a lambda and will be reset when loaded\nwith open(\"explainer.dill\", 'wb') as f:\n dill.dump(explainer, f)\n", "path": "docs/samples/explanation/alibi/imagenet/train_explainer.py"}, {"content": "# Copyright 2019 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport kfserving\nimport logging\nimport numpy as np\nimport spacy\nimport alibi\nfrom alibi.api.interfaces import Explanation\nfrom alibi.utils.download import spacy_model\nfrom alibi.utils.wrappers import ArgmaxTransformer\nfrom alibiexplainer.explainer_wrapper import ExplainerWrapper\nfrom typing import Callable, List, Optional\n\nlogging.basicConfig(level=kfserving.constants.KFSERVING_LOGLEVEL)\n\n\nclass AnchorText(ExplainerWrapper):\n def __init__(\n self,\n predict_fn: Callable,\n explainer: Optional[alibi.explainers.AnchorText],\n spacy_language_model: str = \"en_core_web_md\",\n **kwargs\n ):\n self.predict_fn = predict_fn\n self.kwargs = kwargs\n logging.info(\"Anchor Text args %s\", self.kwargs)\n if explainer is None:\n logging.info(\"Loading Spacy Language model for %s\", spacy_language_model)\n spacy_model(model=spacy_language_model)\n self.nlp = spacy.load(spacy_language_model)\n logging.info(\"Language model loaded\")\n self.anchors_text = explainer\n\n def explain(self, inputs: List) -> Explanation:\n if self.anchors_text is None:\n self.anchors_text = alibi.explainers.AnchorText(self.nlp, self.predict_fn)\n\n # We assume the input has batch dimension but Alibi explainers presently assume no batch\n input_words = inputs[0]\n\n # check if predictor returns predicted class or prediction probabilities for each class\n # if needed adjust predictor so it returns the predicted class\n if np.argmax(self.predict_fn([input_words]).shape) == 0:\n self.anchors_text.predictor = self.predict_fn\n else:\n self.anchors_text.predictor = 
ArgmaxTransformer(self.predict_fn)\n\n anchor_exp = self.anchors_text.explain(input_words, **self.kwargs)\n return anchor_exp\n", "path": "python/alibiexplainer/alibiexplainer/anchor_text.py"}], "after_files": [{"content": "# Copyright 2019 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom setuptools import setup, find_packages\n\ntests_require = [\n 'pytest',\n 'pytest-tornasync',\n 'mypy'\n]\n\nsetup(\n name='alibiexplainer',\n version='0.6.0',\n author_email='[email protected]',\n license='../../LICENSE.txt',\n url='https://github.com/kubeflow/kfserving/python/kfserving/alibiexplainer',\n description='Model Explaination Server. \\\n Not intended for use outside KFServing Frameworks Images',\n long_description=open('README.md').read(),\n python_requires='>=3.6',\n packages=find_packages(\"alibiexplainer\"),\n install_requires=[\n \"tensorflow==2.3.2\",\n \"kfserving>=0.6.0\",\n \"pandas>=0.24.2\",\n \"nest_asyncio>=1.4.0\",\n \"alibi==0.6.0\",\n \"scikit-learn == 0.20.3\",\n \"argparse>=1.4.0\",\n \"requests>=2.22.0\",\n \"joblib>=0.13.2\",\n \"dill>=0.3.0\",\n \"grpcio>=1.22.0\",\n \"xgboost==1.0.2\",\n \"shap==0.39.0\",\n \"numpy<1.19.0\",\n 'spacy[lookups]>=2.0.0, <4.0.0'\n ],\n tests_require=tests_require,\n extras_require={'test': tests_require}\n)\n", "path": "python/alibiexplainer/setup.py"}, {"content": null, "path": "docs/samples/explanation/alibi/imagenet/train_explainer.py"}, {"content": "# Copyright 2019 kubeflow.org.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport kfserving\nimport logging\nimport numpy as np\nimport spacy\nimport alibi\nfrom alibi.api.interfaces import Explanation\nfrom alibi.utils.download import spacy_model\nfrom alibi.utils.wrappers import ArgmaxTransformer\nfrom alibiexplainer.explainer_wrapper import ExplainerWrapper\nfrom typing import Callable, List, Optional\n\nlogging.basicConfig(level=kfserving.constants.KFSERVING_LOGLEVEL)\n\n\nclass AnchorText(ExplainerWrapper):\n def __init__(\n self,\n predict_fn: Callable,\n explainer: Optional[alibi.explainers.AnchorText],\n spacy_language_model: str = \"en_core_web_md\",\n **kwargs\n ):\n self.predict_fn = predict_fn\n self.kwargs = kwargs\n logging.info(\"Anchor Text args %s\", self.kwargs)\n if explainer is None:\n logging.info(\"Loading Spacy Language model for %s\", spacy_language_model)\n spacy_model(model=spacy_language_model)\n self.nlp = spacy.load(spacy_language_model)\n logging.info(\"Language model loaded\")\n self.anchors_text = explainer\n\n def explain(self, inputs: 
List) -> Explanation:\n if self.anchors_text is None:\n self.anchors_text = alibi.explainers.AnchorText(predictor=self.predict_fn,\n sampling_strategy='unknown',\n nlp=self.nlp)\n\n # We assume the input has batch dimension but Alibi explainers presently assume no batch\n input_words = inputs[0]\n\n # check if predictor returns predicted class or prediction probabilities for each class\n # if needed adjust predictor so it returns the predicted class\n if np.argmax(self.predict_fn([input_words]).shape) == 0:\n self.anchors_text.predictor = self.predict_fn\n else:\n self.anchors_text.predictor = ArgmaxTransformer(self.predict_fn)\n\n anchor_exp = self.anchors_text.explain(input_words, **self.kwargs)\n return anchor_exp\n", "path": "python/alibiexplainer/alibiexplainer/anchor_text.py"}]}
2,843
730
gh_patches_debug_32755
rasdani/github-patches
git_diff
pytorch__vision-7701
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- datapoints cannot be deepcopied ```py from torchvision.prototype import features from copy import copy, deepcopy import torch image = features.Image(torch.rand(3, 16, 16)) image_copy = copy(image) # works image_deepcopy = deepcopy(image) # fails ``` ``` RuntimeError: The default implementation of __deepcopy__() for non-wrapper subclasses only works for subclass types that implement new_empty() and for which that function returns another instance of the same subclass. You should either properly implement new_empty() for your subclass or override __deepcopy__() if it is intended behavior for new_empty() to return an instance of a different type. ``` The error message is giving us two options: 1. Override [`new_empty`](https://pytorch.org/docs/stable/generated/torch.Tensor.new_empty.html): according to the error message, this is preferred since we want to return an instance of the same type. Some of our features have internal guards for the shape like https://github.com/pytorch/vision/blob/b4b246a52ebbe29bff9099c6fbabff0b82d7edd3/torchvision/prototype/features/_image.py#L49-L50 and the plan is to expand this to all features, since the transformations rely on a certain shape. However, you can call `new_empty(size)` with an arbitrary size and the default implementation of `__deepcopy__` does so with an [empty list `[]`](https://github.com/pytorch/pytorch/blob/432c508e71111f9d5382322e0e6b1bc1c66bf0ec/torch/_tensor.py#L181). Later on, [the correct shape of the copied tensor is set](https://github.com/pytorch/pytorch/blob/432c508e71111f9d5382322e0e6b1bc1c66bf0ec/torch/_tensor.py#L191-L193), but we still need to be able to create the tensor with an incorrect shape. We could left-pad the `size` with zeros to get to the correct number of dimensions in case it is needed. 2. Override `__deepcopy__`: Maybe I'm to naive here, but a deepcopy should just be a `.clone()` of the tensor as well as a deepcopy of attributes where it is needed, e.g. `Label.categories`. Not sure if there is a downside, but that seems easier than 1. while achieving the same goal. cc @bjuncek --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
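A minimal sketch of option 2 from the issue above, which is also the route the accompanying diff takes: deep-copy via detach().clone(), then restore requires_grad (detach() clears it). The class here is a stand-in, not torchvision's actual Datapoint, and metadata such as Label.categories is deliberately ignored; the diff below shows the repository's own implementation.

```python
# Stand-in subclass illustrating option 2 (override __deepcopy__); not torchvision code.
import copy
from typing import Any, Dict

import torch


class MyDatapoint(torch.Tensor):
    def __deepcopy__(self, memo: Dict[int, Any]) -> "MyDatapoint":
        # detach() keeps the copy out of the autograd graph, clone() copies storage,
        # and requires_grad_ restores the flag that detach() dropped.
        return self.detach().clone().requires_grad_(self.requires_grad)


image = torch.rand(3, 16, 16).as_subclass(MyDatapoint)
image_copy = copy.deepcopy(image)                   # no longer raises
assert image_copy.data_ptr() != image.data_ptr()    # independent storage
```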
--- BEGIN FILES --- Path: `torchvision/datapoints/_datapoint.py` Content: ``` 1 from __future__ import annotations 2 3 from types import ModuleType 4 from typing import Any, Callable, List, Mapping, Optional, Sequence, Tuple, Type, TypeVar, Union 5 6 import PIL.Image 7 import torch 8 from torch._C import DisableTorchFunctionSubclass 9 from torch.types import _device, _dtype, _size 10 from torchvision.transforms import InterpolationMode 11 12 13 D = TypeVar("D", bound="Datapoint") 14 _FillType = Union[int, float, Sequence[int], Sequence[float], None] 15 _FillTypeJIT = Optional[List[float]] 16 17 18 class Datapoint(torch.Tensor): 19 __F: Optional[ModuleType] = None 20 21 @staticmethod 22 def _to_tensor( 23 data: Any, 24 dtype: Optional[torch.dtype] = None, 25 device: Optional[Union[torch.device, str, int]] = None, 26 requires_grad: Optional[bool] = None, 27 ) -> torch.Tensor: 28 if requires_grad is None: 29 requires_grad = data.requires_grad if isinstance(data, torch.Tensor) else False 30 return torch.as_tensor(data, dtype=dtype, device=device).requires_grad_(requires_grad) 31 32 @classmethod 33 def wrap_like(cls: Type[D], other: D, tensor: torch.Tensor) -> D: 34 raise NotImplementedError 35 36 _NO_WRAPPING_EXCEPTIONS = { 37 torch.Tensor.clone: lambda cls, input, output: cls.wrap_like(input, output), 38 torch.Tensor.to: lambda cls, input, output: cls.wrap_like(input, output), 39 # We don't need to wrap the output of `Tensor.requires_grad_`, since it is an inplace operation and thus 40 # retains the type automatically 41 torch.Tensor.requires_grad_: lambda cls, input, output: output, 42 } 43 44 @classmethod 45 def __torch_function__( 46 cls, 47 func: Callable[..., torch.Tensor], 48 types: Tuple[Type[torch.Tensor], ...], 49 args: Sequence[Any] = (), 50 kwargs: Optional[Mapping[str, Any]] = None, 51 ) -> torch.Tensor: 52 """For general information about how the __torch_function__ protocol works, 53 see https://pytorch.org/docs/stable/notes/extending.html#extending-torch 54 55 TL;DR: Every time a PyTorch operator is called, it goes through the inputs and looks for the 56 ``__torch_function__`` method. If one is found, it is invoked with the operator as ``func`` as well as the 57 ``args`` and ``kwargs`` of the original call. 58 59 The default behavior of :class:`~torch.Tensor`'s is to retain a custom tensor type. For the :class:`Datapoint` 60 use case, this has two downsides: 61 62 1. Since some :class:`Datapoint`'s require metadata to be constructed, the default wrapping, i.e. 63 ``return cls(func(*args, **kwargs))``, will fail for them. 64 2. For most operations, there is no way of knowing if the input type is still valid for the output. 65 66 For these reasons, the automatic output wrapping is turned off for most operators. The only exceptions are 67 listed in :attr:`Datapoint._NO_WRAPPING_EXCEPTIONS` 68 """ 69 # Since super().__torch_function__ has no hook to prevent the coercing of the output into the input type, we 70 # need to reimplement the functionality. 71 72 if not all(issubclass(cls, t) for t in types): 73 return NotImplemented 74 75 with DisableTorchFunctionSubclass(): 76 output = func(*args, **kwargs or dict()) 77 78 wrapper = cls._NO_WRAPPING_EXCEPTIONS.get(func) 79 # Apart from `func` needing to be an exception, we also require the primary operand, i.e. `args[0]`, to be 80 # an instance of the class that `__torch_function__` was invoked on. The __torch_function__ protocol will 81 # invoke this method on *all* types involved in the computation by walking the MRO upwards. 
For example, 82 # `torch.Tensor(...).to(datapoints.Image(...))` will invoke `datapoints.Image.__torch_function__` with 83 # `args = (torch.Tensor(), datapoints.Image())` first. Without this guard, the original `torch.Tensor` would 84 # be wrapped into a `datapoints.Image`. 85 if wrapper and isinstance(args[0], cls): 86 return wrapper(cls, args[0], output) 87 88 # Inplace `func`'s, canonically identified with a trailing underscore in their name like `.add_(...)`, 89 # will retain the input type. Thus, we need to unwrap here. 90 if isinstance(output, cls): 91 return output.as_subclass(torch.Tensor) 92 93 return output 94 95 def _make_repr(self, **kwargs: Any) -> str: 96 # This is a poor man's implementation of the proposal in https://github.com/pytorch/pytorch/issues/76532. 97 # If that ever gets implemented, remove this in favor of the solution on the `torch.Tensor` class. 98 extra_repr = ", ".join(f"{key}={value}" for key, value in kwargs.items()) 99 return f"{super().__repr__()[:-1]}, {extra_repr})" 100 101 @property 102 def _F(self) -> ModuleType: 103 # This implements a lazy import of the functional to get around the cyclic import. This import is deferred 104 # until the first time we need reference to the functional module and it's shared across all instances of 105 # the class. This approach avoids the DataLoader issue described at 106 # https://github.com/pytorch/vision/pull/6476#discussion_r953588621 107 if Datapoint.__F is None: 108 from ..transforms.v2 import functional 109 110 Datapoint.__F = functional 111 return Datapoint.__F 112 113 # Add properties for common attributes like shape, dtype, device, ndim etc 114 # this way we return the result without passing into __torch_function__ 115 @property 116 def shape(self) -> _size: # type: ignore[override] 117 with DisableTorchFunctionSubclass(): 118 return super().shape 119 120 @property 121 def ndim(self) -> int: # type: ignore[override] 122 with DisableTorchFunctionSubclass(): 123 return super().ndim 124 125 @property 126 def device(self, *args: Any, **kwargs: Any) -> _device: # type: ignore[override] 127 with DisableTorchFunctionSubclass(): 128 return super().device 129 130 @property 131 def dtype(self) -> _dtype: # type: ignore[override] 132 with DisableTorchFunctionSubclass(): 133 return super().dtype 134 135 def horizontal_flip(self) -> Datapoint: 136 return self 137 138 def vertical_flip(self) -> Datapoint: 139 return self 140 141 # TODO: We have to ignore override mypy error as there is torch.Tensor built-in deprecated op: Tensor.resize 142 # https://github.com/pytorch/pytorch/blob/e8727994eb7cdb2ab642749d6549bc497563aa06/torch/_tensor.py#L588-L593 143 def resize( # type: ignore[override] 144 self, 145 size: List[int], 146 interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR, 147 max_size: Optional[int] = None, 148 antialias: Optional[Union[str, bool]] = "warn", 149 ) -> Datapoint: 150 return self 151 152 def crop(self, top: int, left: int, height: int, width: int) -> Datapoint: 153 return self 154 155 def center_crop(self, output_size: List[int]) -> Datapoint: 156 return self 157 158 def resized_crop( 159 self, 160 top: int, 161 left: int, 162 height: int, 163 width: int, 164 size: List[int], 165 interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR, 166 antialias: Optional[Union[str, bool]] = "warn", 167 ) -> Datapoint: 168 return self 169 170 def pad( 171 self, 172 padding: List[int], 173 fill: Optional[Union[int, float, List[float]]] = None, 174 padding_mode: str = "constant", 175 
) -> Datapoint: 176 return self 177 178 def rotate( 179 self, 180 angle: float, 181 interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST, 182 expand: bool = False, 183 center: Optional[List[float]] = None, 184 fill: _FillTypeJIT = None, 185 ) -> Datapoint: 186 return self 187 188 def affine( 189 self, 190 angle: Union[int, float], 191 translate: List[float], 192 scale: float, 193 shear: List[float], 194 interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST, 195 fill: _FillTypeJIT = None, 196 center: Optional[List[float]] = None, 197 ) -> Datapoint: 198 return self 199 200 def perspective( 201 self, 202 startpoints: Optional[List[List[int]]], 203 endpoints: Optional[List[List[int]]], 204 interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR, 205 fill: _FillTypeJIT = None, 206 coefficients: Optional[List[float]] = None, 207 ) -> Datapoint: 208 return self 209 210 def elastic( 211 self, 212 displacement: torch.Tensor, 213 interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR, 214 fill: _FillTypeJIT = None, 215 ) -> Datapoint: 216 return self 217 218 def rgb_to_grayscale(self, num_output_channels: int = 1) -> Datapoint: 219 return self 220 221 def adjust_brightness(self, brightness_factor: float) -> Datapoint: 222 return self 223 224 def adjust_saturation(self, saturation_factor: float) -> Datapoint: 225 return self 226 227 def adjust_contrast(self, contrast_factor: float) -> Datapoint: 228 return self 229 230 def adjust_sharpness(self, sharpness_factor: float) -> Datapoint: 231 return self 232 233 def adjust_hue(self, hue_factor: float) -> Datapoint: 234 return self 235 236 def adjust_gamma(self, gamma: float, gain: float = 1) -> Datapoint: 237 return self 238 239 def posterize(self, bits: int) -> Datapoint: 240 return self 241 242 def solarize(self, threshold: float) -> Datapoint: 243 return self 244 245 def autocontrast(self) -> Datapoint: 246 return self 247 248 def equalize(self) -> Datapoint: 249 return self 250 251 def invert(self) -> Datapoint: 252 return self 253 254 def gaussian_blur(self, kernel_size: List[int], sigma: Optional[List[float]] = None) -> Datapoint: 255 return self 256 257 258 _InputType = Union[torch.Tensor, PIL.Image.Image, Datapoint] 259 _InputTypeJIT = torch.Tensor 260 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/torchvision/datapoints/_datapoint.py b/torchvision/datapoints/_datapoint.py --- a/torchvision/datapoints/_datapoint.py +++ b/torchvision/datapoints/_datapoint.py @@ -1,7 +1,7 @@ from __future__ import annotations from types import ModuleType -from typing import Any, Callable, List, Mapping, Optional, Sequence, Tuple, Type, TypeVar, Union +from typing import Any, Callable, Dict, List, Mapping, Optional, Sequence, Tuple, Type, TypeVar, Union import PIL.Image import torch @@ -36,6 +36,7 @@ _NO_WRAPPING_EXCEPTIONS = { torch.Tensor.clone: lambda cls, input, output: cls.wrap_like(input, output), torch.Tensor.to: lambda cls, input, output: cls.wrap_like(input, output), + torch.Tensor.detach: lambda cls, input, output: cls.wrap_like(input, output), # We don't need to wrap the output of `Tensor.requires_grad_`, since it is an inplace operation and thus # retains the type automatically torch.Tensor.requires_grad_: lambda cls, input, output: output, @@ -132,6 +133,15 @@ with DisableTorchFunctionSubclass(): return super().dtype + def __deepcopy__(self: D, memo: Dict[int, Any]) -> D: + # We need to detach first, since a plain `Tensor.clone` will be part of the computation graph, which does + # *not* happen for `deepcopy(Tensor)`. A side-effect from detaching is that the `Tensor.requires_grad` + # attribute is cleared, so we need to refill it before we return. + # Note: We don't explicitly handle deep-copying of the metadata here. The only metadata we currently have is + # `BoundingBox.format` and `BoundingBox.spatial_size`, which are immutable and thus implicitly deep-copied by + # `BoundingBox.clone()`. + return self.detach().clone().requires_grad_(self.requires_grad) # type: ignore[return-value] + def horizontal_flip(self) -> Datapoint: return self
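The golden diff above resolves the deepcopy failure by routing `__deepcopy__` through `detach().clone()` and then restoring `requires_grad`. The snippet below is a minimal, standalone sketch of that technique on a toy `torch.Tensor` subclass; the class name `MyDatapoint` is an assumption for illustration, not the torchvision implementation.

```python
# Minimal sketch (assumed toy class, not the torchvision code): deepcopy of a
# Tensor subclass via detach() + clone(), mirroring the golden diff above.
from copy import deepcopy
from typing import Any, Dict

import torch


class MyDatapoint(torch.Tensor):
    def __deepcopy__(self, memo: Dict[int, Any]) -> "MyDatapoint":
        # detach() drops the autograd history (clearing requires_grad as a side
        # effect), clone() copies the storage, requires_grad_() restores the flag.
        return self.detach().clone().requires_grad_(self.requires_grad)


img = torch.rand(3, 16, 16).as_subclass(MyDatapoint).requires_grad_(True)
copy_ = deepcopy(img)
assert type(copy_) is MyDatapoint
assert copy_.requires_grad
assert copy_.data_ptr() != img.data_ptr()  # storage was actually copied
```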
{"golden_diff": "diff --git a/torchvision/datapoints/_datapoint.py b/torchvision/datapoints/_datapoint.py\n--- a/torchvision/datapoints/_datapoint.py\n+++ b/torchvision/datapoints/_datapoint.py\n@@ -1,7 +1,7 @@\n from __future__ import annotations\n \n from types import ModuleType\n-from typing import Any, Callable, List, Mapping, Optional, Sequence, Tuple, Type, TypeVar, Union\n+from typing import Any, Callable, Dict, List, Mapping, Optional, Sequence, Tuple, Type, TypeVar, Union\n \n import PIL.Image\n import torch\n@@ -36,6 +36,7 @@\n _NO_WRAPPING_EXCEPTIONS = {\n torch.Tensor.clone: lambda cls, input, output: cls.wrap_like(input, output),\n torch.Tensor.to: lambda cls, input, output: cls.wrap_like(input, output),\n+ torch.Tensor.detach: lambda cls, input, output: cls.wrap_like(input, output),\n # We don't need to wrap the output of `Tensor.requires_grad_`, since it is an inplace operation and thus\n # retains the type automatically\n torch.Tensor.requires_grad_: lambda cls, input, output: output,\n@@ -132,6 +133,15 @@\n with DisableTorchFunctionSubclass():\n return super().dtype\n \n+ def __deepcopy__(self: D, memo: Dict[int, Any]) -> D:\n+ # We need to detach first, since a plain `Tensor.clone` will be part of the computation graph, which does\n+ # *not* happen for `deepcopy(Tensor)`. A side-effect from detaching is that the `Tensor.requires_grad`\n+ # attribute is cleared, so we need to refill it before we return.\n+ # Note: We don't explicitly handle deep-copying of the metadata here. The only metadata we currently have is\n+ # `BoundingBox.format` and `BoundingBox.spatial_size`, which are immutable and thus implicitly deep-copied by\n+ # `BoundingBox.clone()`.\n+ return self.detach().clone().requires_grad_(self.requires_grad) # type: ignore[return-value]\n+\n def horizontal_flip(self) -> Datapoint:\n return self\n", "issue": "datapoints cannot be deepcopied\n```py\r\nfrom torchvision.prototype import features\r\nfrom copy import copy, deepcopy\r\nimport torch\r\n\r\nimage = features.Image(torch.rand(3, 16, 16))\r\nimage_copy = copy(image) # works\r\nimage_deepcopy = deepcopy(image) # fails\r\n```\r\n\r\n```\r\nRuntimeError: The default implementation of __deepcopy__() for non-wrapper subclasses only works for subclass types \r\nthat implement new_empty() and for which that function returns another instance of the same subclass. \r\nYou should either properly implement new_empty() for your subclass or override __deepcopy__() if it is intended \r\nbehavior for new_empty() to return an instance of a different type.\r\n```\r\n\r\nThe error message is giving us two options:\r\n\r\n1. Override [`new_empty`](https://pytorch.org/docs/stable/generated/torch.Tensor.new_empty.html): according to the error message, this is preferred since we want to return an instance of the same type. Some of our features have internal guards for the shape like \r\n\r\n https://github.com/pytorch/vision/blob/b4b246a52ebbe29bff9099c6fbabff0b82d7edd3/torchvision/prototype/features/_image.py#L49-L50\r\n\r\n and the plan is to expand this to all features, since the transformations rely on a certain shape. However, you can call `new_empty(size)` with an arbitrary size and the default implementation of `__deepcopy__` does so with an [empty list `[]`](https://github.com/pytorch/pytorch/blob/432c508e71111f9d5382322e0e6b1bc1c66bf0ec/torch/_tensor.py#L181). 
Later on, [the correct shape of the copied tensor is set](https://github.com/pytorch/pytorch/blob/432c508e71111f9d5382322e0e6b1bc1c66bf0ec/torch/_tensor.py#L191-L193), but we still need to be able to create the tensor with an incorrect shape. We could left-pad the `size` with zeros to get to the correct number of dimensions in case it is needed.\r\n\r\n2. Override `__deepcopy__`: Maybe I'm to naive here, but a deepcopy should just be a `.clone()` of the tensor as well as a deepcopy of attributes where it is needed, e.g. `Label.categories`. Not sure if there is a downside, but that seems easier than 1. while achieving the same goal.\n\ncc @bjuncek\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom types import ModuleType\nfrom typing import Any, Callable, List, Mapping, Optional, Sequence, Tuple, Type, TypeVar, Union\n\nimport PIL.Image\nimport torch\nfrom torch._C import DisableTorchFunctionSubclass\nfrom torch.types import _device, _dtype, _size\nfrom torchvision.transforms import InterpolationMode\n\n\nD = TypeVar(\"D\", bound=\"Datapoint\")\n_FillType = Union[int, float, Sequence[int], Sequence[float], None]\n_FillTypeJIT = Optional[List[float]]\n\n\nclass Datapoint(torch.Tensor):\n __F: Optional[ModuleType] = None\n\n @staticmethod\n def _to_tensor(\n data: Any,\n dtype: Optional[torch.dtype] = None,\n device: Optional[Union[torch.device, str, int]] = None,\n requires_grad: Optional[bool] = None,\n ) -> torch.Tensor:\n if requires_grad is None:\n requires_grad = data.requires_grad if isinstance(data, torch.Tensor) else False\n return torch.as_tensor(data, dtype=dtype, device=device).requires_grad_(requires_grad)\n\n @classmethod\n def wrap_like(cls: Type[D], other: D, tensor: torch.Tensor) -> D:\n raise NotImplementedError\n\n _NO_WRAPPING_EXCEPTIONS = {\n torch.Tensor.clone: lambda cls, input, output: cls.wrap_like(input, output),\n torch.Tensor.to: lambda cls, input, output: cls.wrap_like(input, output),\n # We don't need to wrap the output of `Tensor.requires_grad_`, since it is an inplace operation and thus\n # retains the type automatically\n torch.Tensor.requires_grad_: lambda cls, input, output: output,\n }\n\n @classmethod\n def __torch_function__(\n cls,\n func: Callable[..., torch.Tensor],\n types: Tuple[Type[torch.Tensor], ...],\n args: Sequence[Any] = (),\n kwargs: Optional[Mapping[str, Any]] = None,\n ) -> torch.Tensor:\n \"\"\"For general information about how the __torch_function__ protocol works,\n see https://pytorch.org/docs/stable/notes/extending.html#extending-torch\n\n TL;DR: Every time a PyTorch operator is called, it goes through the inputs and looks for the\n ``__torch_function__`` method. If one is found, it is invoked with the operator as ``func`` as well as the\n ``args`` and ``kwargs`` of the original call.\n\n The default behavior of :class:`~torch.Tensor`'s is to retain a custom tensor type. For the :class:`Datapoint`\n use case, this has two downsides:\n\n 1. Since some :class:`Datapoint`'s require metadata to be constructed, the default wrapping, i.e.\n ``return cls(func(*args, **kwargs))``, will fail for them.\n 2. For most operations, there is no way of knowing if the input type is still valid for the output.\n\n For these reasons, the automatic output wrapping is turned off for most operators. 
The only exceptions are\n listed in :attr:`Datapoint._NO_WRAPPING_EXCEPTIONS`\n \"\"\"\n # Since super().__torch_function__ has no hook to prevent the coercing of the output into the input type, we\n # need to reimplement the functionality.\n\n if not all(issubclass(cls, t) for t in types):\n return NotImplemented\n\n with DisableTorchFunctionSubclass():\n output = func(*args, **kwargs or dict())\n\n wrapper = cls._NO_WRAPPING_EXCEPTIONS.get(func)\n # Apart from `func` needing to be an exception, we also require the primary operand, i.e. `args[0]`, to be\n # an instance of the class that `__torch_function__` was invoked on. The __torch_function__ protocol will\n # invoke this method on *all* types involved in the computation by walking the MRO upwards. For example,\n # `torch.Tensor(...).to(datapoints.Image(...))` will invoke `datapoints.Image.__torch_function__` with\n # `args = (torch.Tensor(), datapoints.Image())` first. Without this guard, the original `torch.Tensor` would\n # be wrapped into a `datapoints.Image`.\n if wrapper and isinstance(args[0], cls):\n return wrapper(cls, args[0], output)\n\n # Inplace `func`'s, canonically identified with a trailing underscore in their name like `.add_(...)`,\n # will retain the input type. Thus, we need to unwrap here.\n if isinstance(output, cls):\n return output.as_subclass(torch.Tensor)\n\n return output\n\n def _make_repr(self, **kwargs: Any) -> str:\n # This is a poor man's implementation of the proposal in https://github.com/pytorch/pytorch/issues/76532.\n # If that ever gets implemented, remove this in favor of the solution on the `torch.Tensor` class.\n extra_repr = \", \".join(f\"{key}={value}\" for key, value in kwargs.items())\n return f\"{super().__repr__()[:-1]}, {extra_repr})\"\n\n @property\n def _F(self) -> ModuleType:\n # This implements a lazy import of the functional to get around the cyclic import. This import is deferred\n # until the first time we need reference to the functional module and it's shared across all instances of\n # the class. 
This approach avoids the DataLoader issue described at\n # https://github.com/pytorch/vision/pull/6476#discussion_r953588621\n if Datapoint.__F is None:\n from ..transforms.v2 import functional\n\n Datapoint.__F = functional\n return Datapoint.__F\n\n # Add properties for common attributes like shape, dtype, device, ndim etc\n # this way we return the result without passing into __torch_function__\n @property\n def shape(self) -> _size: # type: ignore[override]\n with DisableTorchFunctionSubclass():\n return super().shape\n\n @property\n def ndim(self) -> int: # type: ignore[override]\n with DisableTorchFunctionSubclass():\n return super().ndim\n\n @property\n def device(self, *args: Any, **kwargs: Any) -> _device: # type: ignore[override]\n with DisableTorchFunctionSubclass():\n return super().device\n\n @property\n def dtype(self) -> _dtype: # type: ignore[override]\n with DisableTorchFunctionSubclass():\n return super().dtype\n\n def horizontal_flip(self) -> Datapoint:\n return self\n\n def vertical_flip(self) -> Datapoint:\n return self\n\n # TODO: We have to ignore override mypy error as there is torch.Tensor built-in deprecated op: Tensor.resize\n # https://github.com/pytorch/pytorch/blob/e8727994eb7cdb2ab642749d6549bc497563aa06/torch/_tensor.py#L588-L593\n def resize( # type: ignore[override]\n self,\n size: List[int],\n interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,\n max_size: Optional[int] = None,\n antialias: Optional[Union[str, bool]] = \"warn\",\n ) -> Datapoint:\n return self\n\n def crop(self, top: int, left: int, height: int, width: int) -> Datapoint:\n return self\n\n def center_crop(self, output_size: List[int]) -> Datapoint:\n return self\n\n def resized_crop(\n self,\n top: int,\n left: int,\n height: int,\n width: int,\n size: List[int],\n interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,\n antialias: Optional[Union[str, bool]] = \"warn\",\n ) -> Datapoint:\n return self\n\n def pad(\n self,\n padding: List[int],\n fill: Optional[Union[int, float, List[float]]] = None,\n padding_mode: str = \"constant\",\n ) -> Datapoint:\n return self\n\n def rotate(\n self,\n angle: float,\n interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,\n expand: bool = False,\n center: Optional[List[float]] = None,\n fill: _FillTypeJIT = None,\n ) -> Datapoint:\n return self\n\n def affine(\n self,\n angle: Union[int, float],\n translate: List[float],\n scale: float,\n shear: List[float],\n interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,\n fill: _FillTypeJIT = None,\n center: Optional[List[float]] = None,\n ) -> Datapoint:\n return self\n\n def perspective(\n self,\n startpoints: Optional[List[List[int]]],\n endpoints: Optional[List[List[int]]],\n interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,\n fill: _FillTypeJIT = None,\n coefficients: Optional[List[float]] = None,\n ) -> Datapoint:\n return self\n\n def elastic(\n self,\n displacement: torch.Tensor,\n interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,\n fill: _FillTypeJIT = None,\n ) -> Datapoint:\n return self\n\n def rgb_to_grayscale(self, num_output_channels: int = 1) -> Datapoint:\n return self\n\n def adjust_brightness(self, brightness_factor: float) -> Datapoint:\n return self\n\n def adjust_saturation(self, saturation_factor: float) -> Datapoint:\n return self\n\n def adjust_contrast(self, contrast_factor: float) -> Datapoint:\n return self\n\n def adjust_sharpness(self, 
sharpness_factor: float) -> Datapoint:\n return self\n\n def adjust_hue(self, hue_factor: float) -> Datapoint:\n return self\n\n def adjust_gamma(self, gamma: float, gain: float = 1) -> Datapoint:\n return self\n\n def posterize(self, bits: int) -> Datapoint:\n return self\n\n def solarize(self, threshold: float) -> Datapoint:\n return self\n\n def autocontrast(self) -> Datapoint:\n return self\n\n def equalize(self) -> Datapoint:\n return self\n\n def invert(self) -> Datapoint:\n return self\n\n def gaussian_blur(self, kernel_size: List[int], sigma: Optional[List[float]] = None) -> Datapoint:\n return self\n\n\n_InputType = Union[torch.Tensor, PIL.Image.Image, Datapoint]\n_InputTypeJIT = torch.Tensor\n", "path": "torchvision/datapoints/_datapoint.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom types import ModuleType\nfrom typing import Any, Callable, Dict, List, Mapping, Optional, Sequence, Tuple, Type, TypeVar, Union\n\nimport PIL.Image\nimport torch\nfrom torch._C import DisableTorchFunctionSubclass\nfrom torch.types import _device, _dtype, _size\nfrom torchvision.transforms import InterpolationMode\n\n\nD = TypeVar(\"D\", bound=\"Datapoint\")\n_FillType = Union[int, float, Sequence[int], Sequence[float], None]\n_FillTypeJIT = Optional[List[float]]\n\n\nclass Datapoint(torch.Tensor):\n __F: Optional[ModuleType] = None\n\n @staticmethod\n def _to_tensor(\n data: Any,\n dtype: Optional[torch.dtype] = None,\n device: Optional[Union[torch.device, str, int]] = None,\n requires_grad: Optional[bool] = None,\n ) -> torch.Tensor:\n if requires_grad is None:\n requires_grad = data.requires_grad if isinstance(data, torch.Tensor) else False\n return torch.as_tensor(data, dtype=dtype, device=device).requires_grad_(requires_grad)\n\n @classmethod\n def wrap_like(cls: Type[D], other: D, tensor: torch.Tensor) -> D:\n raise NotImplementedError\n\n _NO_WRAPPING_EXCEPTIONS = {\n torch.Tensor.clone: lambda cls, input, output: cls.wrap_like(input, output),\n torch.Tensor.to: lambda cls, input, output: cls.wrap_like(input, output),\n torch.Tensor.detach: lambda cls, input, output: cls.wrap_like(input, output),\n # We don't need to wrap the output of `Tensor.requires_grad_`, since it is an inplace operation and thus\n # retains the type automatically\n torch.Tensor.requires_grad_: lambda cls, input, output: output,\n }\n\n @classmethod\n def __torch_function__(\n cls,\n func: Callable[..., torch.Tensor],\n types: Tuple[Type[torch.Tensor], ...],\n args: Sequence[Any] = (),\n kwargs: Optional[Mapping[str, Any]] = None,\n ) -> torch.Tensor:\n \"\"\"For general information about how the __torch_function__ protocol works,\n see https://pytorch.org/docs/stable/notes/extending.html#extending-torch\n\n TL;DR: Every time a PyTorch operator is called, it goes through the inputs and looks for the\n ``__torch_function__`` method. If one is found, it is invoked with the operator as ``func`` as well as the\n ``args`` and ``kwargs`` of the original call.\n\n The default behavior of :class:`~torch.Tensor`'s is to retain a custom tensor type. For the :class:`Datapoint`\n use case, this has two downsides:\n\n 1. Since some :class:`Datapoint`'s require metadata to be constructed, the default wrapping, i.e.\n ``return cls(func(*args, **kwargs))``, will fail for them.\n 2. For most operations, there is no way of knowing if the input type is still valid for the output.\n\n For these reasons, the automatic output wrapping is turned off for most operators. 
The only exceptions are\n listed in :attr:`Datapoint._NO_WRAPPING_EXCEPTIONS`\n \"\"\"\n # Since super().__torch_function__ has no hook to prevent the coercing of the output into the input type, we\n # need to reimplement the functionality.\n\n if not all(issubclass(cls, t) for t in types):\n return NotImplemented\n\n with DisableTorchFunctionSubclass():\n output = func(*args, **kwargs or dict())\n\n wrapper = cls._NO_WRAPPING_EXCEPTIONS.get(func)\n # Apart from `func` needing to be an exception, we also require the primary operand, i.e. `args[0]`, to be\n # an instance of the class that `__torch_function__` was invoked on. The __torch_function__ protocol will\n # invoke this method on *all* types involved in the computation by walking the MRO upwards. For example,\n # `torch.Tensor(...).to(datapoints.Image(...))` will invoke `datapoints.Image.__torch_function__` with\n # `args = (torch.Tensor(), datapoints.Image())` first. Without this guard, the original `torch.Tensor` would\n # be wrapped into a `datapoints.Image`.\n if wrapper and isinstance(args[0], cls):\n return wrapper(cls, args[0], output)\n\n # Inplace `func`'s, canonically identified with a trailing underscore in their name like `.add_(...)`,\n # will retain the input type. Thus, we need to unwrap here.\n if isinstance(output, cls):\n return output.as_subclass(torch.Tensor)\n\n return output\n\n def _make_repr(self, **kwargs: Any) -> str:\n # This is a poor man's implementation of the proposal in https://github.com/pytorch/pytorch/issues/76532.\n # If that ever gets implemented, remove this in favor of the solution on the `torch.Tensor` class.\n extra_repr = \", \".join(f\"{key}={value}\" for key, value in kwargs.items())\n return f\"{super().__repr__()[:-1]}, {extra_repr})\"\n\n @property\n def _F(self) -> ModuleType:\n # This implements a lazy import of the functional to get around the cyclic import. This import is deferred\n # until the first time we need reference to the functional module and it's shared across all instances of\n # the class. This approach avoids the DataLoader issue described at\n # https://github.com/pytorch/vision/pull/6476#discussion_r953588621\n if Datapoint.__F is None:\n from ..transforms.v2 import functional\n\n Datapoint.__F = functional\n return Datapoint.__F\n\n # Add properties for common attributes like shape, dtype, device, ndim etc\n # this way we return the result without passing into __torch_function__\n @property\n def shape(self) -> _size: # type: ignore[override]\n with DisableTorchFunctionSubclass():\n return super().shape\n\n @property\n def ndim(self) -> int: # type: ignore[override]\n with DisableTorchFunctionSubclass():\n return super().ndim\n\n @property\n def device(self, *args: Any, **kwargs: Any) -> _device: # type: ignore[override]\n with DisableTorchFunctionSubclass():\n return super().device\n\n @property\n def dtype(self) -> _dtype: # type: ignore[override]\n with DisableTorchFunctionSubclass():\n return super().dtype\n\n def __deepcopy__(self: D, memo: Dict[int, Any]) -> D:\n # We need to detach first, since a plain `Tensor.clone` will be part of the computation graph, which does\n # *not* happen for `deepcopy(Tensor)`. A side-effect from detaching is that the `Tensor.requires_grad`\n # attribute is cleared, so we need to refill it before we return.\n # Note: We don't explicitly handle deep-copying of the metadata here. 
The only metadata we currently have is\n # `BoundingBox.format` and `BoundingBox.spatial_size`, which are immutable and thus implicitly deep-copied by\n # `BoundingBox.clone()`.\n return self.detach().clone().requires_grad_(self.requires_grad) # type: ignore[return-value]\n\n def horizontal_flip(self) -> Datapoint:\n return self\n\n def vertical_flip(self) -> Datapoint:\n return self\n\n # TODO: We have to ignore override mypy error as there is torch.Tensor built-in deprecated op: Tensor.resize\n # https://github.com/pytorch/pytorch/blob/e8727994eb7cdb2ab642749d6549bc497563aa06/torch/_tensor.py#L588-L593\n def resize( # type: ignore[override]\n self,\n size: List[int],\n interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,\n max_size: Optional[int] = None,\n antialias: Optional[Union[str, bool]] = \"warn\",\n ) -> Datapoint:\n return self\n\n def crop(self, top: int, left: int, height: int, width: int) -> Datapoint:\n return self\n\n def center_crop(self, output_size: List[int]) -> Datapoint:\n return self\n\n def resized_crop(\n self,\n top: int,\n left: int,\n height: int,\n width: int,\n size: List[int],\n interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,\n antialias: Optional[Union[str, bool]] = \"warn\",\n ) -> Datapoint:\n return self\n\n def pad(\n self,\n padding: List[int],\n fill: Optional[Union[int, float, List[float]]] = None,\n padding_mode: str = \"constant\",\n ) -> Datapoint:\n return self\n\n def rotate(\n self,\n angle: float,\n interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,\n expand: bool = False,\n center: Optional[List[float]] = None,\n fill: _FillTypeJIT = None,\n ) -> Datapoint:\n return self\n\n def affine(\n self,\n angle: Union[int, float],\n translate: List[float],\n scale: float,\n shear: List[float],\n interpolation: Union[InterpolationMode, int] = InterpolationMode.NEAREST,\n fill: _FillTypeJIT = None,\n center: Optional[List[float]] = None,\n ) -> Datapoint:\n return self\n\n def perspective(\n self,\n startpoints: Optional[List[List[int]]],\n endpoints: Optional[List[List[int]]],\n interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,\n fill: _FillTypeJIT = None,\n coefficients: Optional[List[float]] = None,\n ) -> Datapoint:\n return self\n\n def elastic(\n self,\n displacement: torch.Tensor,\n interpolation: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,\n fill: _FillTypeJIT = None,\n ) -> Datapoint:\n return self\n\n def rgb_to_grayscale(self, num_output_channels: int = 1) -> Datapoint:\n return self\n\n def adjust_brightness(self, brightness_factor: float) -> Datapoint:\n return self\n\n def adjust_saturation(self, saturation_factor: float) -> Datapoint:\n return self\n\n def adjust_contrast(self, contrast_factor: float) -> Datapoint:\n return self\n\n def adjust_sharpness(self, sharpness_factor: float) -> Datapoint:\n return self\n\n def adjust_hue(self, hue_factor: float) -> Datapoint:\n return self\n\n def adjust_gamma(self, gamma: float, gain: float = 1) -> Datapoint:\n return self\n\n def posterize(self, bits: int) -> Datapoint:\n return self\n\n def solarize(self, threshold: float) -> Datapoint:\n return self\n\n def autocontrast(self) -> Datapoint:\n return self\n\n def equalize(self) -> Datapoint:\n return self\n\n def invert(self) -> Datapoint:\n return self\n\n def gaussian_blur(self, kernel_size: List[int], sigma: Optional[List[float]] = None) -> Datapoint:\n return self\n\n\n_InputType = Union[torch.Tensor, PIL.Image.Image, 
Datapoint]\n_InputTypeJIT = torch.Tensor\n", "path": "torchvision/datapoints/_datapoint.py"}]}
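The other half of the golden diff, registering `torch.Tensor.detach` in `_NO_WRAPPING_EXCEPTIONS`, matters because `Datapoint.__torch_function__` unwraps most operator outputs back to plain tensors, which would strip the subclass type before `clone()` runs inside `__deepcopy__`. Below is a simplified sketch of that whitelist pattern; the class and attribute names are made up rather than the torchvision ones.

```python
# Sketch of the output-wrapping whitelist used by Datapoint.__torch_function__:
# every op result is downgraded to torch.Tensor unless the op is whitelisted.
import torch
from torch._C import DisableTorchFunctionSubclass


class Wrapped(torch.Tensor):
    _KEEP_TYPE = {torch.Tensor.clone, torch.Tensor.to, torch.Tensor.detach}

    @classmethod
    def __torch_function__(cls, func, types, args=(), kwargs=None):
        with DisableTorchFunctionSubclass():
            output = func(*args, **(kwargs or {}))
        if func in cls._KEEP_TYPE and isinstance(args[0], cls):
            return output.as_subclass(cls)            # keep the subclass type
        if isinstance(output, cls):
            return output.as_subclass(torch.Tensor)   # unwrap everything else
        return output


t = torch.rand(2, 2).as_subclass(Wrapped)
assert type(t.detach()) is Wrapped   # whitelisted, so deepcopy can keep the type
assert type(t + 1) is torch.Tensor   # ordinary ops degrade to plain tensors
```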
3946
483
gh_patches_debug_15438
rasdani/github-patches
git_diff
mozilla__bugbug-124
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Use bug reporter as a feature --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `bugbug/models/tracking.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # This Source Code Form is subject to the terms of the Mozilla Public 3 # License, v. 2.0. If a copy of the MPL was not distributed with this file, 4 # You can obtain one at http://mozilla.org/MPL/2.0/. 5 6 import xgboost 7 from sklearn.compose import ColumnTransformer 8 from sklearn.feature_extraction import DictVectorizer 9 from sklearn.pipeline import Pipeline 10 11 from bugbug import bug_features 12 from bugbug import bugzilla 13 from bugbug.model import Model 14 15 16 class TrackingModel(Model): 17 def __init__(self, lemmatization=False): 18 Model.__init__(self, lemmatization) 19 20 feature_extractors = [ 21 bug_features.has_str(), 22 bug_features.has_regression_range(), 23 bug_features.severity(), 24 bug_features.keywords(), 25 bug_features.is_coverity_issue(), 26 bug_features.has_crash_signature(), 27 bug_features.has_url(), 28 bug_features.has_w3c_url(), 29 bug_features.has_github_url(), 30 bug_features.whiteboard(), 31 bug_features.patches(), 32 bug_features.landings(), 33 bug_features.title(), 34 bug_features.priority(), 35 ] 36 37 cleanup_functions = [ 38 bug_features.cleanup_fileref, 39 bug_features.cleanup_url, 40 bug_features.cleanup_synonyms, 41 ] 42 43 self.extraction_pipeline = Pipeline([ 44 ('bug_extractor', bug_features.BugExtractor(feature_extractors, cleanup_functions, rollback=True, rollback_when=self.rollback)), 45 ('union', ColumnTransformer([ 46 ('data', DictVectorizer(), 'data'), 47 48 ('title', self.text_vectorizer(stop_words='english'), 'title'), 49 50 ('comments', self.text_vectorizer(stop_words='english'), 'comments'), 51 ])), 52 ]) 53 54 self.clf = xgboost.XGBClassifier(n_jobs=16) 55 self.clf.set_params(predictor='cpu_predictor') 56 57 def rollback(self, change): 58 return change['field_name'].startswith('cf_tracking_firefox') 59 60 def get_labels(self): 61 classes = {} 62 63 for bug_data in bugzilla.get_bugs(): 64 bug_id = int(bug_data['id']) 65 66 for entry in bug_data['history']: 67 for change in entry['changes']: 68 if change['field_name'].startswith('cf_tracking_firefox'): 69 if change['added'] in ['blocking', '+']: 70 classes[bug_id] = 1 71 elif change['added'] == '-': 72 classes[bug_id] = 0 73 74 if bug_id not in classes: 75 classes[bug_id] = 0 76 77 return classes 78 79 def get_feature_names(self): 80 return self.extraction_pipeline.named_steps['union'].get_feature_names() 81 ``` Path: `bugbug/bug_features.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # This Source Code Form is subject to the terms of the Mozilla Public 3 # License, v. 2.0. If a copy of the MPL was not distributed with this file, 4 # You can obtain one at http://mozilla.org/MPL/2.0/. 
5 6 import re 7 from datetime import datetime 8 from datetime import timezone 9 10 import pandas as pd 11 from libmozdata import versions 12 from sklearn.base import BaseEstimator 13 from sklearn.base import TransformerMixin 14 15 from bugbug import bug_snapshot 16 from bugbug import repository 17 18 19 def field(bug, field): 20 if field in bug and bug[field] != '---': 21 return bug[field] 22 23 return None 24 25 26 class has_str(object): 27 def __call__(self, bug): 28 return field(bug, 'cf_has_str') 29 30 31 class has_regression_range(object): 32 def __call__(self, bug): 33 return field(bug, 'cf_has_regression_range') 34 35 36 class has_crash_signature(object): 37 def __call__(self, bug): 38 return 'cf_crash_signature' in bug and bug['cf_crash_signature'] != '' 39 40 41 class keywords(object): 42 def __init__(self, to_ignore=set()): 43 self.to_ignore = to_ignore 44 45 def __call__(self, bug): 46 keywords = [] 47 subkeywords = [] 48 for keyword in bug['keywords']: 49 if keyword in self.to_ignore: 50 continue 51 52 keywords.append(keyword) 53 54 if keyword.startswith('sec-'): 55 subkeywords.append('sec-') 56 elif keyword.startswith('csectype-'): 57 subkeywords.append('csectype-') 58 return keywords + subkeywords 59 60 61 class severity(object): 62 def __call__(self, bug): 63 return field(bug, 'severity') 64 65 66 class is_coverity_issue(object): 67 def __call__(self, bug): 68 return re.search('[CID ?[0-9]+]', bug['summary']) is not None or re.search('[CID ?[0-9]+]', bug['whiteboard']) is not None 69 70 71 class has_url(object): 72 def __call__(self, bug): 73 return bug['url'] != '' 74 75 76 class has_w3c_url(object): 77 def __call__(self, bug): 78 return 'w3c' in bug['url'] 79 80 81 class has_github_url(object): 82 def __call__(self, bug): 83 return 'github' in bug['url'] 84 85 86 class whiteboard(object): 87 def __call__(self, bug): 88 89 # Split by '[' 90 paren_splits = bug['whiteboard'].lower().split('[') 91 92 # Split splits by space if they weren't in [ and ]. 93 splits = [] 94 for paren_split in paren_splits: 95 if ']' in paren_split: 96 paren_split = paren_split.split(']') 97 splits += paren_split 98 else: 99 splits += paren_split.split(' ') 100 101 # Remove empty splits and strip 102 splits = [split.strip() for split in splits if split.strip() != ''] 103 104 # For splits which contain ':', return both the whole string and the string before ':'. 
105 splits += [split.split(':', 1)[0] for split in splits if ':' in split] 106 107 return splits 108 109 110 class patches(object): 111 def __call__(self, bug): 112 return sum(1 for a in bug['attachments'] if a['is_patch'] or a['content_type'] in ['text/x-review-board-request', 'text/x-phabricator-request']) 113 114 115 class landings(object): 116 def __call__(self, bug): 117 return sum(1 for c in bug['comments'] if '://hg.mozilla.org/' in c['text']) 118 119 120 class title(object): 121 def __call__(self, bug): 122 ret = [] 123 124 keywords = [ 125 'fail', 126 ] 127 for keyword in keywords: 128 if keyword in bug['summary'].lower(): 129 ret.append(keyword) 130 131 return ret 132 133 134 class product(object): 135 def __call__(self, bug): 136 return bug['product'] 137 138 139 class component(object): 140 def __call__(self, bug): 141 return bug['component'] 142 143 144 class is_mozillian(object): 145 def __call__(self, bug): 146 return any(bug['creator_detail']['email'].endswith(domain) for domain in ['@mozilla.com', '@mozilla.org']) 147 148 149 class delta_request_merge(object): 150 def __call__(self, bug): 151 for history in bug['history']: 152 for change in history['changes']: 153 if change['added'].startswith('approval-mozilla'): 154 uplift_request_datetime = datetime.strptime(history['when'], '%Y-%m-%dT%H:%M:%SZ').replace(tzinfo=timezone.utc) 155 timedelta = versions.getCloserRelease(uplift_request_datetime)[1] - uplift_request_datetime 156 return timedelta.days + timedelta.seconds / (24 * 60 * 60) 157 158 return None 159 160 161 class commit_added(object): 162 def __call__(self, bug): 163 return sum(commit['added'] for commit in bug['commits']) 164 165 166 class commit_deleted(object): 167 def __call__(self, bug): 168 return sum(commit['deleted'] for commit in bug['commits']) 169 170 171 class commit_types(object): 172 def __call__(self, bug): 173 return sum((commit['types'] for commit in bug['commits']), []) 174 175 176 class blocked_bugs_number(object): 177 def __call__(self, bug): 178 return len(bug['blocks']) 179 180 181 class priority(object): 182 def __call__(self, bug): 183 return bug['priority'] 184 185 186 class bug_has_cve_in_alias(object): 187 def __call__(self, bug): 188 return bug['alias'] is not None and 'CVE' in bug['alias'] 189 190 191 class commit_files_modified_num(object): 192 def __call__(self, bug): 193 return sum(commit['files_modified_num'] for commit in bug['commits']) 194 195 196 class comment_count(object): 197 def __call__(self, bug): 198 return field(bug, 'comment_count') 199 200 201 class comment_length(object): 202 def __call__(self, bug): 203 return sum(len(x['text']) for x in bug['comments']) 204 205 206 def cleanup_url(text): 207 text = re.sub(r'http[s]?://(hg.mozilla|searchfox|dxr.mozilla)\S+', '__CODE_REFERENCE_URL__', text) 208 return re.sub(r'http\S+', '__URL__', text) 209 210 211 def cleanup_fileref(text): 212 return re.sub(r'\w+\.py\b|\w+\.json\b|\w+\.js\b|\w+\.jsm\b|\w+\.html\b|\w+\.css\b|\w+\.c\b|\w+\.cpp\b|\w+\.h\b', '__FILE_REFERENCE__', text) 213 214 215 def cleanup_responses(text): 216 return re.sub('>[^\n]+', ' ', text) 217 218 219 def cleanup_hex(text): 220 return re.sub(r'\b0[xX][0-9a-fA-F]+\b', '__HEX_NUMBER__', text) 221 222 223 def cleanup_dll(text): 224 return re.sub(r'\w+(\.dll|\.so|\.dylib)\b', '__DLL_NAME__', text) 225 226 227 def cleanup_synonyms(text): 228 synonyms = [ 229 ('safemode', ['safemode', 'safe mode']), 230 ('str', ['str', 'steps to reproduce', 'repro steps']), 231 ('uaf', ['uaf', 'use after free', 'use-after-free']), 
232 ('asan', ['asan', 'address sanitizer', 'addresssanitizer']), 233 ('permafailure', ['permafailure', 'permafailing', 'permafail', 'perma failure', 'perma failing', 'perma fail', 'perma-failure', 'perma-failing', 'perma-fail']), 234 ('spec', ['spec', 'specification']), 235 ] 236 237 for synonym_group, synonym_list in synonyms: 238 text = re.sub('|'.join(fr'\b{synonym}\b' for synonym in synonym_list), synonym_group, text, flags=re.IGNORECASE) 239 240 return text 241 242 243 def cleanup_crash(text): 244 return re.sub(r'bp-[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{6}[0-9]{6}\b', '__CRASH_STATS_LINK__', text) 245 246 247 class BugExtractor(BaseEstimator, TransformerMixin): 248 def __init__(self, feature_extractors, cleanup_functions, rollback=False, rollback_when=None, commit_data=False): 249 self.feature_extractors = feature_extractors 250 self.cleanup_functions = cleanup_functions 251 self.rollback = rollback 252 self.rollback_when = rollback_when 253 self.commit_map = repository.get_commit_map() if commit_data else None 254 255 def fit(self, x, y=None): 256 return self 257 258 def transform(self, bugs): 259 results = [] 260 261 for bug in bugs: 262 bug_id = bug['id'] 263 264 if self.rollback: 265 bug = bug_snapshot.rollback(bug, self.rollback_when) 266 267 data = {} 268 269 if self.commit_map is not None: 270 if bug_id in self.commit_map: 271 bug['commits'] = self.commit_map[bug_id] 272 else: 273 bug['commits'] = [] 274 275 for f in self.feature_extractors: 276 res = f(bug) 277 278 if res is None: 279 continue 280 281 if isinstance(res, list): 282 for item in res: 283 data[f.__class__.__name__ + '-' + item] = 'True' 284 continue 285 286 if isinstance(res, bool): 287 res = str(res) 288 289 data[f.__class__.__name__] = res 290 291 # TODO: Try simply using all possible fields instead of extracting features manually. 292 293 for cleanup_function in self.cleanup_functions: 294 bug['summary'] = cleanup_function(bug['summary']) 295 for c in bug['comments']: 296 c['text'] = cleanup_function(c['text']) 297 298 result = { 299 'data': data, 300 'title': bug['summary'], 301 'first_comment': bug['comments'][0]['text'], 302 'comments': ' '.join([c['text'] for c in bug['comments']]), 303 } 304 305 results.append(result) 306 307 return pd.DataFrame(results) 308 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/bugbug/bug_features.py b/bugbug/bug_features.py --- a/bugbug/bug_features.py +++ b/bugbug/bug_features.py @@ -146,6 +146,11 @@ return any(bug['creator_detail']['email'].endswith(domain) for domain in ['@mozilla.com', '@mozilla.org']) +class bug_reporter(object): + def __call__(self, bug): + return bug['creator_detail']['email'] + + class delta_request_merge(object): def __call__(self, bug): for history in bug['history']: diff --git a/bugbug/models/tracking.py b/bugbug/models/tracking.py --- a/bugbug/models/tracking.py +++ b/bugbug/models/tracking.py @@ -32,6 +32,7 @@ bug_features.landings(), bug_features.title(), bug_features.priority(), + bug_features.bug_reporter() ] cleanup_functions = [
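The bugbug fix follows the extractor-callable convention used throughout `bug_features.py`: each feature is a tiny class whose `__call__` receives the raw bug dict and whose class name becomes the key fed to `DictVectorizer`. The snippet below is a self-contained sketch of that pattern; the bug dicts are fabricated for illustration and only the field names mirror the real Bugzilla payloads.

```python
# Standalone sketch of the feature-extractor pattern the diff extends.
# The bug dicts are made up for illustration.
from sklearn.feature_extraction import DictVectorizer


class bug_reporter(object):
    def __call__(self, bug):
        return bug['creator_detail']['email']


class priority(object):
    def __call__(self, bug):
        return bug['priority']


def extract(bug, extractors):
    # Mirrors the dict-building loop in BugExtractor.transform.
    data = {}
    for f in extractors:
        res = f(bug)
        if res is None:
            continue
        if isinstance(res, bool):
            res = str(res)
        data[f.__class__.__name__] = res
    return data


bugs = [
    {'creator_detail': {'email': 'reporter@example.com'}, 'priority': 'P1'},
    {'creator_detail': {'email': 'dev@mozilla.com'}, 'priority': 'P3'},
]
extractors = [bug_reporter(), priority()]
X = DictVectorizer().fit_transform([extract(b, extractors) for b in bugs])
print(X.shape)  # one one-hot column per distinct (feature, string value) pair
```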
{"golden_diff": "diff --git a/bugbug/bug_features.py b/bugbug/bug_features.py\n--- a/bugbug/bug_features.py\n+++ b/bugbug/bug_features.py\n@@ -146,6 +146,11 @@\n return any(bug['creator_detail']['email'].endswith(domain) for domain in ['@mozilla.com', '@mozilla.org'])\n \n \n+class bug_reporter(object):\n+ def __call__(self, bug):\n+ return bug['creator_detail']['email']\n+\n+\n class delta_request_merge(object):\n def __call__(self, bug):\n for history in bug['history']:\ndiff --git a/bugbug/models/tracking.py b/bugbug/models/tracking.py\n--- a/bugbug/models/tracking.py\n+++ b/bugbug/models/tracking.py\n@@ -32,6 +32,7 @@\n bug_features.landings(),\n bug_features.title(),\n bug_features.priority(),\n+ bug_features.bug_reporter()\n ]\n \n cleanup_functions = [\n", "issue": "Use bug reporter as a feature\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport xgboost\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.pipeline import Pipeline\n\nfrom bugbug import bug_features\nfrom bugbug import bugzilla\nfrom bugbug.model import Model\n\n\nclass TrackingModel(Model):\n def __init__(self, lemmatization=False):\n Model.__init__(self, lemmatization)\n\n feature_extractors = [\n bug_features.has_str(),\n bug_features.has_regression_range(),\n bug_features.severity(),\n bug_features.keywords(),\n bug_features.is_coverity_issue(),\n bug_features.has_crash_signature(),\n bug_features.has_url(),\n bug_features.has_w3c_url(),\n bug_features.has_github_url(),\n bug_features.whiteboard(),\n bug_features.patches(),\n bug_features.landings(),\n bug_features.title(),\n bug_features.priority(),\n ]\n\n cleanup_functions = [\n bug_features.cleanup_fileref,\n bug_features.cleanup_url,\n bug_features.cleanup_synonyms,\n ]\n\n self.extraction_pipeline = Pipeline([\n ('bug_extractor', bug_features.BugExtractor(feature_extractors, cleanup_functions, rollback=True, rollback_when=self.rollback)),\n ('union', ColumnTransformer([\n ('data', DictVectorizer(), 'data'),\n\n ('title', self.text_vectorizer(stop_words='english'), 'title'),\n\n ('comments', self.text_vectorizer(stop_words='english'), 'comments'),\n ])),\n ])\n\n self.clf = xgboost.XGBClassifier(n_jobs=16)\n self.clf.set_params(predictor='cpu_predictor')\n\n def rollback(self, change):\n return change['field_name'].startswith('cf_tracking_firefox')\n\n def get_labels(self):\n classes = {}\n\n for bug_data in bugzilla.get_bugs():\n bug_id = int(bug_data['id'])\n\n for entry in bug_data['history']:\n for change in entry['changes']:\n if change['field_name'].startswith('cf_tracking_firefox'):\n if change['added'] in ['blocking', '+']:\n classes[bug_id] = 1\n elif change['added'] == '-':\n classes[bug_id] = 0\n\n if bug_id not in classes:\n classes[bug_id] = 0\n\n return classes\n\n def get_feature_names(self):\n return self.extraction_pipeline.named_steps['union'].get_feature_names()\n", "path": "bugbug/models/tracking.py"}, {"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. 
If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport re\nfrom datetime import datetime\nfrom datetime import timezone\n\nimport pandas as pd\nfrom libmozdata import versions\nfrom sklearn.base import BaseEstimator\nfrom sklearn.base import TransformerMixin\n\nfrom bugbug import bug_snapshot\nfrom bugbug import repository\n\n\ndef field(bug, field):\n if field in bug and bug[field] != '---':\n return bug[field]\n\n return None\n\n\nclass has_str(object):\n def __call__(self, bug):\n return field(bug, 'cf_has_str')\n\n\nclass has_regression_range(object):\n def __call__(self, bug):\n return field(bug, 'cf_has_regression_range')\n\n\nclass has_crash_signature(object):\n def __call__(self, bug):\n return 'cf_crash_signature' in bug and bug['cf_crash_signature'] != ''\n\n\nclass keywords(object):\n def __init__(self, to_ignore=set()):\n self.to_ignore = to_ignore\n\n def __call__(self, bug):\n keywords = []\n subkeywords = []\n for keyword in bug['keywords']:\n if keyword in self.to_ignore:\n continue\n\n keywords.append(keyword)\n\n if keyword.startswith('sec-'):\n subkeywords.append('sec-')\n elif keyword.startswith('csectype-'):\n subkeywords.append('csectype-')\n return keywords + subkeywords\n\n\nclass severity(object):\n def __call__(self, bug):\n return field(bug, 'severity')\n\n\nclass is_coverity_issue(object):\n def __call__(self, bug):\n return re.search('[CID ?[0-9]+]', bug['summary']) is not None or re.search('[CID ?[0-9]+]', bug['whiteboard']) is not None\n\n\nclass has_url(object):\n def __call__(self, bug):\n return bug['url'] != ''\n\n\nclass has_w3c_url(object):\n def __call__(self, bug):\n return 'w3c' in bug['url']\n\n\nclass has_github_url(object):\n def __call__(self, bug):\n return 'github' in bug['url']\n\n\nclass whiteboard(object):\n def __call__(self, bug):\n\n # Split by '['\n paren_splits = bug['whiteboard'].lower().split('[')\n\n # Split splits by space if they weren't in [ and ].\n splits = []\n for paren_split in paren_splits:\n if ']' in paren_split:\n paren_split = paren_split.split(']')\n splits += paren_split\n else:\n splits += paren_split.split(' ')\n\n # Remove empty splits and strip\n splits = [split.strip() for split in splits if split.strip() != '']\n\n # For splits which contain ':', return both the whole string and the string before ':'.\n splits += [split.split(':', 1)[0] for split in splits if ':' in split]\n\n return splits\n\n\nclass patches(object):\n def __call__(self, bug):\n return sum(1 for a in bug['attachments'] if a['is_patch'] or a['content_type'] in ['text/x-review-board-request', 'text/x-phabricator-request'])\n\n\nclass landings(object):\n def __call__(self, bug):\n return sum(1 for c in bug['comments'] if '://hg.mozilla.org/' in c['text'])\n\n\nclass title(object):\n def __call__(self, bug):\n ret = []\n\n keywords = [\n 'fail',\n ]\n for keyword in keywords:\n if keyword in bug['summary'].lower():\n ret.append(keyword)\n\n return ret\n\n\nclass product(object):\n def __call__(self, bug):\n return bug['product']\n\n\nclass component(object):\n def __call__(self, bug):\n return bug['component']\n\n\nclass is_mozillian(object):\n def __call__(self, bug):\n return any(bug['creator_detail']['email'].endswith(domain) for domain in ['@mozilla.com', '@mozilla.org'])\n\n\nclass delta_request_merge(object):\n def __call__(self, bug):\n for history in bug['history']:\n for change in history['changes']:\n if change['added'].startswith('approval-mozilla'):\n 
uplift_request_datetime = datetime.strptime(history['when'], '%Y-%m-%dT%H:%M:%SZ').replace(tzinfo=timezone.utc)\n timedelta = versions.getCloserRelease(uplift_request_datetime)[1] - uplift_request_datetime\n return timedelta.days + timedelta.seconds / (24 * 60 * 60)\n\n return None\n\n\nclass commit_added(object):\n def __call__(self, bug):\n return sum(commit['added'] for commit in bug['commits'])\n\n\nclass commit_deleted(object):\n def __call__(self, bug):\n return sum(commit['deleted'] for commit in bug['commits'])\n\n\nclass commit_types(object):\n def __call__(self, bug):\n return sum((commit['types'] for commit in bug['commits']), [])\n\n\nclass blocked_bugs_number(object):\n def __call__(self, bug):\n return len(bug['blocks'])\n\n\nclass priority(object):\n def __call__(self, bug):\n return bug['priority']\n\n\nclass bug_has_cve_in_alias(object):\n def __call__(self, bug):\n return bug['alias'] is not None and 'CVE' in bug['alias']\n\n\nclass commit_files_modified_num(object):\n def __call__(self, bug):\n return sum(commit['files_modified_num'] for commit in bug['commits'])\n\n\nclass comment_count(object):\n def __call__(self, bug):\n return field(bug, 'comment_count')\n\n\nclass comment_length(object):\n def __call__(self, bug):\n return sum(len(x['text']) for x in bug['comments'])\n\n\ndef cleanup_url(text):\n text = re.sub(r'http[s]?://(hg.mozilla|searchfox|dxr.mozilla)\\S+', '__CODE_REFERENCE_URL__', text)\n return re.sub(r'http\\S+', '__URL__', text)\n\n\ndef cleanup_fileref(text):\n return re.sub(r'\\w+\\.py\\b|\\w+\\.json\\b|\\w+\\.js\\b|\\w+\\.jsm\\b|\\w+\\.html\\b|\\w+\\.css\\b|\\w+\\.c\\b|\\w+\\.cpp\\b|\\w+\\.h\\b', '__FILE_REFERENCE__', text)\n\n\ndef cleanup_responses(text):\n return re.sub('>[^\\n]+', ' ', text)\n\n\ndef cleanup_hex(text):\n return re.sub(r'\\b0[xX][0-9a-fA-F]+\\b', '__HEX_NUMBER__', text)\n\n\ndef cleanup_dll(text):\n return re.sub(r'\\w+(\\.dll|\\.so|\\.dylib)\\b', '__DLL_NAME__', text)\n\n\ndef cleanup_synonyms(text):\n synonyms = [\n ('safemode', ['safemode', 'safe mode']),\n ('str', ['str', 'steps to reproduce', 'repro steps']),\n ('uaf', ['uaf', 'use after free', 'use-after-free']),\n ('asan', ['asan', 'address sanitizer', 'addresssanitizer']),\n ('permafailure', ['permafailure', 'permafailing', 'permafail', 'perma failure', 'perma failing', 'perma fail', 'perma-failure', 'perma-failing', 'perma-fail']),\n ('spec', ['spec', 'specification']),\n ]\n\n for synonym_group, synonym_list in synonyms:\n text = re.sub('|'.join(fr'\\b{synonym}\\b' for synonym in synonym_list), synonym_group, text, flags=re.IGNORECASE)\n\n return text\n\n\ndef cleanup_crash(text):\n return re.sub(r'bp-[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{6}[0-9]{6}\\b', '__CRASH_STATS_LINK__', text)\n\n\nclass BugExtractor(BaseEstimator, TransformerMixin):\n def __init__(self, feature_extractors, cleanup_functions, rollback=False, rollback_when=None, commit_data=False):\n self.feature_extractors = feature_extractors\n self.cleanup_functions = cleanup_functions\n self.rollback = rollback\n self.rollback_when = rollback_when\n self.commit_map = repository.get_commit_map() if commit_data else None\n\n def fit(self, x, y=None):\n return self\n\n def transform(self, bugs):\n results = []\n\n for bug in bugs:\n bug_id = bug['id']\n\n if self.rollback:\n bug = bug_snapshot.rollback(bug, self.rollback_when)\n\n data = {}\n\n if self.commit_map is not None:\n if bug_id in self.commit_map:\n bug['commits'] = self.commit_map[bug_id]\n else:\n bug['commits'] = []\n\n for f in 
self.feature_extractors:\n res = f(bug)\n\n if res is None:\n continue\n\n if isinstance(res, list):\n for item in res:\n data[f.__class__.__name__ + '-' + item] = 'True'\n continue\n\n if isinstance(res, bool):\n res = str(res)\n\n data[f.__class__.__name__] = res\n\n # TODO: Try simply using all possible fields instead of extracting features manually.\n\n for cleanup_function in self.cleanup_functions:\n bug['summary'] = cleanup_function(bug['summary'])\n for c in bug['comments']:\n c['text'] = cleanup_function(c['text'])\n\n result = {\n 'data': data,\n 'title': bug['summary'],\n 'first_comment': bug['comments'][0]['text'],\n 'comments': ' '.join([c['text'] for c in bug['comments']]),\n }\n\n results.append(result)\n\n return pd.DataFrame(results)\n", "path": "bugbug/bug_features.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport xgboost\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.pipeline import Pipeline\n\nfrom bugbug import bug_features\nfrom bugbug import bugzilla\nfrom bugbug.model import Model\n\n\nclass TrackingModel(Model):\n def __init__(self, lemmatization=False):\n Model.__init__(self, lemmatization)\n\n feature_extractors = [\n bug_features.has_str(),\n bug_features.has_regression_range(),\n bug_features.severity(),\n bug_features.keywords(),\n bug_features.is_coverity_issue(),\n bug_features.has_crash_signature(),\n bug_features.has_url(),\n bug_features.has_w3c_url(),\n bug_features.has_github_url(),\n bug_features.whiteboard(),\n bug_features.patches(),\n bug_features.landings(),\n bug_features.title(),\n bug_features.priority(),\n bug_features.bug_reporter()\n ]\n\n cleanup_functions = [\n bug_features.cleanup_fileref,\n bug_features.cleanup_url,\n bug_features.cleanup_synonyms,\n ]\n\n self.extraction_pipeline = Pipeline([\n ('bug_extractor', bug_features.BugExtractor(feature_extractors, cleanup_functions, rollback=True, rollback_when=self.rollback)),\n ('union', ColumnTransformer([\n ('data', DictVectorizer(), 'data'),\n\n ('title', self.text_vectorizer(stop_words='english'), 'title'),\n\n ('comments', self.text_vectorizer(stop_words='english'), 'comments'),\n ])),\n ])\n\n self.clf = xgboost.XGBClassifier(n_jobs=16)\n self.clf.set_params(predictor='cpu_predictor')\n\n def rollback(self, change):\n return change['field_name'].startswith('cf_tracking_firefox')\n\n def get_labels(self):\n classes = {}\n\n for bug_data in bugzilla.get_bugs():\n bug_id = int(bug_data['id'])\n\n for entry in bug_data['history']:\n for change in entry['changes']:\n if change['field_name'].startswith('cf_tracking_firefox'):\n if change['added'] in ['blocking', '+']:\n classes[bug_id] = 1\n elif change['added'] == '-':\n classes[bug_id] = 0\n\n if bug_id not in classes:\n classes[bug_id] = 0\n\n return classes\n\n def get_feature_names(self):\n return self.extraction_pipeline.named_steps['union'].get_feature_names()\n", "path": "bugbug/models/tracking.py"}, {"content": "# -*- coding: utf-8 -*-\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. 
If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n\nimport re\nfrom datetime import datetime\nfrom datetime import timezone\n\nimport pandas as pd\nfrom libmozdata import versions\nfrom sklearn.base import BaseEstimator\nfrom sklearn.base import TransformerMixin\n\nfrom bugbug import bug_snapshot\nfrom bugbug import repository\n\n\ndef field(bug, field):\n if field in bug and bug[field] != '---':\n return bug[field]\n\n return None\n\n\nclass has_str(object):\n def __call__(self, bug):\n return field(bug, 'cf_has_str')\n\n\nclass has_regression_range(object):\n def __call__(self, bug):\n return field(bug, 'cf_has_regression_range')\n\n\nclass has_crash_signature(object):\n def __call__(self, bug):\n return 'cf_crash_signature' in bug and bug['cf_crash_signature'] != ''\n\n\nclass keywords(object):\n def __init__(self, to_ignore=set()):\n self.to_ignore = to_ignore\n\n def __call__(self, bug):\n keywords = []\n subkeywords = []\n for keyword in bug['keywords']:\n if keyword in self.to_ignore:\n continue\n\n keywords.append(keyword)\n\n if keyword.startswith('sec-'):\n subkeywords.append('sec-')\n elif keyword.startswith('csectype-'):\n subkeywords.append('csectype-')\n return keywords + subkeywords\n\n\nclass severity(object):\n def __call__(self, bug):\n return field(bug, 'severity')\n\n\nclass is_coverity_issue(object):\n def __call__(self, bug):\n return re.search('[CID ?[0-9]+]', bug['summary']) is not None or re.search('[CID ?[0-9]+]', bug['whiteboard']) is not None\n\n\nclass has_url(object):\n def __call__(self, bug):\n return bug['url'] != ''\n\n\nclass has_w3c_url(object):\n def __call__(self, bug):\n return 'w3c' in bug['url']\n\n\nclass has_github_url(object):\n def __call__(self, bug):\n return 'github' in bug['url']\n\n\nclass whiteboard(object):\n def __call__(self, bug):\n\n # Split by '['\n paren_splits = bug['whiteboard'].lower().split('[')\n\n # Split splits by space if they weren't in [ and ].\n splits = []\n for paren_split in paren_splits:\n if ']' in paren_split:\n paren_split = paren_split.split(']')\n splits += paren_split\n else:\n splits += paren_split.split(' ')\n\n # Remove empty splits and strip\n splits = [split.strip() for split in splits if split.strip() != '']\n\n # For splits which contain ':', return both the whole string and the string before ':'.\n splits += [split.split(':', 1)[0] for split in splits if ':' in split]\n\n return splits\n\n\nclass patches(object):\n def __call__(self, bug):\n return sum(1 for a in bug['attachments'] if a['is_patch'] or a['content_type'] in ['text/x-review-board-request', 'text/x-phabricator-request'])\n\n\nclass landings(object):\n def __call__(self, bug):\n return sum(1 for c in bug['comments'] if '://hg.mozilla.org/' in c['text'])\n\n\nclass title(object):\n def __call__(self, bug):\n ret = []\n\n keywords = [\n 'fail',\n ]\n for keyword in keywords:\n if keyword in bug['summary'].lower():\n ret.append(keyword)\n\n return ret\n\n\nclass product(object):\n def __call__(self, bug):\n return bug['product']\n\n\nclass component(object):\n def __call__(self, bug):\n return bug['component']\n\n\nclass is_mozillian(object):\n def __call__(self, bug):\n return any(bug['creator_detail']['email'].endswith(domain) for domain in ['@mozilla.com', '@mozilla.org'])\n\n\nclass bug_reporter(object):\n def __call__(self, bug):\n return bug['creator_detail']['email']\n\n\nclass delta_request_merge(object):\n def __call__(self, bug):\n for history in bug['history']:\n for change 
in history['changes']:\n if change['added'].startswith('approval-mozilla'):\n uplift_request_datetime = datetime.strptime(history['when'], '%Y-%m-%dT%H:%M:%SZ').replace(tzinfo=timezone.utc)\n timedelta = versions.getCloserRelease(uplift_request_datetime)[1] - uplift_request_datetime\n return timedelta.days + timedelta.seconds / (24 * 60 * 60)\n\n return None\n\n\nclass commit_added(object):\n def __call__(self, bug):\n return sum(commit['added'] for commit in bug['commits'])\n\n\nclass commit_deleted(object):\n def __call__(self, bug):\n return sum(commit['deleted'] for commit in bug['commits'])\n\n\nclass commit_types(object):\n def __call__(self, bug):\n return sum((commit['types'] for commit in bug['commits']), [])\n\n\nclass blocked_bugs_number(object):\n def __call__(self, bug):\n return len(bug['blocks'])\n\n\nclass priority(object):\n def __call__(self, bug):\n return bug['priority']\n\n\nclass bug_has_cve_in_alias(object):\n def __call__(self, bug):\n return bug['alias'] is not None and 'CVE' in bug['alias']\n\n\nclass commit_files_modified_num(object):\n def __call__(self, bug):\n return sum(commit['files_modified_num'] for commit in bug['commits'])\n\n\nclass comment_count(object):\n def __call__(self, bug):\n return field(bug, 'comment_count')\n\n\nclass comment_length(object):\n def __call__(self, bug):\n return sum(len(x['text']) for x in bug['comments'])\n\n\ndef cleanup_url(text):\n text = re.sub(r'http[s]?://(hg.mozilla|searchfox|dxr.mozilla)\\S+', '__CODE_REFERENCE_URL__', text)\n return re.sub(r'http\\S+', '__URL__', text)\n\n\ndef cleanup_fileref(text):\n return re.sub(r'\\w+\\.py\\b|\\w+\\.json\\b|\\w+\\.js\\b|\\w+\\.jsm\\b|\\w+\\.html\\b|\\w+\\.css\\b|\\w+\\.c\\b|\\w+\\.cpp\\b|\\w+\\.h\\b', '__FILE_REFERENCE__', text)\n\n\ndef cleanup_responses(text):\n return re.sub('>[^\\n]+', ' ', text)\n\n\ndef cleanup_hex(text):\n return re.sub(r'\\b0[xX][0-9a-fA-F]+\\b', '__HEX_NUMBER__', text)\n\n\ndef cleanup_dll(text):\n return re.sub(r'\\w+(\\.dll|\\.so|\\.dylib)\\b', '__DLL_NAME__', text)\n\n\ndef cleanup_synonyms(text):\n synonyms = [\n ('safemode', ['safemode', 'safe mode']),\n ('str', ['str', 'steps to reproduce', 'repro steps']),\n ('uaf', ['uaf', 'use after free', 'use-after-free']),\n ('asan', ['asan', 'address sanitizer', 'addresssanitizer']),\n ('permafailure', ['permafailure', 'permafailing', 'permafail', 'perma failure', 'perma failing', 'perma fail', 'perma-failure', 'perma-failing', 'perma-fail']),\n ('spec', ['spec', 'specification']),\n ]\n\n for synonym_group, synonym_list in synonyms:\n text = re.sub('|'.join(fr'\\b{synonym}\\b' for synonym in synonym_list), synonym_group, text, flags=re.IGNORECASE)\n\n return text\n\n\ndef cleanup_crash(text):\n return re.sub(r'bp-[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{6}[0-9]{6}\\b', '__CRASH_STATS_LINK__', text)\n\n\nclass BugExtractor(BaseEstimator, TransformerMixin):\n def __init__(self, feature_extractors, cleanup_functions, rollback=False, rollback_when=None, commit_data=False):\n self.feature_extractors = feature_extractors\n self.cleanup_functions = cleanup_functions\n self.rollback = rollback\n self.rollback_when = rollback_when\n self.commit_map = repository.get_commit_map() if commit_data else None\n\n def fit(self, x, y=None):\n return self\n\n def transform(self, bugs):\n results = []\n\n for bug in bugs:\n bug_id = bug['id']\n\n if self.rollback:\n bug = bug_snapshot.rollback(bug, self.rollback_when)\n\n data = {}\n\n if self.commit_map is not None:\n if bug_id in self.commit_map:\n 
bug['commits'] = self.commit_map[bug_id]\n else:\n bug['commits'] = []\n\n for f in self.feature_extractors:\n res = f(bug)\n\n if res is None:\n continue\n\n if isinstance(res, list):\n for item in res:\n data[f.__class__.__name__ + '-' + item] = 'True'\n continue\n\n if isinstance(res, bool):\n res = str(res)\n\n data[f.__class__.__name__] = res\n\n # TODO: Try simply using all possible fields instead of extracting features manually.\n\n for cleanup_function in self.cleanup_functions:\n bug['summary'] = cleanup_function(bug['summary'])\n for c in bug['comments']:\n c['text'] = cleanup_function(c['text'])\n\n result = {\n 'data': data,\n 'title': bug['summary'],\n 'first_comment': bug['comments'][0]['text'],\n 'comments': ' '.join([c['text'] for c in bug['comments']]),\n }\n\n results.append(result)\n\n return pd.DataFrame(results)\n", "path": "bugbug/bug_features.py"}]}
4,040
220
gh_patches_debug_22565
rasdani/github-patches
git_diff
pre-commit__pre-commit-874
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Concurrent execution results in uneven work per thread I'm running `pre-commit` from current `master` to test the concurrency feature introduced with #851. While it in general seems to work, work is distributed pretty uneven. One hook we run is [`prospector`](https://github.com/guykisel/prospector-mirror) which is nice for testing, because it takes a relatively long time and it prints the time taken in its output. Running `pre-commit run -a --verbose prospector | grep "Time Taken"` on a medium sized project (~100 Python files) results in the following distribution of work to the available 4 logical CPU cores: ``` Time Taken: 17.10 seconds Time Taken: 8.70 seconds Time Taken: 18.68 seconds Time Taken: 108.02 seconds ``` Especially compared to running it with concurrency disabled (using `PRE_COMMIT_NO_CONCURRENCY`), it's pretty obvious that concurrency doesn't provide any real benefit here: ``` Time Taken: 116.95 seconds ``` I'd be happy to help debugging this further. Just tell me what other information you need. :slightly_smiling_face: --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pre_commit/languages/helpers.py` Content: ``` 1 from __future__ import unicode_literals 2 3 import multiprocessing 4 import os 5 import shlex 6 7 from pre_commit.util import cmd_output 8 from pre_commit.xargs import xargs 9 10 11 def run_setup_cmd(prefix, cmd): 12 cmd_output(*cmd, cwd=prefix.prefix_dir, encoding=None) 13 14 15 def environment_dir(ENVIRONMENT_DIR, language_version): 16 if ENVIRONMENT_DIR is None: 17 return None 18 else: 19 return '{}-{}'.format(ENVIRONMENT_DIR, language_version) 20 21 22 def to_cmd(hook): 23 return tuple(shlex.split(hook['entry'])) + tuple(hook['args']) 24 25 26 def assert_version_default(binary, version): 27 if version != 'default': 28 raise AssertionError( 29 'For now, pre-commit requires system-installed {}'.format(binary), 30 ) 31 32 33 def assert_no_additional_deps(lang, additional_deps): 34 if additional_deps: 35 raise AssertionError( 36 'For now, pre-commit does not support ' 37 'additional_dependencies for {}'.format(lang), 38 ) 39 40 41 def basic_get_default_version(): 42 return 'default' 43 44 45 def basic_healthy(prefix, language_version): 46 return True 47 48 49 def no_install(prefix, version, additional_dependencies): 50 raise AssertionError('This type is not installable') 51 52 53 def target_concurrency(hook): 54 if hook['require_serial'] or 'PRE_COMMIT_NO_CONCURRENCY' in os.environ: 55 return 1 56 else: 57 # Travis appears to have a bunch of CPUs, but we can't use them all. 58 if 'TRAVIS' in os.environ: 59 return 2 60 else: 61 try: 62 return multiprocessing.cpu_count() 63 except NotImplementedError: 64 return 1 65 66 67 def run_xargs(hook, cmd, file_args): 68 return xargs(cmd, file_args, target_concurrency=target_concurrency(hook)) 69 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pre_commit/languages/helpers.py b/pre_commit/languages/helpers.py --- a/pre_commit/languages/helpers.py +++ b/pre_commit/languages/helpers.py @@ -2,12 +2,18 @@ import multiprocessing import os +import random import shlex +import six + from pre_commit.util import cmd_output from pre_commit.xargs import xargs +FIXED_RANDOM_SEED = 1542676186 + + def run_setup_cmd(prefix, cmd): cmd_output(*cmd, cwd=prefix.prefix_dir, encoding=None) @@ -64,5 +70,21 @@ return 1 +def _shuffled(seq): + """Deterministically shuffle identically under both py2 + py3.""" + fixed_random = random.Random() + if six.PY2: # pragma: no cover (py2) + fixed_random.seed(FIXED_RANDOM_SEED) + else: + fixed_random.seed(FIXED_RANDOM_SEED, version=1) + + seq = list(seq) + random.shuffle(seq, random=fixed_random.random) + return seq + + def run_xargs(hook, cmd, file_args): + # Shuffle the files so that they more evenly fill out the xargs partitions, + # but do it deterministically in case a hook cares about ordering. + file_args = _shuffled(file_args) return xargs(cmd, file_args, target_concurrency=target_concurrency(hook))
{"golden_diff": "diff --git a/pre_commit/languages/helpers.py b/pre_commit/languages/helpers.py\n--- a/pre_commit/languages/helpers.py\n+++ b/pre_commit/languages/helpers.py\n@@ -2,12 +2,18 @@\n \n import multiprocessing\n import os\n+import random\n import shlex\n \n+import six\n+\n from pre_commit.util import cmd_output\n from pre_commit.xargs import xargs\n \n \n+FIXED_RANDOM_SEED = 1542676186\n+\n+\n def run_setup_cmd(prefix, cmd):\n cmd_output(*cmd, cwd=prefix.prefix_dir, encoding=None)\n \n@@ -64,5 +70,21 @@\n return 1\n \n \n+def _shuffled(seq):\n+ \"\"\"Deterministically shuffle identically under both py2 + py3.\"\"\"\n+ fixed_random = random.Random()\n+ if six.PY2: # pragma: no cover (py2)\n+ fixed_random.seed(FIXED_RANDOM_SEED)\n+ else:\n+ fixed_random.seed(FIXED_RANDOM_SEED, version=1)\n+\n+ seq = list(seq)\n+ random.shuffle(seq, random=fixed_random.random)\n+ return seq\n+\n+\n def run_xargs(hook, cmd, file_args):\n+ # Shuffle the files so that they more evenly fill out the xargs partitions,\n+ # but do it deterministically in case a hook cares about ordering.\n+ file_args = _shuffled(file_args)\n return xargs(cmd, file_args, target_concurrency=target_concurrency(hook))\n", "issue": "Concurrent execution results in uneven work per thread\nI'm running `pre-commit` from current `master` to test the concurrency feature introduced with #851. While it in general seems to work, work is distributed pretty uneven. One hook we run is [`prospector`](https://github.com/guykisel/prospector-mirror) which is nice for testing, because it takes a relatively long time and it prints the time taken in its output.\r\n\r\nRunning `pre-commit run -a --verbose prospector | grep \"Time Taken\"` on a medium sized project (~100 Python files) results in the following distribution of work to the available 4 logical CPU cores:\r\n```\r\nTime Taken: 17.10 seconds\r\nTime Taken: 8.70 seconds\r\nTime Taken: 18.68 seconds\r\nTime Taken: 108.02 seconds\r\n```\r\n\r\nEspecially compared to running it with concurrency disabled (using `PRE_COMMIT_NO_CONCURRENCY`), it's pretty obvious that concurrency doesn't provide any real benefit here:\r\n```\r\nTime Taken: 116.95 seconds\r\n```\r\n\r\nI'd be happy to help debugging this further. Just tell me what other information you need. 
:slightly_smiling_face: \n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport multiprocessing\nimport os\nimport shlex\n\nfrom pre_commit.util import cmd_output\nfrom pre_commit.xargs import xargs\n\n\ndef run_setup_cmd(prefix, cmd):\n cmd_output(*cmd, cwd=prefix.prefix_dir, encoding=None)\n\n\ndef environment_dir(ENVIRONMENT_DIR, language_version):\n if ENVIRONMENT_DIR is None:\n return None\n else:\n return '{}-{}'.format(ENVIRONMENT_DIR, language_version)\n\n\ndef to_cmd(hook):\n return tuple(shlex.split(hook['entry'])) + tuple(hook['args'])\n\n\ndef assert_version_default(binary, version):\n if version != 'default':\n raise AssertionError(\n 'For now, pre-commit requires system-installed {}'.format(binary),\n )\n\n\ndef assert_no_additional_deps(lang, additional_deps):\n if additional_deps:\n raise AssertionError(\n 'For now, pre-commit does not support '\n 'additional_dependencies for {}'.format(lang),\n )\n\n\ndef basic_get_default_version():\n return 'default'\n\n\ndef basic_healthy(prefix, language_version):\n return True\n\n\ndef no_install(prefix, version, additional_dependencies):\n raise AssertionError('This type is not installable')\n\n\ndef target_concurrency(hook):\n if hook['require_serial'] or 'PRE_COMMIT_NO_CONCURRENCY' in os.environ:\n return 1\n else:\n # Travis appears to have a bunch of CPUs, but we can't use them all.\n if 'TRAVIS' in os.environ:\n return 2\n else:\n try:\n return multiprocessing.cpu_count()\n except NotImplementedError:\n return 1\n\n\ndef run_xargs(hook, cmd, file_args):\n return xargs(cmd, file_args, target_concurrency=target_concurrency(hook))\n", "path": "pre_commit/languages/helpers.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nimport multiprocessing\nimport os\nimport random\nimport shlex\n\nimport six\n\nfrom pre_commit.util import cmd_output\nfrom pre_commit.xargs import xargs\n\n\nFIXED_RANDOM_SEED = 1542676186\n\n\ndef run_setup_cmd(prefix, cmd):\n cmd_output(*cmd, cwd=prefix.prefix_dir, encoding=None)\n\n\ndef environment_dir(ENVIRONMENT_DIR, language_version):\n if ENVIRONMENT_DIR is None:\n return None\n else:\n return '{}-{}'.format(ENVIRONMENT_DIR, language_version)\n\n\ndef to_cmd(hook):\n return tuple(shlex.split(hook['entry'])) + tuple(hook['args'])\n\n\ndef assert_version_default(binary, version):\n if version != 'default':\n raise AssertionError(\n 'For now, pre-commit requires system-installed {}'.format(binary),\n )\n\n\ndef assert_no_additional_deps(lang, additional_deps):\n if additional_deps:\n raise AssertionError(\n 'For now, pre-commit does not support '\n 'additional_dependencies for {}'.format(lang),\n )\n\n\ndef basic_get_default_version():\n return 'default'\n\n\ndef basic_healthy(prefix, language_version):\n return True\n\n\ndef no_install(prefix, version, additional_dependencies):\n raise AssertionError('This type is not installable')\n\n\ndef target_concurrency(hook):\n if hook['require_serial'] or 'PRE_COMMIT_NO_CONCURRENCY' in os.environ:\n return 1\n else:\n # Travis appears to have a bunch of CPUs, but we can't use them all.\n if 'TRAVIS' in os.environ:\n return 2\n else:\n try:\n return multiprocessing.cpu_count()\n except NotImplementedError:\n return 1\n\n\ndef _shuffled(seq):\n \"\"\"Deterministically shuffle identically under both py2 + py3.\"\"\"\n fixed_random = random.Random()\n if six.PY2: # pragma: no cover (py2)\n fixed_random.seed(FIXED_RANDOM_SEED)\n else:\n fixed_random.seed(FIXED_RANDOM_SEED, version=1)\n\n seq = list(seq)\n 
random.shuffle(seq, random=fixed_random.random)\n return seq\n\n\ndef run_xargs(hook, cmd, file_args):\n # Shuffle the files so that they more evenly fill out the xargs partitions,\n # but do it deterministically in case a hook cares about ordering.\n file_args = _shuffled(file_args)\n return xargs(cmd, file_args, target_concurrency=target_concurrency(hook))\n", "path": "pre_commit/languages/helpers.py"}]}
1,040
333
gh_patches_debug_4351
rasdani/github-patches
git_diff
localstack__localstack-1589
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- describe-stream for dynamodb streams crashes Steps to recreate: 1. create table ``` aws dynamodb create-table --table-name MusicCollection --attribute-definitions AttributeName=Artist,AttributeType=S AttributeName=SongTitle,AttributeType=S --key-schema AttributeName=Artist,KeyType=HASH AttributeName=SongTitle,KeyType=RANGE --provisioned-throughput ReadCapacityUnits=5,WriteCapacityUnits=5 --endpoint http://localhost:4569 ``` 2. add stream ``` aws dynamodb update-table --table-name MusicCollection --stream-specification StreamEnabled=true,StreamViewType=NEW_IMAGE --endpoint http://localhost:4569 ``` 3. describe stream using the stream arn from step 2. ``` aws dynamodbstreams describe-stream --stream-arn "arn:aws:dynamodb:eu-central-1:000000000000:table/MusicCollection/stream/2019-09-21T14:06:37.345" --endpoint http://localhost:4570 ``` localstack logs: ``` Traceback (most recent call last): File "/home/balrog/localstack/.venv/lib/python2.7/site-packages/flask/app.py", line 2292, in wsgi_app response = self.full_dispatch_request() File "/home/balrog/localstack/.venv/lib/python2.7/site-packages/flask/app.py", line 1815, in full_dispatch_request rv = self.handle_user_exception(e) File "/home/balrog/localstack/.venv/lib/python2.7/site-packages/flask_cors/extension.py", line 161, in wrapped_function return cors_after_request(app.make_response(f(*args, **kwargs))) File "/home/balrog/localstack/.venv/lib/python2.7/site-packages/flask/app.py", line 1718, in handle_user_exception reraise(exc_type, exc_value, tb) File "/home/balrog/localstack/.venv/lib/python2.7/site-packages/flask/app.py", line 1813, in full_dispatch_request rv = self.dispatch_request() File "/home/balrog/localstack/.venv/lib/python2.7/site-packages/flask/app.py", line 1799, in dispatch_request return self.view_functions[rule.endpoint](**req.view_args) File "/home/balrog/localstack/localstack/services/dynamodbstreams/dynamodbstreams_api.py", line 88, in post_request shard['ShardId'] = shard_id(stream_name, shard['ShardId']) File "/home/balrog/localstack/localstack/services/dynamodbstreams/dynamodbstreams_api.py", line 146, in shard_id return '-'.join([kinesis_shard_id, random_id(stream_arn, kinesis_shard_id)]) File "/home/balrog/localstack/localstack/services/dynamodbstreams/dynamodbstreams_api.py", line 142, in random_id return uuid.uuid5(namespace, kinesis_shard_id).hex File "/usr/lib/python2.7/uuid.py", line 609, in uuid5 hash = sha1(namespace.bytes + name).digest() UnicodeDecodeError: 'ascii' codec can't decode byte 0xe1 in position 4: ordinal not in range(128) ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `localstack/services/dynamodbstreams/dynamodbstreams_api.py` Content: ``` 1 import json 2 import uuid 3 import hashlib 4 from flask import Flask, jsonify, request, make_response 5 from localstack.services import generic_proxy 6 from localstack.utils.aws import aws_stack 7 from localstack.utils.common import to_str, to_bytes 8 from localstack.utils.analytics import event_publisher 9 10 APP_NAME = 'ddb_streams_api' 11 12 app = Flask(APP_NAME) 13 14 DDB_STREAMS = {} 15 16 DDB_KINESIS_STREAM_NAME_PREFIX = '__ddb_stream_' 17 18 ACTION_HEADER_PREFIX = 'DynamoDBStreams_20120810' 19 20 SEQUENCE_NUMBER_COUNTER = 1 21 22 23 def add_dynamodb_stream(table_name, latest_stream_label, view_type='NEW_AND_OLD_IMAGES', enabled=True): 24 if enabled: 25 # create kinesis stream as a backend 26 stream_name = get_kinesis_stream_name(table_name) 27 aws_stack.create_kinesis_stream(stream_name) 28 stream = { 29 'StreamArn': aws_stack.dynamodb_stream_arn(table_name=table_name, 30 latest_stream_label=latest_stream_label), 31 'TableName': table_name, 32 'StreamLabel': latest_stream_label, 33 'StreamStatus': 'ENABLED', 34 'KeySchema': [], 35 'Shards': [] 36 } 37 table_arn = aws_stack.dynamodb_table_arn(table_name) 38 DDB_STREAMS[table_arn] = stream 39 # record event 40 event_publisher.fire_event(event_publisher.EVENT_DYNAMODB_CREATE_STREAM, 41 payload={'n': event_publisher.get_hash(table_name)}) 42 43 44 def forward_events(records): 45 global SEQUENCE_NUMBER_COUNTER 46 kinesis = aws_stack.connect_to_service('kinesis') 47 for record in records: 48 if 'SequenceNumber' not in record['dynamodb']: 49 record['dynamodb']['SequenceNumber'] = str(SEQUENCE_NUMBER_COUNTER) 50 SEQUENCE_NUMBER_COUNTER += 1 51 table_arn = record['eventSourceARN'] 52 stream = DDB_STREAMS.get(table_arn) 53 if stream: 54 table_name = table_name_from_stream_arn(stream['StreamArn']) 55 stream_name = get_kinesis_stream_name(table_name) 56 kinesis.put_record(StreamName=stream_name, Data=json.dumps(record), PartitionKey='TODO') 57 58 59 @app.route('/', methods=['POST']) 60 def post_request(): 61 action = request.headers.get('x-amz-target') 62 data = json.loads(to_str(request.data)) 63 result = {} 64 kinesis = aws_stack.connect_to_service('kinesis') 65 if action == '%s.ListStreams' % ACTION_HEADER_PREFIX: 66 result = { 67 'Streams': list(DDB_STREAMS.values()), 68 'LastEvaluatedStreamArn': 'TODO' 69 } 70 elif action == '%s.DescribeStream' % ACTION_HEADER_PREFIX: 71 for stream in DDB_STREAMS.values(): 72 if stream['StreamArn'] == data['StreamArn']: 73 result = { 74 'StreamDescription': stream 75 } 76 # get stream details 77 dynamodb = aws_stack.connect_to_service('dynamodb') 78 table_name = table_name_from_stream_arn(stream['StreamArn']) 79 stream_name = get_kinesis_stream_name(table_name) 80 stream_details = kinesis.describe_stream(StreamName=stream_name) 81 table_details = dynamodb.describe_table(TableName=table_name) 82 stream['KeySchema'] = table_details['Table']['KeySchema'] 83 84 # Replace Kinesis ShardIDs with ones that mimic actual 85 # DynamoDBStream ShardIDs. 
86 stream_shards = stream_details['StreamDescription']['Shards'] 87 for shard in stream_shards: 88 shard['ShardId'] = shard_id(stream_name, shard['ShardId']) 89 stream['Shards'] = stream_shards 90 break 91 if not result: 92 return error_response('Requested resource not found', error_type='ResourceNotFoundException') 93 elif action == '%s.GetShardIterator' % ACTION_HEADER_PREFIX: 94 # forward request to Kinesis API 95 stream_name = stream_name_from_stream_arn(data['StreamArn']) 96 stream_shard_id = kinesis_shard_id(data['ShardId']) 97 result = kinesis.get_shard_iterator(StreamName=stream_name, 98 ShardId=stream_shard_id, ShardIteratorType=data['ShardIteratorType']) 99 elif action == '%s.GetRecords' % ACTION_HEADER_PREFIX: 100 kinesis_records = kinesis.get_records(**data) 101 result = {'Records': [], 'NextShardIterator': kinesis_records.get('NextShardIterator')} 102 for record in kinesis_records['Records']: 103 result['Records'].append(json.loads(to_str(record['Data']))) 104 else: 105 print('WARNING: Unknown operation "%s"' % action) 106 return jsonify(result) 107 108 109 # ----------------- 110 # HELPER FUNCTIONS 111 # ----------------- 112 113 def error_response(message=None, error_type=None, code=400): 114 if not message: 115 message = 'Unknown error' 116 if not error_type: 117 error_type = 'UnknownError' 118 if 'com.amazonaws.dynamodb' not in error_type: 119 error_type = 'com.amazonaws.dynamodb.v20120810#%s' % error_type 120 content = { 121 'message': message, 122 '__type': error_type 123 } 124 return make_response(jsonify(content), code) 125 126 127 def get_kinesis_stream_name(table_name): 128 return DDB_KINESIS_STREAM_NAME_PREFIX + table_name 129 130 131 def table_name_from_stream_arn(stream_arn): 132 return stream_arn.split(':table/')[1].split('/')[0] 133 134 135 def stream_name_from_stream_arn(stream_arn): 136 table_name = table_name_from_stream_arn(stream_arn) 137 return get_kinesis_stream_name(table_name) 138 139 140 def random_id(stream_arn, kinesis_shard_id): 141 namespace = uuid.UUID(bytes=hashlib.sha1(to_bytes(stream_arn)).digest()[:16]) 142 return uuid.uuid5(namespace, kinesis_shard_id).hex 143 144 145 def shard_id(stream_arn, kinesis_shard_id): 146 return '-'.join([kinesis_shard_id, random_id(stream_arn, kinesis_shard_id)]) 147 148 149 def kinesis_shard_id(dynamodbstream_shard_id): 150 return dynamodbstream_shard_id.rsplit('-', 1)[0] 151 152 153 def serve(port, quiet=True): 154 generic_proxy.serve_flask_app(app=app, port=port, quiet=quiet) 155 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/localstack/services/dynamodbstreams/dynamodbstreams_api.py b/localstack/services/dynamodbstreams/dynamodbstreams_api.py --- a/localstack/services/dynamodbstreams/dynamodbstreams_api.py +++ b/localstack/services/dynamodbstreams/dynamodbstreams_api.py @@ -139,7 +139,7 @@ def random_id(stream_arn, kinesis_shard_id): namespace = uuid.UUID(bytes=hashlib.sha1(to_bytes(stream_arn)).digest()[:16]) - return uuid.uuid5(namespace, kinesis_shard_id).hex + return uuid.uuid5(namespace, to_bytes(kinesis_shard_id)).hex def shard_id(stream_arn, kinesis_shard_id):
{"golden_diff": "diff --git a/localstack/services/dynamodbstreams/dynamodbstreams_api.py b/localstack/services/dynamodbstreams/dynamodbstreams_api.py\n--- a/localstack/services/dynamodbstreams/dynamodbstreams_api.py\n+++ b/localstack/services/dynamodbstreams/dynamodbstreams_api.py\n@@ -139,7 +139,7 @@\n \n def random_id(stream_arn, kinesis_shard_id):\n namespace = uuid.UUID(bytes=hashlib.sha1(to_bytes(stream_arn)).digest()[:16])\n- return uuid.uuid5(namespace, kinesis_shard_id).hex\n+ return uuid.uuid5(namespace, to_bytes(kinesis_shard_id)).hex\n \n \n def shard_id(stream_arn, kinesis_shard_id):\n", "issue": "describe-stream for dynamodb streams crashes\nSteps to recreate:\r\n1. create table\r\n```\r\naws dynamodb create-table --table-name MusicCollection --attribute-definitions AttributeName=Artist,AttributeType=S AttributeName=SongTitle,AttributeType=S --key-schema AttributeName=Artist,KeyType=HASH AttributeName=SongTitle,KeyType=RANGE --provisioned-throughput ReadCapacityUnits=5,WriteCapacityUnits=5 --endpoint http://localhost:4569\r\n```\r\n2. add stream\r\n```\r\naws dynamodb update-table --table-name MusicCollection --stream-specification StreamEnabled=true,StreamViewType=NEW_IMAGE --endpoint http://localhost:4569 \r\n```\r\n3. describe stream using the stream arn from step 2.\r\n```\r\naws dynamodbstreams describe-stream --stream-arn \"arn:aws:dynamodb:eu-central-1:000000000000:table/MusicCollection/stream/2019-09-21T14:06:37.345\" --endpoint http://localhost:4570\r\n```\r\nlocalstack logs:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/balrog/localstack/.venv/lib/python2.7/site-packages/flask/app.py\", line 2292, in wsgi_app\r\n response = self.full_dispatch_request()\r\n File \"/home/balrog/localstack/.venv/lib/python2.7/site-packages/flask/app.py\", line 1815, in full_dispatch_request\r\n rv = self.handle_user_exception(e)\r\n File \"/home/balrog/localstack/.venv/lib/python2.7/site-packages/flask_cors/extension.py\", line 161, in wrapped_function\r\n return cors_after_request(app.make_response(f(*args, **kwargs)))\r\n File \"/home/balrog/localstack/.venv/lib/python2.7/site-packages/flask/app.py\", line 1718, in handle_user_exception\r\n reraise(exc_type, exc_value, tb)\r\n File \"/home/balrog/localstack/.venv/lib/python2.7/site-packages/flask/app.py\", line 1813, in full_dispatch_request\r\n rv = self.dispatch_request()\r\n File \"/home/balrog/localstack/.venv/lib/python2.7/site-packages/flask/app.py\", line 1799, in dispatch_request\r\n return self.view_functions[rule.endpoint](**req.view_args)\r\n File \"/home/balrog/localstack/localstack/services/dynamodbstreams/dynamodbstreams_api.py\", line 88, in post_request\r\n shard['ShardId'] = shard_id(stream_name, shard['ShardId'])\r\n File \"/home/balrog/localstack/localstack/services/dynamodbstreams/dynamodbstreams_api.py\", line 146, in shard_id\r\n return '-'.join([kinesis_shard_id, random_id(stream_arn, kinesis_shard_id)])\r\n File \"/home/balrog/localstack/localstack/services/dynamodbstreams/dynamodbstreams_api.py\", line 142, in random_id\r\n return uuid.uuid5(namespace, kinesis_shard_id).hex\r\n File \"/usr/lib/python2.7/uuid.py\", line 609, in uuid5\r\n hash = sha1(namespace.bytes + name).digest()\r\nUnicodeDecodeError: 'ascii' codec can't decode byte 0xe1 in position 4: ordinal not in range(128)\r\n```\n", "before_files": [{"content": "import json\nimport uuid\nimport hashlib\nfrom flask import Flask, jsonify, request, make_response\nfrom localstack.services import generic_proxy\nfrom localstack.utils.aws 
import aws_stack\nfrom localstack.utils.common import to_str, to_bytes\nfrom localstack.utils.analytics import event_publisher\n\nAPP_NAME = 'ddb_streams_api'\n\napp = Flask(APP_NAME)\n\nDDB_STREAMS = {}\n\nDDB_KINESIS_STREAM_NAME_PREFIX = '__ddb_stream_'\n\nACTION_HEADER_PREFIX = 'DynamoDBStreams_20120810'\n\nSEQUENCE_NUMBER_COUNTER = 1\n\n\ndef add_dynamodb_stream(table_name, latest_stream_label, view_type='NEW_AND_OLD_IMAGES', enabled=True):\n if enabled:\n # create kinesis stream as a backend\n stream_name = get_kinesis_stream_name(table_name)\n aws_stack.create_kinesis_stream(stream_name)\n stream = {\n 'StreamArn': aws_stack.dynamodb_stream_arn(table_name=table_name,\n latest_stream_label=latest_stream_label),\n 'TableName': table_name,\n 'StreamLabel': latest_stream_label,\n 'StreamStatus': 'ENABLED',\n 'KeySchema': [],\n 'Shards': []\n }\n table_arn = aws_stack.dynamodb_table_arn(table_name)\n DDB_STREAMS[table_arn] = stream\n # record event\n event_publisher.fire_event(event_publisher.EVENT_DYNAMODB_CREATE_STREAM,\n payload={'n': event_publisher.get_hash(table_name)})\n\n\ndef forward_events(records):\n global SEQUENCE_NUMBER_COUNTER\n kinesis = aws_stack.connect_to_service('kinesis')\n for record in records:\n if 'SequenceNumber' not in record['dynamodb']:\n record['dynamodb']['SequenceNumber'] = str(SEQUENCE_NUMBER_COUNTER)\n SEQUENCE_NUMBER_COUNTER += 1\n table_arn = record['eventSourceARN']\n stream = DDB_STREAMS.get(table_arn)\n if stream:\n table_name = table_name_from_stream_arn(stream['StreamArn'])\n stream_name = get_kinesis_stream_name(table_name)\n kinesis.put_record(StreamName=stream_name, Data=json.dumps(record), PartitionKey='TODO')\n\n\[email protected]('/', methods=['POST'])\ndef post_request():\n action = request.headers.get('x-amz-target')\n data = json.loads(to_str(request.data))\n result = {}\n kinesis = aws_stack.connect_to_service('kinesis')\n if action == '%s.ListStreams' % ACTION_HEADER_PREFIX:\n result = {\n 'Streams': list(DDB_STREAMS.values()),\n 'LastEvaluatedStreamArn': 'TODO'\n }\n elif action == '%s.DescribeStream' % ACTION_HEADER_PREFIX:\n for stream in DDB_STREAMS.values():\n if stream['StreamArn'] == data['StreamArn']:\n result = {\n 'StreamDescription': stream\n }\n # get stream details\n dynamodb = aws_stack.connect_to_service('dynamodb')\n table_name = table_name_from_stream_arn(stream['StreamArn'])\n stream_name = get_kinesis_stream_name(table_name)\n stream_details = kinesis.describe_stream(StreamName=stream_name)\n table_details = dynamodb.describe_table(TableName=table_name)\n stream['KeySchema'] = table_details['Table']['KeySchema']\n\n # Replace Kinesis ShardIDs with ones that mimic actual\n # DynamoDBStream ShardIDs.\n stream_shards = stream_details['StreamDescription']['Shards']\n for shard in stream_shards:\n shard['ShardId'] = shard_id(stream_name, shard['ShardId'])\n stream['Shards'] = stream_shards\n break\n if not result:\n return error_response('Requested resource not found', error_type='ResourceNotFoundException')\n elif action == '%s.GetShardIterator' % ACTION_HEADER_PREFIX:\n # forward request to Kinesis API\n stream_name = stream_name_from_stream_arn(data['StreamArn'])\n stream_shard_id = kinesis_shard_id(data['ShardId'])\n result = kinesis.get_shard_iterator(StreamName=stream_name,\n ShardId=stream_shard_id, ShardIteratorType=data['ShardIteratorType'])\n elif action == '%s.GetRecords' % ACTION_HEADER_PREFIX:\n kinesis_records = kinesis.get_records(**data)\n result = {'Records': [], 'NextShardIterator': 
kinesis_records.get('NextShardIterator')}\n for record in kinesis_records['Records']:\n result['Records'].append(json.loads(to_str(record['Data'])))\n else:\n print('WARNING: Unknown operation \"%s\"' % action)\n return jsonify(result)\n\n\n# -----------------\n# HELPER FUNCTIONS\n# -----------------\n\ndef error_response(message=None, error_type=None, code=400):\n if not message:\n message = 'Unknown error'\n if not error_type:\n error_type = 'UnknownError'\n if 'com.amazonaws.dynamodb' not in error_type:\n error_type = 'com.amazonaws.dynamodb.v20120810#%s' % error_type\n content = {\n 'message': message,\n '__type': error_type\n }\n return make_response(jsonify(content), code)\n\n\ndef get_kinesis_stream_name(table_name):\n return DDB_KINESIS_STREAM_NAME_PREFIX + table_name\n\n\ndef table_name_from_stream_arn(stream_arn):\n return stream_arn.split(':table/')[1].split('/')[0]\n\n\ndef stream_name_from_stream_arn(stream_arn):\n table_name = table_name_from_stream_arn(stream_arn)\n return get_kinesis_stream_name(table_name)\n\n\ndef random_id(stream_arn, kinesis_shard_id):\n namespace = uuid.UUID(bytes=hashlib.sha1(to_bytes(stream_arn)).digest()[:16])\n return uuid.uuid5(namespace, kinesis_shard_id).hex\n\n\ndef shard_id(stream_arn, kinesis_shard_id):\n return '-'.join([kinesis_shard_id, random_id(stream_arn, kinesis_shard_id)])\n\n\ndef kinesis_shard_id(dynamodbstream_shard_id):\n return dynamodbstream_shard_id.rsplit('-', 1)[0]\n\n\ndef serve(port, quiet=True):\n generic_proxy.serve_flask_app(app=app, port=port, quiet=quiet)\n", "path": "localstack/services/dynamodbstreams/dynamodbstreams_api.py"}], "after_files": [{"content": "import json\nimport uuid\nimport hashlib\nfrom flask import Flask, jsonify, request, make_response\nfrom localstack.services import generic_proxy\nfrom localstack.utils.aws import aws_stack\nfrom localstack.utils.common import to_str, to_bytes\nfrom localstack.utils.analytics import event_publisher\n\nAPP_NAME = 'ddb_streams_api'\n\napp = Flask(APP_NAME)\n\nDDB_STREAMS = {}\n\nDDB_KINESIS_STREAM_NAME_PREFIX = '__ddb_stream_'\n\nACTION_HEADER_PREFIX = 'DynamoDBStreams_20120810'\n\nSEQUENCE_NUMBER_COUNTER = 1\n\n\ndef add_dynamodb_stream(table_name, latest_stream_label, view_type='NEW_AND_OLD_IMAGES', enabled=True):\n if enabled:\n # create kinesis stream as a backend\n stream_name = get_kinesis_stream_name(table_name)\n aws_stack.create_kinesis_stream(stream_name)\n stream = {\n 'StreamArn': aws_stack.dynamodb_stream_arn(table_name=table_name,\n latest_stream_label=latest_stream_label),\n 'TableName': table_name,\n 'StreamLabel': latest_stream_label,\n 'StreamStatus': 'ENABLED',\n 'KeySchema': [],\n 'Shards': []\n }\n table_arn = aws_stack.dynamodb_table_arn(table_name)\n DDB_STREAMS[table_arn] = stream\n # record event\n event_publisher.fire_event(event_publisher.EVENT_DYNAMODB_CREATE_STREAM,\n payload={'n': event_publisher.get_hash(table_name)})\n\n\ndef forward_events(records):\n global SEQUENCE_NUMBER_COUNTER\n kinesis = aws_stack.connect_to_service('kinesis')\n for record in records:\n if 'SequenceNumber' not in record['dynamodb']:\n record['dynamodb']['SequenceNumber'] = str(SEQUENCE_NUMBER_COUNTER)\n SEQUENCE_NUMBER_COUNTER += 1\n table_arn = record['eventSourceARN']\n stream = DDB_STREAMS.get(table_arn)\n if stream:\n table_name = table_name_from_stream_arn(stream['StreamArn'])\n stream_name = get_kinesis_stream_name(table_name)\n kinesis.put_record(StreamName=stream_name, Data=json.dumps(record), PartitionKey='TODO')\n\n\[email protected]('/', 
methods=['POST'])\ndef post_request():\n action = request.headers.get('x-amz-target')\n data = json.loads(to_str(request.data))\n result = {}\n kinesis = aws_stack.connect_to_service('kinesis')\n if action == '%s.ListStreams' % ACTION_HEADER_PREFIX:\n result = {\n 'Streams': list(DDB_STREAMS.values()),\n 'LastEvaluatedStreamArn': 'TODO'\n }\n elif action == '%s.DescribeStream' % ACTION_HEADER_PREFIX:\n for stream in DDB_STREAMS.values():\n if stream['StreamArn'] == data['StreamArn']:\n result = {\n 'StreamDescription': stream\n }\n # get stream details\n dynamodb = aws_stack.connect_to_service('dynamodb')\n table_name = table_name_from_stream_arn(stream['StreamArn'])\n stream_name = get_kinesis_stream_name(table_name)\n stream_details = kinesis.describe_stream(StreamName=stream_name)\n table_details = dynamodb.describe_table(TableName=table_name)\n stream['KeySchema'] = table_details['Table']['KeySchema']\n\n # Replace Kinesis ShardIDs with ones that mimic actual\n # DynamoDBStream ShardIDs.\n stream_shards = stream_details['StreamDescription']['Shards']\n for shard in stream_shards:\n shard['ShardId'] = shard_id(stream_name, shard['ShardId'])\n stream['Shards'] = stream_shards\n break\n if not result:\n return error_response('Requested resource not found', error_type='ResourceNotFoundException')\n elif action == '%s.GetShardIterator' % ACTION_HEADER_PREFIX:\n # forward request to Kinesis API\n stream_name = stream_name_from_stream_arn(data['StreamArn'])\n stream_shard_id = kinesis_shard_id(data['ShardId'])\n result = kinesis.get_shard_iterator(StreamName=stream_name,\n ShardId=stream_shard_id, ShardIteratorType=data['ShardIteratorType'])\n elif action == '%s.GetRecords' % ACTION_HEADER_PREFIX:\n kinesis_records = kinesis.get_records(**data)\n result = {'Records': [], 'NextShardIterator': kinesis_records.get('NextShardIterator')}\n for record in kinesis_records['Records']:\n result['Records'].append(json.loads(to_str(record['Data'])))\n else:\n print('WARNING: Unknown operation \"%s\"' % action)\n return jsonify(result)\n\n\n# -----------------\n# HELPER FUNCTIONS\n# -----------------\n\ndef error_response(message=None, error_type=None, code=400):\n if not message:\n message = 'Unknown error'\n if not error_type:\n error_type = 'UnknownError'\n if 'com.amazonaws.dynamodb' not in error_type:\n error_type = 'com.amazonaws.dynamodb.v20120810#%s' % error_type\n content = {\n 'message': message,\n '__type': error_type\n }\n return make_response(jsonify(content), code)\n\n\ndef get_kinesis_stream_name(table_name):\n return DDB_KINESIS_STREAM_NAME_PREFIX + table_name\n\n\ndef table_name_from_stream_arn(stream_arn):\n return stream_arn.split(':table/')[1].split('/')[0]\n\n\ndef stream_name_from_stream_arn(stream_arn):\n table_name = table_name_from_stream_arn(stream_arn)\n return get_kinesis_stream_name(table_name)\n\n\ndef random_id(stream_arn, kinesis_shard_id):\n namespace = uuid.UUID(bytes=hashlib.sha1(to_bytes(stream_arn)).digest()[:16])\n return uuid.uuid5(namespace, to_bytes(kinesis_shard_id)).hex\n\n\ndef shard_id(stream_arn, kinesis_shard_id):\n return '-'.join([kinesis_shard_id, random_id(stream_arn, kinesis_shard_id)])\n\n\ndef kinesis_shard_id(dynamodbstream_shard_id):\n return dynamodbstream_shard_id.rsplit('-', 1)[0]\n\n\ndef serve(port, quiet=True):\n generic_proxy.serve_flask_app(app=app, port=port, quiet=quiet)\n", "path": "localstack/services/dynamodbstreams/dynamodbstreams_api.py"}]}
2,721
155
gh_patches_debug_9732
rasdani/github-patches
git_diff
dbt-labs__dbt-core-9452
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [CT-3190] Pinning detective work ### Housekeeping - [X] I am a maintainer of dbt-core ### Short description We recently pinned `types-requests<2.31.0` because it had a dependency conflict with `urllib3` which we have pinned to `~=1.0` because of another conflict with `requests` requiring `openssl`. This ticket is to look into if those pins are still required and clean them up if not. ### Acceptance criteria We have confirmed that the pins are - required to continue to work _or_ - not required and we have re-pinned appropriately ### Impact to Other Teams adapters - based on the notes it seems like `urllib3` is pinned for the snowflake adapter as well so we will want to ensure changing the dependencies does not adversely affect them ### Will backports be required? no ### Context _No response_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `core/setup.py` Content: ``` 1 #!/usr/bin/env python 2 import os 3 import sys 4 5 if sys.version_info < (3, 8): 6 print("Error: dbt does not support this version of Python.") 7 print("Please upgrade to Python 3.8 or higher.") 8 sys.exit(1) 9 10 11 from setuptools import setup 12 13 try: 14 from setuptools import find_namespace_packages 15 except ImportError: 16 # the user has a downlevel version of setuptools. 17 print("Error: dbt requires setuptools v40.1.0 or higher.") 18 print('Please upgrade setuptools with "pip install --upgrade setuptools" ' "and try again") 19 sys.exit(1) 20 21 22 this_directory = os.path.abspath(os.path.dirname(__file__)) 23 with open(os.path.join(this_directory, "README.md")) as f: 24 long_description = f.read() 25 26 27 package_name = "dbt-core" 28 package_version = "1.8.0a1" 29 description = """With dbt, data analysts and engineers can build analytics \ 30 the way engineers build applications.""" 31 32 33 setup( 34 name=package_name, 35 version=package_version, 36 description=description, 37 long_description=long_description, 38 long_description_content_type="text/markdown", 39 author="dbt Labs", 40 author_email="[email protected]", 41 url="https://github.com/dbt-labs/dbt-core", 42 packages=find_namespace_packages(include=["dbt", "dbt.*"]), 43 include_package_data=True, 44 test_suite="test", 45 entry_points={ 46 "console_scripts": ["dbt = dbt.cli.main:cli"], 47 }, 48 install_requires=[ 49 # ---- 50 # dbt-core uses these packages deeply, throughout the codebase, and there have been breaking changes in past patch releases (even though these are major-version-one). 51 # Pin to the patch or minor version, and bump in each new minor version of dbt-core. 52 "agate~=1.7.0", 53 "Jinja2~=3.1.2", 54 "mashumaro[msgpack]~=3.9", 55 # ---- 56 # Legacy: This package has not been updated since 2019, and it is unused in dbt's logging system (since v1.0) 57 # The dependency here will be removed along with the removal of 'legacy logging', in a future release of dbt-core 58 "logbook>=1.5,<1.6", 59 # ---- 60 # dbt-core uses these packages in standard ways. Pin to the major version, and check compatibility 61 # with major versions in each new minor version of dbt-core. 62 "click>=8.0.2,<9", 63 "networkx>=2.3,<4", 64 # ---- 65 # These packages are major-version-0. Keep upper bounds on upcoming minor versions (which could have breaking changes) 66 # and check compatibility / bump in each new minor version of dbt-core. 
67 "pathspec>=0.9,<0.12", 68 "sqlparse>=0.2.3,<0.5", 69 # ---- 70 # These are major-version-0 packages also maintained by dbt-labs. Accept patches. 71 "dbt-extractor~=0.5.0", 72 "minimal-snowplow-tracker~=0.0.2", 73 "dbt-semantic-interfaces~=0.5.0a2", 74 "dbt-common~=0.1.0", 75 "dbt-adapters~=0.1.0a2", 76 # ---- 77 # Expect compatibility with all new versions of these packages, so lower bounds only. 78 "packaging>20.9", 79 "protobuf>=4.0.0", 80 "pytz>=2015.7", 81 "pyyaml>=6.0", 82 "daff>=1.3.46", 83 "typing-extensions>=4.4", 84 # ---- 85 ], 86 zip_safe=False, 87 classifiers=[ 88 "Development Status :: 5 - Production/Stable", 89 "License :: OSI Approved :: Apache Software License", 90 "Operating System :: Microsoft :: Windows", 91 "Operating System :: MacOS :: MacOS X", 92 "Operating System :: POSIX :: Linux", 93 "Programming Language :: Python :: 3.8", 94 "Programming Language :: Python :: 3.9", 95 "Programming Language :: Python :: 3.10", 96 "Programming Language :: Python :: 3.11", 97 ], 98 python_requires=">=3.8", 99 ) 100 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/core/setup.py b/core/setup.py --- a/core/setup.py +++ b/core/setup.py @@ -61,6 +61,7 @@ # with major versions in each new minor version of dbt-core. "click>=8.0.2,<9", "networkx>=2.3,<4", + "requests<3.0.0", # should match dbt-common # ---- # These packages are major-version-0. Keep upper bounds on upcoming minor versions (which could have breaking changes) # and check compatibility / bump in each new minor version of dbt-core.
{"golden_diff": "diff --git a/core/setup.py b/core/setup.py\n--- a/core/setup.py\n+++ b/core/setup.py\n@@ -61,6 +61,7 @@\n # with major versions in each new minor version of dbt-core.\n \"click>=8.0.2,<9\",\n \"networkx>=2.3,<4\",\n+ \"requests<3.0.0\", # should match dbt-common\n # ----\n # These packages are major-version-0. Keep upper bounds on upcoming minor versions (which could have breaking changes)\n # and check compatibility / bump in each new minor version of dbt-core.\n", "issue": "[CT-3190] Pinning detective work\n### Housekeeping\n\n- [X] I am a maintainer of dbt-core\n\n### Short description\n\nWe recently pinned `types-requests<2.31.0` because it had a dependency conflict with `urllib3` which we have pinned to `~=1.0` because of another conflict with `requests` requiring `openssl`.\r\n\r\nThis ticket is to look into if those pins are still required and clean them up if not. \n\n### Acceptance criteria\n\nWe have confirmed that the pins are\r\n- required to continue to work\r\n_or_\r\n- not required and we have re-pinned appropriately\n\n### Impact to Other Teams\n\nadapters - based on the notes it seems like `urllib3` is pinned for the snowflake adapter as well so we will want to ensure changing the dependencies does not adversely affect them\n\n### Will backports be required?\n\nno\n\n### Context\n\n_No response_\n", "before_files": [{"content": "#!/usr/bin/env python\nimport os\nimport sys\n\nif sys.version_info < (3, 8):\n print(\"Error: dbt does not support this version of Python.\")\n print(\"Please upgrade to Python 3.8 or higher.\")\n sys.exit(1)\n\n\nfrom setuptools import setup\n\ntry:\n from setuptools import find_namespace_packages\nexcept ImportError:\n # the user has a downlevel version of setuptools.\n print(\"Error: dbt requires setuptools v40.1.0 or higher.\")\n print('Please upgrade setuptools with \"pip install --upgrade setuptools\" ' \"and try again\")\n sys.exit(1)\n\n\nthis_directory = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(this_directory, \"README.md\")) as f:\n long_description = f.read()\n\n\npackage_name = \"dbt-core\"\npackage_version = \"1.8.0a1\"\ndescription = \"\"\"With dbt, data analysts and engineers can build analytics \\\nthe way engineers build applications.\"\"\"\n\n\nsetup(\n name=package_name,\n version=package_version,\n description=description,\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"dbt Labs\",\n author_email=\"[email protected]\",\n url=\"https://github.com/dbt-labs/dbt-core\",\n packages=find_namespace_packages(include=[\"dbt\", \"dbt.*\"]),\n include_package_data=True,\n test_suite=\"test\",\n entry_points={\n \"console_scripts\": [\"dbt = dbt.cli.main:cli\"],\n },\n install_requires=[\n # ----\n # dbt-core uses these packages deeply, throughout the codebase, and there have been breaking changes in past patch releases (even though these are major-version-one).\n # Pin to the patch or minor version, and bump in each new minor version of dbt-core.\n \"agate~=1.7.0\",\n \"Jinja2~=3.1.2\",\n \"mashumaro[msgpack]~=3.9\",\n # ----\n # Legacy: This package has not been updated since 2019, and it is unused in dbt's logging system (since v1.0)\n # The dependency here will be removed along with the removal of 'legacy logging', in a future release of dbt-core\n \"logbook>=1.5,<1.6\",\n # ----\n # dbt-core uses these packages in standard ways. 
Pin to the major version, and check compatibility\n # with major versions in each new minor version of dbt-core.\n \"click>=8.0.2,<9\",\n \"networkx>=2.3,<4\",\n # ----\n # These packages are major-version-0. Keep upper bounds on upcoming minor versions (which could have breaking changes)\n # and check compatibility / bump in each new minor version of dbt-core.\n \"pathspec>=0.9,<0.12\",\n \"sqlparse>=0.2.3,<0.5\",\n # ----\n # These are major-version-0 packages also maintained by dbt-labs. Accept patches.\n \"dbt-extractor~=0.5.0\",\n \"minimal-snowplow-tracker~=0.0.2\",\n \"dbt-semantic-interfaces~=0.5.0a2\",\n \"dbt-common~=0.1.0\",\n \"dbt-adapters~=0.1.0a2\",\n # ----\n # Expect compatibility with all new versions of these packages, so lower bounds only.\n \"packaging>20.9\",\n \"protobuf>=4.0.0\",\n \"pytz>=2015.7\",\n \"pyyaml>=6.0\",\n \"daff>=1.3.46\",\n \"typing-extensions>=4.4\",\n # ----\n ],\n zip_safe=False,\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n ],\n python_requires=\">=3.8\",\n)\n", "path": "core/setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\nimport os\nimport sys\n\nif sys.version_info < (3, 8):\n print(\"Error: dbt does not support this version of Python.\")\n print(\"Please upgrade to Python 3.8 or higher.\")\n sys.exit(1)\n\n\nfrom setuptools import setup\n\ntry:\n from setuptools import find_namespace_packages\nexcept ImportError:\n # the user has a downlevel version of setuptools.\n print(\"Error: dbt requires setuptools v40.1.0 or higher.\")\n print('Please upgrade setuptools with \"pip install --upgrade setuptools\" ' \"and try again\")\n sys.exit(1)\n\n\nthis_directory = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(this_directory, \"README.md\")) as f:\n long_description = f.read()\n\n\npackage_name = \"dbt-core\"\npackage_version = \"1.8.0a1\"\ndescription = \"\"\"With dbt, data analysts and engineers can build analytics \\\nthe way engineers build applications.\"\"\"\n\n\nsetup(\n name=package_name,\n version=package_version,\n description=description,\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"dbt Labs\",\n author_email=\"[email protected]\",\n url=\"https://github.com/dbt-labs/dbt-core\",\n packages=find_namespace_packages(include=[\"dbt\", \"dbt.*\"]),\n include_package_data=True,\n test_suite=\"test\",\n entry_points={\n \"console_scripts\": [\"dbt = dbt.cli.main:cli\"],\n },\n install_requires=[\n # ----\n # dbt-core uses these packages deeply, throughout the codebase, and there have been breaking changes in past patch releases (even though these are major-version-one).\n # Pin to the patch or minor version, and bump in each new minor version of dbt-core.\n \"agate~=1.7.0\",\n \"Jinja2~=3.1.2\",\n \"mashumaro[msgpack]~=3.9\",\n # ----\n # Legacy: This package has not been updated since 2019, and it is unused in dbt's logging system (since v1.0)\n # The dependency here will be removed along with the removal of 'legacy logging', in a future release of dbt-core\n \"logbook>=1.5,<1.6\",\n # ----\n # dbt-core uses these packages in standard ways. 
Pin to the major version, and check compatibility\n # with major versions in each new minor version of dbt-core.\n \"click>=8.0.2,<9\",\n \"networkx>=2.3,<4\",\n \"requests<3.0.0\", # should match dbt-common\n # ----\n # These packages are major-version-0. Keep upper bounds on upcoming minor versions (which could have breaking changes)\n # and check compatibility / bump in each new minor version of dbt-core.\n \"pathspec>=0.9,<0.12\",\n \"sqlparse>=0.2.3,<0.5\",\n # ----\n # These are major-version-0 packages also maintained by dbt-labs. Accept patches.\n \"dbt-extractor~=0.5.0\",\n \"minimal-snowplow-tracker~=0.0.2\",\n \"dbt-semantic-interfaces~=0.5.0a2\",\n \"dbt-common~=0.1.0\",\n \"dbt-adapters~=0.1.0a2\",\n # ----\n # Expect compatibility with all new versions of these packages, so lower bounds only.\n \"packaging>20.9\",\n \"protobuf>=4.0.0\",\n \"pytz>=2015.7\",\n \"pyyaml>=6.0\",\n \"daff>=1.3.46\",\n \"typing-extensions>=4.4\",\n # ----\n ],\n zip_safe=False,\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3.11\",\n ],\n python_requires=\">=3.8\",\n)\n", "path": "core/setup.py"}]}
1,608
138
gh_patches_debug_40781
rasdani/github-patches
git_diff
bokeh__bokeh-6812
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Inline, Minified Resources do not work in classic notebooks This is due to an interaction with the classic notebooks use of JQuery, when output is published as `text/html`. New notebook code published a div and a script together as `text/html`. Propose to solve by publishing a single script as `application/javascript` (which should work) that creates the necessary div itself --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `bokeh/util/notebook.py` Content: ``` 1 ''' Functions useful for loading Bokeh code and data in Jupyter/Zeppelin notebooks. 2 3 ''' 4 from __future__ import absolute_import 5 6 from IPython.display import publish_display_data 7 8 from ..embed import _wrap_in_script_tag 9 10 LOAD_MIME_TYPE = 'application/vnd.bokehjs_load.v0+json' 11 EXEC_MIME_TYPE = 'application/vnd.bokehjs_exec.v0+json' 12 13 _notebook_loaded = None 14 15 # TODO (bev) notebook_type and zeppelin bits should be removed after external zeppelin hook available 16 def load_notebook(resources=None, verbose=False, hide_banner=False, load_timeout=5000, notebook_type='jupyter'): 17 ''' Prepare the IPython notebook for displaying Bokeh plots. 18 19 Args: 20 resources (Resource, optional) : 21 how and where to load BokehJS from (default: CDN) 22 23 verbose (bool, optional) : 24 whether to report detailed settings (default: False) 25 26 hide_banner (bool, optional): 27 whether to hide the Bokeh banner (default: False) 28 29 load_timeout (int, optional) : 30 Timeout in milliseconds when plots assume load timed out (default: 5000) 31 32 notebook_type (string): 33 notebook_type (default: jupyter) 34 35 .. warning:: 36 Clearing the output cell containing the published BokehJS 37 resources HTML code may cause Bokeh CSS styling to be removed. 38 39 Returns: 40 None 41 42 ''' 43 nb_html, nb_js = _load_notebook_html(resources, verbose, hide_banner, load_timeout) 44 lab_html, lab_js = _load_notebook_html(resources, verbose, hide_banner, load_timeout, register_mimetype=False) 45 if notebook_type=='jupyter': 46 publish_display_data({'text/html': nb_html + _wrap_in_script_tag(nb_js), 47 LOAD_MIME_TYPE: {"script": lab_js, "div": lab_html}}) 48 else: 49 _publish_zeppelin_data(lab_html, lab_js) 50 51 52 FINALIZE_JS = """ 53 document.getElementById("%s").textContent = "BokehJS is loading..."; 54 """ 55 56 # TODO (bev) This will eventually go away 57 def _publish_zeppelin_data(html, js): 58 print('%html ' + html) 59 print('%html ' + '<script type="text/javascript">' + js + "</script>") 60 61 def _load_notebook_html(resources=None, verbose=False, hide_banner=False, 62 load_timeout=5000, register_mimetype=True): 63 global _notebook_loaded 64 65 from .. 
import __version__ 66 from ..core.templates import AUTOLOAD_NB_JS, NOTEBOOK_LOAD 67 from ..util.serialization import make_id 68 from ..util.compiler import bundle_all_models 69 from ..resources import CDN 70 71 if resources is None: 72 resources = CDN 73 74 if resources.mode == 'inline': 75 js_info = 'inline' 76 css_info = 'inline' 77 else: 78 js_info = resources.js_files[0] if len(resources.js_files) == 1 else resources.js_files 79 css_info = resources.css_files[0] if len(resources.css_files) == 1 else resources.css_files 80 81 warnings = ["Warning: " + msg['text'] for msg in resources.messages if msg['type'] == 'warn'] 82 83 if _notebook_loaded and verbose: 84 warnings.append('Warning: BokehJS previously loaded') 85 86 _notebook_loaded = resources 87 88 element_id = make_id() 89 90 html = NOTEBOOK_LOAD.render( 91 element_id = element_id, 92 verbose = verbose, 93 js_info = js_info, 94 css_info = css_info, 95 bokeh_version = __version__, 96 warnings = warnings, 97 hide_banner = hide_banner, 98 ) 99 100 custom_models_js = bundle_all_models() 101 102 js = AUTOLOAD_NB_JS.render( 103 elementid = '' if hide_banner else element_id, 104 js_urls = resources.js_files, 105 css_urls = resources.css_files, 106 js_raw = resources.js_raw + [custom_models_js] + ([] if hide_banner else [FINALIZE_JS % element_id]), 107 css_raw = resources.css_raw_str, 108 force = True, 109 timeout = load_timeout, 110 register_mimetype = register_mimetype 111 ) 112 113 return html, js 114 115 def get_comms(target_name): 116 ''' Create a Jupyter comms object for a specific target, that can 117 be used to update Bokeh documents in the Jupyter notebook. 118 119 Args: 120 target_name (str) : the target name the Comms object should connect to 121 122 Returns 123 Jupyter Comms 124 125 ''' 126 from ipykernel.comm import Comm 127 return Comm(target_name=target_name, data={}) 128 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/bokeh/util/notebook.py b/bokeh/util/notebook.py --- a/bokeh/util/notebook.py +++ b/bokeh/util/notebook.py @@ -5,8 +5,7 @@ from IPython.display import publish_display_data -from ..embed import _wrap_in_script_tag - +JS_MIME_TYPE = 'application/javascript' LOAD_MIME_TYPE = 'application/vnd.bokehjs_load.v0+json' EXEC_MIME_TYPE = 'application/vnd.bokehjs_exec.v0+json' @@ -40,33 +39,14 @@ None ''' - nb_html, nb_js = _load_notebook_html(resources, verbose, hide_banner, load_timeout) - lab_html, lab_js = _load_notebook_html(resources, verbose, hide_banner, load_timeout, register_mimetype=False) - if notebook_type=='jupyter': - publish_display_data({'text/html': nb_html + _wrap_in_script_tag(nb_js), - LOAD_MIME_TYPE: {"script": lab_js, "div": lab_html}}) - else: - _publish_zeppelin_data(lab_html, lab_js) - -FINALIZE_JS = """ -document.getElementById("%s").textContent = "BokehJS is loading..."; -""" - -# TODO (bev) This will eventually go away -def _publish_zeppelin_data(html, js): - print('%html ' + html) - print('%html ' + '<script type="text/javascript">' + js + "</script>") - -def _load_notebook_html(resources=None, verbose=False, hide_banner=False, - load_timeout=5000, register_mimetype=True): global _notebook_loaded from .. import __version__ - from ..core.templates import AUTOLOAD_NB_JS, NOTEBOOK_LOAD + from ..core.templates import NOTEBOOK_LOAD from ..util.serialization import make_id - from ..util.compiler import bundle_all_models from ..resources import CDN + from ..util.compiler import bundle_all_models if resources is None: resources = CDN @@ -99,18 +79,48 @@ custom_models_js = bundle_all_models() + nb_js = _loading_js(resources, element_id, custom_models_js, load_timeout, register_mime=True) + jl_js = _loading_js(resources, element_id, custom_models_js, load_timeout, register_mime=False) + + if notebook_type=='jupyter': + + if not hide_banner: + publish_display_data({'text/html': html}) + + publish_display_data({ + JS_MIME_TYPE : nb_js, + LOAD_MIME_TYPE : {"script": jl_js} + }) + + else: + _publish_zeppelin_data(html, jl_js) + + +FINALIZE_JS = """ +document.getElementById("%s").textContent = "BokehJS is loading..."; +""" + +# TODO (bev) This will eventually go away +def _publish_zeppelin_data(html, js): + print('%html ' + html) + print('%html ' + '<script type="text/javascript">' + js + "</script>") + +def _loading_js(resources, element_id, custom_models_js, load_timeout=5000, register_mime=True): + + from ..core.templates import AUTOLOAD_NB_JS + js = AUTOLOAD_NB_JS.render( - elementid = '' if hide_banner else element_id, - js_urls = resources.js_files, - css_urls = resources.css_files, - js_raw = resources.js_raw + [custom_models_js] + ([] if hide_banner else [FINALIZE_JS % element_id]), - css_raw = resources.css_raw_str, - force = True, - timeout = load_timeout, - register_mimetype = register_mimetype + elementid = element_id, + js_urls = resources.js_files, + css_urls = resources.css_files, + js_raw = resources.js_raw + [custom_models_js] + [FINALIZE_JS % element_id], + css_raw = resources.css_raw_str, + force = True, + timeout = load_timeout, + register_mime = register_mime ) - return html, js + return js def get_comms(target_name): ''' Create a Jupyter comms object for a specific target, that can
{"golden_diff": "diff --git a/bokeh/util/notebook.py b/bokeh/util/notebook.py\n--- a/bokeh/util/notebook.py\n+++ b/bokeh/util/notebook.py\n@@ -5,8 +5,7 @@\n \n from IPython.display import publish_display_data\n \n-from ..embed import _wrap_in_script_tag\n-\n+JS_MIME_TYPE = 'application/javascript'\n LOAD_MIME_TYPE = 'application/vnd.bokehjs_load.v0+json'\n EXEC_MIME_TYPE = 'application/vnd.bokehjs_exec.v0+json'\n \n@@ -40,33 +39,14 @@\n None\n \n '''\n- nb_html, nb_js = _load_notebook_html(resources, verbose, hide_banner, load_timeout)\n- lab_html, lab_js = _load_notebook_html(resources, verbose, hide_banner, load_timeout, register_mimetype=False)\n- if notebook_type=='jupyter':\n- publish_display_data({'text/html': nb_html + _wrap_in_script_tag(nb_js),\n- LOAD_MIME_TYPE: {\"script\": lab_js, \"div\": lab_html}})\n- else:\n- _publish_zeppelin_data(lab_html, lab_js)\n \n-\n-FINALIZE_JS = \"\"\"\n-document.getElementById(\"%s\").textContent = \"BokehJS is loading...\";\n-\"\"\"\n-\n-# TODO (bev) This will eventually go away\n-def _publish_zeppelin_data(html, js):\n- print('%html ' + html)\n- print('%html ' + '<script type=\"text/javascript\">' + js + \"</script>\")\n-\n-def _load_notebook_html(resources=None, verbose=False, hide_banner=False,\n- load_timeout=5000, register_mimetype=True):\n global _notebook_loaded\n \n from .. import __version__\n- from ..core.templates import AUTOLOAD_NB_JS, NOTEBOOK_LOAD\n+ from ..core.templates import NOTEBOOK_LOAD\n from ..util.serialization import make_id\n- from ..util.compiler import bundle_all_models\n from ..resources import CDN\n+ from ..util.compiler import bundle_all_models\n \n if resources is None:\n resources = CDN\n@@ -99,18 +79,48 @@\n \n custom_models_js = bundle_all_models()\n \n+ nb_js = _loading_js(resources, element_id, custom_models_js, load_timeout, register_mime=True)\n+ jl_js = _loading_js(resources, element_id, custom_models_js, load_timeout, register_mime=False)\n+\n+ if notebook_type=='jupyter':\n+\n+ if not hide_banner:\n+ publish_display_data({'text/html': html})\n+\n+ publish_display_data({\n+ JS_MIME_TYPE : nb_js,\n+ LOAD_MIME_TYPE : {\"script\": jl_js}\n+ })\n+\n+ else:\n+ _publish_zeppelin_data(html, jl_js)\n+\n+\n+FINALIZE_JS = \"\"\"\n+document.getElementById(\"%s\").textContent = \"BokehJS is loading...\";\n+\"\"\"\n+\n+# TODO (bev) This will eventually go away\n+def _publish_zeppelin_data(html, js):\n+ print('%html ' + html)\n+ print('%html ' + '<script type=\"text/javascript\">' + js + \"</script>\")\n+\n+def _loading_js(resources, element_id, custom_models_js, load_timeout=5000, register_mime=True):\n+\n+ from ..core.templates import AUTOLOAD_NB_JS\n+\n js = AUTOLOAD_NB_JS.render(\n- elementid = '' if hide_banner else element_id,\n- js_urls = resources.js_files,\n- css_urls = resources.css_files,\n- js_raw = resources.js_raw + [custom_models_js] + ([] if hide_banner else [FINALIZE_JS % element_id]),\n- css_raw = resources.css_raw_str,\n- force = True,\n- timeout = load_timeout,\n- register_mimetype = register_mimetype\n+ elementid = element_id,\n+ js_urls = resources.js_files,\n+ css_urls = resources.css_files,\n+ js_raw = resources.js_raw + [custom_models_js] + [FINALIZE_JS % element_id],\n+ css_raw = resources.css_raw_str,\n+ force = True,\n+ timeout = load_timeout,\n+ register_mime = register_mime\n )\n \n- return html, js\n+ return js\n \n def get_comms(target_name):\n ''' Create a Jupyter comms object for a specific target, that can\n", "issue": "Inline, Minified Resources do not work in classic notebooks\nThis 
is due to an interaction with the classic notebooks use of JQuery, when output is published as `text/html`. New notebook code published a div and a script together as `text/html`. Propose to solve by publishing a single script as `application/javascript` (which should work) that creates the necessary div itself \n", "before_files": [{"content": "''' Functions useful for loading Bokeh code and data in Jupyter/Zeppelin notebooks.\n\n'''\nfrom __future__ import absolute_import\n\nfrom IPython.display import publish_display_data\n\nfrom ..embed import _wrap_in_script_tag\n\nLOAD_MIME_TYPE = 'application/vnd.bokehjs_load.v0+json'\nEXEC_MIME_TYPE = 'application/vnd.bokehjs_exec.v0+json'\n\n_notebook_loaded = None\n\n# TODO (bev) notebook_type and zeppelin bits should be removed after external zeppelin hook available\ndef load_notebook(resources=None, verbose=False, hide_banner=False, load_timeout=5000, notebook_type='jupyter'):\n ''' Prepare the IPython notebook for displaying Bokeh plots.\n\n Args:\n resources (Resource, optional) :\n how and where to load BokehJS from (default: CDN)\n\n verbose (bool, optional) :\n whether to report detailed settings (default: False)\n\n hide_banner (bool, optional):\n whether to hide the Bokeh banner (default: False)\n\n load_timeout (int, optional) :\n Timeout in milliseconds when plots assume load timed out (default: 5000)\n\n notebook_type (string):\n notebook_type (default: jupyter)\n\n .. warning::\n Clearing the output cell containing the published BokehJS\n resources HTML code may cause Bokeh CSS styling to be removed.\n\n Returns:\n None\n\n '''\n nb_html, nb_js = _load_notebook_html(resources, verbose, hide_banner, load_timeout)\n lab_html, lab_js = _load_notebook_html(resources, verbose, hide_banner, load_timeout, register_mimetype=False)\n if notebook_type=='jupyter':\n publish_display_data({'text/html': nb_html + _wrap_in_script_tag(nb_js),\n LOAD_MIME_TYPE: {\"script\": lab_js, \"div\": lab_html}})\n else:\n _publish_zeppelin_data(lab_html, lab_js)\n\n\nFINALIZE_JS = \"\"\"\ndocument.getElementById(\"%s\").textContent = \"BokehJS is loading...\";\n\"\"\"\n\n# TODO (bev) This will eventually go away\ndef _publish_zeppelin_data(html, js):\n print('%html ' + html)\n print('%html ' + '<script type=\"text/javascript\">' + js + \"</script>\")\n\ndef _load_notebook_html(resources=None, verbose=False, hide_banner=False,\n load_timeout=5000, register_mimetype=True):\n global _notebook_loaded\n\n from .. 
import __version__\n from ..core.templates import AUTOLOAD_NB_JS, NOTEBOOK_LOAD\n from ..util.serialization import make_id\n from ..util.compiler import bundle_all_models\n from ..resources import CDN\n\n if resources is None:\n resources = CDN\n\n if resources.mode == 'inline':\n js_info = 'inline'\n css_info = 'inline'\n else:\n js_info = resources.js_files[0] if len(resources.js_files) == 1 else resources.js_files\n css_info = resources.css_files[0] if len(resources.css_files) == 1 else resources.css_files\n\n warnings = [\"Warning: \" + msg['text'] for msg in resources.messages if msg['type'] == 'warn']\n\n if _notebook_loaded and verbose:\n warnings.append('Warning: BokehJS previously loaded')\n\n _notebook_loaded = resources\n\n element_id = make_id()\n\n html = NOTEBOOK_LOAD.render(\n element_id = element_id,\n verbose = verbose,\n js_info = js_info,\n css_info = css_info,\n bokeh_version = __version__,\n warnings = warnings,\n hide_banner = hide_banner,\n )\n\n custom_models_js = bundle_all_models()\n\n js = AUTOLOAD_NB_JS.render(\n elementid = '' if hide_banner else element_id,\n js_urls = resources.js_files,\n css_urls = resources.css_files,\n js_raw = resources.js_raw + [custom_models_js] + ([] if hide_banner else [FINALIZE_JS % element_id]),\n css_raw = resources.css_raw_str,\n force = True,\n timeout = load_timeout,\n register_mimetype = register_mimetype\n )\n\n return html, js\n\ndef get_comms(target_name):\n ''' Create a Jupyter comms object for a specific target, that can\n be used to update Bokeh documents in the Jupyter notebook.\n\n Args:\n target_name (str) : the target name the Comms object should connect to\n\n Returns\n Jupyter Comms\n\n '''\n from ipykernel.comm import Comm\n return Comm(target_name=target_name, data={})\n", "path": "bokeh/util/notebook.py"}], "after_files": [{"content": "''' Functions useful for loading Bokeh code and data in Jupyter/Zeppelin notebooks.\n\n'''\nfrom __future__ import absolute_import\n\nfrom IPython.display import publish_display_data\n\nJS_MIME_TYPE = 'application/javascript'\nLOAD_MIME_TYPE = 'application/vnd.bokehjs_load.v0+json'\nEXEC_MIME_TYPE = 'application/vnd.bokehjs_exec.v0+json'\n\n_notebook_loaded = None\n\n# TODO (bev) notebook_type and zeppelin bits should be removed after external zeppelin hook available\ndef load_notebook(resources=None, verbose=False, hide_banner=False, load_timeout=5000, notebook_type='jupyter'):\n ''' Prepare the IPython notebook for displaying Bokeh plots.\n\n Args:\n resources (Resource, optional) :\n how and where to load BokehJS from (default: CDN)\n\n verbose (bool, optional) :\n whether to report detailed settings (default: False)\n\n hide_banner (bool, optional):\n whether to hide the Bokeh banner (default: False)\n\n load_timeout (int, optional) :\n Timeout in milliseconds when plots assume load timed out (default: 5000)\n\n notebook_type (string):\n notebook_type (default: jupyter)\n\n .. warning::\n Clearing the output cell containing the published BokehJS\n resources HTML code may cause Bokeh CSS styling to be removed.\n\n Returns:\n None\n\n '''\n\n global _notebook_loaded\n\n from .. 
import __version__\n from ..core.templates import NOTEBOOK_LOAD\n from ..util.serialization import make_id\n from ..resources import CDN\n from ..util.compiler import bundle_all_models\n\n if resources is None:\n resources = CDN\n\n if resources.mode == 'inline':\n js_info = 'inline'\n css_info = 'inline'\n else:\n js_info = resources.js_files[0] if len(resources.js_files) == 1 else resources.js_files\n css_info = resources.css_files[0] if len(resources.css_files) == 1 else resources.css_files\n\n warnings = [\"Warning: \" + msg['text'] for msg in resources.messages if msg['type'] == 'warn']\n\n if _notebook_loaded and verbose:\n warnings.append('Warning: BokehJS previously loaded')\n\n _notebook_loaded = resources\n\n element_id = make_id()\n\n html = NOTEBOOK_LOAD.render(\n element_id = element_id,\n verbose = verbose,\n js_info = js_info,\n css_info = css_info,\n bokeh_version = __version__,\n warnings = warnings,\n hide_banner = hide_banner,\n )\n\n custom_models_js = bundle_all_models()\n\n nb_js = _loading_js(resources, element_id, custom_models_js, load_timeout, register_mime=True)\n jl_js = _loading_js(resources, element_id, custom_models_js, load_timeout, register_mime=False)\n\n if notebook_type=='jupyter':\n\n if not hide_banner:\n publish_display_data({'text/html': html})\n\n publish_display_data({\n JS_MIME_TYPE : nb_js,\n LOAD_MIME_TYPE : {\"script\": jl_js}\n })\n\n else:\n _publish_zeppelin_data(html, jl_js)\n\n\nFINALIZE_JS = \"\"\"\ndocument.getElementById(\"%s\").textContent = \"BokehJS is loading...\";\n\"\"\"\n\n# TODO (bev) This will eventually go away\ndef _publish_zeppelin_data(html, js):\n print('%html ' + html)\n print('%html ' + '<script type=\"text/javascript\">' + js + \"</script>\")\n\ndef _loading_js(resources, element_id, custom_models_js, load_timeout=5000, register_mime=True):\n\n from ..core.templates import AUTOLOAD_NB_JS\n\n js = AUTOLOAD_NB_JS.render(\n elementid = element_id,\n js_urls = resources.js_files,\n css_urls = resources.css_files,\n js_raw = resources.js_raw + [custom_models_js] + [FINALIZE_JS % element_id],\n css_raw = resources.css_raw_str,\n force = True,\n timeout = load_timeout,\n register_mime = register_mime\n )\n\n return js\n\ndef get_comms(target_name):\n ''' Create a Jupyter comms object for a specific target, that can\n be used to update Bokeh documents in the Jupyter notebook.\n\n Args:\n target_name (str) : the target name the Comms object should connect to\n\n Returns\n Jupyter Comms\n\n '''\n from ipykernel.comm import Comm\n return Comm(target_name=target_name, data={})\n", "path": "bokeh/util/notebook.py"}]}
1,623
963
gh_patches_debug_59717
rasdani/github-patches
git_diff
pytorch__audio-1339
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Making `AudioMetaData` print friendly `AudioMetaData` class reports meta-data of audio source. It is however not print friendly. ```python print(torchaudio.info(src)) >>> <torchaudio.backend.common.AudioMetaData object at 0x7f1bc5cd2890> ``` It is nice if we can simply print the attributes like `dataclass` objects do. ```python print(torchaudio.info(src)) >>> AudioMetaData(sample_rate=900, encoding="PCM", ...) ``` ## Steps There are two approaches I can think of 1. Add `__str__` method. 2. Use `dataclasses.dataclass` For 2, the `info` function has to be TorchScript-compatible. This means that its return type `AudioMetaData` has to be TorchScript-able. For this reason, `dataclass` might not be applicable. This can be checked with the following test; ```bash (cd test && pytest torchaudio_unittest/backend/sox_io/torchscript_test.py) ``` ## Build and test Please refer to the [contribution guide](https://github.com/pytorch/audio/blob/master/CONTRIBUTING.md) for how to setup development environment. To test, ```bash (cd test && pytest torchaudio_unittest/backend/sox_io/torchscript_test.py torchaudio_unittest/backend/sox_io/info_test.py torchaudio_unittest/backend/soundfile_io/info_test.py) ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `torchaudio/backend/common.py` Content: ``` 1 class AudioMetaData: 2 """Return type of ``torchaudio.info`` function. 3 4 This class is used by :ref:`"sox_io" backend<sox_io_backend>` and 5 :ref:`"soundfile" backend with the new interface<soundfile_backend>`. 6 7 :ivar int sample_rate: Sample rate 8 :ivar int num_frames: The number of frames 9 :ivar int num_channels: The number of channels 10 :ivar int bits_per_sample: The number of bits per sample. This is 0 for lossy formats, 11 or when it cannot be accurately inferred. 12 :ivar str encoding: Audio encoding 13 The values encoding can take are one of the following: 14 15 * ``PCM_S``: Signed integer linear PCM 16 * ``PCM_U``: Unsigned integer linear PCM 17 * ``PCM_F``: Floating point linear PCM 18 * ``FLAC``: Flac, Free Lossless Audio Codec 19 * ``ULAW``: Mu-law 20 * ``ALAW``: A-law 21 * ``MP3`` : MP3, MPEG-1 Audio Layer III 22 * ``VORBIS``: OGG Vorbis 23 * ``AMR_WB``: Adaptive Multi-Rate 24 * ``AMR_NB``: Adaptive Multi-Rate Wideband 25 * ``OPUS``: Opus 26 * ``UNKNOWN`` : None of above 27 """ 28 def __init__( 29 self, 30 sample_rate: int, 31 num_frames: int, 32 num_channels: int, 33 bits_per_sample: int, 34 encoding: str, 35 ): 36 self.sample_rate = sample_rate 37 self.num_frames = num_frames 38 self.num_channels = num_channels 39 self.bits_per_sample = bits_per_sample 40 self.encoding = encoding 41 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/torchaudio/backend/common.py b/torchaudio/backend/common.py --- a/torchaudio/backend/common.py +++ b/torchaudio/backend/common.py @@ -38,3 +38,14 @@ self.num_channels = num_channels self.bits_per_sample = bits_per_sample self.encoding = encoding + + def __str__(self): + return ( + f"AudioMetaData(" + f"sample_rate={self.sample_rate}, " + f"num_frames={self.num_frames}, " + f"num_channels={self.num_channels}, " + f"bits_per_sample={self.bits_per_sample}, " + f"encoding={self.encoding}" + f")" + )
{"golden_diff": "diff --git a/torchaudio/backend/common.py b/torchaudio/backend/common.py\n--- a/torchaudio/backend/common.py\n+++ b/torchaudio/backend/common.py\n@@ -38,3 +38,14 @@\n self.num_channels = num_channels\n self.bits_per_sample = bits_per_sample\n self.encoding = encoding\n+\n+ def __str__(self):\n+ return (\n+ f\"AudioMetaData(\"\n+ f\"sample_rate={self.sample_rate}, \"\n+ f\"num_frames={self.num_frames}, \"\n+ f\"num_channels={self.num_channels}, \"\n+ f\"bits_per_sample={self.bits_per_sample}, \"\n+ f\"encoding={self.encoding}\"\n+ f\")\"\n+ )\n", "issue": "Making `AudioMetaData` print friendly\n`AudioMetaData` class reports meta-data of audio source. It is however not print friendly.\r\n\r\n```python\r\nprint(torchaudio.info(src))\r\n>>> <torchaudio.backend.common.AudioMetaData object at 0x7f1bc5cd2890>\r\n```\r\n\r\nIt is nice if we can simply print the attributes like `dataclass` objects do.\r\n\r\n```python\r\nprint(torchaudio.info(src))\r\n>>> AudioMetaData(sample_rate=900, encoding=\"PCM\", ...)\r\n```\r\n\r\n## Steps\r\n\r\nThere are two approaches I can think of\r\n1. Add `__str__` method.\r\n2. Use `dataclasses.dataclass`\r\n\r\nFor 2, the `info` function has to be TorchScript-compatible. This means that its return type `AudioMetaData` has to be TorchScript-able. For this reason, `dataclass` might not be applicable. This can be checked with the following test;\r\n\r\n```bash\r\n(cd test && pytest torchaudio_unittest/backend/sox_io/torchscript_test.py)\r\n```\r\n\r\n## Build and test\r\n\r\nPlease refer to the [contribution guide](https://github.com/pytorch/audio/blob/master/CONTRIBUTING.md) for how to setup development environment.\r\n\r\nTo test, \r\n\r\n```bash\r\n(cd test && pytest torchaudio_unittest/backend/sox_io/torchscript_test.py torchaudio_unittest/backend/sox_io/info_test.py torchaudio_unittest/backend/soundfile_io/info_test.py)\r\n```\n", "before_files": [{"content": "class AudioMetaData:\n \"\"\"Return type of ``torchaudio.info`` function.\n\n This class is used by :ref:`\"sox_io\" backend<sox_io_backend>` and\n :ref:`\"soundfile\" backend with the new interface<soundfile_backend>`.\n\n :ivar int sample_rate: Sample rate\n :ivar int num_frames: The number of frames\n :ivar int num_channels: The number of channels\n :ivar int bits_per_sample: The number of bits per sample. 
This is 0 for lossy formats,\n or when it cannot be accurately inferred.\n :ivar str encoding: Audio encoding\n The values encoding can take are one of the following:\n\n * ``PCM_S``: Signed integer linear PCM\n * ``PCM_U``: Unsigned integer linear PCM\n * ``PCM_F``: Floating point linear PCM\n * ``FLAC``: Flac, Free Lossless Audio Codec\n * ``ULAW``: Mu-law\n * ``ALAW``: A-law\n * ``MP3`` : MP3, MPEG-1 Audio Layer III\n * ``VORBIS``: OGG Vorbis\n * ``AMR_WB``: Adaptive Multi-Rate\n * ``AMR_NB``: Adaptive Multi-Rate Wideband\n * ``OPUS``: Opus\n * ``UNKNOWN`` : None of above\n \"\"\"\n def __init__(\n self,\n sample_rate: int,\n num_frames: int,\n num_channels: int,\n bits_per_sample: int,\n encoding: str,\n ):\n self.sample_rate = sample_rate\n self.num_frames = num_frames\n self.num_channels = num_channels\n self.bits_per_sample = bits_per_sample\n self.encoding = encoding\n", "path": "torchaudio/backend/common.py"}], "after_files": [{"content": "class AudioMetaData:\n \"\"\"Return type of ``torchaudio.info`` function.\n\n This class is used by :ref:`\"sox_io\" backend<sox_io_backend>` and\n :ref:`\"soundfile\" backend with the new interface<soundfile_backend>`.\n\n :ivar int sample_rate: Sample rate\n :ivar int num_frames: The number of frames\n :ivar int num_channels: The number of channels\n :ivar int bits_per_sample: The number of bits per sample. This is 0 for lossy formats,\n or when it cannot be accurately inferred.\n :ivar str encoding: Audio encoding\n The values encoding can take are one of the following:\n\n * ``PCM_S``: Signed integer linear PCM\n * ``PCM_U``: Unsigned integer linear PCM\n * ``PCM_F``: Floating point linear PCM\n * ``FLAC``: Flac, Free Lossless Audio Codec\n * ``ULAW``: Mu-law\n * ``ALAW``: A-law\n * ``MP3`` : MP3, MPEG-1 Audio Layer III\n * ``VORBIS``: OGG Vorbis\n * ``AMR_WB``: Adaptive Multi-Rate\n * ``AMR_NB``: Adaptive Multi-Rate Wideband\n * ``OPUS``: Opus\n * ``UNKNOWN`` : None of above\n \"\"\"\n def __init__(\n self,\n sample_rate: int,\n num_frames: int,\n num_channels: int,\n bits_per_sample: int,\n encoding: str,\n ):\n self.sample_rate = sample_rate\n self.num_frames = num_frames\n self.num_channels = num_channels\n self.bits_per_sample = bits_per_sample\n self.encoding = encoding\n\n def __str__(self):\n return (\n f\"AudioMetaData(\"\n f\"sample_rate={self.sample_rate}, \"\n f\"num_frames={self.num_frames}, \"\n f\"num_channels={self.num_channels}, \"\n f\"bits_per_sample={self.bits_per_sample}, \"\n f\"encoding={self.encoding}\"\n f\")\"\n )\n", "path": "torchaudio/backend/common.py"}]}
1,034
163
gh_patches_debug_444
rasdani/github-patches
git_diff
sktime__sktime-170
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Documentation needs to be fixed Single comment line on `sktime.utils.validation.forecasting` needs to be fixed. Check the image below. Instead of `<<<<<<< HEAD:sktime/utils/validation/forecasting.py` we should see `Returns`. ![image](https://user-images.githubusercontent.com/7353520/66719148-2c0d0580-edc2-11e9-9a10-8632c1d896e4.png) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `sktime/utils/validation/forecasting.py` Content: ``` 1 import numpy as np 2 import pandas as pd 3 from sklearn.utils.validation import check_is_fitted 4 5 __author__ = "Markus Löning" 6 __all__ = ["validate_y", "validate_X", "validate_y_X", "validate_fh"] 7 8 9 def validate_y_X(y, X): 10 """Validate input data. 11 12 Parameters 13 ---------- 14 y : pandas Series or numpy ndarray 15 X : pandas DataFrame 16 17 <<<<<<< HEAD:sktime/utils/validation/forecasting.py 18 Returns 19 ------- 20 None 21 22 Raises 23 ------ 24 ValueError 25 If y is an invalid input 26 """ 27 validate_y(y) 28 validate_X(X) 29 30 31 def validate_y(y): 32 """Validate input data. 33 34 Parameters 35 ---------- 36 y : pandas Series or numpy ndarray 37 38 Returns 39 ------- 40 None 41 42 Raises 43 ------ 44 ValueError 45 If y is an invalid input 46 """ 47 # Check if pandas series 48 if not isinstance(y, pd.Series): 49 raise ValueError(f'y must be a pandas Series, but found: {type(y)}') 50 51 # Check if single row 52 if not y.shape[0] == 1: 53 raise ValueError(f'y must consist of a pandas Series with a single row, ' 54 f'but found: {y.shape[0]} rows') 55 56 # Check if contained time series is either pandas series or numpy array 57 s = y.iloc[0] 58 if not isinstance(s, (np.ndarray, pd.Series)): 59 raise ValueError(f'y must contain a pandas Series or numpy array, ' 60 f'but found: {type(s)}.') 61 62 63 def validate_X(X): 64 """Validate input data. 65 66 Parameters 67 ---------- 68 X : pandas DataFrame 69 70 Returns 71 ------- 72 None 73 74 Raises 75 ------ 76 ValueError 77 If y is an invalid input 78 """ 79 if X is not None: 80 if not isinstance(X, pd.DataFrame): 81 raise ValueError(f"`X` must a pandas DataFrame, but found: {type(X)}") 82 if X.shape[0] > 1: 83 raise ValueError(f"`X` must consist of a single row, but found: {X.shape[0]} rows") 84 85 # Check if index is the same for all columns. 86 87 # Get index from first row, can be either pd.Series or np.array. 88 first_index = X.iloc[0, 0].index if hasattr(X.iloc[0, 0], 'index') else pd.RangeIndex(X.iloc[0, 0].shape[0]) 89 90 # Series must contain at least 2 observations, otherwise should be primitive. 91 if len(first_index) < 1: 92 raise ValueError(f'Time series must contain at least 2 observations, but found: ' 93 f'{len(first_index)} observations in column: {X.columns[0]}') 94 95 # Compare with remaining columns 96 for c, col in enumerate(X.columns): 97 index = X.iloc[0, c].index if hasattr(X.iloc[0, c], 'index') else pd.RangeIndex(X.iloc[0, 0].shape[0]) 98 if not np.array_equal(first_index, index): 99 raise ValueError(f'Found time series with unequal index in column {col}. ' 100 f'Input time-series must have the same index.') 101 102 103 def validate_sp(sp): 104 """Validate seasonal periodicity. 
105 106 Parameters 107 ---------- 108 sp : int 109 Seasonal periodicity 110 111 Returns 112 ------- 113 sp : int 114 Validated seasonal periodicity 115 """ 116 117 if sp is None: 118 return sp 119 120 else: 121 if not isinstance(sp, int) and (sp >= 0): 122 raise ValueError(f"Seasonal periodicity (sp) has to be a positive integer, but found: " 123 f"{sp} of type: {type(sp)}") 124 return sp 125 126 127 def validate_fh(fh): 128 """Validate forecasting horizon. 129 130 Parameters 131 ---------- 132 fh : int or list of int 133 Forecasting horizon with steps ahead to predict. 134 135 Returns 136 ------- 137 fh : numpy array of int 138 Sorted and validated forecasting horizon. 139 """ 140 141 # Check single integer 142 if np.issubdtype(type(fh), np.integer): 143 return np.array([fh], dtype=np.int) 144 145 # Check array-like input 146 elif isinstance(fh, list): 147 if len(fh) < 1: 148 raise ValueError(f"`fh` must specify at least one step, but found: " 149 f"{type(fh)} of length {len(fh)}") 150 if not np.all([np.issubdtype(type(h), np.integer) for h in fh]): 151 raise ValueError('If `fh` is passed as a list, ' 152 'it has to be a list of integers') 153 154 elif isinstance(fh, np.ndarray): 155 if fh.ndim > 1: 156 raise ValueError(f"`fh` must be a 1d array, but found: " 157 f"{fh.ndim} dimensions") 158 if len(fh) < 1: 159 raise ValueError(f"`fh` must specify at least one step, but found: " 160 f"{type(fh)} of length {len(fh)}") 161 if not np.issubdtype(fh.dtype, np.integer): 162 raise ValueError( 163 f'If `fh` is passed as an array, it has to be an array of ' 164 f'integers, but found an array of dtype: {fh.dtype}') 165 166 else: 167 raise ValueError(f"`fh` has to be either a list or array of integers, or a single " 168 f"integer, but found: {type(fh)}") 169 170 return np.asarray(np.sort(fh), dtype=np.int) 171 172 173 def check_is_fitted_in_transform(estimator, attributes, msg=None, all_or_any=all): 174 """Checks if the estimator is fitted during transform by verifying the presence of 175 "all_or_any" of the passed attributes and raises a NotFittedError with the 176 given message. 177 178 Parameters 179 ---------- 180 estimator : estimator instance. 181 estimator instance for which the check is performed. 182 attributes : attribute name(s) given as string or a list/tuple of strings 183 Eg.: 184 ``["coef_", "estimator_", ...], "coef_"`` 185 msg : string 186 The default error message is, "This %(name)s instance is not fitted 187 yet. Call 'fit' with appropriate arguments before using this method." 188 For custom messages if "%(name)s" is present in the message string, 189 it is substituted for the estimator name. 190 Eg. : "Estimator, %(name)s, must be fitted before sparsifying". 191 all_or_any : callable, {all, any}, default all 192 Specify whether all or any of the given attributes must exist. 193 Returns 194 ------- 195 None 196 197 Raises 198 ------ 199 NotFittedError 200 If the attributes are not found. 201 """ 202 if msg is None: 203 msg = ("This %(name)s instance has not been fitted yet. 
Call 'transform' with " 204 "appropriate arguments before using this method.") 205 206 check_is_fitted(estimator, attributes=attributes, msg=msg, all_or_any=all_or_any) 207 208 209 def validate_time_index(time_index): 210 """Validate time index 211 212 Parameters 213 ---------- 214 time_index : array-like 215 216 Returns 217 ------- 218 time_index : ndarray 219 """ 220 # period or datetime index are not support yet 221 # TODO add support for period/datetime indexing 222 if isinstance(time_index, (pd.PeriodIndex, pd.DatetimeIndex)): 223 raise NotImplementedError(f"{type(time_index)} is not fully supported yet, " 224 f"use pandas RangeIndex instead") 225 226 return np.asarray(time_index) 227 228 229 def check_consistent_time_indices(x, y): 230 """Check that x and y have consistent indices. 231 232 Parameters 233 ---------- 234 x : pandas Series 235 y : pandas Series 236 237 Raises: 238 ------- 239 ValueError 240 If time indicies are not equal 241 """ 242 243 if not x.index.equals(y.index): 244 raise ValueError(f"Found input variables with inconsistent indices") 245 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sktime/utils/validation/forecasting.py b/sktime/utils/validation/forecasting.py --- a/sktime/utils/validation/forecasting.py +++ b/sktime/utils/validation/forecasting.py @@ -14,7 +14,6 @@ y : pandas Series or numpy ndarray X : pandas DataFrame -<<<<<<< HEAD:sktime/utils/validation/forecasting.py Returns ------- None
{"golden_diff": "diff --git a/sktime/utils/validation/forecasting.py b/sktime/utils/validation/forecasting.py\n--- a/sktime/utils/validation/forecasting.py\n+++ b/sktime/utils/validation/forecasting.py\n@@ -14,7 +14,6 @@\n y : pandas Series or numpy ndarray\n X : pandas DataFrame\n \n-<<<<<<< HEAD:sktime/utils/validation/forecasting.py\n Returns\n -------\n None\n", "issue": "Documentation needs to be fixed \nSingle comment line on `sktime.utils.validation.forecasting` needs to be fixed. Check the image below.\r\n\r\nInstead of `<<<<<<< HEAD:sktime/utils/validation/forecasting.py` we should see `Returns`.\r\n \r\n![image](https://user-images.githubusercontent.com/7353520/66719148-2c0d0580-edc2-11e9-9a10-8632c1d896e4.png)\n", "before_files": [{"content": "import numpy as np\nimport pandas as pd\nfrom sklearn.utils.validation import check_is_fitted\n\n__author__ = \"Markus L\u00f6ning\"\n__all__ = [\"validate_y\", \"validate_X\", \"validate_y_X\", \"validate_fh\"]\n\n\ndef validate_y_X(y, X):\n \"\"\"Validate input data.\n\n Parameters\n ----------\n y : pandas Series or numpy ndarray\n X : pandas DataFrame\n\n<<<<<<< HEAD:sktime/utils/validation/forecasting.py\n Returns\n -------\n None\n\n Raises\n ------\n ValueError\n If y is an invalid input\n \"\"\"\n validate_y(y)\n validate_X(X)\n\n\ndef validate_y(y):\n \"\"\"Validate input data.\n\n Parameters\n ----------\n y : pandas Series or numpy ndarray\n\n Returns\n -------\n None\n\n Raises\n ------\n ValueError\n If y is an invalid input\n \"\"\"\n # Check if pandas series\n if not isinstance(y, pd.Series):\n raise ValueError(f'y must be a pandas Series, but found: {type(y)}')\n\n # Check if single row\n if not y.shape[0] == 1:\n raise ValueError(f'y must consist of a pandas Series with a single row, '\n f'but found: {y.shape[0]} rows')\n\n # Check if contained time series is either pandas series or numpy array\n s = y.iloc[0]\n if not isinstance(s, (np.ndarray, pd.Series)):\n raise ValueError(f'y must contain a pandas Series or numpy array, '\n f'but found: {type(s)}.')\n\n\ndef validate_X(X):\n \"\"\"Validate input data.\n\n Parameters\n ----------\n X : pandas DataFrame\n\n Returns\n -------\n None\n\n Raises\n ------\n ValueError\n If y is an invalid input\n \"\"\"\n if X is not None:\n if not isinstance(X, pd.DataFrame):\n raise ValueError(f\"`X` must a pandas DataFrame, but found: {type(X)}\")\n if X.shape[0] > 1:\n raise ValueError(f\"`X` must consist of a single row, but found: {X.shape[0]} rows\")\n\n # Check if index is the same for all columns.\n\n # Get index from first row, can be either pd.Series or np.array.\n first_index = X.iloc[0, 0].index if hasattr(X.iloc[0, 0], 'index') else pd.RangeIndex(X.iloc[0, 0].shape[0])\n\n # Series must contain at least 2 observations, otherwise should be primitive.\n if len(first_index) < 1:\n raise ValueError(f'Time series must contain at least 2 observations, but found: '\n f'{len(first_index)} observations in column: {X.columns[0]}')\n\n # Compare with remaining columns\n for c, col in enumerate(X.columns):\n index = X.iloc[0, c].index if hasattr(X.iloc[0, c], 'index') else pd.RangeIndex(X.iloc[0, 0].shape[0])\n if not np.array_equal(first_index, index):\n raise ValueError(f'Found time series with unequal index in column {col}. 
'\n f'Input time-series must have the same index.')\n\n\ndef validate_sp(sp):\n \"\"\"Validate seasonal periodicity.\n\n Parameters\n ----------\n sp : int\n Seasonal periodicity\n\n Returns\n -------\n sp : int\n Validated seasonal periodicity\n \"\"\"\n\n if sp is None:\n return sp\n\n else:\n if not isinstance(sp, int) and (sp >= 0):\n raise ValueError(f\"Seasonal periodicity (sp) has to be a positive integer, but found: \"\n f\"{sp} of type: {type(sp)}\")\n return sp\n\n\ndef validate_fh(fh):\n \"\"\"Validate forecasting horizon.\n\n Parameters\n ----------\n fh : int or list of int\n Forecasting horizon with steps ahead to predict.\n\n Returns\n -------\n fh : numpy array of int\n Sorted and validated forecasting horizon.\n \"\"\"\n\n # Check single integer\n if np.issubdtype(type(fh), np.integer):\n return np.array([fh], dtype=np.int)\n\n # Check array-like input\n elif isinstance(fh, list):\n if len(fh) < 1:\n raise ValueError(f\"`fh` must specify at least one step, but found: \"\n f\"{type(fh)} of length {len(fh)}\")\n if not np.all([np.issubdtype(type(h), np.integer) for h in fh]):\n raise ValueError('If `fh` is passed as a list, '\n 'it has to be a list of integers')\n\n elif isinstance(fh, np.ndarray):\n if fh.ndim > 1:\n raise ValueError(f\"`fh` must be a 1d array, but found: \"\n f\"{fh.ndim} dimensions\")\n if len(fh) < 1:\n raise ValueError(f\"`fh` must specify at least one step, but found: \"\n f\"{type(fh)} of length {len(fh)}\")\n if not np.issubdtype(fh.dtype, np.integer):\n raise ValueError(\n f'If `fh` is passed as an array, it has to be an array of '\n f'integers, but found an array of dtype: {fh.dtype}')\n\n else:\n raise ValueError(f\"`fh` has to be either a list or array of integers, or a single \"\n f\"integer, but found: {type(fh)}\")\n\n return np.asarray(np.sort(fh), dtype=np.int)\n\n\ndef check_is_fitted_in_transform(estimator, attributes, msg=None, all_or_any=all):\n \"\"\"Checks if the estimator is fitted during transform by verifying the presence of\n \"all_or_any\" of the passed attributes and raises a NotFittedError with the\n given message.\n \n Parameters\n ----------\n estimator : estimator instance.\n estimator instance for which the check is performed.\n attributes : attribute name(s) given as string or a list/tuple of strings\n Eg.:\n ``[\"coef_\", \"estimator_\", ...], \"coef_\"``\n msg : string\n The default error message is, \"This %(name)s instance is not fitted\n yet. Call 'fit' with appropriate arguments before using this method.\"\n For custom messages if \"%(name)s\" is present in the message string,\n it is substituted for the estimator name.\n Eg. : \"Estimator, %(name)s, must be fitted before sparsifying\".\n all_or_any : callable, {all, any}, default all\n Specify whether all or any of the given attributes must exist.\n Returns\n -------\n None\n \n Raises\n ------\n NotFittedError\n If the attributes are not found. \n \"\"\"\n if msg is None:\n msg = (\"This %(name)s instance has not been fitted yet. 
Call 'transform' with \"\n \"appropriate arguments before using this method.\")\n\n check_is_fitted(estimator, attributes=attributes, msg=msg, all_or_any=all_or_any)\n\n\ndef validate_time_index(time_index):\n \"\"\"Validate time index\n\n Parameters\n ----------\n time_index : array-like\n\n Returns\n -------\n time_index : ndarray\n \"\"\"\n # period or datetime index are not support yet\n # TODO add support for period/datetime indexing\n if isinstance(time_index, (pd.PeriodIndex, pd.DatetimeIndex)):\n raise NotImplementedError(f\"{type(time_index)} is not fully supported yet, \"\n f\"use pandas RangeIndex instead\")\n\n return np.asarray(time_index)\n\n\ndef check_consistent_time_indices(x, y):\n \"\"\"Check that x and y have consistent indices.\n\n Parameters\n ----------\n x : pandas Series\n y : pandas Series\n\n Raises:\n -------\n ValueError\n If time indicies are not equal\n \"\"\"\n\n if not x.index.equals(y.index):\n raise ValueError(f\"Found input variables with inconsistent indices\")\n", "path": "sktime/utils/validation/forecasting.py"}], "after_files": [{"content": "import numpy as np\nimport pandas as pd\nfrom sklearn.utils.validation import check_is_fitted\n\n__author__ = \"Markus L\u00f6ning\"\n__all__ = [\"validate_y\", \"validate_X\", \"validate_y_X\", \"validate_fh\"]\n\n\ndef validate_y_X(y, X):\n \"\"\"Validate input data.\n\n Parameters\n ----------\n y : pandas Series or numpy ndarray\n X : pandas DataFrame\n\n Returns\n -------\n None\n\n Raises\n ------\n ValueError\n If y is an invalid input\n \"\"\"\n validate_y(y)\n validate_X(X)\n\n\ndef validate_y(y):\n \"\"\"Validate input data.\n\n Parameters\n ----------\n y : pandas Series or numpy ndarray\n\n Returns\n -------\n None\n\n Raises\n ------\n ValueError\n If y is an invalid input\n \"\"\"\n # Check if pandas series\n if not isinstance(y, pd.Series):\n raise ValueError(f'y must be a pandas Series, but found: {type(y)}')\n\n # Check if single row\n if not y.shape[0] == 1:\n raise ValueError(f'y must consist of a pandas Series with a single row, '\n f'but found: {y.shape[0]} rows')\n\n # Check if contained time series is either pandas series or numpy array\n s = y.iloc[0]\n if not isinstance(s, (np.ndarray, pd.Series)):\n raise ValueError(f'y must contain a pandas Series or numpy array, '\n f'but found: {type(s)}.')\n\n\ndef validate_X(X):\n \"\"\"Validate input data.\n\n Parameters\n ----------\n X : pandas DataFrame\n\n Returns\n -------\n None\n\n Raises\n ------\n ValueError\n If y is an invalid input\n \"\"\"\n if X is not None:\n if not isinstance(X, pd.DataFrame):\n raise ValueError(f\"`X` must a pandas DataFrame, but found: {type(X)}\")\n if X.shape[0] > 1:\n raise ValueError(f\"`X` must consist of a single row, but found: {X.shape[0]} rows\")\n\n # Check if index is the same for all columns.\n\n # Get index from first row, can be either pd.Series or np.array.\n first_index = X.iloc[0, 0].index if hasattr(X.iloc[0, 0], 'index') else pd.RangeIndex(X.iloc[0, 0].shape[0])\n\n # Series must contain at least 2 observations, otherwise should be primitive.\n if len(first_index) < 1:\n raise ValueError(f'Time series must contain at least 2 observations, but found: '\n f'{len(first_index)} observations in column: {X.columns[0]}')\n\n # Compare with remaining columns\n for c, col in enumerate(X.columns):\n index = X.iloc[0, c].index if hasattr(X.iloc[0, c], 'index') else pd.RangeIndex(X.iloc[0, 0].shape[0])\n if not np.array_equal(first_index, index):\n raise ValueError(f'Found time series with unequal index in 
column {col}. '\n f'Input time-series must have the same index.')\n\n\ndef validate_sp(sp):\n \"\"\"Validate seasonal periodicity.\n\n Parameters\n ----------\n sp : int\n Seasonal periodicity\n\n Returns\n -------\n sp : int\n Validated seasonal periodicity\n \"\"\"\n\n if sp is None:\n return sp\n\n else:\n if not isinstance(sp, int) and (sp >= 0):\n raise ValueError(f\"Seasonal periodicity (sp) has to be a positive integer, but found: \"\n f\"{sp} of type: {type(sp)}\")\n return sp\n\n\ndef validate_fh(fh):\n \"\"\"Validate forecasting horizon.\n\n Parameters\n ----------\n fh : int or list of int\n Forecasting horizon with steps ahead to predict.\n\n Returns\n -------\n fh : numpy array of int\n Sorted and validated forecasting horizon.\n \"\"\"\n\n # Check single integer\n if np.issubdtype(type(fh), np.integer):\n return np.array([fh], dtype=np.int)\n\n # Check array-like input\n elif isinstance(fh, list):\n if len(fh) < 1:\n raise ValueError(f\"`fh` must specify at least one step, but found: \"\n f\"{type(fh)} of length {len(fh)}\")\n if not np.all([np.issubdtype(type(h), np.integer) for h in fh]):\n raise ValueError('If `fh` is passed as a list, '\n 'it has to be a list of integers')\n\n elif isinstance(fh, np.ndarray):\n if fh.ndim > 1:\n raise ValueError(f\"`fh` must be a 1d array, but found: \"\n f\"{fh.ndim} dimensions\")\n if len(fh) < 1:\n raise ValueError(f\"`fh` must specify at least one step, but found: \"\n f\"{type(fh)} of length {len(fh)}\")\n if not np.issubdtype(fh.dtype, np.integer):\n raise ValueError(\n f'If `fh` is passed as an array, it has to be an array of '\n f'integers, but found an array of dtype: {fh.dtype}')\n\n else:\n raise ValueError(f\"`fh` has to be either a list or array of integers, or a single \"\n f\"integer, but found: {type(fh)}\")\n\n return np.asarray(np.sort(fh), dtype=np.int)\n\n\ndef check_is_fitted_in_transform(estimator, attributes, msg=None, all_or_any=all):\n \"\"\"Checks if the estimator is fitted during transform by verifying the presence of\n \"all_or_any\" of the passed attributes and raises a NotFittedError with the\n given message.\n \n Parameters\n ----------\n estimator : estimator instance.\n estimator instance for which the check is performed.\n attributes : attribute name(s) given as string or a list/tuple of strings\n Eg.:\n ``[\"coef_\", \"estimator_\", ...], \"coef_\"``\n msg : string\n The default error message is, \"This %(name)s instance is not fitted\n yet. Call 'fit' with appropriate arguments before using this method.\"\n For custom messages if \"%(name)s\" is present in the message string,\n it is substituted for the estimator name.\n Eg. : \"Estimator, %(name)s, must be fitted before sparsifying\".\n all_or_any : callable, {all, any}, default all\n Specify whether all or any of the given attributes must exist.\n Returns\n -------\n None\n \n Raises\n ------\n NotFittedError\n If the attributes are not found. \n \"\"\"\n if msg is None:\n msg = (\"This %(name)s instance has not been fitted yet. 
Call 'transform' with \"\n \"appropriate arguments before using this method.\")\n\n check_is_fitted(estimator, attributes=attributes, msg=msg, all_or_any=all_or_any)\n\n\ndef validate_time_index(time_index):\n \"\"\"Validate time index\n\n Parameters\n ----------\n time_index : array-like\n\n Returns\n -------\n time_index : ndarray\n \"\"\"\n # period or datetime index are not support yet\n # TODO add support for period/datetime indexing\n if isinstance(time_index, (pd.PeriodIndex, pd.DatetimeIndex)):\n raise NotImplementedError(f\"{type(time_index)} is not fully supported yet, \"\n f\"use pandas RangeIndex instead\")\n\n return np.asarray(time_index)\n\n\ndef check_consistent_time_indices(x, y):\n \"\"\"Check that x and y have consistent indices.\n\n Parameters\n ----------\n x : pandas Series\n y : pandas Series\n\n Raises:\n -------\n ValueError\n If time indicies are not equal\n \"\"\"\n\n if not x.index.equals(y.index):\n raise ValueError(f\"Found input variables with inconsistent indices\")\n", "path": "sktime/utils/validation/forecasting.py"}]}
2,737
96
gh_patches_debug_24917
rasdani/github-patches
git_diff
pytorch__examples-699
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- dcgan fails on "fake" dataset I have been using the dcgan example as a stress test for a machine. To save time, I have made use of the `fake` dataset. `python main.py --dataset 'fake'` fails because `dataroot` is a required parameter. However, the `fake` dataset, does not need such information. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `dcgan/main.py` Content: ``` 1 from __future__ import print_function 2 import argparse 3 import os 4 import random 5 import torch 6 import torch.nn as nn 7 import torch.nn.parallel 8 import torch.backends.cudnn as cudnn 9 import torch.optim as optim 10 import torch.utils.data 11 import torchvision.datasets as dset 12 import torchvision.transforms as transforms 13 import torchvision.utils as vutils 14 15 16 parser = argparse.ArgumentParser() 17 parser.add_argument('--dataset', required=True, help='cifar10 | lsun | mnist |imagenet | folder | lfw | fake') 18 parser.add_argument('--dataroot', required=True, help='path to dataset') 19 parser.add_argument('--workers', type=int, help='number of data loading workers', default=2) 20 parser.add_argument('--batchSize', type=int, default=64, help='input batch size') 21 parser.add_argument('--imageSize', type=int, default=64, help='the height / width of the input image to network') 22 parser.add_argument('--nz', type=int, default=100, help='size of the latent z vector') 23 parser.add_argument('--ngf', type=int, default=64) 24 parser.add_argument('--ndf', type=int, default=64) 25 parser.add_argument('--niter', type=int, default=25, help='number of epochs to train for') 26 parser.add_argument('--lr', type=float, default=0.0002, help='learning rate, default=0.0002') 27 parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. 
default=0.5') 28 parser.add_argument('--cuda', action='store_true', help='enables cuda') 29 parser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use') 30 parser.add_argument('--netG', default='', help="path to netG (to continue training)") 31 parser.add_argument('--netD', default='', help="path to netD (to continue training)") 32 parser.add_argument('--outf', default='.', help='folder to output images and model checkpoints') 33 parser.add_argument('--manualSeed', type=int, help='manual seed') 34 parser.add_argument('--classes', default='bedroom', help='comma separated list of classes for the lsun data set') 35 36 opt = parser.parse_args() 37 print(opt) 38 39 try: 40 os.makedirs(opt.outf) 41 except OSError: 42 pass 43 44 if opt.manualSeed is None: 45 opt.manualSeed = random.randint(1, 10000) 46 print("Random Seed: ", opt.manualSeed) 47 random.seed(opt.manualSeed) 48 torch.manual_seed(opt.manualSeed) 49 50 cudnn.benchmark = True 51 52 if torch.cuda.is_available() and not opt.cuda: 53 print("WARNING: You have a CUDA device, so you should probably run with --cuda") 54 55 if opt.dataset in ['imagenet', 'folder', 'lfw']: 56 # folder dataset 57 dataset = dset.ImageFolder(root=opt.dataroot, 58 transform=transforms.Compose([ 59 transforms.Resize(opt.imageSize), 60 transforms.CenterCrop(opt.imageSize), 61 transforms.ToTensor(), 62 transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), 63 ])) 64 nc=3 65 elif opt.dataset == 'lsun': 66 classes = [ c + '_train' for c in opt.classes.split(',')] 67 dataset = dset.LSUN(root=opt.dataroot, classes=classes, 68 transform=transforms.Compose([ 69 transforms.Resize(opt.imageSize), 70 transforms.CenterCrop(opt.imageSize), 71 transforms.ToTensor(), 72 transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), 73 ])) 74 nc=3 75 elif opt.dataset == 'cifar10': 76 dataset = dset.CIFAR10(root=opt.dataroot, download=True, 77 transform=transforms.Compose([ 78 transforms.Resize(opt.imageSize), 79 transforms.ToTensor(), 80 transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)), 81 ])) 82 nc=3 83 84 elif opt.dataset == 'mnist': 85 dataset = dset.MNIST(root=opt.dataroot, download=True, 86 transform=transforms.Compose([ 87 transforms.Resize(opt.imageSize), 88 transforms.ToTensor(), 89 transforms.Normalize((0.5,), (0.5,)), 90 ])) 91 nc=1 92 93 elif opt.dataset == 'fake': 94 dataset = dset.FakeData(image_size=(3, opt.imageSize, opt.imageSize), 95 transform=transforms.ToTensor()) 96 nc=3 97 98 assert dataset 99 dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize, 100 shuffle=True, num_workers=int(opt.workers)) 101 102 device = torch.device("cuda:0" if opt.cuda else "cpu") 103 ngpu = int(opt.ngpu) 104 nz = int(opt.nz) 105 ngf = int(opt.ngf) 106 ndf = int(opt.ndf) 107 108 109 # custom weights initialization called on netG and netD 110 def weights_init(m): 111 classname = m.__class__.__name__ 112 if classname.find('Conv') != -1: 113 m.weight.data.normal_(0.0, 0.02) 114 elif classname.find('BatchNorm') != -1: 115 m.weight.data.normal_(1.0, 0.02) 116 m.bias.data.fill_(0) 117 118 119 class Generator(nn.Module): 120 def __init__(self, ngpu): 121 super(Generator, self).__init__() 122 self.ngpu = ngpu 123 self.main = nn.Sequential( 124 # input is Z, going into a convolution 125 nn.ConvTranspose2d( nz, ngf * 8, 4, 1, 0, bias=False), 126 nn.BatchNorm2d(ngf * 8), 127 nn.ReLU(True), 128 # state size. (ngf*8) x 4 x 4 129 nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False), 130 nn.BatchNorm2d(ngf * 4), 131 nn.ReLU(True), 132 # state size. 
(ngf*4) x 8 x 8 133 nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False), 134 nn.BatchNorm2d(ngf * 2), 135 nn.ReLU(True), 136 # state size. (ngf*2) x 16 x 16 137 nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False), 138 nn.BatchNorm2d(ngf), 139 nn.ReLU(True), 140 # state size. (ngf) x 32 x 32 141 nn.ConvTranspose2d( ngf, nc, 4, 2, 1, bias=False), 142 nn.Tanh() 143 # state size. (nc) x 64 x 64 144 ) 145 146 def forward(self, input): 147 if input.is_cuda and self.ngpu > 1: 148 output = nn.parallel.data_parallel(self.main, input, range(self.ngpu)) 149 else: 150 output = self.main(input) 151 return output 152 153 154 netG = Generator(ngpu).to(device) 155 netG.apply(weights_init) 156 if opt.netG != '': 157 netG.load_state_dict(torch.load(opt.netG)) 158 print(netG) 159 160 161 class Discriminator(nn.Module): 162 def __init__(self, ngpu): 163 super(Discriminator, self).__init__() 164 self.ngpu = ngpu 165 self.main = nn.Sequential( 166 # input is (nc) x 64 x 64 167 nn.Conv2d(nc, ndf, 4, 2, 1, bias=False), 168 nn.LeakyReLU(0.2, inplace=True), 169 # state size. (ndf) x 32 x 32 170 nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False), 171 nn.BatchNorm2d(ndf * 2), 172 nn.LeakyReLU(0.2, inplace=True), 173 # state size. (ndf*2) x 16 x 16 174 nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False), 175 nn.BatchNorm2d(ndf * 4), 176 nn.LeakyReLU(0.2, inplace=True), 177 # state size. (ndf*4) x 8 x 8 178 nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False), 179 nn.BatchNorm2d(ndf * 8), 180 nn.LeakyReLU(0.2, inplace=True), 181 # state size. (ndf*8) x 4 x 4 182 nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False), 183 nn.Sigmoid() 184 ) 185 186 def forward(self, input): 187 if input.is_cuda and self.ngpu > 1: 188 output = nn.parallel.data_parallel(self.main, input, range(self.ngpu)) 189 else: 190 output = self.main(input) 191 192 return output.view(-1, 1).squeeze(1) 193 194 195 netD = Discriminator(ngpu).to(device) 196 netD.apply(weights_init) 197 if opt.netD != '': 198 netD.load_state_dict(torch.load(opt.netD)) 199 print(netD) 200 201 criterion = nn.BCELoss() 202 203 fixed_noise = torch.randn(opt.batchSize, nz, 1, 1, device=device) 204 real_label = 1 205 fake_label = 0 206 207 # setup optimizer 208 optimizerD = optim.Adam(netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999)) 209 optimizerG = optim.Adam(netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999)) 210 211 for epoch in range(opt.niter): 212 for i, data in enumerate(dataloader, 0): 213 ############################ 214 # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z))) 215 ########################### 216 # train with real 217 netD.zero_grad() 218 real_cpu = data[0].to(device) 219 batch_size = real_cpu.size(0) 220 label = torch.full((batch_size,), real_label, device=device) 221 222 output = netD(real_cpu) 223 errD_real = criterion(output, label) 224 errD_real.backward() 225 D_x = output.mean().item() 226 227 # train with fake 228 noise = torch.randn(batch_size, nz, 1, 1, device=device) 229 fake = netG(noise) 230 label.fill_(fake_label) 231 output = netD(fake.detach()) 232 errD_fake = criterion(output, label) 233 errD_fake.backward() 234 D_G_z1 = output.mean().item() 235 errD = errD_real + errD_fake 236 optimizerD.step() 237 238 ############################ 239 # (2) Update G network: maximize log(D(G(z))) 240 ########################### 241 netG.zero_grad() 242 label.fill_(real_label) # fake labels are real for generator cost 243 output = netD(fake) 244 errG = criterion(output, label) 245 errG.backward() 246 D_G_z2 = output.mean().item() 247 optimizerG.step() 248 
249 print('[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f / %.4f' 250 % (epoch, opt.niter, i, len(dataloader), 251 errD.item(), errG.item(), D_x, D_G_z1, D_G_z2)) 252 if i % 100 == 0: 253 vutils.save_image(real_cpu, 254 '%s/real_samples.png' % opt.outf, 255 normalize=True) 256 fake = netG(fixed_noise) 257 vutils.save_image(fake.detach(), 258 '%s/fake_samples_epoch_%03d.png' % (opt.outf, epoch), 259 normalize=True) 260 261 # do checkpointing 262 torch.save(netG.state_dict(), '%s/netG_epoch_%d.pth' % (opt.outf, epoch)) 263 torch.save(netD.state_dict(), '%s/netD_epoch_%d.pth' % (opt.outf, epoch)) 264 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/dcgan/main.py b/dcgan/main.py --- a/dcgan/main.py +++ b/dcgan/main.py @@ -15,7 +15,7 @@ parser = argparse.ArgumentParser() parser.add_argument('--dataset', required=True, help='cifar10 | lsun | mnist |imagenet | folder | lfw | fake') -parser.add_argument('--dataroot', required=True, help='path to dataset') +parser.add_argument('--dataroot', required=False, help='path to dataset') parser.add_argument('--workers', type=int, help='number of data loading workers', default=2) parser.add_argument('--batchSize', type=int, default=64, help='input batch size') parser.add_argument('--imageSize', type=int, default=64, help='the height / width of the input image to network') @@ -51,6 +51,9 @@ if torch.cuda.is_available() and not opt.cuda: print("WARNING: You have a CUDA device, so you should probably run with --cuda") + +if opt.dataroot is None and str(opt.dataset).lower() != 'fake': + raise ValueError("`dataroot` parameter is required for dataset \"%s\"" % opt.dataset) if opt.dataset in ['imagenet', 'folder', 'lfw']: # folder dataset
{"golden_diff": "diff --git a/dcgan/main.py b/dcgan/main.py\n--- a/dcgan/main.py\n+++ b/dcgan/main.py\n@@ -15,7 +15,7 @@\n \n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset', required=True, help='cifar10 | lsun | mnist |imagenet | folder | lfw | fake')\n-parser.add_argument('--dataroot', required=True, help='path to dataset')\n+parser.add_argument('--dataroot', required=False, help='path to dataset')\n parser.add_argument('--workers', type=int, help='number of data loading workers', default=2)\n parser.add_argument('--batchSize', type=int, default=64, help='input batch size')\n parser.add_argument('--imageSize', type=int, default=64, help='the height / width of the input image to network')\n@@ -51,6 +51,9 @@\n \n if torch.cuda.is_available() and not opt.cuda:\n print(\"WARNING: You have a CUDA device, so you should probably run with --cuda\")\n+ \n+if opt.dataroot is None and str(opt.dataset).lower() != 'fake':\n+ raise ValueError(\"`dataroot` parameter is required for dataset \\\"%s\\\"\" % opt.dataset)\n \n if opt.dataset in ['imagenet', 'folder', 'lfw']:\n # folder dataset\n", "issue": "dcgan fails on \"fake\" dataset\nI have been using the dcgan example as a stress test for a machine. To save time, I have made use of the `fake` dataset.\r\n\r\n`python main.py --dataset 'fake'` fails because `dataroot` is a required parameter. However, the `fake` dataset, does not need such information.\n", "before_files": [{"content": "from __future__ import print_function\nimport argparse\nimport os\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nimport torch.utils.data\nimport torchvision.datasets as dset\nimport torchvision.transforms as transforms\nimport torchvision.utils as vutils\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--dataset', required=True, help='cifar10 | lsun | mnist |imagenet | folder | lfw | fake')\nparser.add_argument('--dataroot', required=True, help='path to dataset')\nparser.add_argument('--workers', type=int, help='number of data loading workers', default=2)\nparser.add_argument('--batchSize', type=int, default=64, help='input batch size')\nparser.add_argument('--imageSize', type=int, default=64, help='the height / width of the input image to network')\nparser.add_argument('--nz', type=int, default=100, help='size of the latent z vector')\nparser.add_argument('--ngf', type=int, default=64)\nparser.add_argument('--ndf', type=int, default=64)\nparser.add_argument('--niter', type=int, default=25, help='number of epochs to train for')\nparser.add_argument('--lr', type=float, default=0.0002, help='learning rate, default=0.0002')\nparser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. 
default=0.5')\nparser.add_argument('--cuda', action='store_true', help='enables cuda')\nparser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use')\nparser.add_argument('--netG', default='', help=\"path to netG (to continue training)\")\nparser.add_argument('--netD', default='', help=\"path to netD (to continue training)\")\nparser.add_argument('--outf', default='.', help='folder to output images and model checkpoints')\nparser.add_argument('--manualSeed', type=int, help='manual seed')\nparser.add_argument('--classes', default='bedroom', help='comma separated list of classes for the lsun data set')\n\nopt = parser.parse_args()\nprint(opt)\n\ntry:\n os.makedirs(opt.outf)\nexcept OSError:\n pass\n\nif opt.manualSeed is None:\n opt.manualSeed = random.randint(1, 10000)\nprint(\"Random Seed: \", opt.manualSeed)\nrandom.seed(opt.manualSeed)\ntorch.manual_seed(opt.manualSeed)\n\ncudnn.benchmark = True\n\nif torch.cuda.is_available() and not opt.cuda:\n print(\"WARNING: You have a CUDA device, so you should probably run with --cuda\")\n\nif opt.dataset in ['imagenet', 'folder', 'lfw']:\n # folder dataset\n dataset = dset.ImageFolder(root=opt.dataroot,\n transform=transforms.Compose([\n transforms.Resize(opt.imageSize),\n transforms.CenterCrop(opt.imageSize),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n ]))\n nc=3\nelif opt.dataset == 'lsun':\n classes = [ c + '_train' for c in opt.classes.split(',')]\n dataset = dset.LSUN(root=opt.dataroot, classes=classes,\n transform=transforms.Compose([\n transforms.Resize(opt.imageSize),\n transforms.CenterCrop(opt.imageSize),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n ]))\n nc=3\nelif opt.dataset == 'cifar10':\n dataset = dset.CIFAR10(root=opt.dataroot, download=True,\n transform=transforms.Compose([\n transforms.Resize(opt.imageSize),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n ]))\n nc=3\n\nelif opt.dataset == 'mnist':\n dataset = dset.MNIST(root=opt.dataroot, download=True,\n transform=transforms.Compose([\n transforms.Resize(opt.imageSize),\n transforms.ToTensor(),\n transforms.Normalize((0.5,), (0.5,)),\n ]))\n nc=1\n\nelif opt.dataset == 'fake':\n dataset = dset.FakeData(image_size=(3, opt.imageSize, opt.imageSize),\n transform=transforms.ToTensor())\n nc=3\n\nassert dataset\ndataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize,\n shuffle=True, num_workers=int(opt.workers))\n\ndevice = torch.device(\"cuda:0\" if opt.cuda else \"cpu\")\nngpu = int(opt.ngpu)\nnz = int(opt.nz)\nngf = int(opt.ngf)\nndf = int(opt.ndf)\n\n\n# custom weights initialization called on netG and netD\ndef weights_init(m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n m.weight.data.normal_(0.0, 0.02)\n elif classname.find('BatchNorm') != -1:\n m.weight.data.normal_(1.0, 0.02)\n m.bias.data.fill_(0)\n\n\nclass Generator(nn.Module):\n def __init__(self, ngpu):\n super(Generator, self).__init__()\n self.ngpu = ngpu\n self.main = nn.Sequential(\n # input is Z, going into a convolution\n nn.ConvTranspose2d( nz, ngf * 8, 4, 1, 0, bias=False),\n nn.BatchNorm2d(ngf * 8),\n nn.ReLU(True),\n # state size. (ngf*8) x 4 x 4\n nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 4),\n nn.ReLU(True),\n # state size. (ngf*4) x 8 x 8\n nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 2),\n nn.ReLU(True),\n # state size. 
(ngf*2) x 16 x 16\n nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf),\n nn.ReLU(True),\n # state size. (ngf) x 32 x 32\n nn.ConvTranspose2d( ngf, nc, 4, 2, 1, bias=False),\n nn.Tanh()\n # state size. (nc) x 64 x 64\n )\n\n def forward(self, input):\n if input.is_cuda and self.ngpu > 1:\n output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))\n else:\n output = self.main(input)\n return output\n\n\nnetG = Generator(ngpu).to(device)\nnetG.apply(weights_init)\nif opt.netG != '':\n netG.load_state_dict(torch.load(opt.netG))\nprint(netG)\n\n\nclass Discriminator(nn.Module):\n def __init__(self, ngpu):\n super(Discriminator, self).__init__()\n self.ngpu = ngpu\n self.main = nn.Sequential(\n # input is (nc) x 64 x 64\n nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf) x 32 x 32\n nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ndf * 2),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*2) x 16 x 16\n nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ndf * 4),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*4) x 8 x 8\n nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ndf * 8),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*8) x 4 x 4\n nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),\n nn.Sigmoid()\n )\n\n def forward(self, input):\n if input.is_cuda and self.ngpu > 1:\n output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))\n else:\n output = self.main(input)\n\n return output.view(-1, 1).squeeze(1)\n\n\nnetD = Discriminator(ngpu).to(device)\nnetD.apply(weights_init)\nif opt.netD != '':\n netD.load_state_dict(torch.load(opt.netD))\nprint(netD)\n\ncriterion = nn.BCELoss()\n\nfixed_noise = torch.randn(opt.batchSize, nz, 1, 1, device=device)\nreal_label = 1\nfake_label = 0\n\n# setup optimizer\noptimizerD = optim.Adam(netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\noptimizerG = optim.Adam(netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n\nfor epoch in range(opt.niter):\n for i, data in enumerate(dataloader, 0):\n ############################\n # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))\n ###########################\n # train with real\n netD.zero_grad()\n real_cpu = data[0].to(device)\n batch_size = real_cpu.size(0)\n label = torch.full((batch_size,), real_label, device=device)\n\n output = netD(real_cpu)\n errD_real = criterion(output, label)\n errD_real.backward()\n D_x = output.mean().item()\n\n # train with fake\n noise = torch.randn(batch_size, nz, 1, 1, device=device)\n fake = netG(noise)\n label.fill_(fake_label)\n output = netD(fake.detach())\n errD_fake = criterion(output, label)\n errD_fake.backward()\n D_G_z1 = output.mean().item()\n errD = errD_real + errD_fake\n optimizerD.step()\n\n ############################\n # (2) Update G network: maximize log(D(G(z)))\n ###########################\n netG.zero_grad()\n label.fill_(real_label) # fake labels are real for generator cost\n output = netD(fake)\n errG = criterion(output, label)\n errG.backward()\n D_G_z2 = output.mean().item()\n optimizerG.step()\n\n print('[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f / %.4f'\n % (epoch, opt.niter, i, len(dataloader),\n errD.item(), errG.item(), D_x, D_G_z1, D_G_z2))\n if i % 100 == 0:\n vutils.save_image(real_cpu,\n '%s/real_samples.png' % opt.outf,\n normalize=True)\n fake = netG(fixed_noise)\n vutils.save_image(fake.detach(),\n '%s/fake_samples_epoch_%03d.png' % 
(opt.outf, epoch),\n normalize=True)\n\n # do checkpointing\n torch.save(netG.state_dict(), '%s/netG_epoch_%d.pth' % (opt.outf, epoch))\n torch.save(netD.state_dict(), '%s/netD_epoch_%d.pth' % (opt.outf, epoch))\n", "path": "dcgan/main.py"}], "after_files": [{"content": "from __future__ import print_function\nimport argparse\nimport os\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nimport torch.utils.data\nimport torchvision.datasets as dset\nimport torchvision.transforms as transforms\nimport torchvision.utils as vutils\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--dataset', required=True, help='cifar10 | lsun | mnist |imagenet | folder | lfw | fake')\nparser.add_argument('--dataroot', required=False, help='path to dataset')\nparser.add_argument('--workers', type=int, help='number of data loading workers', default=2)\nparser.add_argument('--batchSize', type=int, default=64, help='input batch size')\nparser.add_argument('--imageSize', type=int, default=64, help='the height / width of the input image to network')\nparser.add_argument('--nz', type=int, default=100, help='size of the latent z vector')\nparser.add_argument('--ngf', type=int, default=64)\nparser.add_argument('--ndf', type=int, default=64)\nparser.add_argument('--niter', type=int, default=25, help='number of epochs to train for')\nparser.add_argument('--lr', type=float, default=0.0002, help='learning rate, default=0.0002')\nparser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')\nparser.add_argument('--cuda', action='store_true', help='enables cuda')\nparser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use')\nparser.add_argument('--netG', default='', help=\"path to netG (to continue training)\")\nparser.add_argument('--netD', default='', help=\"path to netD (to continue training)\")\nparser.add_argument('--outf', default='.', help='folder to output images and model checkpoints')\nparser.add_argument('--manualSeed', type=int, help='manual seed')\nparser.add_argument('--classes', default='bedroom', help='comma separated list of classes for the lsun data set')\n\nopt = parser.parse_args()\nprint(opt)\n\ntry:\n os.makedirs(opt.outf)\nexcept OSError:\n pass\n\nif opt.manualSeed is None:\n opt.manualSeed = random.randint(1, 10000)\nprint(\"Random Seed: \", opt.manualSeed)\nrandom.seed(opt.manualSeed)\ntorch.manual_seed(opt.manualSeed)\n\ncudnn.benchmark = True\n\nif torch.cuda.is_available() and not opt.cuda:\n print(\"WARNING: You have a CUDA device, so you should probably run with --cuda\")\n \nif opt.dataroot is None and str(opt.dataset).lower() != 'fake':\n raise ValueError(\"`dataroot` parameter is required for dataset \\\"%s\\\"\" % opt.dataset)\n\nif opt.dataset in ['imagenet', 'folder', 'lfw']:\n # folder dataset\n dataset = dset.ImageFolder(root=opt.dataroot,\n transform=transforms.Compose([\n transforms.Resize(opt.imageSize),\n transforms.CenterCrop(opt.imageSize),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n ]))\n nc=3\nelif opt.dataset == 'lsun':\n classes = [ c + '_train' for c in opt.classes.split(',')]\n dataset = dset.LSUN(root=opt.dataroot, classes=classes,\n transform=transforms.Compose([\n transforms.Resize(opt.imageSize),\n transforms.CenterCrop(opt.imageSize),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n ]))\n nc=3\nelif opt.dataset == 'cifar10':\n dataset = 
dset.CIFAR10(root=opt.dataroot, download=True,\n transform=transforms.Compose([\n transforms.Resize(opt.imageSize),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n ]))\n nc=3\n\nelif opt.dataset == 'mnist':\n dataset = dset.MNIST(root=opt.dataroot, download=True,\n transform=transforms.Compose([\n transforms.Resize(opt.imageSize),\n transforms.ToTensor(),\n transforms.Normalize((0.5,), (0.5,)),\n ]))\n nc=1\n\nelif opt.dataset == 'fake':\n dataset = dset.FakeData(image_size=(3, opt.imageSize, opt.imageSize),\n transform=transforms.ToTensor())\n nc=3\n\nassert dataset\ndataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize,\n shuffle=True, num_workers=int(opt.workers))\n\ndevice = torch.device(\"cuda:0\" if opt.cuda else \"cpu\")\nngpu = int(opt.ngpu)\nnz = int(opt.nz)\nngf = int(opt.ngf)\nndf = int(opt.ndf)\n\n\n# custom weights initialization called on netG and netD\ndef weights_init(m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n m.weight.data.normal_(0.0, 0.02)\n elif classname.find('BatchNorm') != -1:\n m.weight.data.normal_(1.0, 0.02)\n m.bias.data.fill_(0)\n\n\nclass Generator(nn.Module):\n def __init__(self, ngpu):\n super(Generator, self).__init__()\n self.ngpu = ngpu\n self.main = nn.Sequential(\n # input is Z, going into a convolution\n nn.ConvTranspose2d( nz, ngf * 8, 4, 1, 0, bias=False),\n nn.BatchNorm2d(ngf * 8),\n nn.ReLU(True),\n # state size. (ngf*8) x 4 x 4\n nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 4),\n nn.ReLU(True),\n # state size. (ngf*4) x 8 x 8\n nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf * 2),\n nn.ReLU(True),\n # state size. (ngf*2) x 16 x 16\n nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ngf),\n nn.ReLU(True),\n # state size. (ngf) x 32 x 32\n nn.ConvTranspose2d( ngf, nc, 4, 2, 1, bias=False),\n nn.Tanh()\n # state size. (nc) x 64 x 64\n )\n\n def forward(self, input):\n if input.is_cuda and self.ngpu > 1:\n output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))\n else:\n output = self.main(input)\n return output\n\n\nnetG = Generator(ngpu).to(device)\nnetG.apply(weights_init)\nif opt.netG != '':\n netG.load_state_dict(torch.load(opt.netG))\nprint(netG)\n\n\nclass Discriminator(nn.Module):\n def __init__(self, ngpu):\n super(Discriminator, self).__init__()\n self.ngpu = ngpu\n self.main = nn.Sequential(\n # input is (nc) x 64 x 64\n nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf) x 32 x 32\n nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ndf * 2),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*2) x 16 x 16\n nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ndf * 4),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. (ndf*4) x 8 x 8\n nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),\n nn.BatchNorm2d(ndf * 8),\n nn.LeakyReLU(0.2, inplace=True),\n # state size. 
(ndf*8) x 4 x 4\n nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),\n nn.Sigmoid()\n )\n\n def forward(self, input):\n if input.is_cuda and self.ngpu > 1:\n output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))\n else:\n output = self.main(input)\n\n return output.view(-1, 1).squeeze(1)\n\n\nnetD = Discriminator(ngpu).to(device)\nnetD.apply(weights_init)\nif opt.netD != '':\n netD.load_state_dict(torch.load(opt.netD))\nprint(netD)\n\ncriterion = nn.BCELoss()\n\nfixed_noise = torch.randn(opt.batchSize, nz, 1, 1, device=device)\nreal_label = 1\nfake_label = 0\n\n# setup optimizer\noptimizerD = optim.Adam(netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\noptimizerG = optim.Adam(netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))\n\nfor epoch in range(opt.niter):\n for i, data in enumerate(dataloader, 0):\n ############################\n # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))\n ###########################\n # train with real\n netD.zero_grad()\n real_cpu = data[0].to(device)\n batch_size = real_cpu.size(0)\n label = torch.full((batch_size,), real_label, device=device)\n\n output = netD(real_cpu)\n errD_real = criterion(output, label)\n errD_real.backward()\n D_x = output.mean().item()\n\n # train with fake\n noise = torch.randn(batch_size, nz, 1, 1, device=device)\n fake = netG(noise)\n label.fill_(fake_label)\n output = netD(fake.detach())\n errD_fake = criterion(output, label)\n errD_fake.backward()\n D_G_z1 = output.mean().item()\n errD = errD_real + errD_fake\n optimizerD.step()\n\n ############################\n # (2) Update G network: maximize log(D(G(z)))\n ###########################\n netG.zero_grad()\n label.fill_(real_label) # fake labels are real for generator cost\n output = netD(fake)\n errG = criterion(output, label)\n errG.backward()\n D_G_z2 = output.mean().item()\n optimizerG.step()\n\n print('[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f / %.4f'\n % (epoch, opt.niter, i, len(dataloader),\n errD.item(), errG.item(), D_x, D_G_z1, D_G_z2))\n if i % 100 == 0:\n vutils.save_image(real_cpu,\n '%s/real_samples.png' % opt.outf,\n normalize=True)\n fake = netG(fixed_noise)\n vutils.save_image(fake.detach(),\n '%s/fake_samples_epoch_%03d.png' % (opt.outf, epoch),\n normalize=True)\n\n # do checkpointing\n torch.save(netG.state_dict(), '%s/netG_epoch_%d.pth' % (opt.outf, epoch))\n torch.save(netD.state_dict(), '%s/netD_epoch_%d.pth' % (opt.outf, epoch))\n", "path": "dcgan/main.py"}]}
3,731
292
gh_patches_debug_4769
rasdani/github-patches
git_diff
spotify__luigi-1447
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Scheduler only hosts on unix socket when run in the background Support for hosting the central scheduler on a unix socket was added, which is nice, but the scheduler ignores the `--unix-socket` argument from the command line when `--background` is not also supplied. This will work properly, and the scheduler will listen on the provided unix socket: ``` luigid --unix-socket /path/to/socket --background ``` With this command, the scheduler will still listen on the default port (8082): ``` luigid --unix-socket /path/to/socket ``` Fixing this would be a simple matter of passing the `unix_socket` argument onto the call to `server.run` in the case where the server is not daemonized, but was there a reason this functionality was left out in the first place? If so, it probably ought to be in the documentation; as is, reading it gives me the impression that I should be able to tell the scheduler to listen on a unix socket regardless of whether it's running in the background. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `luigi/cmdline.py` Content: ``` 1 import os 2 import argparse 3 import logging 4 import sys 5 6 from luigi.retcodes import run_with_retcodes 7 8 9 def luigi_run(argv=sys.argv[1:]): 10 run_with_retcodes(argv) 11 12 13 def luigid(argv=sys.argv[1:]): 14 import luigi.server 15 import luigi.process 16 import luigi.configuration 17 parser = argparse.ArgumentParser(description=u'Central luigi server') 18 parser.add_argument(u'--background', help=u'Run in background mode', action='store_true') 19 parser.add_argument(u'--pidfile', help=u'Write pidfile') 20 parser.add_argument(u'--logdir', help=u'log directory') 21 parser.add_argument(u'--state-path', help=u'Pickled state file') 22 parser.add_argument(u'--address', help=u'Listening interface') 23 parser.add_argument(u'--unix-socket', help=u'Unix socket path') 24 parser.add_argument(u'--port', default=8082, help=u'Listening port') 25 26 opts = parser.parse_args(argv) 27 28 if opts.state_path: 29 config = luigi.configuration.get_config() 30 config.set('scheduler', 'state_path', opts.state_path) 31 32 if opts.background: 33 # daemonize sets up logging to spooled log files 34 logging.getLogger().setLevel(logging.INFO) 35 luigi.process.daemonize(luigi.server.run, api_port=opts.port, 36 address=opts.address, pidfile=opts.pidfile, 37 logdir=opts.logdir, unix_socket=opts.unix_socket) 38 else: 39 if opts.logdir: 40 logging.basicConfig(level=logging.INFO, format=luigi.process.get_log_format(), 41 filename=os.path.join(opts.logdir, "luigi-server.log")) 42 else: 43 logging.basicConfig(level=logging.INFO, format=luigi.process.get_log_format()) 44 luigi.server.run(api_port=opts.port, address=opts.address) 45 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/luigi/cmdline.py b/luigi/cmdline.py --- a/luigi/cmdline.py +++ b/luigi/cmdline.py @@ -41,4 +41,4 @@ filename=os.path.join(opts.logdir, "luigi-server.log")) else: logging.basicConfig(level=logging.INFO, format=luigi.process.get_log_format()) - luigi.server.run(api_port=opts.port, address=opts.address) + luigi.server.run(api_port=opts.port, address=opts.address, unix_socket=opts.unix_socket)
{"golden_diff": "diff --git a/luigi/cmdline.py b/luigi/cmdline.py\n--- a/luigi/cmdline.py\n+++ b/luigi/cmdline.py\n@@ -41,4 +41,4 @@\n filename=os.path.join(opts.logdir, \"luigi-server.log\"))\n else:\n logging.basicConfig(level=logging.INFO, format=luigi.process.get_log_format())\n- luigi.server.run(api_port=opts.port, address=opts.address)\n+ luigi.server.run(api_port=opts.port, address=opts.address, unix_socket=opts.unix_socket)\n", "issue": "Scheduler only hosts on unix socket when run in the background\nSupport for hosting the central scheduler on a unix socket was added, which is nice, but the scheduler ignores the `--unix-socket` argument from the command line when `--background` is not also supplied. \n\nThis will work properly, and the scheduler will listen on the provided unix socket:\n\n```\nluigid --unix-socket /path/to/socket --background\n```\n\nWith this command, the scheduler will still listen on the default port (8082):\n\n```\nluigid --unix-socket /path/to/socket\n```\n\nFixing this would be a simple matter of passing the `unix_socket` argument onto the call to `server.run` in the case where the server is not daemonized, but was there a reason this functionality was left out in the first place? If so, it probably ought to be in the documentation; as is, reading it gives me the impression that I should be able to tell the scheduler to listen on a unix socket regardless of whether it's running in the background.\n\n", "before_files": [{"content": "import os\nimport argparse\nimport logging\nimport sys\n\nfrom luigi.retcodes import run_with_retcodes\n\n\ndef luigi_run(argv=sys.argv[1:]):\n run_with_retcodes(argv)\n\n\ndef luigid(argv=sys.argv[1:]):\n import luigi.server\n import luigi.process\n import luigi.configuration\n parser = argparse.ArgumentParser(description=u'Central luigi server')\n parser.add_argument(u'--background', help=u'Run in background mode', action='store_true')\n parser.add_argument(u'--pidfile', help=u'Write pidfile')\n parser.add_argument(u'--logdir', help=u'log directory')\n parser.add_argument(u'--state-path', help=u'Pickled state file')\n parser.add_argument(u'--address', help=u'Listening interface')\n parser.add_argument(u'--unix-socket', help=u'Unix socket path')\n parser.add_argument(u'--port', default=8082, help=u'Listening port')\n\n opts = parser.parse_args(argv)\n\n if opts.state_path:\n config = luigi.configuration.get_config()\n config.set('scheduler', 'state_path', opts.state_path)\n\n if opts.background:\n # daemonize sets up logging to spooled log files\n logging.getLogger().setLevel(logging.INFO)\n luigi.process.daemonize(luigi.server.run, api_port=opts.port,\n address=opts.address, pidfile=opts.pidfile,\n logdir=opts.logdir, unix_socket=opts.unix_socket)\n else:\n if opts.logdir:\n logging.basicConfig(level=logging.INFO, format=luigi.process.get_log_format(),\n filename=os.path.join(opts.logdir, \"luigi-server.log\"))\n else:\n logging.basicConfig(level=logging.INFO, format=luigi.process.get_log_format())\n luigi.server.run(api_port=opts.port, address=opts.address)\n", "path": "luigi/cmdline.py"}], "after_files": [{"content": "import os\nimport argparse\nimport logging\nimport sys\n\nfrom luigi.retcodes import run_with_retcodes\n\n\ndef luigi_run(argv=sys.argv[1:]):\n run_with_retcodes(argv)\n\n\ndef luigid(argv=sys.argv[1:]):\n import luigi.server\n import luigi.process\n import luigi.configuration\n parser = argparse.ArgumentParser(description=u'Central luigi server')\n parser.add_argument(u'--background', help=u'Run in background 
mode', action='store_true')\n parser.add_argument(u'--pidfile', help=u'Write pidfile')\n parser.add_argument(u'--logdir', help=u'log directory')\n parser.add_argument(u'--state-path', help=u'Pickled state file')\n parser.add_argument(u'--address', help=u'Listening interface')\n parser.add_argument(u'--unix-socket', help=u'Unix socket path')\n parser.add_argument(u'--port', default=8082, help=u'Listening port')\n\n opts = parser.parse_args(argv)\n\n if opts.state_path:\n config = luigi.configuration.get_config()\n config.set('scheduler', 'state_path', opts.state_path)\n\n if opts.background:\n # daemonize sets up logging to spooled log files\n logging.getLogger().setLevel(logging.INFO)\n luigi.process.daemonize(luigi.server.run, api_port=opts.port,\n address=opts.address, pidfile=opts.pidfile,\n logdir=opts.logdir, unix_socket=opts.unix_socket)\n else:\n if opts.logdir:\n logging.basicConfig(level=logging.INFO, format=luigi.process.get_log_format(),\n filename=os.path.join(opts.logdir, \"luigi-server.log\"))\n else:\n logging.basicConfig(level=logging.INFO, format=luigi.process.get_log_format())\n luigi.server.run(api_port=opts.port, address=opts.address, unix_socket=opts.unix_socket)\n", "path": "luigi/cmdline.py"}]}
968
124
gh_patches_debug_20043
rasdani/github-patches
git_diff
archlinux__archinstall-66
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Issue installing package groups (kde-applications for instance) As mentioned in #61, support for package groups doesn't work. The idea here is that it should be supported, we simply never verified that the [archinstall.find_package()](https://github.com/Torxed/archinstall/blob/master/archinstall/lib/packages.py#L7-L17) function can verify those, and apparently it can't. So we have to use another API endpoint or multiple to support this. *The backplane supports it already, as the packages are sent as a unfiltered string to `pacman -S` more or less.* --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `archinstall/lib/packages.py` Content: ``` 1 import urllib.request, urllib.parse 2 import ssl, json 3 from .exceptions import * 4 5 BASE_URL = 'https://www.archlinux.org/packages/search/json/?name={package}' 6 7 def find_package(name): 8 """ 9 Finds a specific package via the package database. 10 It makes a simple web-request, which might be a bit slow. 11 """ 12 ssl_context = ssl.create_default_context() 13 ssl_context.check_hostname = False 14 ssl_context.verify_mode = ssl.CERT_NONE 15 response = urllib.request.urlopen(BASE_URL.format(package=name), context=ssl_context) 16 data = response.read().decode('UTF-8') 17 return json.loads(data) 18 19 def find_packages(*names): 20 """ 21 This function returns the search results for many packages. 22 The function itself is rather slow, so consider not sending to 23 many packages to the search query. 24 """ 25 result = {} 26 for package in names: 27 result[package] = find_package(package) 28 return result 29 30 def validate_package_list(packages :list): 31 """ 32 Validates a list of given packages. 33 Raises `RequirementError` if one or more packages are not found. 34 """ 35 invalid_packages = [] 36 for package in packages: 37 if not find_package(package)['results']: 38 invalid_packages.append(package) 39 40 if invalid_packages: 41 raise RequirementError(f"Invalid package names: {invalid_packages}") 42 43 return True ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/archinstall/lib/packages.py b/archinstall/lib/packages.py --- a/archinstall/lib/packages.py +++ b/archinstall/lib/packages.py @@ -3,6 +3,23 @@ from .exceptions import * BASE_URL = 'https://www.archlinux.org/packages/search/json/?name={package}' +BASE_GROUP_URL = 'https://www.archlinux.org/groups/x86_64/{group}/' + +def find_group(name): + ssl_context = ssl.create_default_context() + ssl_context.check_hostname = False + ssl_context.verify_mode = ssl.CERT_NONE + try: + response = urllib.request.urlopen(BASE_GROUP_URL.format(group=name), context=ssl_context) + except urllib.error.HTTPError as err: + if err.code == 404: + return False + else: + raise err + + # Just to be sure some code didn't slip through the exception + if response.code == 200: + return True def find_package(name): """ @@ -34,7 +51,7 @@ """ invalid_packages = [] for package in packages: - if not find_package(package)['results']: + if not find_package(package)['results'] and not find_group(package): invalid_packages.append(package) if invalid_packages:
{"golden_diff": "diff --git a/archinstall/lib/packages.py b/archinstall/lib/packages.py\n--- a/archinstall/lib/packages.py\n+++ b/archinstall/lib/packages.py\n@@ -3,6 +3,23 @@\n from .exceptions import *\n \n BASE_URL = 'https://www.archlinux.org/packages/search/json/?name={package}'\n+BASE_GROUP_URL = 'https://www.archlinux.org/groups/x86_64/{group}/'\n+\n+def find_group(name):\n+\tssl_context = ssl.create_default_context()\n+\tssl_context.check_hostname = False\n+\tssl_context.verify_mode = ssl.CERT_NONE\n+\ttry:\n+\t\tresponse = urllib.request.urlopen(BASE_GROUP_URL.format(group=name), context=ssl_context)\n+\texcept urllib.error.HTTPError as err:\n+\t\tif err.code == 404:\n+\t\t\treturn False\n+\t\telse:\n+\t\t\traise err\n+\t\n+\t# Just to be sure some code didn't slip through the exception\n+\tif response.code == 200:\n+\t\treturn True\n \n def find_package(name):\n \t\"\"\"\n@@ -34,7 +51,7 @@\n \t\"\"\"\n \tinvalid_packages = []\n \tfor package in packages:\n-\t\tif not find_package(package)['results']:\n+\t\tif not find_package(package)['results'] and not find_group(package):\n \t\t\tinvalid_packages.append(package)\n \t\n \tif invalid_packages:\n", "issue": "Issue installing package groups (kde-applications for instance)\nAs mentioned in #61, support for package groups doesn't work.\r\nThe idea here is that it should be supported, we simply never verified that the [archinstall.find_package()](https://github.com/Torxed/archinstall/blob/master/archinstall/lib/packages.py#L7-L17) function can verify those, and apparently it can't. So we have to use another API endpoint or multiple to support this.\r\n\r\n*The backplane supports it already, as the packages are sent as a unfiltered string to `pacman -S` more or less.*\n", "before_files": [{"content": "import urllib.request, urllib.parse\nimport ssl, json\nfrom .exceptions import *\n\nBASE_URL = 'https://www.archlinux.org/packages/search/json/?name={package}'\n\ndef find_package(name):\n\t\"\"\"\n\tFinds a specific package via the package database.\n\tIt makes a simple web-request, which might be a bit slow.\n\t\"\"\"\n\tssl_context = ssl.create_default_context()\n\tssl_context.check_hostname = False\n\tssl_context.verify_mode = ssl.CERT_NONE\n\tresponse = urllib.request.urlopen(BASE_URL.format(package=name), context=ssl_context)\n\tdata = response.read().decode('UTF-8')\n\treturn json.loads(data)\n\ndef find_packages(*names):\n\t\"\"\"\n\tThis function returns the search results for many packages.\n\tThe function itself is rather slow, so consider not sending to\n\tmany packages to the search query.\n\t\"\"\"\n\tresult = {}\n\tfor package in names:\n\t\tresult[package] = find_package(package)\n\treturn result\n\ndef validate_package_list(packages :list):\n\t\"\"\"\n\tValidates a list of given packages.\n\tRaises `RequirementError` if one or more packages are not found.\n\t\"\"\"\n\tinvalid_packages = []\n\tfor package in packages:\n\t\tif not find_package(package)['results']:\n\t\t\tinvalid_packages.append(package)\n\t\n\tif invalid_packages:\n\t\traise RequirementError(f\"Invalid package names: {invalid_packages}\")\n\n\treturn True", "path": "archinstall/lib/packages.py"}], "after_files": [{"content": "import urllib.request, urllib.parse\nimport ssl, json\nfrom .exceptions import *\n\nBASE_URL = 'https://www.archlinux.org/packages/search/json/?name={package}'\nBASE_GROUP_URL = 'https://www.archlinux.org/groups/x86_64/{group}/'\n\ndef find_group(name):\n\tssl_context = ssl.create_default_context()\n\tssl_context.check_hostname = 
False\n\tssl_context.verify_mode = ssl.CERT_NONE\n\ttry:\n\t\tresponse = urllib.request.urlopen(BASE_GROUP_URL.format(group=name), context=ssl_context)\n\texcept urllib.error.HTTPError as err:\n\t\tif err.code == 404:\n\t\t\treturn False\n\t\telse:\n\t\t\traise err\n\t\n\t# Just to be sure some code didn't slip through the exception\n\tif response.code == 200:\n\t\treturn True\n\ndef find_package(name):\n\t\"\"\"\n\tFinds a specific package via the package database.\n\tIt makes a simple web-request, which might be a bit slow.\n\t\"\"\"\n\tssl_context = ssl.create_default_context()\n\tssl_context.check_hostname = False\n\tssl_context.verify_mode = ssl.CERT_NONE\n\tresponse = urllib.request.urlopen(BASE_URL.format(package=name), context=ssl_context)\n\tdata = response.read().decode('UTF-8')\n\treturn json.loads(data)\n\ndef find_packages(*names):\n\t\"\"\"\n\tThis function returns the search results for many packages.\n\tThe function itself is rather slow, so consider not sending to\n\tmany packages to the search query.\n\t\"\"\"\n\tresult = {}\n\tfor package in names:\n\t\tresult[package] = find_package(package)\n\treturn result\n\ndef validate_package_list(packages :list):\n\t\"\"\"\n\tValidates a list of given packages.\n\tRaises `RequirementError` if one or more packages are not found.\n\t\"\"\"\n\tinvalid_packages = []\n\tfor package in packages:\n\t\tif not find_package(package)['results'] and not find_group(package):\n\t\t\tinvalid_packages.append(package)\n\t\n\tif invalid_packages:\n\t\traise RequirementError(f\"Invalid package names: {invalid_packages}\")\n\n\treturn True", "path": "archinstall/lib/packages.py"}]}
768
291
gh_patches_debug_20952
rasdani/github-patches
git_diff
Lightning-AI__torchmetrics-2081
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Running Wrapper documentation code example does not render Pointing out a documentation issue in the Running wrapper: The code example does not render properly: ![image](https://github.com/Lightning-AI/torchmetrics/assets/28459495/ecb30567-4b65-4bda-b3ff-0e6a66f4ff2d) https://github.com/Lightning-AI/torchmetrics/blob/99d6d9d6ac4eb1b3398241df558604e70521e6b0/src/torchmetrics/wrappers/running.py#L46-L83 I assume this is because the first line after each `Example:` does not start with `>>>`? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/torchmetrics/wrappers/running.py` Content: ``` 1 # Copyright The Lightning team. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 from typing import Any, Optional, Sequence, Union 15 16 from torch import Tensor 17 18 from torchmetrics.metric import Metric 19 from torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE 20 from torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE 21 from torchmetrics.wrappers.abstract import WrapperMetric 22 23 if not _MATPLOTLIB_AVAILABLE: 24 __doctest_skip__ = ["Running.plot"] 25 26 27 class Running(WrapperMetric): 28 """Running wrapper for metrics. 29 30 Using this wrapper allows for calculating metrics over a running window of values, instead of the whole history of 31 values. This is beneficial when you want to get a better estimate of the metric during training and don't want to 32 wait for the whole training to finish to get epoch level estimates. 33 34 The running window is defined by the `window` argument. The window is a fixed size and this wrapper will store a 35 duplicate of the underlying metric state for each value in the window. Thus memory usage will increase linearly 36 with window size. Use accordingly. Also note that the running only works with metrics that have the 37 `full_state_update` set to `False`. 38 39 Importantly, the wrapper does not alter the value of the `forward` method of the underlying metric. Thus, forward 40 will still return the value on the current batch. To get the running value call `compute` instead. 41 42 Args: 43 base_metric: The metric to wrap. 44 window: The size of the running window. 45 46 Example: 47 # Single metric 48 >>> from torch import tensor 49 >>> from torchmetrics.wrappers import Running 50 >>> from torchmetrics.aggregation import SumMetric 51 >>> metric = Running(SumMetric(), window=3) 52 >>> for i in range(6): 53 ... current_val = metric(tensor([i])) 54 ... running_val = metric.compute() 55 ... total_val = tensor(sum(list(range(i+1)))) # value we would get from `compute` without running 56 ... 
print(f"{current_val=}, {running_val=}, {total_val=}") 57 current_val=tensor(0.), running_val=tensor(0.), total_val=tensor(0) 58 current_val=tensor(1.), running_val=tensor(1.), total_val=tensor(1) 59 current_val=tensor(2.), running_val=tensor(3.), total_val=tensor(3) 60 current_val=tensor(3.), running_val=tensor(6.), total_val=tensor(6) 61 current_val=tensor(4.), running_val=tensor(9.), total_val=tensor(10) 62 current_val=tensor(5.), running_val=tensor(12.), total_val=tensor(15) 63 64 Example: 65 # Metric collection 66 >>> from torch import tensor 67 >>> from torchmetrics.wrappers import Running 68 >>> from torchmetrics import MetricCollection 69 >>> from torchmetrics.aggregation import SumMetric, MeanMetric 70 >>> # note that running is input to collection, not the other way 71 >>> metric = MetricCollection({"sum": Running(SumMetric(), 3), "mean": Running(MeanMetric(), 3)}) 72 >>> for i in range(6): 73 ... current_val = metric(tensor([i])) 74 ... running_val = metric.compute() 75 ... print(f"{current_val=}, {running_val=}") 76 current_val={'mean': tensor(0.), 'sum': tensor(0.)}, running_val={'mean': tensor(0.), 'sum': tensor(0.)} 77 current_val={'mean': tensor(1.), 'sum': tensor(1.)}, running_val={'mean': tensor(0.5000), 'sum': tensor(1.)} 78 current_val={'mean': tensor(2.), 'sum': tensor(2.)}, running_val={'mean': tensor(1.), 'sum': tensor(3.)} 79 current_val={'mean': tensor(3.), 'sum': tensor(3.)}, running_val={'mean': tensor(2.), 'sum': tensor(6.)} 80 current_val={'mean': tensor(4.), 'sum': tensor(4.)}, running_val={'mean': tensor(3.), 'sum': tensor(9.)} 81 current_val={'mean': tensor(5.), 'sum': tensor(5.)}, running_val={'mean': tensor(4.), 'sum': tensor(12.)} 82 83 """ 84 85 def __init__(self, base_metric: Metric, window: int = 5) -> None: 86 super().__init__() 87 if not isinstance(base_metric, Metric): 88 raise ValueError( 89 f"Expected argument `metric` to be an instance of `torchmetrics.Metric` but got {base_metric}" 90 ) 91 if not (isinstance(window, int) and window > 0): 92 raise ValueError(f"Expected argument `window` to be a positive integer but got {window}") 93 self.base_metric = base_metric 94 self.window = window 95 96 if base_metric.full_state_update is not False: 97 raise ValueError( 98 f"Expected attribute `full_state_update` set to `False` but got {base_metric.full_state_update}" 99 ) 100 self._num_vals_seen = 0 101 102 for key in base_metric._defaults: 103 for i in range(window): 104 self.add_state( 105 name=key + f"_{i}", default=base_metric._defaults[key], dist_reduce_fx=base_metric._reductions[key] 106 ) 107 108 def update(self, *args: Any, **kwargs: Any) -> None: 109 """Update the underlying metric and save state afterwards.""" 110 val = self._num_vals_seen % self.window 111 self.base_metric.update(*args, **kwargs) 112 for key in self.base_metric._defaults: 113 setattr(self, key + f"_{val}", getattr(self.base_metric, key)) 114 self.base_metric.reset() 115 self._num_vals_seen += 1 116 117 def forward(self, *args: Any, **kwargs: Any) -> Any: 118 """Forward input to the underlying metric and save state afterwards.""" 119 val = self._num_vals_seen % self.window 120 res = self.base_metric.forward(*args, **kwargs) 121 for key in self.base_metric._defaults: 122 setattr(self, key + f"_{val}", getattr(self.base_metric, key)) 123 self.base_metric.reset() 124 self._num_vals_seen += 1 125 self._computed = None 126 return res 127 128 def compute(self) -> Any: 129 """Compute the metric over the running window.""" 130 for i in range(self.window): 131 
self.base_metric._reduce_states({key: getattr(self, key + f"_{i}") for key in self.base_metric._defaults}) 132 val = self.base_metric.compute() 133 self.base_metric.reset() 134 return val 135 136 def reset(self) -> None: 137 """Reset metric.""" 138 super().reset() 139 self._num_vals_seen = 0 140 141 def plot( 142 self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None 143 ) -> _PLOT_OUT_TYPE: 144 """Plot a single or multiple values from the metric. 145 146 Args: 147 val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results. 148 If no value is provided, will automatically call `metric.compute` and plot that result. 149 ax: An matplotlib axis object. If provided will add plot to that axis 150 151 Returns: 152 Figure and Axes object 153 154 Raises: 155 ModuleNotFoundError: 156 If `matplotlib` is not installed 157 158 .. plot:: 159 :scale: 75 160 161 >>> # Example plotting a single value 162 >>> import torch 163 >>> from torchmetrics.wrappers import Running 164 >>> from torchmetrics.aggregation import SumMetric 165 >>> metric = Running(SumMetric(), 2) 166 >>> metric.update(torch.randn(20, 2)) 167 >>> fig_, ax_ = metric.plot() 168 169 .. plot:: 170 :scale: 75 171 172 >>> # Example plotting multiple values 173 >>> import torch 174 >>> from torchmetrics.wrappers import Running 175 >>> from torchmetrics.aggregation import SumMetric 176 >>> metric = Running(SumMetric(), 2) 177 >>> values = [ ] 178 >>> for _ in range(3): 179 ... values.append(metric(torch.randn(20, 2))) 180 >>> fig_, ax_ = metric.plot(values) 181 182 """ 183 return self._plot(val, ax) 184 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/torchmetrics/wrappers/running.py b/src/torchmetrics/wrappers/running.py --- a/src/torchmetrics/wrappers/running.py +++ b/src/torchmetrics/wrappers/running.py @@ -43,8 +43,7 @@ base_metric: The metric to wrap. window: The size of the running window. - Example: - # Single metric + Example (single metric): >>> from torch import tensor >>> from torchmetrics.wrappers import Running >>> from torchmetrics.aggregation import SumMetric @@ -61,8 +60,7 @@ current_val=tensor(4.), running_val=tensor(9.), total_val=tensor(10) current_val=tensor(5.), running_val=tensor(12.), total_val=tensor(15) - Example: - # Metric collection + Example (metric collection): >>> from torch import tensor >>> from torchmetrics.wrappers import Running >>> from torchmetrics import MetricCollection
{"golden_diff": "diff --git a/src/torchmetrics/wrappers/running.py b/src/torchmetrics/wrappers/running.py\n--- a/src/torchmetrics/wrappers/running.py\n+++ b/src/torchmetrics/wrappers/running.py\n@@ -43,8 +43,7 @@\n base_metric: The metric to wrap.\n window: The size of the running window.\n \n- Example:\n- # Single metric\n+ Example (single metric):\n >>> from torch import tensor\n >>> from torchmetrics.wrappers import Running\n >>> from torchmetrics.aggregation import SumMetric\n@@ -61,8 +60,7 @@\n current_val=tensor(4.), running_val=tensor(9.), total_val=tensor(10)\n current_val=tensor(5.), running_val=tensor(12.), total_val=tensor(15)\n \n- Example:\n- # Metric collection\n+ Example (metric collection):\n >>> from torch import tensor\n >>> from torchmetrics.wrappers import Running\n >>> from torchmetrics import MetricCollection\n", "issue": "Running Wrapper documentation code example does not render\nPointing out a documentation issue in the Running wrapper:\r\n\r\nThe code example does not render properly: \r\n![image](https://github.com/Lightning-AI/torchmetrics/assets/28459495/ecb30567-4b65-4bda-b3ff-0e6a66f4ff2d)\r\n\r\nhttps://github.com/Lightning-AI/torchmetrics/blob/99d6d9d6ac4eb1b3398241df558604e70521e6b0/src/torchmetrics/wrappers/running.py#L46-L83\r\n\r\nI assume this is because the first line after each `Example:` does not start with `>>>`?\n", "before_files": [{"content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, Optional, Sequence, Union\n\nfrom torch import Tensor\n\nfrom torchmetrics.metric import Metric\nfrom torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE\nfrom torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE\nfrom torchmetrics.wrappers.abstract import WrapperMetric\n\nif not _MATPLOTLIB_AVAILABLE:\n __doctest_skip__ = [\"Running.plot\"]\n\n\nclass Running(WrapperMetric):\n \"\"\"Running wrapper for metrics.\n\n Using this wrapper allows for calculating metrics over a running window of values, instead of the whole history of\n values. This is beneficial when you want to get a better estimate of the metric during training and don't want to\n wait for the whole training to finish to get epoch level estimates.\n\n The running window is defined by the `window` argument. The window is a fixed size and this wrapper will store a\n duplicate of the underlying metric state for each value in the window. Thus memory usage will increase linearly\n with window size. Use accordingly. Also note that the running only works with metrics that have the\n `full_state_update` set to `False`.\n\n Importantly, the wrapper does not alter the value of the `forward` method of the underlying metric. Thus, forward\n will still return the value on the current batch. 
To get the running value call `compute` instead.\n\n Args:\n base_metric: The metric to wrap.\n window: The size of the running window.\n\n Example:\n # Single metric\n >>> from torch import tensor\n >>> from torchmetrics.wrappers import Running\n >>> from torchmetrics.aggregation import SumMetric\n >>> metric = Running(SumMetric(), window=3)\n >>> for i in range(6):\n ... current_val = metric(tensor([i]))\n ... running_val = metric.compute()\n ... total_val = tensor(sum(list(range(i+1)))) # value we would get from `compute` without running\n ... print(f\"{current_val=}, {running_val=}, {total_val=}\")\n current_val=tensor(0.), running_val=tensor(0.), total_val=tensor(0)\n current_val=tensor(1.), running_val=tensor(1.), total_val=tensor(1)\n current_val=tensor(2.), running_val=tensor(3.), total_val=tensor(3)\n current_val=tensor(3.), running_val=tensor(6.), total_val=tensor(6)\n current_val=tensor(4.), running_val=tensor(9.), total_val=tensor(10)\n current_val=tensor(5.), running_val=tensor(12.), total_val=tensor(15)\n\n Example:\n # Metric collection\n >>> from torch import tensor\n >>> from torchmetrics.wrappers import Running\n >>> from torchmetrics import MetricCollection\n >>> from torchmetrics.aggregation import SumMetric, MeanMetric\n >>> # note that running is input to collection, not the other way\n >>> metric = MetricCollection({\"sum\": Running(SumMetric(), 3), \"mean\": Running(MeanMetric(), 3)})\n >>> for i in range(6):\n ... current_val = metric(tensor([i]))\n ... running_val = metric.compute()\n ... print(f\"{current_val=}, {running_val=}\")\n current_val={'mean': tensor(0.), 'sum': tensor(0.)}, running_val={'mean': tensor(0.), 'sum': tensor(0.)}\n current_val={'mean': tensor(1.), 'sum': tensor(1.)}, running_val={'mean': tensor(0.5000), 'sum': tensor(1.)}\n current_val={'mean': tensor(2.), 'sum': tensor(2.)}, running_val={'mean': tensor(1.), 'sum': tensor(3.)}\n current_val={'mean': tensor(3.), 'sum': tensor(3.)}, running_val={'mean': tensor(2.), 'sum': tensor(6.)}\n current_val={'mean': tensor(4.), 'sum': tensor(4.)}, running_val={'mean': tensor(3.), 'sum': tensor(9.)}\n current_val={'mean': tensor(5.), 'sum': tensor(5.)}, running_val={'mean': tensor(4.), 'sum': tensor(12.)}\n\n \"\"\"\n\n def __init__(self, base_metric: Metric, window: int = 5) -> None:\n super().__init__()\n if not isinstance(base_metric, Metric):\n raise ValueError(\n f\"Expected argument `metric` to be an instance of `torchmetrics.Metric` but got {base_metric}\"\n )\n if not (isinstance(window, int) and window > 0):\n raise ValueError(f\"Expected argument `window` to be a positive integer but got {window}\")\n self.base_metric = base_metric\n self.window = window\n\n if base_metric.full_state_update is not False:\n raise ValueError(\n f\"Expected attribute `full_state_update` set to `False` but got {base_metric.full_state_update}\"\n )\n self._num_vals_seen = 0\n\n for key in base_metric._defaults:\n for i in range(window):\n self.add_state(\n name=key + f\"_{i}\", default=base_metric._defaults[key], dist_reduce_fx=base_metric._reductions[key]\n )\n\n def update(self, *args: Any, **kwargs: Any) -> None:\n \"\"\"Update the underlying metric and save state afterwards.\"\"\"\n val = self._num_vals_seen % self.window\n self.base_metric.update(*args, **kwargs)\n for key in self.base_metric._defaults:\n setattr(self, key + f\"_{val}\", getattr(self.base_metric, key))\n self.base_metric.reset()\n self._num_vals_seen += 1\n\n def forward(self, *args: Any, **kwargs: Any) -> Any:\n \"\"\"Forward input to the 
underlying metric and save state afterwards.\"\"\"\n val = self._num_vals_seen % self.window\n res = self.base_metric.forward(*args, **kwargs)\n for key in self.base_metric._defaults:\n setattr(self, key + f\"_{val}\", getattr(self.base_metric, key))\n self.base_metric.reset()\n self._num_vals_seen += 1\n self._computed = None\n return res\n\n def compute(self) -> Any:\n \"\"\"Compute the metric over the running window.\"\"\"\n for i in range(self.window):\n self.base_metric._reduce_states({key: getattr(self, key + f\"_{i}\") for key in self.base_metric._defaults})\n val = self.base_metric.compute()\n self.base_metric.reset()\n return val\n\n def reset(self) -> None:\n \"\"\"Reset metric.\"\"\"\n super().reset()\n self._num_vals_seen = 0\n\n def plot(\n self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None\n ) -> _PLOT_OUT_TYPE:\n \"\"\"Plot a single or multiple values from the metric.\n\n Args:\n val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.\n If no value is provided, will automatically call `metric.compute` and plot that result.\n ax: An matplotlib axis object. If provided will add plot to that axis\n\n Returns:\n Figure and Axes object\n\n Raises:\n ModuleNotFoundError:\n If `matplotlib` is not installed\n\n .. plot::\n :scale: 75\n\n >>> # Example plotting a single value\n >>> import torch\n >>> from torchmetrics.wrappers import Running\n >>> from torchmetrics.aggregation import SumMetric\n >>> metric = Running(SumMetric(), 2)\n >>> metric.update(torch.randn(20, 2))\n >>> fig_, ax_ = metric.plot()\n\n .. plot::\n :scale: 75\n\n >>> # Example plotting multiple values\n >>> import torch\n >>> from torchmetrics.wrappers import Running\n >>> from torchmetrics.aggregation import SumMetric\n >>> metric = Running(SumMetric(), 2)\n >>> values = [ ]\n >>> for _ in range(3):\n ... values.append(metric(torch.randn(20, 2)))\n >>> fig_, ax_ = metric.plot(values)\n\n \"\"\"\n return self._plot(val, ax)\n", "path": "src/torchmetrics/wrappers/running.py"}], "after_files": [{"content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Any, Optional, Sequence, Union\n\nfrom torch import Tensor\n\nfrom torchmetrics.metric import Metric\nfrom torchmetrics.utilities.imports import _MATPLOTLIB_AVAILABLE\nfrom torchmetrics.utilities.plot import _AX_TYPE, _PLOT_OUT_TYPE\nfrom torchmetrics.wrappers.abstract import WrapperMetric\n\nif not _MATPLOTLIB_AVAILABLE:\n __doctest_skip__ = [\"Running.plot\"]\n\n\nclass Running(WrapperMetric):\n \"\"\"Running wrapper for metrics.\n\n Using this wrapper allows for calculating metrics over a running window of values, instead of the whole history of\n values. This is beneficial when you want to get a better estimate of the metric during training and don't want to\n wait for the whole training to finish to get epoch level estimates.\n\n The running window is defined by the `window` argument. 
The window is a fixed size and this wrapper will store a\n duplicate of the underlying metric state for each value in the window. Thus memory usage will increase linearly\n with window size. Use accordingly. Also note that the running only works with metrics that have the\n `full_state_update` set to `False`.\n\n Importantly, the wrapper does not alter the value of the `forward` method of the underlying metric. Thus, forward\n will still return the value on the current batch. To get the running value call `compute` instead.\n\n Args:\n base_metric: The metric to wrap.\n window: The size of the running window.\n\n Example (single metric):\n >>> from torch import tensor\n >>> from torchmetrics.wrappers import Running\n >>> from torchmetrics.aggregation import SumMetric\n >>> metric = Running(SumMetric(), window=3)\n >>> for i in range(6):\n ... current_val = metric(tensor([i]))\n ... running_val = metric.compute()\n ... total_val = tensor(sum(list(range(i+1)))) # value we would get from `compute` without running\n ... print(f\"{current_val=}, {running_val=}, {total_val=}\")\n current_val=tensor(0.), running_val=tensor(0.), total_val=tensor(0)\n current_val=tensor(1.), running_val=tensor(1.), total_val=tensor(1)\n current_val=tensor(2.), running_val=tensor(3.), total_val=tensor(3)\n current_val=tensor(3.), running_val=tensor(6.), total_val=tensor(6)\n current_val=tensor(4.), running_val=tensor(9.), total_val=tensor(10)\n current_val=tensor(5.), running_val=tensor(12.), total_val=tensor(15)\n\n Example (metric collection):\n >>> from torch import tensor\n >>> from torchmetrics.wrappers import Running\n >>> from torchmetrics import MetricCollection\n >>> from torchmetrics.aggregation import SumMetric, MeanMetric\n >>> # note that running is input to collection, not the other way\n >>> metric = MetricCollection({\"sum\": Running(SumMetric(), 3), \"mean\": Running(MeanMetric(), 3)})\n >>> for i in range(6):\n ... current_val = metric(tensor([i]))\n ... running_val = metric.compute()\n ... 
print(f\"{current_val=}, {running_val=}\")\n current_val={'mean': tensor(0.), 'sum': tensor(0.)}, running_val={'mean': tensor(0.), 'sum': tensor(0.)}\n current_val={'mean': tensor(1.), 'sum': tensor(1.)}, running_val={'mean': tensor(0.5000), 'sum': tensor(1.)}\n current_val={'mean': tensor(2.), 'sum': tensor(2.)}, running_val={'mean': tensor(1.), 'sum': tensor(3.)}\n current_val={'mean': tensor(3.), 'sum': tensor(3.)}, running_val={'mean': tensor(2.), 'sum': tensor(6.)}\n current_val={'mean': tensor(4.), 'sum': tensor(4.)}, running_val={'mean': tensor(3.), 'sum': tensor(9.)}\n current_val={'mean': tensor(5.), 'sum': tensor(5.)}, running_val={'mean': tensor(4.), 'sum': tensor(12.)}\n\n \"\"\"\n\n def __init__(self, base_metric: Metric, window: int = 5) -> None:\n super().__init__()\n if not isinstance(base_metric, Metric):\n raise ValueError(\n f\"Expected argument `metric` to be an instance of `torchmetrics.Metric` but got {base_metric}\"\n )\n if not (isinstance(window, int) and window > 0):\n raise ValueError(f\"Expected argument `window` to be a positive integer but got {window}\")\n self.base_metric = base_metric\n self.window = window\n\n if base_metric.full_state_update is not False:\n raise ValueError(\n f\"Expected attribute `full_state_update` set to `False` but got {base_metric.full_state_update}\"\n )\n self._num_vals_seen = 0\n\n for key in base_metric._defaults:\n for i in range(window):\n self.add_state(\n name=key + f\"_{i}\", default=base_metric._defaults[key], dist_reduce_fx=base_metric._reductions[key]\n )\n\n def update(self, *args: Any, **kwargs: Any) -> None:\n \"\"\"Update the underlying metric and save state afterwards.\"\"\"\n val = self._num_vals_seen % self.window\n self.base_metric.update(*args, **kwargs)\n for key in self.base_metric._defaults:\n setattr(self, key + f\"_{val}\", getattr(self.base_metric, key))\n self.base_metric.reset()\n self._num_vals_seen += 1\n\n def forward(self, *args: Any, **kwargs: Any) -> Any:\n \"\"\"Forward input to the underlying metric and save state afterwards.\"\"\"\n val = self._num_vals_seen % self.window\n res = self.base_metric.forward(*args, **kwargs)\n for key in self.base_metric._defaults:\n setattr(self, key + f\"_{val}\", getattr(self.base_metric, key))\n self.base_metric.reset()\n self._num_vals_seen += 1\n self._computed = None\n return res\n\n def compute(self) -> Any:\n \"\"\"Compute the metric over the running window.\"\"\"\n for i in range(self.window):\n self.base_metric._reduce_states({key: getattr(self, key + f\"_{i}\") for key in self.base_metric._defaults})\n val = self.base_metric.compute()\n self.base_metric.reset()\n return val\n\n def reset(self) -> None:\n \"\"\"Reset metric.\"\"\"\n super().reset()\n self._num_vals_seen = 0\n\n def plot(\n self, val: Optional[Union[Tensor, Sequence[Tensor]]] = None, ax: Optional[_AX_TYPE] = None\n ) -> _PLOT_OUT_TYPE:\n \"\"\"Plot a single or multiple values from the metric.\n\n Args:\n val: Either a single result from calling `metric.forward` or `metric.compute` or a list of these results.\n If no value is provided, will automatically call `metric.compute` and plot that result.\n ax: An matplotlib axis object. If provided will add plot to that axis\n\n Returns:\n Figure and Axes object\n\n Raises:\n ModuleNotFoundError:\n If `matplotlib` is not installed\n\n .. 
plot::\n :scale: 75\n\n >>> # Example plotting a single value\n >>> import torch\n >>> from torchmetrics.wrappers import Running\n >>> from torchmetrics.aggregation import SumMetric\n >>> metric = Running(SumMetric(), 2)\n >>> metric.update(torch.randn(20, 2))\n >>> fig_, ax_ = metric.plot()\n\n .. plot::\n :scale: 75\n\n >>> # Example plotting multiple values\n >>> import torch\n >>> from torchmetrics.wrappers import Running\n >>> from torchmetrics.aggregation import SumMetric\n >>> metric = Running(SumMetric(), 2)\n >>> values = [ ]\n >>> for _ in range(3):\n ... values.append(metric(torch.randn(20, 2)))\n >>> fig_, ax_ = metric.plot(values)\n\n \"\"\"\n return self._plot(val, ax)\n", "path": "src/torchmetrics/wrappers/running.py"}]}
2,824
232
gh_patches_debug_63087
rasdani/github-patches
git_diff
translate__pootle-5160
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Ensure tests can be run with `--reuse-db` When iterating over a test that require DB access (or a few of them), currently a site-wide setup is made which in such scenario ends up being relatively time-consuming and tedious. Ideally one could use [pytest-django's `--reuse-db` flag](http://pytest-django.readthedocs.org/en/latest/database.html#reuse-db-reuse-the-testing-database-between-test-runs) to considerably reduce setup time on test iterations, however at the current state of things such feature cannot be used due to the way the Pootle test DB environment is setup. Let's try to fix that so we can benefit from `--reuse-db`. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pytest_pootle/plugin.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # 3 # Copyright (C) Pootle contributors. 4 # 5 # This file is a part of the Pootle project. It is distributed under the GPL3 6 # or later license. See the LICENSE file for a copy of the license and the 7 # AUTHORS file for copyright and authorship information. 8 9 import os 10 import shutil 11 from pkgutil import iter_modules 12 13 import pytest 14 15 from . import fixtures 16 from .env import PootleTestEnv 17 from .fixtures import models as fixtures_models 18 from .fixtures.core import management as fixtures_core_management 19 from .fixtures.core import utils as fixtures_core_utils 20 from .fixtures import formats as fixtures_formats 21 from .fixtures import pootle_fs as fixtures_fs 22 23 24 def _load_fixtures(*modules): 25 for mod in modules: 26 path = mod.__path__ 27 prefix = '%s.' % mod.__name__ 28 29 for loader_, name, is_pkg in iter_modules(path, prefix): 30 if not is_pkg: 31 yield name 32 33 34 @pytest.fixture 35 def po_test_dir(request, tmpdir): 36 po_dir = str(tmpdir.mkdir("po")) 37 38 def rm_po_dir(): 39 if os.path.exists(po_dir): 40 shutil.rmtree(po_dir) 41 42 request.addfinalizer(rm_po_dir) 43 return po_dir 44 45 46 @pytest.fixture 47 def po_directory(request, po_test_dir, settings): 48 """Sets up a tmp directory for PO files.""" 49 from pootle_store.models import fs 50 51 translation_directory = settings.POOTLE_TRANSLATION_DIRECTORY 52 53 # Adjust locations 54 settings.POOTLE_TRANSLATION_DIRECTORY = po_test_dir 55 fs.location = po_test_dir 56 57 def _cleanup(): 58 settings.POOTLE_TRANSLATION_DIRECTORY = translation_directory 59 60 request.addfinalizer(_cleanup) 61 62 63 @pytest.fixture(scope='session') 64 def tests_use_db(request): 65 return bool( 66 [item for item in request.node.items 67 if item.get_marker('django_db')]) 68 69 70 @pytest.fixture(scope='session') 71 def tests_use_vfolders(request): 72 return bool( 73 [item for item in request.node.items 74 if item.get_marker('pootle_vfolders')]) 75 76 77 @pytest.fixture(scope='session') 78 def tests_use_migration(request, tests_use_db): 79 return bool( 80 tests_use_db 81 and [item for item in request.node.items 82 if item.get_marker('django_migration')]) 83 84 85 @pytest.fixture(autouse=True, scope='session') 86 def setup_db_if_needed(request, tests_use_db): 87 """Sets up the site DB only if tests requested to use the DB (autouse).""" 88 if tests_use_db: 89 return request.getfuncargvalue('post_db_setup') 90 91 92 @pytest.fixture(scope='session') 93 def post_db_setup(translations_directory, django_db_setup, django_db_blocker, 94 tests_use_db, tests_use_vfolders, request): 95 """Sets up 
the site DB for the test session.""" 96 if tests_use_db: 97 with django_db_blocker.unblock(): 98 PootleTestEnv().setup( 99 vfolders=tests_use_vfolders) 100 101 102 @pytest.fixture(scope='session') 103 def django_db_use_migrations(tests_use_migration): 104 return tests_use_migration 105 106 107 pytest_plugins = tuple( 108 _load_fixtures( 109 fixtures, 110 fixtures_core_management, 111 fixtures_core_utils, 112 fixtures_formats, 113 fixtures_models, 114 fixtures_fs)) 115 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pytest_pootle/plugin.py b/pytest_pootle/plugin.py
--- a/pytest_pootle/plugin.py
+++ b/pytest_pootle/plugin.py
@@ -85,7 +85,7 @@
 @pytest.fixture(autouse=True, scope='session')
 def setup_db_if_needed(request, tests_use_db):
     """Sets up the site DB only if tests requested to use the DB (autouse)."""
-    if tests_use_db:
+    if tests_use_db and not request.config.getvalue('reuse_db'):
         return request.getfuncargvalue('post_db_setup')
{"golden_diff": "diff --git a/pytest_pootle/plugin.py b/pytest_pootle/plugin.py\n--- a/pytest_pootle/plugin.py\n+++ b/pytest_pootle/plugin.py\n@@ -85,7 +85,7 @@\n @pytest.fixture(autouse=True, scope='session')\n def setup_db_if_needed(request, tests_use_db):\n \"\"\"Sets up the site DB only if tests requested to use the DB (autouse).\"\"\"\n- if tests_use_db:\n+ if tests_use_db and not request.config.getvalue('reuse_db'):\n return request.getfuncargvalue('post_db_setup')\n", "issue": "Ensure tests can be run with `--reuse-db`\nWhen iterating over a test that require DB access (or a few of them), currently a site-wide setup is made which in such scenario ends up being relatively time-consuming and tedious.\n\nIdeally one could use [pytest-django's `--reuse-db` flag](http://pytest-django.readthedocs.org/en/latest/database.html#reuse-db-reuse-the-testing-database-between-test-runs) to considerably reduce setup time on test iterations, however at the current state of things such feature cannot be used due to the way the Pootle test DB environment is setup.\n\nLet's try to fix that so we can benefit from `--reuse-db`.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport os\nimport shutil\nfrom pkgutil import iter_modules\n\nimport pytest\n\nfrom . import fixtures\nfrom .env import PootleTestEnv\nfrom .fixtures import models as fixtures_models\nfrom .fixtures.core import management as fixtures_core_management\nfrom .fixtures.core import utils as fixtures_core_utils\nfrom .fixtures import formats as fixtures_formats\nfrom .fixtures import pootle_fs as fixtures_fs\n\n\ndef _load_fixtures(*modules):\n for mod in modules:\n path = mod.__path__\n prefix = '%s.' 
% mod.__name__\n\n for loader_, name, is_pkg in iter_modules(path, prefix):\n if not is_pkg:\n yield name\n\n\[email protected]\ndef po_test_dir(request, tmpdir):\n po_dir = str(tmpdir.mkdir(\"po\"))\n\n def rm_po_dir():\n if os.path.exists(po_dir):\n shutil.rmtree(po_dir)\n\n request.addfinalizer(rm_po_dir)\n return po_dir\n\n\[email protected]\ndef po_directory(request, po_test_dir, settings):\n \"\"\"Sets up a tmp directory for PO files.\"\"\"\n from pootle_store.models import fs\n\n translation_directory = settings.POOTLE_TRANSLATION_DIRECTORY\n\n # Adjust locations\n settings.POOTLE_TRANSLATION_DIRECTORY = po_test_dir\n fs.location = po_test_dir\n\n def _cleanup():\n settings.POOTLE_TRANSLATION_DIRECTORY = translation_directory\n\n request.addfinalizer(_cleanup)\n\n\[email protected](scope='session')\ndef tests_use_db(request):\n return bool(\n [item for item in request.node.items\n if item.get_marker('django_db')])\n\n\[email protected](scope='session')\ndef tests_use_vfolders(request):\n return bool(\n [item for item in request.node.items\n if item.get_marker('pootle_vfolders')])\n\n\[email protected](scope='session')\ndef tests_use_migration(request, tests_use_db):\n return bool(\n tests_use_db\n and [item for item in request.node.items\n if item.get_marker('django_migration')])\n\n\[email protected](autouse=True, scope='session')\ndef setup_db_if_needed(request, tests_use_db):\n \"\"\"Sets up the site DB only if tests requested to use the DB (autouse).\"\"\"\n if tests_use_db:\n return request.getfuncargvalue('post_db_setup')\n\n\[email protected](scope='session')\ndef post_db_setup(translations_directory, django_db_setup, django_db_blocker,\n tests_use_db, tests_use_vfolders, request):\n \"\"\"Sets up the site DB for the test session.\"\"\"\n if tests_use_db:\n with django_db_blocker.unblock():\n PootleTestEnv().setup(\n vfolders=tests_use_vfolders)\n\n\[email protected](scope='session')\ndef django_db_use_migrations(tests_use_migration):\n return tests_use_migration\n\n\npytest_plugins = tuple(\n _load_fixtures(\n fixtures,\n fixtures_core_management,\n fixtures_core_utils,\n fixtures_formats,\n fixtures_models,\n fixtures_fs))\n", "path": "pytest_pootle/plugin.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport os\nimport shutil\nfrom pkgutil import iter_modules\n\nimport pytest\n\nfrom . import fixtures\nfrom .env import PootleTestEnv\nfrom .fixtures import models as fixtures_models\nfrom .fixtures.core import management as fixtures_core_management\nfrom .fixtures.core import utils as fixtures_core_utils\nfrom .fixtures import formats as fixtures_formats\nfrom .fixtures import pootle_fs as fixtures_fs\n\n\ndef _load_fixtures(*modules):\n for mod in modules:\n path = mod.__path__\n prefix = '%s.' 
% mod.__name__\n\n for loader_, name, is_pkg in iter_modules(path, prefix):\n if not is_pkg:\n yield name\n\n\[email protected]\ndef po_test_dir(request, tmpdir):\n po_dir = str(tmpdir.mkdir(\"po\"))\n\n def rm_po_dir():\n if os.path.exists(po_dir):\n shutil.rmtree(po_dir)\n\n request.addfinalizer(rm_po_dir)\n return po_dir\n\n\[email protected]\ndef po_directory(request, po_test_dir, settings):\n \"\"\"Sets up a tmp directory for PO files.\"\"\"\n from pootle_store.models import fs\n\n translation_directory = settings.POOTLE_TRANSLATION_DIRECTORY\n\n # Adjust locations\n settings.POOTLE_TRANSLATION_DIRECTORY = po_test_dir\n fs.location = po_test_dir\n\n def _cleanup():\n settings.POOTLE_TRANSLATION_DIRECTORY = translation_directory\n\n request.addfinalizer(_cleanup)\n\n\[email protected](scope='session')\ndef tests_use_db(request):\n return bool(\n [item for item in request.node.items\n if item.get_marker('django_db')])\n\n\[email protected](scope='session')\ndef tests_use_vfolders(request):\n return bool(\n [item for item in request.node.items\n if item.get_marker('pootle_vfolders')])\n\n\[email protected](scope='session')\ndef tests_use_migration(request, tests_use_db):\n return bool(\n tests_use_db\n and [item for item in request.node.items\n if item.get_marker('django_migration')])\n\n\[email protected](autouse=True, scope='session')\ndef setup_db_if_needed(request, tests_use_db):\n \"\"\"Sets up the site DB only if tests requested to use the DB (autouse).\"\"\"\n if tests_use_db and not request.config.getvalue('reuse_db'):\n return request.getfuncargvalue('post_db_setup')\n\n\[email protected](scope='session')\ndef post_db_setup(translations_directory, django_db_setup, django_db_blocker,\n tests_use_db, tests_use_vfolders, request):\n \"\"\"Sets up the site DB for the test session.\"\"\"\n if tests_use_db:\n with django_db_blocker.unblock():\n PootleTestEnv().setup(\n vfolders=tests_use_vfolders)\n\n\[email protected](scope='session')\ndef django_db_use_migrations(tests_use_migration):\n return tests_use_migration\n\n\npytest_plugins = tuple(\n _load_fixtures(\n fixtures,\n fixtures_core_management,\n fixtures_core_utils,\n fixtures_formats,\n fixtures_models,\n fixtures_fs))\n", "path": "pytest_pootle/plugin.py"}]}
1,361
129
gh_patches_debug_15855
rasdani/github-patches
git_diff
readthedocs__readthedocs.org-10668
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Django: adapt admin code for 3.x It seems that we missed an upgrade to make it fully compatible with Django 3.x We are using `admin.ACTION_CHECKBOX_NAME` when it was deprecated and it was removed already: > The compatibility import of django.contrib.admin.helpers.ACTION_CHECKBOX_NAME in django.contrib.admin is removed. (from https://docs.djangoproject.com/en/4.0/releases/3.1/#id1) The code lives at https://github.com/readthedocs/readthedocs.org/blob/e94c26074e9abdf7056b4e6502c52f8a6b128055/readthedocs/notifications/views.py#L48 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `readthedocs/notifications/views.py` Content: ``` 1 """Django views for the notifications app.""" 2 from django.contrib import admin, messages 3 from django.http import HttpResponseRedirect 4 from django.views.generic import FormView 5 6 from .forms import SendNotificationForm 7 8 9 class SendNotificationView(FormView): 10 11 """ 12 Form view for sending notifications to users from admin pages. 13 14 Accepts the following additional parameters: 15 16 :param queryset: Queryset to use to determine the users to send emails to 17 :param action_name: Name of the action to pass to the form template, 18 determines the action to pass back to the admin view 19 :param notification_classes: List of :py:class:`Notification` classes to 20 display in the form 21 """ 22 23 form_class = SendNotificationForm 24 template_name = "notifications/send_notification_form.html" 25 action_name = "send_email" 26 notification_classes = [] 27 28 def get_form_kwargs(self): 29 """ 30 Override form kwargs based on input fields. 31 32 The admin posts to this view initially, so detect the send button on 33 form post variables. Drop additional fields if we see the send button. 34 """ 35 kwargs = super().get_form_kwargs() 36 kwargs["notification_classes"] = self.notification_classes 37 if "send" not in self.request.POST: 38 kwargs.pop("data", None) 39 kwargs.pop("files", None) 40 return kwargs 41 42 def get_initial(self): 43 """Add selected ids to initial form data.""" 44 initial = super().get_initial() 45 initial["_selected_action"] = self.request.POST.getlist( 46 admin.ACTION_CHECKBOX_NAME, 47 ) 48 return initial 49 50 def form_valid(self, form): 51 """If form is valid, send notification to recipients.""" 52 count = 0 53 notification_cls = form.cleaned_data["source"] 54 for obj in self.get_queryset().all(): 55 for recipient in self.get_object_recipients(obj): 56 notification = notification_cls( 57 context_object=obj, 58 request=self.request, 59 user=recipient, 60 ) 61 notification.send() 62 count += 1 63 if count == 0: 64 self.message_user("No recipients to send to", level=messages.ERROR) 65 else: 66 self.message_user("Queued {} messages".format(count)) 67 return HttpResponseRedirect(self.request.get_full_path()) 68 69 def get_object_recipients(self, obj): 70 """ 71 Iterate over queryset objects and return User objects. 72 73 This allows for non-User querysets to pass back a list of Users to send 74 to. By default, assume we're working with :py:class:`User` objects and 75 just yield the single object. 
76 77 For example, this could be made to return project owners with:: 78 79 for owner in AdminPermission.members(project): 80 yield owner 81 82 :param obj: object from queryset, type is dependent on model class 83 :rtype: django.contrib.auth.models.User 84 """ 85 yield obj 86 87 def get_queryset(self): 88 return self.kwargs.get("queryset") 89 90 def get_context_data(self, **kwargs): 91 """Return queryset in context.""" 92 context = super().get_context_data(**kwargs) 93 recipients = [] 94 for obj in self.get_queryset().all(): 95 recipients.extend(self.get_object_recipients(obj)) 96 context["recipients"] = recipients 97 context["action_name"] = self.action_name 98 return context 99 100 def message_user( 101 self, 102 message, 103 level=messages.INFO, 104 extra_tags="", 105 fail_silently=False, 106 ): 107 """ 108 Implementation of. 109 110 :py:meth:`django.contrib.admin.options.ModelAdmin.message_user` 111 112 Send message through messages framework 113 """ 114 # TODO generalize this or check if implementation in ModelAdmin is 115 # usable here 116 messages.add_message( 117 self.request, 118 level, 119 message, 120 extra_tags=extra_tags, 121 fail_silently=fail_silently, 122 ) 123 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/readthedocs/notifications/views.py b/readthedocs/notifications/views.py
--- a/readthedocs/notifications/views.py
+++ b/readthedocs/notifications/views.py
@@ -1,5 +1,5 @@
 """Django views for the notifications app."""
-from django.contrib import admin, messages
+from django.contrib import messages
 from django.http import HttpResponseRedirect
 from django.views.generic import FormView
 
@@ -42,9 +42,7 @@
     def get_initial(self):
         """Add selected ids to initial form data."""
         initial = super().get_initial()
-        initial["_selected_action"] = self.request.POST.getlist(
-            admin.ACTION_CHECKBOX_NAME,
-        )
+        initial["_selected_action"] = self.request.POST.getlist("_selected_action")
         return initial
 
     def form_valid(self, form):
{"golden_diff": "diff --git a/readthedocs/notifications/views.py b/readthedocs/notifications/views.py\n--- a/readthedocs/notifications/views.py\n+++ b/readthedocs/notifications/views.py\n@@ -1,5 +1,5 @@\n \"\"\"Django views for the notifications app.\"\"\"\n-from django.contrib import admin, messages\n+from django.contrib import messages\n from django.http import HttpResponseRedirect\n from django.views.generic import FormView\n \n@@ -42,9 +42,7 @@\n def get_initial(self):\n \"\"\"Add selected ids to initial form data.\"\"\"\n initial = super().get_initial()\n- initial[\"_selected_action\"] = self.request.POST.getlist(\n- admin.ACTION_CHECKBOX_NAME,\n- )\n+ initial[\"_selected_action\"] = self.request.POST.getlist(\"_selected_action\")\n return initial\n \n def form_valid(self, form):\n", "issue": "Django: adapt admin code for 3.x\nIt seems that we missed an upgrade to make it fully compatible with Django 3.x\r\n\r\nWe are using `admin.ACTION_CHECKBOX_NAME` when it was deprecated and it was removed already:\r\n\r\n> The compatibility import of django.contrib.admin.helpers.ACTION_CHECKBOX_NAME in django.contrib.admin is removed.\r\n\r\n(from https://docs.djangoproject.com/en/4.0/releases/3.1/#id1)\r\n\r\nThe code lives at https://github.com/readthedocs/readthedocs.org/blob/e94c26074e9abdf7056b4e6502c52f8a6b128055/readthedocs/notifications/views.py#L48\n", "before_files": [{"content": "\"\"\"Django views for the notifications app.\"\"\"\nfrom django.contrib import admin, messages\nfrom django.http import HttpResponseRedirect\nfrom django.views.generic import FormView\n\nfrom .forms import SendNotificationForm\n\n\nclass SendNotificationView(FormView):\n\n \"\"\"\n Form view for sending notifications to users from admin pages.\n\n Accepts the following additional parameters:\n\n :param queryset: Queryset to use to determine the users to send emails to\n :param action_name: Name of the action to pass to the form template,\n determines the action to pass back to the admin view\n :param notification_classes: List of :py:class:`Notification` classes to\n display in the form\n \"\"\"\n\n form_class = SendNotificationForm\n template_name = \"notifications/send_notification_form.html\"\n action_name = \"send_email\"\n notification_classes = []\n\n def get_form_kwargs(self):\n \"\"\"\n Override form kwargs based on input fields.\n\n The admin posts to this view initially, so detect the send button on\n form post variables. 
Drop additional fields if we see the send button.\n \"\"\"\n kwargs = super().get_form_kwargs()\n kwargs[\"notification_classes\"] = self.notification_classes\n if \"send\" not in self.request.POST:\n kwargs.pop(\"data\", None)\n kwargs.pop(\"files\", None)\n return kwargs\n\n def get_initial(self):\n \"\"\"Add selected ids to initial form data.\"\"\"\n initial = super().get_initial()\n initial[\"_selected_action\"] = self.request.POST.getlist(\n admin.ACTION_CHECKBOX_NAME,\n )\n return initial\n\n def form_valid(self, form):\n \"\"\"If form is valid, send notification to recipients.\"\"\"\n count = 0\n notification_cls = form.cleaned_data[\"source\"]\n for obj in self.get_queryset().all():\n for recipient in self.get_object_recipients(obj):\n notification = notification_cls(\n context_object=obj,\n request=self.request,\n user=recipient,\n )\n notification.send()\n count += 1\n if count == 0:\n self.message_user(\"No recipients to send to\", level=messages.ERROR)\n else:\n self.message_user(\"Queued {} messages\".format(count))\n return HttpResponseRedirect(self.request.get_full_path())\n\n def get_object_recipients(self, obj):\n \"\"\"\n Iterate over queryset objects and return User objects.\n\n This allows for non-User querysets to pass back a list of Users to send\n to. By default, assume we're working with :py:class:`User` objects and\n just yield the single object.\n\n For example, this could be made to return project owners with::\n\n for owner in AdminPermission.members(project):\n yield owner\n\n :param obj: object from queryset, type is dependent on model class\n :rtype: django.contrib.auth.models.User\n \"\"\"\n yield obj\n\n def get_queryset(self):\n return self.kwargs.get(\"queryset\")\n\n def get_context_data(self, **kwargs):\n \"\"\"Return queryset in context.\"\"\"\n context = super().get_context_data(**kwargs)\n recipients = []\n for obj in self.get_queryset().all():\n recipients.extend(self.get_object_recipients(obj))\n context[\"recipients\"] = recipients\n context[\"action_name\"] = self.action_name\n return context\n\n def message_user(\n self,\n message,\n level=messages.INFO,\n extra_tags=\"\",\n fail_silently=False,\n ):\n \"\"\"\n Implementation of.\n\n :py:meth:`django.contrib.admin.options.ModelAdmin.message_user`\n\n Send message through messages framework\n \"\"\"\n # TODO generalize this or check if implementation in ModelAdmin is\n # usable here\n messages.add_message(\n self.request,\n level,\n message,\n extra_tags=extra_tags,\n fail_silently=fail_silently,\n )\n", "path": "readthedocs/notifications/views.py"}], "after_files": [{"content": "\"\"\"Django views for the notifications app.\"\"\"\nfrom django.contrib import messages\nfrom django.http import HttpResponseRedirect\nfrom django.views.generic import FormView\n\nfrom .forms import SendNotificationForm\n\n\nclass SendNotificationView(FormView):\n\n \"\"\"\n Form view for sending notifications to users from admin pages.\n\n Accepts the following additional parameters:\n\n :param queryset: Queryset to use to determine the users to send emails to\n :param action_name: Name of the action to pass to the form template,\n determines the action to pass back to the admin view\n :param notification_classes: List of :py:class:`Notification` classes to\n display in the form\n \"\"\"\n\n form_class = SendNotificationForm\n template_name = \"notifications/send_notification_form.html\"\n action_name = \"send_email\"\n notification_classes = []\n\n def get_form_kwargs(self):\n \"\"\"\n Override form kwargs based on 
input fields.\n\n The admin posts to this view initially, so detect the send button on\n form post variables. Drop additional fields if we see the send button.\n \"\"\"\n kwargs = super().get_form_kwargs()\n kwargs[\"notification_classes\"] = self.notification_classes\n if \"send\" not in self.request.POST:\n kwargs.pop(\"data\", None)\n kwargs.pop(\"files\", None)\n return kwargs\n\n def get_initial(self):\n \"\"\"Add selected ids to initial form data.\"\"\"\n initial = super().get_initial()\n initial[\"_selected_action\"] = self.request.POST.getlist(\"_selected_action\")\n return initial\n\n def form_valid(self, form):\n \"\"\"If form is valid, send notification to recipients.\"\"\"\n count = 0\n notification_cls = form.cleaned_data[\"source\"]\n for obj in self.get_queryset().all():\n for recipient in self.get_object_recipients(obj):\n notification = notification_cls(\n context_object=obj,\n request=self.request,\n user=recipient,\n )\n notification.send()\n count += 1\n if count == 0:\n self.message_user(\"No recipients to send to\", level=messages.ERROR)\n else:\n self.message_user(\"Queued {} messages\".format(count))\n return HttpResponseRedirect(self.request.get_full_path())\n\n def get_object_recipients(self, obj):\n \"\"\"\n Iterate over queryset objects and return User objects.\n\n This allows for non-User querysets to pass back a list of Users to send\n to. By default, assume we're working with :py:class:`User` objects and\n just yield the single object.\n\n For example, this could be made to return project owners with::\n\n for owner in AdminPermission.members(project):\n yield owner\n\n :param obj: object from queryset, type is dependent on model class\n :rtype: django.contrib.auth.models.User\n \"\"\"\n yield obj\n\n def get_queryset(self):\n return self.kwargs.get(\"queryset\")\n\n def get_context_data(self, **kwargs):\n \"\"\"Return queryset in context.\"\"\"\n context = super().get_context_data(**kwargs)\n recipients = []\n for obj in self.get_queryset().all():\n recipients.extend(self.get_object_recipients(obj))\n context[\"recipients\"] = recipients\n context[\"action_name\"] = self.action_name\n return context\n\n def message_user(\n self,\n message,\n level=messages.INFO,\n extra_tags=\"\",\n fail_silently=False,\n ):\n \"\"\"\n Implementation of.\n\n :py:meth:`django.contrib.admin.options.ModelAdmin.message_user`\n\n Send message through messages framework\n \"\"\"\n # TODO generalize this or check if implementation in ModelAdmin is\n # usable here\n messages.add_message(\n self.request,\n level,\n message,\n extra_tags=extra_tags,\n fail_silently=fail_silently,\n )\n", "path": "readthedocs/notifications/views.py"}]}
1,492
178