problem_id
stringlengths
18
22
source
stringclasses
1 value
task_type
stringclasses
1 value
in_source_id
stringlengths
13
58
prompt
stringlengths
1.1k
10.2k
golden_diff
stringlengths
151
4.94k
verification_info
stringlengths
582
21k
num_tokens
int64
271
2.05k
num_tokens_diff
int64
47
1.02k
gh_patches_debug_23432
rasdani/github-patches
git_diff
avocado-framework__avocado-5236
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Support for codecoverage when running Python tests Avocado doesn't have a documented and/or supported way for generating code coverage information when running Python based tests. The first objective is to drop the use of the custom `selftests/run_coverage` script and the additional job, in favor of reusing one already existing CI job with Avocado: * running code coverage tools * keeping the information * pushing the information Even though Python based tests and code coverage is the primary goal of this issue, it's probable that the solution will be about providing a flexible way to add a "wrapper" of sorts before the actual execution of tests. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `avocado/core/runners/avocado_instrumented.py` Content: ``` 1 import multiprocessing 2 import tempfile 3 import time 4 import traceback 5 6 from avocado.core import nrunner 7 from avocado.core.runners.utils import messages 8 from avocado.core.test import TestID 9 from avocado.core.tree import TreeNodeEnvOnly 10 from avocado.core.utils import loader 11 from avocado.core.varianter import is_empty_variant 12 13 14 class AvocadoInstrumentedTestRunner(nrunner.BaseRunner): 15 """ 16 Runner for Avocado INSTRUMENTED tests 17 18 Runnable attributes usage: 19 20 * uri: path to a test file, combined with an Avocado.Test 21 inherited class name and method. The test file path and 22 class and method names should be separated by a ":". One 23 example of a valid uri is "mytest.py:Class.test_method". 24 25 * args: not used 26 """ 27 DEFAULT_TIMEOUT = 86400 28 29 @staticmethod 30 def _create_params(runnable): 31 """Create params for the test""" 32 if runnable.variant is None: 33 return None 34 35 # rebuild the variant tree 36 variant_tree_nodes = [TreeNodeEnvOnly(path, env) for path, env 37 in runnable.variant['variant']] 38 39 if not is_empty_variant(variant_tree_nodes): 40 tree_nodes = variant_tree_nodes 41 paths = runnable.variant['paths'] 42 return tree_nodes, paths 43 44 @staticmethod 45 def _run_avocado(runnable, queue): 46 try: 47 # This assumes that a proper resolution (see resolver module) 48 # was performed, and that a URI contains: 49 # 1) path to python module 50 # 2) class 51 # 3) method 52 # 53 # To be defined: if the resolution uri should be composed like 54 # this, or broken down and stored into other data fields 55 module_path, klass_method = runnable.uri.split(':', 1) 56 57 klass, method = klass_method.split('.', 1) 58 59 params = AvocadoInstrumentedTestRunner._create_params(runnable) 60 result_dir = (runnable.output_dir or 61 tempfile.mkdtemp(prefix=".avocado-task")) 62 test_factory = [klass, 63 {'name': TestID(1, runnable.uri), 64 'methodName': method, 65 'config': runnable.config, 66 'modulePath': module_path, 67 'params': params, 68 'tags': runnable.tags, 69 'run.results_dir': result_dir, 70 }] 71 72 messages.start_logging(runnable.config, queue) 73 instance = loader.load_test(test_factory) 74 early_state = instance.get_state() 75 early_state['type'] = "early_state" 76 queue.put(early_state) 77 instance.run_avocado() 78 state = instance.get_state() 79 fail_reason = state.get('fail_reason') 80 queue.put(messages.WhiteboardMessage.get(state['whiteboard'])) 81 queue.put(messages.FinishedMessage.get(state['status'].lower(), 82 fail_reason=fail_reason)) 83 except Exception as e: 84 queue.put(messages.StderrMessage.get(traceback.format_exc())) 85 queue.put(messages.FinishedMessage.get('error', fail_reason=str(e))) 86 87 def run(self): 88 yield messages.StartedMessage.get() 89 try: 90 queue = multiprocessing.SimpleQueue() 91 process = multiprocessing.Process(target=self._run_avocado, 92 args=(self.runnable, queue)) 93 94 process.start() 95 96 time_started = time.monotonic() 97 98 timeout = float(self.DEFAULT_TIMEOUT) 99 most_current_execution_state_time = None 100 while True: 101 time.sleep(nrunner.RUNNER_RUN_CHECK_INTERVAL) 102 now = time.monotonic() 103 if queue.empty(): 104 if most_current_execution_state_time is not None: 105 next_execution_state_mark = (most_current_execution_state_time + 106 nrunner.RUNNER_RUN_STATUS_INTERVAL) 107 if (most_current_execution_state_time is None or 108 now > next_execution_state_mark): 109 most_current_execution_state_time = now 110 yield messages.RunningMessage.get() 111 if (now - time_started) > timeout: 112 process.terminate() 113 yield messages.FinishedMessage.get('interrupted', 114 'timeout') 115 break 116 else: 117 message = queue.get() 118 if message.get('type') == 'early_state': 119 timeout = float(message.get('timeout') or 120 self.DEFAULT_TIMEOUT) 121 else: 122 yield message 123 if message.get('status') == 'finished': 124 break 125 except Exception as e: 126 yield messages.StderrMessage.get(traceback.format_exc()) 127 yield messages.FinishedMessage.get('error', fail_reason=str(e)) 128 129 130 class RunnerApp(nrunner.BaseRunnerApp): 131 PROG_NAME = 'avocado-runner-avocado-instrumented' 132 PROG_DESCRIPTION = 'nrunner application for avocado-instrumented tests' 133 RUNNABLE_KINDS_CAPABLE = { 134 'avocado-instrumented': AvocadoInstrumentedTestRunner 135 } 136 137 138 def main(): 139 nrunner.main(RunnerApp) 140 141 142 if __name__ == '__main__': 143 main() 144 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/avocado/core/runners/avocado_instrumented.py b/avocado/core/runners/avocado_instrumented.py --- a/avocado/core/runners/avocado_instrumented.py +++ b/avocado/core/runners/avocado_instrumented.py @@ -1,4 +1,5 @@ import multiprocessing +import os import tempfile import time import traceback @@ -70,11 +71,22 @@ }] messages.start_logging(runnable.config, queue) + + if 'COVERAGE_RUN' in os.environ: + from coverage import Coverage + coverage = Coverage() + coverage.start() + instance = loader.load_test(test_factory) early_state = instance.get_state() early_state['type'] = "early_state" queue.put(early_state) instance.run_avocado() + + if 'COVERAGE_RUN' in os.environ: + coverage.stop() + coverage.save() + state = instance.get_state() fail_reason = state.get('fail_reason') queue.put(messages.WhiteboardMessage.get(state['whiteboard']))
{"golden_diff": "diff --git a/avocado/core/runners/avocado_instrumented.py b/avocado/core/runners/avocado_instrumented.py\n--- a/avocado/core/runners/avocado_instrumented.py\n+++ b/avocado/core/runners/avocado_instrumented.py\n@@ -1,4 +1,5 @@\n import multiprocessing\n+import os\n import tempfile\n import time\n import traceback\n@@ -70,11 +71,22 @@\n }]\n \n messages.start_logging(runnable.config, queue)\n+\n+ if 'COVERAGE_RUN' in os.environ:\n+ from coverage import Coverage\n+ coverage = Coverage()\n+ coverage.start()\n+\n instance = loader.load_test(test_factory)\n early_state = instance.get_state()\n early_state['type'] = \"early_state\"\n queue.put(early_state)\n instance.run_avocado()\n+\n+ if 'COVERAGE_RUN' in os.environ:\n+ coverage.stop()\n+ coverage.save()\n+\n state = instance.get_state()\n fail_reason = state.get('fail_reason')\n queue.put(messages.WhiteboardMessage.get(state['whiteboard']))\n", "issue": "Support for codecoverage when running Python tests\nAvocado doesn't have a documented and/or supported way for generating code coverage information when running Python based tests.\r\n\r\nThe first objective is to drop the use of the custom `selftests/run_coverage` script and the additional job, in favor of reusing one already existing CI job with Avocado:\r\n\r\n * running code coverage tools\r\n * keeping the information\r\n * pushing the information\r\n\r\nEven though Python based tests and code coverage is the primary goal of this issue, it's probable that the solution will be about providing a flexible way to add a \"wrapper\" of sorts before the actual execution of tests.\r\n \n", "before_files": [{"content": "import multiprocessing\nimport tempfile\nimport time\nimport traceback\n\nfrom avocado.core import nrunner\nfrom avocado.core.runners.utils import messages\nfrom avocado.core.test import TestID\nfrom avocado.core.tree import TreeNodeEnvOnly\nfrom avocado.core.utils import loader\nfrom avocado.core.varianter import is_empty_variant\n\n\nclass AvocadoInstrumentedTestRunner(nrunner.BaseRunner):\n \"\"\"\n Runner for Avocado INSTRUMENTED tests\n\n Runnable attributes usage:\n\n * uri: path to a test file, combined with an Avocado.Test\n inherited class name and method. The test file path and\n class and method names should be separated by a \":\". One\n example of a valid uri is \"mytest.py:Class.test_method\".\n\n * args: not used\n \"\"\"\n DEFAULT_TIMEOUT = 86400\n\n @staticmethod\n def _create_params(runnable):\n \"\"\"Create params for the test\"\"\"\n if runnable.variant is None:\n return None\n\n # rebuild the variant tree\n variant_tree_nodes = [TreeNodeEnvOnly(path, env) for path, env\n in runnable.variant['variant']]\n\n if not is_empty_variant(variant_tree_nodes):\n tree_nodes = variant_tree_nodes\n paths = runnable.variant['paths']\n return tree_nodes, paths\n\n @staticmethod\n def _run_avocado(runnable, queue):\n try:\n # This assumes that a proper resolution (see resolver module)\n # was performed, and that a URI contains:\n # 1) path to python module\n # 2) class\n # 3) method\n #\n # To be defined: if the resolution uri should be composed like\n # this, or broken down and stored into other data fields\n module_path, klass_method = runnable.uri.split(':', 1)\n\n klass, method = klass_method.split('.', 1)\n\n params = AvocadoInstrumentedTestRunner._create_params(runnable)\n result_dir = (runnable.output_dir or\n tempfile.mkdtemp(prefix=\".avocado-task\"))\n test_factory = [klass,\n {'name': TestID(1, runnable.uri),\n 'methodName': method,\n 'config': runnable.config,\n 'modulePath': module_path,\n 'params': params,\n 'tags': runnable.tags,\n 'run.results_dir': result_dir,\n }]\n\n messages.start_logging(runnable.config, queue)\n instance = loader.load_test(test_factory)\n early_state = instance.get_state()\n early_state['type'] = \"early_state\"\n queue.put(early_state)\n instance.run_avocado()\n state = instance.get_state()\n fail_reason = state.get('fail_reason')\n queue.put(messages.WhiteboardMessage.get(state['whiteboard']))\n queue.put(messages.FinishedMessage.get(state['status'].lower(),\n fail_reason=fail_reason))\n except Exception as e:\n queue.put(messages.StderrMessage.get(traceback.format_exc()))\n queue.put(messages.FinishedMessage.get('error', fail_reason=str(e)))\n\n def run(self):\n yield messages.StartedMessage.get()\n try:\n queue = multiprocessing.SimpleQueue()\n process = multiprocessing.Process(target=self._run_avocado,\n args=(self.runnable, queue))\n\n process.start()\n\n time_started = time.monotonic()\n\n timeout = float(self.DEFAULT_TIMEOUT)\n most_current_execution_state_time = None\n while True:\n time.sleep(nrunner.RUNNER_RUN_CHECK_INTERVAL)\n now = time.monotonic()\n if queue.empty():\n if most_current_execution_state_time is not None:\n next_execution_state_mark = (most_current_execution_state_time +\n nrunner.RUNNER_RUN_STATUS_INTERVAL)\n if (most_current_execution_state_time is None or\n now > next_execution_state_mark):\n most_current_execution_state_time = now\n yield messages.RunningMessage.get()\n if (now - time_started) > timeout:\n process.terminate()\n yield messages.FinishedMessage.get('interrupted',\n 'timeout')\n break\n else:\n message = queue.get()\n if message.get('type') == 'early_state':\n timeout = float(message.get('timeout') or\n self.DEFAULT_TIMEOUT)\n else:\n yield message\n if message.get('status') == 'finished':\n break\n except Exception as e:\n yield messages.StderrMessage.get(traceback.format_exc())\n yield messages.FinishedMessage.get('error', fail_reason=str(e))\n\n\nclass RunnerApp(nrunner.BaseRunnerApp):\n PROG_NAME = 'avocado-runner-avocado-instrumented'\n PROG_DESCRIPTION = 'nrunner application for avocado-instrumented tests'\n RUNNABLE_KINDS_CAPABLE = {\n 'avocado-instrumented': AvocadoInstrumentedTestRunner\n }\n\n\ndef main():\n nrunner.main(RunnerApp)\n\n\nif __name__ == '__main__':\n main()\n", "path": "avocado/core/runners/avocado_instrumented.py"}], "after_files": [{"content": "import multiprocessing\nimport os\nimport tempfile\nimport time\nimport traceback\n\nfrom avocado.core import nrunner\nfrom avocado.core.runners.utils import messages\nfrom avocado.core.test import TestID\nfrom avocado.core.tree import TreeNodeEnvOnly\nfrom avocado.core.utils import loader\nfrom avocado.core.varianter import is_empty_variant\n\n\nclass AvocadoInstrumentedTestRunner(nrunner.BaseRunner):\n \"\"\"\n Runner for Avocado INSTRUMENTED tests\n\n Runnable attributes usage:\n\n * uri: path to a test file, combined with an Avocado.Test\n inherited class name and method. The test file path and\n class and method names should be separated by a \":\". One\n example of a valid uri is \"mytest.py:Class.test_method\".\n\n * args: not used\n \"\"\"\n DEFAULT_TIMEOUT = 86400\n\n @staticmethod\n def _create_params(runnable):\n \"\"\"Create params for the test\"\"\"\n if runnable.variant is None:\n return None\n\n # rebuild the variant tree\n variant_tree_nodes = [TreeNodeEnvOnly(path, env) for path, env\n in runnable.variant['variant']]\n\n if not is_empty_variant(variant_tree_nodes):\n tree_nodes = variant_tree_nodes\n paths = runnable.variant['paths']\n return tree_nodes, paths\n\n @staticmethod\n def _run_avocado(runnable, queue):\n try:\n # This assumes that a proper resolution (see resolver module)\n # was performed, and that a URI contains:\n # 1) path to python module\n # 2) class\n # 3) method\n #\n # To be defined: if the resolution uri should be composed like\n # this, or broken down and stored into other data fields\n module_path, klass_method = runnable.uri.split(':', 1)\n\n klass, method = klass_method.split('.', 1)\n\n params = AvocadoInstrumentedTestRunner._create_params(runnable)\n result_dir = (runnable.output_dir or\n tempfile.mkdtemp(prefix=\".avocado-task\"))\n test_factory = [klass,\n {'name': TestID(1, runnable.uri),\n 'methodName': method,\n 'config': runnable.config,\n 'modulePath': module_path,\n 'params': params,\n 'tags': runnable.tags,\n 'run.results_dir': result_dir,\n }]\n\n messages.start_logging(runnable.config, queue)\n\n if 'COVERAGE_RUN' in os.environ:\n from coverage import Coverage\n coverage = Coverage()\n coverage.start()\n\n instance = loader.load_test(test_factory)\n early_state = instance.get_state()\n early_state['type'] = \"early_state\"\n queue.put(early_state)\n instance.run_avocado()\n\n if 'COVERAGE_RUN' in os.environ:\n coverage.stop()\n coverage.save()\n\n state = instance.get_state()\n fail_reason = state.get('fail_reason')\n queue.put(messages.WhiteboardMessage.get(state['whiteboard']))\n queue.put(messages.FinishedMessage.get(state['status'].lower(),\n fail_reason=fail_reason))\n except Exception as e:\n queue.put(messages.StderrMessage.get(traceback.format_exc()))\n queue.put(messages.FinishedMessage.get('error', fail_reason=str(e)))\n\n def run(self):\n yield messages.StartedMessage.get()\n try:\n queue = multiprocessing.SimpleQueue()\n process = multiprocessing.Process(target=self._run_avocado,\n args=(self.runnable, queue))\n\n process.start()\n\n time_started = time.monotonic()\n\n timeout = float(self.DEFAULT_TIMEOUT)\n most_current_execution_state_time = None\n while True:\n time.sleep(nrunner.RUNNER_RUN_CHECK_INTERVAL)\n now = time.monotonic()\n if queue.empty():\n if most_current_execution_state_time is not None:\n next_execution_state_mark = (most_current_execution_state_time +\n nrunner.RUNNER_RUN_STATUS_INTERVAL)\n if (most_current_execution_state_time is None or\n now > next_execution_state_mark):\n most_current_execution_state_time = now\n yield messages.RunningMessage.get()\n if (now - time_started) > timeout:\n process.terminate()\n yield messages.FinishedMessage.get('interrupted',\n 'timeout')\n break\n else:\n message = queue.get()\n if message.get('type') == 'early_state':\n timeout = float(message.get('timeout') or\n self.DEFAULT_TIMEOUT)\n else:\n yield message\n if message.get('status') == 'finished':\n break\n except Exception as e:\n yield messages.StderrMessage.get(traceback.format_exc())\n yield messages.FinishedMessage.get('error', fail_reason=str(e))\n\n\nclass RunnerApp(nrunner.BaseRunnerApp):\n PROG_NAME = 'avocado-runner-avocado-instrumented'\n PROG_DESCRIPTION = 'nrunner application for avocado-instrumented tests'\n RUNNABLE_KINDS_CAPABLE = {\n 'avocado-instrumented': AvocadoInstrumentedTestRunner\n }\n\n\ndef main():\n nrunner.main(RunnerApp)\n\n\nif __name__ == '__main__':\n main()\n", "path": "avocado/core/runners/avocado_instrumented.py"}]}
1,758
244
gh_patches_debug_34104
rasdani/github-patches
git_diff
pallets__werkzeug-2777
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Elapsed and timestamp values are not available to filename_format function When the `ProfilerMiddleware` `filename_format` argument is a callable, the callable will only receive the WSGI environ variable. However, the default format string can use the `elapsed` and `time` values. This means that one using a format function cannot recreate a similar output pattern. In my case, I'd like to use both, but format the timestamp differently. Example: I'd like a filename of the form `2023-08-21:14:05:05.POST.myurl.13ms.prof`. I can get the timestamp by doing my own `datetime.now()` call, but the elapsed time is not available. This problem is solvable by subclassing `ProfilerMiddleware` or writing ones own profiling middleware, but this seems like a useful thing to have in the core library so library users don't need to duplicate the `__call__()` code in their own projects. I can submit a PR if this change is welcome. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/werkzeug/middleware/profiler.py` Content: ``` 1 """ 2 Application Profiler 3 ==================== 4 5 This module provides a middleware that profiles each request with the 6 :mod:`cProfile` module. This can help identify bottlenecks in your code 7 that may be slowing down your application. 8 9 .. autoclass:: ProfilerMiddleware 10 11 :copyright: 2007 Pallets 12 :license: BSD-3-Clause 13 """ 14 from __future__ import annotations 15 16 import os.path 17 import sys 18 import time 19 import typing as t 20 from pstats import Stats 21 22 try: 23 from cProfile import Profile 24 except ImportError: 25 from profile import Profile # type: ignore 26 27 if t.TYPE_CHECKING: 28 from _typeshed.wsgi import StartResponse 29 from _typeshed.wsgi import WSGIApplication 30 from _typeshed.wsgi import WSGIEnvironment 31 32 33 class ProfilerMiddleware: 34 """Wrap a WSGI application and profile the execution of each 35 request. Responses are buffered so that timings are more exact. 36 37 If ``stream`` is given, :class:`pstats.Stats` are written to it 38 after each request. If ``profile_dir`` is given, :mod:`cProfile` 39 data files are saved to that directory, one file per request. 40 41 The filename can be customized by passing ``filename_format``. If 42 it is a string, it will be formatted using :meth:`str.format` with 43 the following fields available: 44 45 - ``{method}`` - The request method; GET, POST, etc. 46 - ``{path}`` - The request path or 'root' should one not exist. 47 - ``{elapsed}`` - The elapsed time of the request. 48 - ``{time}`` - The time of the request. 49 50 If it is a callable, it will be called with the WSGI ``environ`` 51 dict and should return a filename. 52 53 :param app: The WSGI application to wrap. 54 :param stream: Write stats to this stream. Disable with ``None``. 55 :param sort_by: A tuple of columns to sort stats by. See 56 :meth:`pstats.Stats.sort_stats`. 57 :param restrictions: A tuple of restrictions to filter stats by. See 58 :meth:`pstats.Stats.print_stats`. 59 :param profile_dir: Save profile data files to this directory. 60 :param filename_format: Format string for profile data file names, 61 or a callable returning a name. See explanation above. 62 63 .. code-block:: python 64 65 from werkzeug.middleware.profiler import ProfilerMiddleware 66 app = ProfilerMiddleware(app) 67 68 .. versionchanged:: 0.15 69 Stats are written even if ``profile_dir`` is given, and can be 70 disable by passing ``stream=None``. 71 72 .. versionadded:: 0.15 73 Added ``filename_format``. 74 75 .. versionadded:: 0.9 76 Added ``restrictions`` and ``profile_dir``. 77 """ 78 79 def __init__( 80 self, 81 app: WSGIApplication, 82 stream: t.IO[str] | None = sys.stdout, 83 sort_by: t.Iterable[str] = ("time", "calls"), 84 restrictions: t.Iterable[str | int | float] = (), 85 profile_dir: str | None = None, 86 filename_format: str = "{method}.{path}.{elapsed:.0f}ms.{time:.0f}.prof", 87 ) -> None: 88 self._app = app 89 self._stream = stream 90 self._sort_by = sort_by 91 self._restrictions = restrictions 92 self._profile_dir = profile_dir 93 self._filename_format = filename_format 94 95 def __call__( 96 self, environ: WSGIEnvironment, start_response: StartResponse 97 ) -> t.Iterable[bytes]: 98 response_body: list[bytes] = [] 99 100 def catching_start_response(status, headers, exc_info=None): # type: ignore 101 start_response(status, headers, exc_info) 102 return response_body.append 103 104 def runapp() -> None: 105 app_iter = self._app( 106 environ, t.cast("StartResponse", catching_start_response) 107 ) 108 response_body.extend(app_iter) 109 110 if hasattr(app_iter, "close"): 111 app_iter.close() 112 113 profile = Profile() 114 start = time.time() 115 profile.runcall(runapp) 116 body = b"".join(response_body) 117 elapsed = time.time() - start 118 119 if self._profile_dir is not None: 120 if callable(self._filename_format): 121 filename = self._filename_format(environ) 122 else: 123 filename = self._filename_format.format( 124 method=environ["REQUEST_METHOD"], 125 path=environ["PATH_INFO"].strip("/").replace("/", ".") or "root", 126 elapsed=elapsed * 1000.0, 127 time=time.time(), 128 ) 129 filename = os.path.join(self._profile_dir, filename) 130 profile.dump_stats(filename) 131 132 if self._stream is not None: 133 stats = Stats(profile, stream=self._stream) 134 stats.sort_stats(*self._sort_by) 135 print("-" * 80, file=self._stream) 136 path_info = environ.get("PATH_INFO", "") 137 print(f"PATH: {path_info!r}", file=self._stream) 138 stats.print_stats(*self._restrictions) 139 print(f"{'-' * 80}\n", file=self._stream) 140 141 return [body] 142 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/werkzeug/middleware/profiler.py b/src/werkzeug/middleware/profiler.py --- a/src/werkzeug/middleware/profiler.py +++ b/src/werkzeug/middleware/profiler.py @@ -44,11 +44,16 @@ - ``{method}`` - The request method; GET, POST, etc. - ``{path}`` - The request path or 'root' should one not exist. - - ``{elapsed}`` - The elapsed time of the request. + - ``{elapsed}`` - The elapsed time of the request in milliseconds. - ``{time}`` - The time of the request. - If it is a callable, it will be called with the WSGI ``environ`` - dict and should return a filename. + If it is a callable, it will be called with the WSGI ``environ`` and + be expected to return a filename string. The ``environ`` dictionary + will also have the ``"werkzeug.profiler"`` key populated with a + dictionary containing the following fields (more may be added in the + future): + - ``{elapsed}`` - The elapsed time of the request in milliseconds. + - ``{time}`` - The time of the request. :param app: The WSGI application to wrap. :param stream: Write stats to this stream. Disable with ``None``. @@ -65,6 +70,10 @@ from werkzeug.middleware.profiler import ProfilerMiddleware app = ProfilerMiddleware(app) + .. versionchanged:: 3.0 + Added the ``"werkzeug.profiler"`` key to the ``filename_format(environ)`` + parameter with the ``elapsed`` and ``time`` fields. + .. versionchanged:: 0.15 Stats are written even if ``profile_dir`` is given, and can be disable by passing ``stream=None``. @@ -118,6 +127,10 @@ if self._profile_dir is not None: if callable(self._filename_format): + environ["werkzeug.profiler"] = { + "elapsed": elapsed * 1000.0, + "time": time.time(), + } filename = self._filename_format(environ) else: filename = self._filename_format.format(
{"golden_diff": "diff --git a/src/werkzeug/middleware/profiler.py b/src/werkzeug/middleware/profiler.py\n--- a/src/werkzeug/middleware/profiler.py\n+++ b/src/werkzeug/middleware/profiler.py\n@@ -44,11 +44,16 @@\n \n - ``{method}`` - The request method; GET, POST, etc.\n - ``{path}`` - The request path or 'root' should one not exist.\n- - ``{elapsed}`` - The elapsed time of the request.\n+ - ``{elapsed}`` - The elapsed time of the request in milliseconds.\n - ``{time}`` - The time of the request.\n \n- If it is a callable, it will be called with the WSGI ``environ``\n- dict and should return a filename.\n+ If it is a callable, it will be called with the WSGI ``environ`` and\n+ be expected to return a filename string. The ``environ`` dictionary\n+ will also have the ``\"werkzeug.profiler\"`` key populated with a\n+ dictionary containing the following fields (more may be added in the\n+ future):\n+ - ``{elapsed}`` - The elapsed time of the request in milliseconds.\n+ - ``{time}`` - The time of the request.\n \n :param app: The WSGI application to wrap.\n :param stream: Write stats to this stream. Disable with ``None``.\n@@ -65,6 +70,10 @@\n from werkzeug.middleware.profiler import ProfilerMiddleware\n app = ProfilerMiddleware(app)\n \n+ .. versionchanged:: 3.0\n+ Added the ``\"werkzeug.profiler\"`` key to the ``filename_format(environ)``\n+ parameter with the ``elapsed`` and ``time`` fields.\n+\n .. versionchanged:: 0.15\n Stats are written even if ``profile_dir`` is given, and can be\n disable by passing ``stream=None``.\n@@ -118,6 +127,10 @@\n \n if self._profile_dir is not None:\n if callable(self._filename_format):\n+ environ[\"werkzeug.profiler\"] = {\n+ \"elapsed\": elapsed * 1000.0,\n+ \"time\": time.time(),\n+ }\n filename = self._filename_format(environ)\n else:\n filename = self._filename_format.format(\n", "issue": "Elapsed and timestamp values are not available to filename_format function\nWhen the `ProfilerMiddleware` `filename_format` argument is a callable, the callable will only receive the WSGI environ variable. However, the default format string can use the `elapsed` and `time` values. This means that one using a format function cannot recreate a similar output pattern. In my case, I'd like to use both, but format the timestamp differently.\r\n\r\n\r\nExample: I'd like a filename of the form `2023-08-21:14:05:05.POST.myurl.13ms.prof`. I can get the timestamp by doing my own `datetime.now()` call, but the elapsed time is not available.\r\n\r\nThis problem is solvable by subclassing `ProfilerMiddleware` or writing ones own profiling middleware, but this seems like a useful thing to have in the core library so library users don't need to duplicate the `__call__()` code in their own projects.\r\n\r\nI can submit a PR if this change is welcome.\r\n\n", "before_files": [{"content": "\"\"\"\nApplication Profiler\n====================\n\nThis module provides a middleware that profiles each request with the\n:mod:`cProfile` module. This can help identify bottlenecks in your code\nthat may be slowing down your application.\n\n.. autoclass:: ProfilerMiddleware\n\n:copyright: 2007 Pallets\n:license: BSD-3-Clause\n\"\"\"\nfrom __future__ import annotations\n\nimport os.path\nimport sys\nimport time\nimport typing as t\nfrom pstats import Stats\n\ntry:\n from cProfile import Profile\nexcept ImportError:\n from profile import Profile # type: ignore\n\nif t.TYPE_CHECKING:\n from _typeshed.wsgi import StartResponse\n from _typeshed.wsgi import WSGIApplication\n from _typeshed.wsgi import WSGIEnvironment\n\n\nclass ProfilerMiddleware:\n \"\"\"Wrap a WSGI application and profile the execution of each\n request. Responses are buffered so that timings are more exact.\n\n If ``stream`` is given, :class:`pstats.Stats` are written to it\n after each request. If ``profile_dir`` is given, :mod:`cProfile`\n data files are saved to that directory, one file per request.\n\n The filename can be customized by passing ``filename_format``. If\n it is a string, it will be formatted using :meth:`str.format` with\n the following fields available:\n\n - ``{method}`` - The request method; GET, POST, etc.\n - ``{path}`` - The request path or 'root' should one not exist.\n - ``{elapsed}`` - The elapsed time of the request.\n - ``{time}`` - The time of the request.\n\n If it is a callable, it will be called with the WSGI ``environ``\n dict and should return a filename.\n\n :param app: The WSGI application to wrap.\n :param stream: Write stats to this stream. Disable with ``None``.\n :param sort_by: A tuple of columns to sort stats by. See\n :meth:`pstats.Stats.sort_stats`.\n :param restrictions: A tuple of restrictions to filter stats by. See\n :meth:`pstats.Stats.print_stats`.\n :param profile_dir: Save profile data files to this directory.\n :param filename_format: Format string for profile data file names,\n or a callable returning a name. See explanation above.\n\n .. code-block:: python\n\n from werkzeug.middleware.profiler import ProfilerMiddleware\n app = ProfilerMiddleware(app)\n\n .. versionchanged:: 0.15\n Stats are written even if ``profile_dir`` is given, and can be\n disable by passing ``stream=None``.\n\n .. versionadded:: 0.15\n Added ``filename_format``.\n\n .. versionadded:: 0.9\n Added ``restrictions`` and ``profile_dir``.\n \"\"\"\n\n def __init__(\n self,\n app: WSGIApplication,\n stream: t.IO[str] | None = sys.stdout,\n sort_by: t.Iterable[str] = (\"time\", \"calls\"),\n restrictions: t.Iterable[str | int | float] = (),\n profile_dir: str | None = None,\n filename_format: str = \"{method}.{path}.{elapsed:.0f}ms.{time:.0f}.prof\",\n ) -> None:\n self._app = app\n self._stream = stream\n self._sort_by = sort_by\n self._restrictions = restrictions\n self._profile_dir = profile_dir\n self._filename_format = filename_format\n\n def __call__(\n self, environ: WSGIEnvironment, start_response: StartResponse\n ) -> t.Iterable[bytes]:\n response_body: list[bytes] = []\n\n def catching_start_response(status, headers, exc_info=None): # type: ignore\n start_response(status, headers, exc_info)\n return response_body.append\n\n def runapp() -> None:\n app_iter = self._app(\n environ, t.cast(\"StartResponse\", catching_start_response)\n )\n response_body.extend(app_iter)\n\n if hasattr(app_iter, \"close\"):\n app_iter.close()\n\n profile = Profile()\n start = time.time()\n profile.runcall(runapp)\n body = b\"\".join(response_body)\n elapsed = time.time() - start\n\n if self._profile_dir is not None:\n if callable(self._filename_format):\n filename = self._filename_format(environ)\n else:\n filename = self._filename_format.format(\n method=environ[\"REQUEST_METHOD\"],\n path=environ[\"PATH_INFO\"].strip(\"/\").replace(\"/\", \".\") or \"root\",\n elapsed=elapsed * 1000.0,\n time=time.time(),\n )\n filename = os.path.join(self._profile_dir, filename)\n profile.dump_stats(filename)\n\n if self._stream is not None:\n stats = Stats(profile, stream=self._stream)\n stats.sort_stats(*self._sort_by)\n print(\"-\" * 80, file=self._stream)\n path_info = environ.get(\"PATH_INFO\", \"\")\n print(f\"PATH: {path_info!r}\", file=self._stream)\n stats.print_stats(*self._restrictions)\n print(f\"{'-' * 80}\\n\", file=self._stream)\n\n return [body]\n", "path": "src/werkzeug/middleware/profiler.py"}], "after_files": [{"content": "\"\"\"\nApplication Profiler\n====================\n\nThis module provides a middleware that profiles each request with the\n:mod:`cProfile` module. This can help identify bottlenecks in your code\nthat may be slowing down your application.\n\n.. autoclass:: ProfilerMiddleware\n\n:copyright: 2007 Pallets\n:license: BSD-3-Clause\n\"\"\"\nfrom __future__ import annotations\n\nimport os.path\nimport sys\nimport time\nimport typing as t\nfrom pstats import Stats\n\ntry:\n from cProfile import Profile\nexcept ImportError:\n from profile import Profile # type: ignore\n\nif t.TYPE_CHECKING:\n from _typeshed.wsgi import StartResponse\n from _typeshed.wsgi import WSGIApplication\n from _typeshed.wsgi import WSGIEnvironment\n\n\nclass ProfilerMiddleware:\n \"\"\"Wrap a WSGI application and profile the execution of each\n request. Responses are buffered so that timings are more exact.\n\n If ``stream`` is given, :class:`pstats.Stats` are written to it\n after each request. If ``profile_dir`` is given, :mod:`cProfile`\n data files are saved to that directory, one file per request.\n\n The filename can be customized by passing ``filename_format``. If\n it is a string, it will be formatted using :meth:`str.format` with\n the following fields available:\n\n - ``{method}`` - The request method; GET, POST, etc.\n - ``{path}`` - The request path or 'root' should one not exist.\n - ``{elapsed}`` - The elapsed time of the request in milliseconds.\n - ``{time}`` - The time of the request.\n\n If it is a callable, it will be called with the WSGI ``environ`` and\n be expected to return a filename string. The ``environ`` dictionary\n will also have the ``\"werkzeug.profiler\"`` key populated with a\n dictionary containing the following fields (more may be added in the\n future):\n - ``{elapsed}`` - The elapsed time of the request in milliseconds.\n - ``{time}`` - The time of the request.\n\n :param app: The WSGI application to wrap.\n :param stream: Write stats to this stream. Disable with ``None``.\n :param sort_by: A tuple of columns to sort stats by. See\n :meth:`pstats.Stats.sort_stats`.\n :param restrictions: A tuple of restrictions to filter stats by. See\n :meth:`pstats.Stats.print_stats`.\n :param profile_dir: Save profile data files to this directory.\n :param filename_format: Format string for profile data file names,\n or a callable returning a name. See explanation above.\n\n .. code-block:: python\n\n from werkzeug.middleware.profiler import ProfilerMiddleware\n app = ProfilerMiddleware(app)\n\n .. versionchanged:: 3.0\n Added the ``\"werkzeug.profiler\"`` key to the ``filename_format(environ)``\n parameter with the ``elapsed`` and ``time`` fields.\n\n .. versionchanged:: 0.15\n Stats are written even if ``profile_dir`` is given, and can be\n disable by passing ``stream=None``.\n\n .. versionadded:: 0.15\n Added ``filename_format``.\n\n .. versionadded:: 0.9\n Added ``restrictions`` and ``profile_dir``.\n \"\"\"\n\n def __init__(\n self,\n app: WSGIApplication,\n stream: t.IO[str] | None = sys.stdout,\n sort_by: t.Iterable[str] = (\"time\", \"calls\"),\n restrictions: t.Iterable[str | int | float] = (),\n profile_dir: str | None = None,\n filename_format: str = \"{method}.{path}.{elapsed:.0f}ms.{time:.0f}.prof\",\n ) -> None:\n self._app = app\n self._stream = stream\n self._sort_by = sort_by\n self._restrictions = restrictions\n self._profile_dir = profile_dir\n self._filename_format = filename_format\n\n def __call__(\n self, environ: WSGIEnvironment, start_response: StartResponse\n ) -> t.Iterable[bytes]:\n response_body: list[bytes] = []\n\n def catching_start_response(status, headers, exc_info=None): # type: ignore\n start_response(status, headers, exc_info)\n return response_body.append\n\n def runapp() -> None:\n app_iter = self._app(\n environ, t.cast(\"StartResponse\", catching_start_response)\n )\n response_body.extend(app_iter)\n\n if hasattr(app_iter, \"close\"):\n app_iter.close()\n\n profile = Profile()\n start = time.time()\n profile.runcall(runapp)\n body = b\"\".join(response_body)\n elapsed = time.time() - start\n\n if self._profile_dir is not None:\n if callable(self._filename_format):\n environ[\"werkzeug.profiler\"] = {\n \"elapsed\": elapsed * 1000.0,\n \"time\": time.time(),\n }\n filename = self._filename_format(environ)\n else:\n filename = self._filename_format.format(\n method=environ[\"REQUEST_METHOD\"],\n path=environ[\"PATH_INFO\"].strip(\"/\").replace(\"/\", \".\") or \"root\",\n elapsed=elapsed * 1000.0,\n time=time.time(),\n )\n filename = os.path.join(self._profile_dir, filename)\n profile.dump_stats(filename)\n\n if self._stream is not None:\n stats = Stats(profile, stream=self._stream)\n stats.sort_stats(*self._sort_by)\n print(\"-\" * 80, file=self._stream)\n path_info = environ.get(\"PATH_INFO\", \"\")\n print(f\"PATH: {path_info!r}\", file=self._stream)\n stats.print_stats(*self._restrictions)\n print(f\"{'-' * 80}\\n\", file=self._stream)\n\n return [body]\n", "path": "src/werkzeug/middleware/profiler.py"}]}
1,974
542
gh_patches_debug_16133
rasdani/github-patches
git_diff
pyscript__pyscript-1017
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Antigravity example is broken after stdout/display changes ### Checklist - [X] I added a descriptive title - [X] I searched for other issues and couldn't find a solution or duplication - [X] I already searched in Google and didn't find any good information or help ### What happened? After the changes related to stdout/err and display, the antigravity example is broken. It currently fails with the following error: ``` Traceback (most recent call last): File "/lib/python3.10/site-packages/_pyodide/_base.py", line 435, in eval_code .run(globals, locals) File "/lib/python3.10/site-packages/_pyodide/_base.py", line 304, in run coroutine = eval(self.code, globals, locals) File "<exec>", line 1, in <module> File "/home/pyodide/antigravity.py", line 44, in <module> _auto = Antigravity(append=True) File "/home/pyodide/antigravity.py", line 14, in __init__ target = target or sys.stdout._out AttributeError: '_io.TextIOWrapper' object has no attribute '_out' ``` This is due to the fact that we are not custom changing `sys.stdout` the same way. It seems like the best option here would be to implement the right `_repr_` method but would love to hear @philippjfr on this **NOTE: ** goes without saying that we should add a test as well :) ### What browsers are you seeing the problem on? (if applicable) _No response_ ### Console info _No response_ ### Additional Context _No response_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `examples/antigravity.py` Content: ``` 1 import random 2 import sys 3 4 from js import DOMParser, document, setInterval 5 from pyodide.ffi import create_proxy 6 from pyodide.http import open_url 7 8 9 class Antigravity: 10 11 url = "./antigravity.svg" 12 13 def __init__(self, target=None, interval=10, append=True, fly=False): 14 target = target or sys.stdout._out 15 self.target = ( 16 document.getElementById(target) if isinstance(target, str) else target 17 ) 18 doc = DOMParser.new().parseFromString( 19 open_url(self.url).read(), "image/svg+xml" 20 ) 21 self.node = doc.documentElement 22 if append: 23 self.target.append(self.node) 24 else: 25 self.target.replaceChildren(self.node) 26 self.xoffset, self.yoffset = 0, 0 27 self.interval = interval 28 if fly: 29 self.fly() 30 31 def fly(self): 32 setInterval(create_proxy(self.move), self.interval) 33 34 def move(self): 35 char = self.node.getElementsByTagName("g")[1] 36 char.setAttribute("transform", f"translate({self.xoffset}, {-self.yoffset})") 37 self.xoffset += random.normalvariate(0, 1) / 20 38 if self.yoffset < 50: 39 self.yoffset += 0.1 40 else: 41 self.yoffset += random.normalvariate(0, 1) / 20 42 43 44 _auto = Antigravity(append=True) 45 fly = _auto.fly 46 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/examples/antigravity.py b/examples/antigravity.py --- a/examples/antigravity.py +++ b/examples/antigravity.py @@ -1,5 +1,4 @@ import random -import sys from js import DOMParser, document, setInterval from pyodide.ffi import create_proxy @@ -11,9 +10,10 @@ url = "./antigravity.svg" def __init__(self, target=None, interval=10, append=True, fly=False): - target = target or sys.stdout._out self.target = ( - document.getElementById(target) if isinstance(target, str) else target + document.getElementById(target) + if isinstance(target, str) + else document.body ) doc = DOMParser.new().parseFromString( open_url(self.url).read(), "image/svg+xml"
{"golden_diff": "diff --git a/examples/antigravity.py b/examples/antigravity.py\n--- a/examples/antigravity.py\n+++ b/examples/antigravity.py\n@@ -1,5 +1,4 @@\n import random\n-import sys\n \n from js import DOMParser, document, setInterval\n from pyodide.ffi import create_proxy\n@@ -11,9 +10,10 @@\n url = \"./antigravity.svg\"\n \n def __init__(self, target=None, interval=10, append=True, fly=False):\n- target = target or sys.stdout._out\n self.target = (\n- document.getElementById(target) if isinstance(target, str) else target\n+ document.getElementById(target)\n+ if isinstance(target, str)\n+ else document.body\n )\n doc = DOMParser.new().parseFromString(\n open_url(self.url).read(), \"image/svg+xml\"\n", "issue": "Antigravity example is broken after stdout/display changes\n### Checklist\n\n- [X] I added a descriptive title\n- [X] I searched for other issues and couldn't find a solution or duplication\n- [X] I already searched in Google and didn't find any good information or help\n\n### What happened?\n\nAfter the changes related to stdout/err and display, the antigravity example is broken. It currently fails with the following error:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/lib/python3.10/site-packages/_pyodide/_base.py\", line 435, in eval_code\r\n .run(globals, locals)\r\n File \"/lib/python3.10/site-packages/_pyodide/_base.py\", line 304, in run\r\n coroutine = eval(self.code, globals, locals)\r\n File \"<exec>\", line 1, in <module>\r\n File \"/home/pyodide/antigravity.py\", line 44, in <module>\r\n _auto = Antigravity(append=True)\r\n File \"/home/pyodide/antigravity.py\", line 14, in __init__\r\n target = target or sys.stdout._out\r\nAttributeError: '_io.TextIOWrapper' object has no attribute '_out'\r\n```\r\n\r\nThis is due to the fact that we are not custom changing `sys.stdout` the same way. \r\n\r\nIt seems like the best option here would be to implement the right `_repr_` method but would love to hear @philippjfr on this\r\n\r\n**NOTE: ** goes without saying that we should add a test as well :)\n\n### What browsers are you seeing the problem on? (if applicable)\n\n_No response_\n\n### Console info\n\n_No response_\n\n### Additional Context\n\n_No response_\n", "before_files": [{"content": "import random\nimport sys\n\nfrom js import DOMParser, document, setInterval\nfrom pyodide.ffi import create_proxy\nfrom pyodide.http import open_url\n\n\nclass Antigravity:\n\n url = \"./antigravity.svg\"\n\n def __init__(self, target=None, interval=10, append=True, fly=False):\n target = target or sys.stdout._out\n self.target = (\n document.getElementById(target) if isinstance(target, str) else target\n )\n doc = DOMParser.new().parseFromString(\n open_url(self.url).read(), \"image/svg+xml\"\n )\n self.node = doc.documentElement\n if append:\n self.target.append(self.node)\n else:\n self.target.replaceChildren(self.node)\n self.xoffset, self.yoffset = 0, 0\n self.interval = interval\n if fly:\n self.fly()\n\n def fly(self):\n setInterval(create_proxy(self.move), self.interval)\n\n def move(self):\n char = self.node.getElementsByTagName(\"g\")[1]\n char.setAttribute(\"transform\", f\"translate({self.xoffset}, {-self.yoffset})\")\n self.xoffset += random.normalvariate(0, 1) / 20\n if self.yoffset < 50:\n self.yoffset += 0.1\n else:\n self.yoffset += random.normalvariate(0, 1) / 20\n\n\n_auto = Antigravity(append=True)\nfly = _auto.fly\n", "path": "examples/antigravity.py"}], "after_files": [{"content": "import random\n\nfrom js import DOMParser, document, setInterval\nfrom pyodide.ffi import create_proxy\nfrom pyodide.http import open_url\n\n\nclass Antigravity:\n\n url = \"./antigravity.svg\"\n\n def __init__(self, target=None, interval=10, append=True, fly=False):\n self.target = (\n document.getElementById(target)\n if isinstance(target, str)\n else document.body\n )\n doc = DOMParser.new().parseFromString(\n open_url(self.url).read(), \"image/svg+xml\"\n )\n self.node = doc.documentElement\n if append:\n self.target.append(self.node)\n else:\n self.target.replaceChildren(self.node)\n self.xoffset, self.yoffset = 0, 0\n self.interval = interval\n if fly:\n self.fly()\n\n def fly(self):\n setInterval(create_proxy(self.move), self.interval)\n\n def move(self):\n char = self.node.getElementsByTagName(\"g\")[1]\n char.setAttribute(\"transform\", f\"translate({self.xoffset}, {-self.yoffset})\")\n self.xoffset += random.normalvariate(0, 1) / 20\n if self.yoffset < 50:\n self.yoffset += 0.1\n else:\n self.yoffset += random.normalvariate(0, 1) / 20\n\n\n_auto = Antigravity(append=True)\nfly = _auto.fly\n", "path": "examples/antigravity.py"}]}
1,042
192
gh_patches_debug_1660
rasdani/github-patches
git_diff
e2nIEE__pandapower-2106
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [bug] Pandapower interferes with matplotlib savefig ### Bug report checklis - [X] Searched the [issues page](https://github.com/e2nIEE/pandapower/issues) for similar reports - [X] Read the relevant sections of the [documentation](https://pandapower.readthedocs.io/en/latest/about.html) - [X] Browse the [tutorials](https://github.com/e2nIEE/pandapower/tree/develop/tutorials) and [tests](https://github.com/e2nIEE/pandapower/tree/develop/pandapower/test) for usefull code snippets and examples of use - [X] Reproduced the issue after updating with `pip install --upgrade pandapower` (or `git pull`) - [X] Tried basic troubleshooting (if a bug/error) like restarting the interpreter and checking the pythonpath ### Reproducible Example ```python import matplotlib.pyplot as plt import pandapower fig, ax = plt.subplots() ax.scatter(range(5), range(5)) fig.savefig('test.svg') ``` ### Issue Description and Traceback When pandapower is imported, matplotlib `savefig()` may run into a bug where the `GraphicsContextBase._capstyle` is set to a `str` instead of a `CapStyle` instance. Calling the proper `set_capstyle()` method solves this issue. Also, somehow, this issue does not arise when calling `fig.savefig('test.png')`. It only arises when the figure save type is SVG. The following code works fine. Notice that I have commented out `import pandapower`: ```python import matplotlib.pyplot as plt # import pandapower fig, ax = plt.subplots() ax.scatter(range(5), range(5)) fig.savefig('test.svg') ``` However, if I uncomment the `import pandapower` line, then I will get an error: ```python import matplotlib.pyplot as plt import pandapower fig, ax = plt.subplots() ax.scatter(range(5), range(5)) fig.savefig('test.svg') ``` Error: ``` Traceback (most recent call last): File "/home/user/testenv/test.py", line 6, in <module> fig.savefig('test.svg') File "/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/figure.py", line 3378, in savefig self.canvas.print_figure(fname, **kwargs) File "/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/backend_bases.py", line 2366, in print_figure result = print_method( File "/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/backend_bases.py", line 2232, in <lambda> print_method = functools.wraps(meth)(lambda *args, **kwargs: meth( File "/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/backends/backend_svg.py", line 1369, in print_svg self.figure.draw(renderer) File "/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/artist.py", line 95, in draw_wrapper result = draw(artist, renderer, *args, **kwargs) File "/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/artist.py", line 72, in draw_wrapper return draw(artist, renderer) File "/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/figure.py", line 3175, in draw mimage._draw_list_compositing_images( File "/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/image.py", line 131, in _draw_list_compositing_images a.draw(renderer) File "/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/artist.py", line 72, in draw_wrapper return draw(artist, renderer) File "/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/axes/_base.py", line 3064, in draw mimage._draw_list_compositing_images( File "/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/image.py", line 131, in _draw_list_compositing_images a.draw(renderer) File "/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/artist.py", line 72, in draw_wrapper return draw(artist, renderer) File "/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/collections.py", line 972, in draw super().draw(renderer) File "/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/artist.py", line 72, in draw_wrapper return draw(artist, renderer) File "/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/collections.py", line 405, in draw renderer.draw_markers( File "/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/backends/backend_svg.py", line 717, in draw_markers style = self._get_style_dict(gc, rgbFace) File "/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/backends/backend_svg.py", line 609, in _get_style_dict if gc.get_capstyle() != 'butt': File "/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/backend_bases.py", line 820, in get_capstyle return self._capstyle.name AttributeError: 'str' object has no attribute 'name' ``` ### Expected Behavior I would expect the following 2 code blocks to produce identical (or at least similar) results: ```python import matplotlib.pyplot as plt # import pandapower fig, ax = plt.subplots() ax.scatter(range(5), range(5)) fig.savefig('test.svg') ``` and ```python import matplotlib.pyplot as plt import pandapower fig, ax = plt.subplots() ax.scatter(range(5), range(5)) fig.savefig('test.svg') ``` The 1st code block works fine, whereas the 2nd code block throws an `AttributeError`. ### Installed Versions OS: Ubuntu 22.04 LTS Python 3.10 Matplotlib 3.7.2 Pandapower 2.13.1 ### Label - [X] Relevant labels are selected --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pandapower/plotting/__init__.py` Content: ``` 1 from pandapower.plotting.collections import * 2 from pandapower.plotting.colormaps import * 3 from pandapower.plotting.generic_geodata import * 4 from pandapower.plotting.powerflow_results import * 5 from pandapower.plotting.simple_plot import * 6 from pandapower.plotting.plotly import * 7 from pandapower.plotting.geo import * 8 from pandapower.plotting.plotting_toolbox import set_line_geodata_from_bus_geodata 9 from pandapower.plotting.to_html import to_html 10 11 import types 12 13 try: 14 from matplotlib.backend_bases import GraphicsContextBase, RendererBase 15 16 class GC(GraphicsContextBase): 17 def __init__(self): 18 super().__init__() 19 self._capstyle = 'round' 20 21 def custom_new_gc(self): 22 return GC() 23 24 RendererBase.new_gc = types.MethodType(custom_new_gc, RendererBase) 25 26 except ImportError: 27 pass 28 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pandapower/plotting/__init__.py b/pandapower/plotting/__init__.py --- a/pandapower/plotting/__init__.py +++ b/pandapower/plotting/__init__.py @@ -16,7 +16,7 @@ class GC(GraphicsContextBase): def __init__(self): super().__init__() - self._capstyle = 'round' + self.set_capstyle('round') def custom_new_gc(self): return GC()
{"golden_diff": "diff --git a/pandapower/plotting/__init__.py b/pandapower/plotting/__init__.py\n--- a/pandapower/plotting/__init__.py\n+++ b/pandapower/plotting/__init__.py\n@@ -16,7 +16,7 @@\n class GC(GraphicsContextBase):\n def __init__(self):\n super().__init__()\n- self._capstyle = 'round'\n+ self.set_capstyle('round')\n \n def custom_new_gc(self):\n return GC()\n", "issue": "[bug] Pandapower interferes with matplotlib savefig\n### Bug report checklis\n\n- [X] Searched the [issues page](https://github.com/e2nIEE/pandapower/issues) for similar reports\n\n- [X] Read the relevant sections of the [documentation](https://pandapower.readthedocs.io/en/latest/about.html)\n\n- [X] Browse the [tutorials](https://github.com/e2nIEE/pandapower/tree/develop/tutorials) and [tests](https://github.com/e2nIEE/pandapower/tree/develop/pandapower/test) for usefull code snippets and examples of use\n\n- [X] Reproduced the issue after updating with `pip install --upgrade pandapower` (or `git pull`)\n\n- [X] Tried basic troubleshooting (if a bug/error) like restarting the interpreter and checking the pythonpath\n\n\n### Reproducible Example\n\n```python\nimport matplotlib.pyplot as plt\r\nimport pandapower\r\n\r\nfig, ax = plt.subplots()\r\nax.scatter(range(5), range(5))\r\nfig.savefig('test.svg')\n```\n\n\n### Issue Description and Traceback\n\nWhen pandapower is imported, matplotlib `savefig()` may run into a bug where the `GraphicsContextBase._capstyle` is set to a `str` instead of a `CapStyle` instance. Calling the proper `set_capstyle()` method solves this issue. Also, somehow, this issue does not arise when calling `fig.savefig('test.png')`. It only arises when the figure save type is SVG.\r\n\r\nThe following code works fine. Notice that I have commented out `import pandapower`:\r\n```python\r\nimport matplotlib.pyplot as plt\r\n# import pandapower\r\n\r\nfig, ax = plt.subplots()\r\nax.scatter(range(5), range(5))\r\nfig.savefig('test.svg')\r\n```\r\n\r\nHowever, if I uncomment the `import pandapower` line, then I will get an error:\r\n```python\r\nimport matplotlib.pyplot as plt\r\nimport pandapower\r\n\r\nfig, ax = plt.subplots()\r\nax.scatter(range(5), range(5))\r\nfig.savefig('test.svg')\r\n```\r\n\r\nError:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/user/testenv/test.py\", line 6, in <module>\r\n fig.savefig('test.svg')\r\n File \"/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/figure.py\", line 3378, in savefig\r\n self.canvas.print_figure(fname, **kwargs)\r\n File \"/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/backend_bases.py\", line 2366, in print_figure\r\n result = print_method(\r\n File \"/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/backend_bases.py\", line 2232, in <lambda>\r\n print_method = functools.wraps(meth)(lambda *args, **kwargs: meth(\r\n File \"/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/backends/backend_svg.py\", line 1369, in print_svg\r\n self.figure.draw(renderer)\r\n File \"/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/artist.py\", line 95, in draw_wrapper\r\n result = draw(artist, renderer, *args, **kwargs)\r\n File \"/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/artist.py\", line 72, in draw_wrapper\r\n return draw(artist, renderer)\r\n File \"/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/figure.py\", line 3175, in draw\r\n mimage._draw_list_compositing_images(\r\n File \"/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/image.py\", line 131, in _draw_list_compositing_images\r\n a.draw(renderer)\r\n File \"/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/artist.py\", line 72, in draw_wrapper\r\n return draw(artist, renderer)\r\n File \"/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/axes/_base.py\", line 3064, in draw\r\n mimage._draw_list_compositing_images(\r\n File \"/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/image.py\", line 131, in _draw_list_compositing_images\r\n a.draw(renderer)\r\n File \"/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/artist.py\", line 72, in draw_wrapper\r\n return draw(artist, renderer)\r\n File \"/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/collections.py\", line 972, in draw\r\n super().draw(renderer)\r\n File \"/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/artist.py\", line 72, in draw_wrapper\r\n return draw(artist, renderer)\r\n File \"/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/collections.py\", line 405, in draw\r\n renderer.draw_markers(\r\n File \"/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/backends/backend_svg.py\", line 717, in draw_markers\r\n style = self._get_style_dict(gc, rgbFace)\r\n File \"/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/backends/backend_svg.py\", line 609, in _get_style_dict\r\n if gc.get_capstyle() != 'butt':\r\n File \"/home/user/miniconda3/envs/testenv/lib/python3.10/site-packages/matplotlib/backend_bases.py\", line 820, in get_capstyle\r\n return self._capstyle.name\r\nAttributeError: 'str' object has no attribute 'name'\r\n```\n\n### Expected Behavior\n\nI would expect the following 2 code blocks to produce identical (or at least similar) results:\r\n\r\n```python\r\nimport matplotlib.pyplot as plt\r\n# import pandapower\r\n\r\nfig, ax = plt.subplots()\r\nax.scatter(range(5), range(5))\r\nfig.savefig('test.svg')\r\n```\r\n\r\nand \r\n\r\n```python\r\nimport matplotlib.pyplot as plt\r\nimport pandapower\r\n\r\nfig, ax = plt.subplots()\r\nax.scatter(range(5), range(5))\r\nfig.savefig('test.svg')\r\n```\r\n\r\nThe 1st code block works fine, whereas the 2nd code block throws an `AttributeError`.\n\n### Installed Versions\n\nOS: Ubuntu 22.04 LTS\r\nPython 3.10\r\nMatplotlib 3.7.2\r\nPandapower 2.13.1\n\n### Label\n\n- [X] Relevant labels are selected\n\n", "before_files": [{"content": "from pandapower.plotting.collections import *\nfrom pandapower.plotting.colormaps import *\nfrom pandapower.plotting.generic_geodata import *\nfrom pandapower.plotting.powerflow_results import *\nfrom pandapower.plotting.simple_plot import *\nfrom pandapower.plotting.plotly import *\nfrom pandapower.plotting.geo import *\nfrom pandapower.plotting.plotting_toolbox import set_line_geodata_from_bus_geodata\nfrom pandapower.plotting.to_html import to_html\n\nimport types\n\ntry:\n from matplotlib.backend_bases import GraphicsContextBase, RendererBase\n\n class GC(GraphicsContextBase):\n def __init__(self):\n super().__init__()\n self._capstyle = 'round'\n\n def custom_new_gc(self):\n return GC()\n\n RendererBase.new_gc = types.MethodType(custom_new_gc, RendererBase)\n\nexcept ImportError:\n pass\n", "path": "pandapower/plotting/__init__.py"}], "after_files": [{"content": "from pandapower.plotting.collections import *\nfrom pandapower.plotting.colormaps import *\nfrom pandapower.plotting.generic_geodata import *\nfrom pandapower.plotting.powerflow_results import *\nfrom pandapower.plotting.simple_plot import *\nfrom pandapower.plotting.plotly import *\nfrom pandapower.plotting.geo import *\nfrom pandapower.plotting.plotting_toolbox import set_line_geodata_from_bus_geodata\nfrom pandapower.plotting.to_html import to_html\n\nimport types\n\ntry:\n from matplotlib.backend_bases import GraphicsContextBase, RendererBase\n\n class GC(GraphicsContextBase):\n def __init__(self):\n super().__init__()\n self.set_capstyle('round')\n\n def custom_new_gc(self):\n return GC()\n\n RendererBase.new_gc = types.MethodType(custom_new_gc, RendererBase)\n\nexcept ImportError:\n pass\n", "path": "pandapower/plotting/__init__.py"}]}
2,025
118
gh_patches_debug_34258
rasdani/github-patches
git_diff
beetbox__beets-4197
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- kodiupdate: support updating multiple Kodi instances Pretty straitforward ask, it would be nice if when running beets using the Kodi update client that you could enter multiple clients syntax would be something to the tune of ... kodi: host: x.x.x.x port: 8080 user: user pwd: password kodi: host: x.x.x.x port: 8080 user: user pwd: password kodi: host: x.x.x.x port: 8080 user: user pwd: password can this please be implemented --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `beetsplug/kodiupdate.py` Content: ``` 1 # This file is part of beets. 2 # Copyright 2017, Pauli Kettunen. 3 # 4 # Permission is hereby granted, free of charge, to any person obtaining 5 # a copy of this software and associated documentation files (the 6 # "Software"), to deal in the Software without restriction, including 7 # without limitation the rights to use, copy, modify, merge, publish, 8 # distribute, sublicense, and/or sell copies of the Software, and to 9 # permit persons to whom the Software is furnished to do so, subject to 10 # the following conditions: 11 # 12 # The above copyright notice and this permission notice shall be 13 # included in all copies or substantial portions of the Software. 14 15 """Updates a Kodi library whenever the beets library is changed. 16 This is based on the Plex Update plugin. 17 18 Put something like the following in your config.yaml to configure: 19 kodi: 20 host: localhost 21 port: 8080 22 user: user 23 pwd: secret 24 """ 25 26 import requests 27 from beets import config 28 from beets.plugins import BeetsPlugin 29 30 31 def update_kodi(host, port, user, password): 32 """Sends request to the Kodi api to start a library refresh. 33 """ 34 url = f"http://{host}:{port}/jsonrpc" 35 36 """Content-Type: application/json is mandatory 37 according to the kodi jsonrpc documentation""" 38 39 headers = {'Content-Type': 'application/json'} 40 41 # Create the payload. Id seems to be mandatory. 42 payload = {'jsonrpc': '2.0', 'method': 'AudioLibrary.Scan', 'id': 1} 43 r = requests.post( 44 url, 45 auth=(user, password), 46 json=payload, 47 headers=headers) 48 49 return r 50 51 52 class KodiUpdate(BeetsPlugin): 53 def __init__(self): 54 super().__init__() 55 56 # Adding defaults. 57 config['kodi'].add({ 58 'host': 'localhost', 59 'port': 8080, 60 'user': 'kodi', 61 'pwd': 'kodi'}) 62 63 config['kodi']['pwd'].redact = True 64 self.register_listener('database_change', self.listen_for_db_change) 65 66 def listen_for_db_change(self, lib, model): 67 """Listens for beets db change and register the update""" 68 self.register_listener('cli_exit', self.update) 69 70 def update(self, lib): 71 """When the client exists try to send refresh request to Kodi server. 72 """ 73 self._log.info('Requesting a Kodi library update...') 74 75 # Try to send update request. 76 try: 77 r = update_kodi( 78 config['kodi']['host'].get(), 79 config['kodi']['port'].get(), 80 config['kodi']['user'].get(), 81 config['kodi']['pwd'].get()) 82 r.raise_for_status() 83 84 except requests.exceptions.RequestException as e: 85 self._log.warning('Kodi update failed: {0}', 86 str(e)) 87 return 88 89 json = r.json() 90 if json.get('result') != 'OK': 91 self._log.warning('Kodi update failed: JSON response was {0!r}', 92 json) 93 return 94 95 self._log.info('Kodi update triggered') 96 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/beetsplug/kodiupdate.py b/beetsplug/kodiupdate.py --- a/beetsplug/kodiupdate.py +++ b/beetsplug/kodiupdate.py @@ -54,11 +54,12 @@ super().__init__() # Adding defaults. - config['kodi'].add({ + config['kodi'].add([{ 'host': 'localhost', 'port': 8080, 'user': 'kodi', - 'pwd': 'kodi'}) + 'pwd': 'kodi' + }]) config['kodi']['pwd'].redact = True self.register_listener('database_change', self.listen_for_db_change) @@ -72,24 +73,34 @@ """ self._log.info('Requesting a Kodi library update...') - # Try to send update request. - try: - r = update_kodi( - config['kodi']['host'].get(), - config['kodi']['port'].get(), - config['kodi']['user'].get(), - config['kodi']['pwd'].get()) - r.raise_for_status() - - except requests.exceptions.RequestException as e: - self._log.warning('Kodi update failed: {0}', - str(e)) - return - - json = r.json() - if json.get('result') != 'OK': - self._log.warning('Kodi update failed: JSON response was {0!r}', - json) - return - - self._log.info('Kodi update triggered') + kodi = config['kodi'].get() + + # Backwards compatibility in case not configured as an array + if not isinstance(kodi, list): + kodi = [kodi] + + for instance in kodi: + # Try to send update request. + try: + r = update_kodi( + instance['host'], + instance['port'], + instance['user'], + instance['pwd'] + ) + r.raise_for_status() + + json = r.json() + if json.get('result') != 'OK': + self._log.warning( + 'Kodi update failed: JSON response was {0!r}', json + ) + continue + + self._log.info( + 'Kodi update triggered for {0}:{1}', + instance['host'], instance['port'] + ) + except requests.exceptions.RequestException as e: + self._log.warning('Kodi update failed: {0}', str(e)) + continue
{"golden_diff": "diff --git a/beetsplug/kodiupdate.py b/beetsplug/kodiupdate.py\n--- a/beetsplug/kodiupdate.py\n+++ b/beetsplug/kodiupdate.py\n@@ -54,11 +54,12 @@\n super().__init__()\n \n # Adding defaults.\n- config['kodi'].add({\n+ config['kodi'].add([{\n 'host': 'localhost',\n 'port': 8080,\n 'user': 'kodi',\n- 'pwd': 'kodi'})\n+ 'pwd': 'kodi'\n+ }])\n \n config['kodi']['pwd'].redact = True\n self.register_listener('database_change', self.listen_for_db_change)\n@@ -72,24 +73,34 @@\n \"\"\"\n self._log.info('Requesting a Kodi library update...')\n \n- # Try to send update request.\n- try:\n- r = update_kodi(\n- config['kodi']['host'].get(),\n- config['kodi']['port'].get(),\n- config['kodi']['user'].get(),\n- config['kodi']['pwd'].get())\n- r.raise_for_status()\n-\n- except requests.exceptions.RequestException as e:\n- self._log.warning('Kodi update failed: {0}',\n- str(e))\n- return\n-\n- json = r.json()\n- if json.get('result') != 'OK':\n- self._log.warning('Kodi update failed: JSON response was {0!r}',\n- json)\n- return\n-\n- self._log.info('Kodi update triggered')\n+ kodi = config['kodi'].get()\n+\n+ # Backwards compatibility in case not configured as an array\n+ if not isinstance(kodi, list):\n+ kodi = [kodi]\n+\n+ for instance in kodi:\n+ # Try to send update request.\n+ try:\n+ r = update_kodi(\n+ instance['host'],\n+ instance['port'],\n+ instance['user'],\n+ instance['pwd']\n+ )\n+ r.raise_for_status()\n+\n+ json = r.json()\n+ if json.get('result') != 'OK':\n+ self._log.warning(\n+ 'Kodi update failed: JSON response was {0!r}', json\n+ )\n+ continue\n+\n+ self._log.info(\n+ 'Kodi update triggered for {0}:{1}',\n+ instance['host'], instance['port']\n+ )\n+ except requests.exceptions.RequestException as e:\n+ self._log.warning('Kodi update failed: {0}', str(e))\n+ continue\n", "issue": "kodiupdate: support updating multiple Kodi instances\nPretty straitforward ask, it would be nice if when running beets using the Kodi update client that you could enter multiple clients\r\n\r\nsyntax would be something to the tune of ...\r\n\r\nkodi:\r\n host: x.x.x.x\r\n port: 8080\r\n user: user\r\n pwd: password\r\nkodi:\r\n host: x.x.x.x\r\n port: 8080\r\n user: user\r\n pwd: password\r\nkodi:\r\n host: x.x.x.x\r\n port: 8080\r\n user: user\r\n pwd: password\r\ncan this please be implemented\n", "before_files": [{"content": "# This file is part of beets.\n# Copyright 2017, Pauli Kettunen.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Updates a Kodi library whenever the beets library is changed.\nThis is based on the Plex Update plugin.\n\nPut something like the following in your config.yaml to configure:\n kodi:\n host: localhost\n port: 8080\n user: user\n pwd: secret\n\"\"\"\n\nimport requests\nfrom beets import config\nfrom beets.plugins import BeetsPlugin\n\n\ndef update_kodi(host, port, user, password):\n \"\"\"Sends request to the Kodi api to start a library refresh.\n \"\"\"\n url = f\"http://{host}:{port}/jsonrpc\"\n\n \"\"\"Content-Type: application/json is mandatory\n according to the kodi jsonrpc documentation\"\"\"\n\n headers = {'Content-Type': 'application/json'}\n\n # Create the payload. Id seems to be mandatory.\n payload = {'jsonrpc': '2.0', 'method': 'AudioLibrary.Scan', 'id': 1}\n r = requests.post(\n url,\n auth=(user, password),\n json=payload,\n headers=headers)\n\n return r\n\n\nclass KodiUpdate(BeetsPlugin):\n def __init__(self):\n super().__init__()\n\n # Adding defaults.\n config['kodi'].add({\n 'host': 'localhost',\n 'port': 8080,\n 'user': 'kodi',\n 'pwd': 'kodi'})\n\n config['kodi']['pwd'].redact = True\n self.register_listener('database_change', self.listen_for_db_change)\n\n def listen_for_db_change(self, lib, model):\n \"\"\"Listens for beets db change and register the update\"\"\"\n self.register_listener('cli_exit', self.update)\n\n def update(self, lib):\n \"\"\"When the client exists try to send refresh request to Kodi server.\n \"\"\"\n self._log.info('Requesting a Kodi library update...')\n\n # Try to send update request.\n try:\n r = update_kodi(\n config['kodi']['host'].get(),\n config['kodi']['port'].get(),\n config['kodi']['user'].get(),\n config['kodi']['pwd'].get())\n r.raise_for_status()\n\n except requests.exceptions.RequestException as e:\n self._log.warning('Kodi update failed: {0}',\n str(e))\n return\n\n json = r.json()\n if json.get('result') != 'OK':\n self._log.warning('Kodi update failed: JSON response was {0!r}',\n json)\n return\n\n self._log.info('Kodi update triggered')\n", "path": "beetsplug/kodiupdate.py"}], "after_files": [{"content": "# This file is part of beets.\n# Copyright 2017, Pauli Kettunen.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Updates a Kodi library whenever the beets library is changed.\nThis is based on the Plex Update plugin.\n\nPut something like the following in your config.yaml to configure:\n kodi:\n host: localhost\n port: 8080\n user: user\n pwd: secret\n\"\"\"\n\nimport requests\nfrom beets import config\nfrom beets.plugins import BeetsPlugin\n\n\ndef update_kodi(host, port, user, password):\n \"\"\"Sends request to the Kodi api to start a library refresh.\n \"\"\"\n url = f\"http://{host}:{port}/jsonrpc\"\n\n \"\"\"Content-Type: application/json is mandatory\n according to the kodi jsonrpc documentation\"\"\"\n\n headers = {'Content-Type': 'application/json'}\n\n # Create the payload. Id seems to be mandatory.\n payload = {'jsonrpc': '2.0', 'method': 'AudioLibrary.Scan', 'id': 1}\n r = requests.post(\n url,\n auth=(user, password),\n json=payload,\n headers=headers)\n\n return r\n\n\nclass KodiUpdate(BeetsPlugin):\n def __init__(self):\n super().__init__()\n\n # Adding defaults.\n config['kodi'].add([{\n 'host': 'localhost',\n 'port': 8080,\n 'user': 'kodi',\n 'pwd': 'kodi'\n }])\n\n config['kodi']['pwd'].redact = True\n self.register_listener('database_change', self.listen_for_db_change)\n\n def listen_for_db_change(self, lib, model):\n \"\"\"Listens for beets db change and register the update\"\"\"\n self.register_listener('cli_exit', self.update)\n\n def update(self, lib):\n \"\"\"When the client exists try to send refresh request to Kodi server.\n \"\"\"\n self._log.info('Requesting a Kodi library update...')\n\n kodi = config['kodi'].get()\n\n # Backwards compatibility in case not configured as an array\n if not isinstance(kodi, list):\n kodi = [kodi]\n\n for instance in kodi:\n # Try to send update request.\n try:\n r = update_kodi(\n instance['host'],\n instance['port'],\n instance['user'],\n instance['pwd']\n )\n r.raise_for_status()\n\n json = r.json()\n if json.get('result') != 'OK':\n self._log.warning(\n 'Kodi update failed: JSON response was {0!r}', json\n )\n continue\n\n self._log.info(\n 'Kodi update triggered for {0}:{1}',\n instance['host'], instance['port']\n )\n except requests.exceptions.RequestException as e:\n self._log.warning('Kodi update failed: {0}', str(e))\n continue\n", "path": "beetsplug/kodiupdate.py"}]}
1,285
584
gh_patches_debug_27570
rasdani/github-patches
git_diff
rotki__rotki-62
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Etherscan balance query for more than 20 accounts won't work Etherscan has a 20 account limit per query. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `rotkehlchen/ethchain.py` Content: ``` 1 import os 2 from web3 import Web3, HTTPProvider 3 from requests import ConnectionError 4 5 from rotkehlchen.utils import from_wei, rlk_jsonloads, request_get 6 from rotkehlchen.fval import FVal 7 8 import logging 9 logger = logging.getLogger(__name__) 10 11 12 class Ethchain(object): 13 def __init__(self, ethrpc_port, attempt_connect=True): 14 self.web3 = None 15 self.rpc_port = ethrpc_port 16 self.connected = False 17 if attempt_connect: 18 self.attempt_connect(ethrpc_port) 19 20 def attempt_connect(self, ethrpc_port, mainnet_check=True): 21 if self.rpc_port == ethrpc_port and self.connected: 22 # We are already connected 23 return True, 'Already connected to an ethereum node' 24 25 if self.web3: 26 del self.web3 27 28 try: 29 self.web3 = Web3(HTTPProvider('http://localhost:{}'.format(ethrpc_port))) 30 except ConnectionError: 31 logger.warn('Could not connect to a local ethereum node. Will use etherscan only') 32 self.connected = False 33 return False, 'Failed to connect to ethereum node at port {}'.format(ethrpc_port) 34 35 if self.web3.isConnected(): 36 dir_path = os.path.dirname(os.path.realpath(__file__)) 37 with open(os.path.join(dir_path, 'data', 'token_abi.json'), 'r') as f: 38 self.token_abi = rlk_jsonloads(f.read()) 39 40 # Also make sure we are actually connected to the Ethereum mainnet 41 if mainnet_check: 42 genesis_hash = self.web3.eth.getBlock(0)['hash'].hex() 43 target = '0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3' 44 if genesis_hash != target: 45 logger.warn( 46 'Connected to a local ethereum node but it is not on the ethereum mainnet' 47 ) 48 self.connected = False 49 message = ( 50 'Connected to ethereum node at port {} but it is not on ' 51 'the ethereum mainnet'.format(ethrpc_port) 52 ) 53 return False, message 54 55 self.connected = True 56 return True, '' 57 else: 58 logger.warn('Could not connect to a local ethereum node. Will use etherscan only') 59 self.connected = False 60 message = 'Failed to connect to ethereum node at port {}'.format(ethrpc_port) 61 62 # If we get here we did not connnect 63 return False, message 64 65 def set_rpc_port(self, port): 66 """ Attempts to set the RPC port for the ethereum client. 67 68 Returns a tuple (result, message) 69 - result: Boolean for success or failure of changing the rpc port 70 - message: A message containing information on what happened. Can 71 be populated both in case of success or failure""" 72 result, message = self.attempt_connect(port) 73 if result: 74 self.ethrpc_port = port 75 return result, message 76 77 def get_eth_balance(self, account): 78 if not self.connected: 79 eth_resp = request_get( 80 'https://api.etherscan.io/api?module=account&action=balance&address=%s' 81 % account 82 ) 83 if eth_resp['status'] != 1: 84 raise ValueError('Failed to query etherscan for accounts balance') 85 amount = FVal(eth_resp['result']) 86 return from_wei(amount) 87 else: 88 return from_wei(self.web3.eth.getBalance(account)) 89 90 def get_multieth_balance(self, accounts): 91 """Returns a dict with keys being accounts and balances in ETH""" 92 balances = {} 93 if not self.connected: 94 # TODO: accounts.length should be less than 20. If more we gotta do 95 # multiple calls 96 eth_resp = request_get( 97 'https://api.etherscan.io/api?module=account&action=balancemulti&address=%s' % 98 ','.join(accounts) 99 ) 100 if eth_resp['status'] != 1: 101 raise ValueError('Failed to query etherscan for accounts balance') 102 eth_accounts = eth_resp['result'] 103 for account_entry in eth_accounts: 104 amount = FVal(account_entry['balance']) 105 balances[account_entry['account']] = from_wei(amount) 106 107 else: 108 for account in accounts: 109 amount = FVal(self.web3.eth.getBalance(account)) 110 balances[account] = from_wei(amount) 111 112 return balances 113 114 def get_multitoken_balance(self, token_symbol, token_address, token_decimals, accounts): 115 """Return a dictionary with keys being accounts and value balances of token 116 Balance value is normalized through the token decimals. 117 """ 118 balances = {} 119 if self.connected: 120 token_contract = self.web3.eth.contract( 121 address=token_address, 122 abi=self.token_abi 123 ) 124 for account in accounts: 125 token_amount = FVal(token_contract.functions.balanceOf(account).call()) 126 if token_amount != 0: 127 balances[account] = token_amount / (FVal(10) ** FVal(token_decimals)) 128 else: 129 for account in accounts: 130 print('Checking token {} for account {}'.format(token_symbol, account)) 131 resp = request_get( 132 'https://api.etherscan.io/api?module=account&action=' 133 'tokenbalance&contractaddress={}&address={}'.format( 134 token_address, 135 account, 136 )) 137 if resp['status'] != 1: 138 raise ValueError( 139 'Failed to query etherscan for {} token balance of {}'.format( 140 token_symbol, 141 account, 142 )) 143 token_amount = FVal(resp['result']) 144 if token_amount != 0: 145 balances[account] = token_amount / (FVal(10) ** FVal(token_decimals)) 146 147 return balances 148 149 def get_token_balance(self, token_symbol, token_address, token_decimals, account): 150 res = self.get_multitoken_balance(token_symbol, token_address, token_decimals, [account]) 151 return res.get(account, 0) 152 153 def get_block_by_number(self, num): 154 if not self.connected: 155 return None 156 157 return self.web3.eth.getBlock(num) 158 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/rotkehlchen/ethchain.py b/rotkehlchen/ethchain.py --- a/rotkehlchen/ethchain.py +++ b/rotkehlchen/ethchain.py @@ -90,19 +90,25 @@ def get_multieth_balance(self, accounts): """Returns a dict with keys being accounts and balances in ETH""" balances = {} + if not self.connected: - # TODO: accounts.length should be less than 20. If more we gotta do - # multiple calls - eth_resp = request_get( - 'https://api.etherscan.io/api?module=account&action=balancemulti&address=%s' % - ','.join(accounts) - ) - if eth_resp['status'] != 1: - raise ValueError('Failed to query etherscan for accounts balance') - eth_accounts = eth_resp['result'] - for account_entry in eth_accounts: - amount = FVal(account_entry['balance']) - balances[account_entry['account']] = from_wei(amount) + if len(accounts) > 20: + new_accounts = [accounts[x:x+2] for x in range(0, len(accounts), 2)] + else: + new_accounts = [accounts] + + for account_slice in new_accounts: + eth_resp = request_get( + 'https://api.etherscan.io/api?module=account&action=balancemulti&address=%s' % + ','.join(account_slice) + ) + if eth_resp['status'] != 1: + raise ValueError('Failed to query etherscan for accounts balance') + eth_accounts = eth_resp['result'] + + for account_entry in eth_accounts: + amount = FVal(account_entry['balance']) + balances[account_entry['account']] = from_wei(amount) else: for account in accounts:
{"golden_diff": "diff --git a/rotkehlchen/ethchain.py b/rotkehlchen/ethchain.py\n--- a/rotkehlchen/ethchain.py\n+++ b/rotkehlchen/ethchain.py\n@@ -90,19 +90,25 @@\n def get_multieth_balance(self, accounts):\n \"\"\"Returns a dict with keys being accounts and balances in ETH\"\"\"\n balances = {}\n+\n if not self.connected:\n- # TODO: accounts.length should be less than 20. If more we gotta do\n- # multiple calls\n- eth_resp = request_get(\n- 'https://api.etherscan.io/api?module=account&action=balancemulti&address=%s' %\n- ','.join(accounts)\n- )\n- if eth_resp['status'] != 1:\n- raise ValueError('Failed to query etherscan for accounts balance')\n- eth_accounts = eth_resp['result']\n- for account_entry in eth_accounts:\n- amount = FVal(account_entry['balance'])\n- balances[account_entry['account']] = from_wei(amount)\n+ if len(accounts) > 20:\n+ new_accounts = [accounts[x:x+2] for x in range(0, len(accounts), 2)]\n+ else:\n+ new_accounts = [accounts]\n+\n+ for account_slice in new_accounts:\n+ eth_resp = request_get(\n+ 'https://api.etherscan.io/api?module=account&action=balancemulti&address=%s' %\n+ ','.join(account_slice)\n+ )\n+ if eth_resp['status'] != 1:\n+ raise ValueError('Failed to query etherscan for accounts balance')\n+ eth_accounts = eth_resp['result']\n+\n+ for account_entry in eth_accounts:\n+ amount = FVal(account_entry['balance'])\n+ balances[account_entry['account']] = from_wei(amount)\n \n else:\n for account in accounts:\n", "issue": "Etherscan balance query for more than 20 accounts won't work\nEtherscan has a 20 account limit per query.\n", "before_files": [{"content": "import os\nfrom web3 import Web3, HTTPProvider\nfrom requests import ConnectionError\n\nfrom rotkehlchen.utils import from_wei, rlk_jsonloads, request_get\nfrom rotkehlchen.fval import FVal\n\nimport logging\nlogger = logging.getLogger(__name__)\n\n\nclass Ethchain(object):\n def __init__(self, ethrpc_port, attempt_connect=True):\n self.web3 = None\n self.rpc_port = ethrpc_port\n self.connected = False\n if attempt_connect:\n self.attempt_connect(ethrpc_port)\n\n def attempt_connect(self, ethrpc_port, mainnet_check=True):\n if self.rpc_port == ethrpc_port and self.connected:\n # We are already connected\n return True, 'Already connected to an ethereum node'\n\n if self.web3:\n del self.web3\n\n try:\n self.web3 = Web3(HTTPProvider('http://localhost:{}'.format(ethrpc_port)))\n except ConnectionError:\n logger.warn('Could not connect to a local ethereum node. Will use etherscan only')\n self.connected = False\n return False, 'Failed to connect to ethereum node at port {}'.format(ethrpc_port)\n\n if self.web3.isConnected():\n dir_path = os.path.dirname(os.path.realpath(__file__))\n with open(os.path.join(dir_path, 'data', 'token_abi.json'), 'r') as f:\n self.token_abi = rlk_jsonloads(f.read())\n\n # Also make sure we are actually connected to the Ethereum mainnet\n if mainnet_check:\n genesis_hash = self.web3.eth.getBlock(0)['hash'].hex()\n target = '0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3'\n if genesis_hash != target:\n logger.warn(\n 'Connected to a local ethereum node but it is not on the ethereum mainnet'\n )\n self.connected = False\n message = (\n 'Connected to ethereum node at port {} but it is not on '\n 'the ethereum mainnet'.format(ethrpc_port)\n )\n return False, message\n\n self.connected = True\n return True, ''\n else:\n logger.warn('Could not connect to a local ethereum node. Will use etherscan only')\n self.connected = False\n message = 'Failed to connect to ethereum node at port {}'.format(ethrpc_port)\n\n # If we get here we did not connnect\n return False, message\n\n def set_rpc_port(self, port):\n \"\"\" Attempts to set the RPC port for the ethereum client.\n\n Returns a tuple (result, message)\n - result: Boolean for success or failure of changing the rpc port\n - message: A message containing information on what happened. Can\n be populated both in case of success or failure\"\"\"\n result, message = self.attempt_connect(port)\n if result:\n self.ethrpc_port = port\n return result, message\n\n def get_eth_balance(self, account):\n if not self.connected:\n eth_resp = request_get(\n 'https://api.etherscan.io/api?module=account&action=balance&address=%s'\n % account\n )\n if eth_resp['status'] != 1:\n raise ValueError('Failed to query etherscan for accounts balance')\n amount = FVal(eth_resp['result'])\n return from_wei(amount)\n else:\n return from_wei(self.web3.eth.getBalance(account))\n\n def get_multieth_balance(self, accounts):\n \"\"\"Returns a dict with keys being accounts and balances in ETH\"\"\"\n balances = {}\n if not self.connected:\n # TODO: accounts.length should be less than 20. If more we gotta do\n # multiple calls\n eth_resp = request_get(\n 'https://api.etherscan.io/api?module=account&action=balancemulti&address=%s' %\n ','.join(accounts)\n )\n if eth_resp['status'] != 1:\n raise ValueError('Failed to query etherscan for accounts balance')\n eth_accounts = eth_resp['result']\n for account_entry in eth_accounts:\n amount = FVal(account_entry['balance'])\n balances[account_entry['account']] = from_wei(amount)\n\n else:\n for account in accounts:\n amount = FVal(self.web3.eth.getBalance(account))\n balances[account] = from_wei(amount)\n\n return balances\n\n def get_multitoken_balance(self, token_symbol, token_address, token_decimals, accounts):\n \"\"\"Return a dictionary with keys being accounts and value balances of token\n Balance value is normalized through the token decimals.\n \"\"\"\n balances = {}\n if self.connected:\n token_contract = self.web3.eth.contract(\n address=token_address,\n abi=self.token_abi\n )\n for account in accounts:\n token_amount = FVal(token_contract.functions.balanceOf(account).call())\n if token_amount != 0:\n balances[account] = token_amount / (FVal(10) ** FVal(token_decimals))\n else:\n for account in accounts:\n print('Checking token {} for account {}'.format(token_symbol, account))\n resp = request_get(\n 'https://api.etherscan.io/api?module=account&action='\n 'tokenbalance&contractaddress={}&address={}'.format(\n token_address,\n account,\n ))\n if resp['status'] != 1:\n raise ValueError(\n 'Failed to query etherscan for {} token balance of {}'.format(\n token_symbol,\n account,\n ))\n token_amount = FVal(resp['result'])\n if token_amount != 0:\n balances[account] = token_amount / (FVal(10) ** FVal(token_decimals))\n\n return balances\n\n def get_token_balance(self, token_symbol, token_address, token_decimals, account):\n res = self.get_multitoken_balance(token_symbol, token_address, token_decimals, [account])\n return res.get(account, 0)\n\n def get_block_by_number(self, num):\n if not self.connected:\n return None\n\n return self.web3.eth.getBlock(num)\n", "path": "rotkehlchen/ethchain.py"}], "after_files": [{"content": "import os\nfrom web3 import Web3, HTTPProvider\nfrom requests import ConnectionError\n\nfrom rotkehlchen.utils import from_wei, rlk_jsonloads, request_get\nfrom rotkehlchen.fval import FVal\n\nimport logging\nlogger = logging.getLogger(__name__)\n\n\nclass Ethchain(object):\n def __init__(self, ethrpc_port, attempt_connect=True):\n self.web3 = None\n self.rpc_port = ethrpc_port\n self.connected = False\n if attempt_connect:\n self.attempt_connect(ethrpc_port)\n\n def attempt_connect(self, ethrpc_port, mainnet_check=True):\n if self.rpc_port == ethrpc_port and self.connected:\n # We are already connected\n return True, 'Already connected to an ethereum node'\n\n if self.web3:\n del self.web3\n\n try:\n self.web3 = Web3(HTTPProvider('http://localhost:{}'.format(ethrpc_port)))\n except ConnectionError:\n logger.warn('Could not connect to a local ethereum node. Will use etherscan only')\n self.connected = False\n return False, 'Failed to connect to ethereum node at port {}'.format(ethrpc_port)\n\n if self.web3.isConnected():\n dir_path = os.path.dirname(os.path.realpath(__file__))\n with open(os.path.join(dir_path, 'data', 'token_abi.json'), 'r') as f:\n self.token_abi = rlk_jsonloads(f.read())\n\n # Also make sure we are actually connected to the Ethereum mainnet\n if mainnet_check:\n genesis_hash = self.web3.eth.getBlock(0)['hash'].hex()\n target = '0xd4e56740f876aef8c010b86a40d5f56745a118d0906a34e69aec8c0db1cb8fa3'\n if genesis_hash != target:\n logger.warn(\n 'Connected to a local ethereum node but it is not on the ethereum mainnet'\n )\n self.connected = False\n message = (\n 'Connected to ethereum node at port {} but it is not on '\n 'the ethereum mainnet'.format(ethrpc_port)\n )\n return False, message\n\n self.connected = True\n return True, ''\n else:\n logger.warn('Could not connect to a local ethereum node. Will use etherscan only')\n self.connected = False\n message = 'Failed to connect to ethereum node at port {}'.format(ethrpc_port)\n\n # If we get here we did not connnect\n return False, message\n\n def set_rpc_port(self, port):\n \"\"\" Attempts to set the RPC port for the ethereum client.\n\n Returns a tuple (result, message)\n - result: Boolean for success or failure of changing the rpc port\n - message: A message containing information on what happened. Can\n be populated both in case of success or failure\"\"\"\n result, message = self.attempt_connect(port)\n if result:\n self.ethrpc_port = port\n return result, message\n\n def get_eth_balance(self, account):\n if not self.connected:\n eth_resp = request_get(\n 'https://api.etherscan.io/api?module=account&action=balance&address=%s'\n % account\n )\n if eth_resp['status'] != 1:\n raise ValueError('Failed to query etherscan for accounts balance')\n amount = FVal(eth_resp['result'])\n return from_wei(amount)\n else:\n return from_wei(self.web3.eth.getBalance(account))\n\n def get_multieth_balance(self, accounts):\n \"\"\"Returns a dict with keys being accounts and balances in ETH\"\"\"\n balances = {}\n\n if not self.connected:\n if len(accounts) > 20:\n new_accounts = [accounts[x:x+2] for x in range(0, len(accounts), 2)]\n else:\n new_accounts = [accounts]\n\n for account_slice in new_accounts:\n eth_resp = request_get(\n 'https://api.etherscan.io/api?module=account&action=balancemulti&address=%s' %\n ','.join(account_slice)\n )\n if eth_resp['status'] != 1:\n raise ValueError('Failed to query etherscan for accounts balance')\n eth_accounts = eth_resp['result']\n\n for account_entry in eth_accounts:\n amount = FVal(account_entry['balance'])\n balances[account_entry['account']] = from_wei(amount)\n\n else:\n for account in accounts:\n amount = FVal(self.web3.eth.getBalance(account))\n balances[account] = from_wei(amount)\n\n return balances\n\n def get_multitoken_balance(self, token_symbol, token_address, token_decimals, accounts):\n \"\"\"Return a dictionary with keys being accounts and value balances of token\n Balance value is normalized through the token decimals.\n \"\"\"\n balances = {}\n if self.connected:\n token_contract = self.web3.eth.contract(\n address=token_address,\n abi=self.token_abi\n )\n for account in accounts:\n token_amount = FVal(token_contract.functions.balanceOf(account).call())\n if token_amount != 0:\n balances[account] = token_amount / (FVal(10) ** FVal(token_decimals))\n else:\n for account in accounts:\n print('Checking token {} for account {}'.format(token_symbol, account))\n resp = request_get(\n 'https://api.etherscan.io/api?module=account&action='\n 'tokenbalance&contractaddress={}&address={}'.format(\n token_address,\n account,\n ))\n if resp['status'] != 1:\n raise ValueError(\n 'Failed to query etherscan for {} token balance of {}'.format(\n token_symbol,\n account,\n ))\n token_amount = FVal(resp['result'])\n if token_amount != 0:\n balances[account] = token_amount / (FVal(10) ** FVal(token_decimals))\n\n return balances\n\n def get_token_balance(self, token_symbol, token_address, token_decimals, account):\n res = self.get_multitoken_balance(token_symbol, token_address, token_decimals, [account])\n return res.get(account, 0)\n\n def get_block_by_number(self, num):\n if not self.connected:\n return None\n\n return self.web3.eth.getBlock(num)\n", "path": "rotkehlchen/ethchain.py"}]}
2,014
424
gh_patches_debug_340
rasdani/github-patches
git_diff
facebookresearch__nevergrad-705
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Wrong dates in changelog All the dates at https://github.com/facebookresearch/nevergrad/blob/master/CHANGELOG.md shows 2019, but seems it should be 2020. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `docs/conf.py` Content: ``` 1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. 2 # 3 # This source code is licensed under the MIT license found in the 4 # LICENSE file in the root directory of this source tree. 5 6 # Configuration file for the Sphinx documentation builder. 7 # 8 # This file only contains a selection of the most common options. For a full 9 # list see the documentation: 10 # http://www.sphinx-doc.org/en/master/config 11 12 # -- Path setup -------------------------------------------------------------- 13 14 # If extensions (or modules to document with autodoc) are in another directory, 15 # add these directories to sys.path here. If the directory is relative to the 16 # documentation root, use os.path.abspath to make it absolute, like shown here. 17 18 import os 19 import sys 20 sys.path.insert(0, os.path.abspath('..')) 21 22 23 # -- Project information ----------------------------------------------------- 24 25 project = 'nevergrad' 26 copyright = '2019, Facebook AI Research' # pylint: disable=redefined-builtin 27 author = 'Facebook AI Research' 28 29 30 # -- General configuration --------------------------------------------------- 31 32 # Add any Sphinx extension module names here, as strings. They can be 33 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 34 # ones. 35 extensions = ['sphinx.ext.autodoc', 36 'sphinx.ext.githubpages', 37 'sphinx.ext.coverage', 38 'sphinx.ext.napoleon', 39 'sphinx.ext.autosummary', 40 'recommonmark', 41 ] 42 43 source_suffix = { 44 '.rst': 'restructuredtext', 45 '.txt': 'markdown', 46 '.md': 'markdown', 47 } 48 49 master_doc = 'index' 50 51 # Add any paths that contain templates here, relative to this directory. 52 templates_path = [] 53 54 # List of patterns, relative to source directory, that match files and 55 # directories to ignore when looking for source files. 56 # This pattern also affects html_static_path and html_extra_path. 57 exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] 58 59 60 # -- Options for HTML output ------------------------------------------------- 61 62 # The theme to use for HTML and HTML Help pages. See the documentation for 63 # a list of builtin themes. 64 # 65 html_theme = 'sphinx_rtd_theme' 66 67 # Add any paths that contain custom static files (such as style sheets) here, 68 # relative to this directory. They are copied after the builtin static files, 69 # so a file named "default.css" will overwrite the builtin "default.css". 70 html_static_path = [] 71 72 # -- Other -- 73 linkcheck_ignore = [r'https://gecco-2020.sigevo.org/*'] 74 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/docs/conf.py b/docs/conf.py --- a/docs/conf.py +++ b/docs/conf.py @@ -70,4 +70,5 @@ html_static_path = [] # -- Other -- -linkcheck_ignore = [r'https://gecco-2020.sigevo.org/*'] +linkcheck_ignore = [r'https://gecco-2020.sigevo.org/*', + r'https://arxiv.org/abs/*'] # Transient certificate error :(
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -70,4 +70,5 @@\n html_static_path = []\n \n # -- Other --\n-linkcheck_ignore = [r'https://gecco-2020.sigevo.org/*']\n+linkcheck_ignore = [r'https://gecco-2020.sigevo.org/*',\n+ r'https://arxiv.org/abs/*'] # Transient certificate error :(\n", "issue": "Wrong dates in changelog\nAll the dates at https://github.com/facebookresearch/nevergrad/blob/master/CHANGELOG.md shows 2019, but seems it should be 2020.\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\nimport os\nimport sys\nsys.path.insert(0, os.path.abspath('..'))\n\n\n# -- Project information -----------------------------------------------------\n\nproject = 'nevergrad'\ncopyright = '2019, Facebook AI Research' # pylint: disable=redefined-builtin\nauthor = 'Facebook AI Research'\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = ['sphinx.ext.autodoc',\n 'sphinx.ext.githubpages',\n 'sphinx.ext.coverage',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.autosummary',\n 'recommonmark',\n ]\n\nsource_suffix = {\n '.rst': 'restructuredtext',\n '.txt': 'markdown',\n '.md': 'markdown',\n}\n\nmaster_doc = 'index'\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = []\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'sphinx_rtd_theme'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = []\n\n# -- Other --\nlinkcheck_ignore = [r'https://gecco-2020.sigevo.org/*']\n", "path": "docs/conf.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\nimport os\nimport sys\nsys.path.insert(0, os.path.abspath('..'))\n\n\n# -- Project information -----------------------------------------------------\n\nproject = 'nevergrad'\ncopyright = '2019, Facebook AI Research' # pylint: disable=redefined-builtin\nauthor = 'Facebook AI Research'\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = ['sphinx.ext.autodoc',\n 'sphinx.ext.githubpages',\n 'sphinx.ext.coverage',\n 'sphinx.ext.napoleon',\n 'sphinx.ext.autosummary',\n 'recommonmark',\n ]\n\nsource_suffix = {\n '.rst': 'restructuredtext',\n '.txt': 'markdown',\n '.md': 'markdown',\n}\n\nmaster_doc = 'index'\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = []\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'sphinx_rtd_theme'\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = []\n\n# -- Other --\nlinkcheck_ignore = [r'https://gecco-2020.sigevo.org/*',\n r'https://arxiv.org/abs/*'] # Transient certificate error :(\n", "path": "docs/conf.py"}]}
988
109
gh_patches_debug_13293
rasdani/github-patches
git_diff
ray-project__ray-8842
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [Asyncio] InvalidStateError when multiple awaits on same oid <!--Please include [tune], [rllib], [autoscaler] etc. in the issue title if relevant--> ### What is the problem? *Ray version and other system information (Python version, TensorFlow version, OS):* ### Reproduction (REQUIRED) Please provide a script that can be run to reproduce the issue. The script should have **no external library dependencies** (i.e., use fake or mock data / environments): If we cannot run your script, we cannot fix your issue. ```python import ray import time ray.init() @ray.remote def f(): time.sleep(5) oid = f.remote() await asyncio.wait_for(oid, timeout=1) await asyncio.wait_for(oid, timeout=1) ``` Output ``` Exception in callback get_async.<locals>.done_callback(<Future finis... result=None)>) at /Users/simonmo/Desktop/ray/ray/python/ray/async_compat.py:65 handle: <Handle get_async.<locals>.done_callback(<Future finis... result=None)>) at /Users/simonmo/Desktop/ray/ray/python/ray/async_compat.py:65> Traceback (most recent call last): File "/Users/simonmo/miniconda3/lib/python3.6/asyncio/events.py", line 145, in _run self._callback(*self._args) File "/Users/simonmo/Desktop/ray/ray/python/ray/async_compat.py", line 83, in done_callback user_future.set_result(result.result) asyncio.base_futures.InvalidStateError: invalid state ``` - [ ] I have verified my script runs in a clean environment and reproduces the issue. - [ ] I have verified the issue also occurs with the [latest wheels](https://docs.ray.io/en/latest/installation.html). --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `python/ray/async_compat.py` Content: ``` 1 """ 2 This file should only be imported from Python 3. 3 It will raise SyntaxError when importing from Python 2. 4 """ 5 import asyncio 6 from collections import namedtuple 7 import time 8 import inspect 9 10 import ray 11 12 13 def sync_to_async(func): 14 """Convert a blocking function to async function""" 15 16 if inspect.iscoroutinefunction(func): 17 return func 18 19 async def wrapper(*args, **kwargs): 20 return func(*args, **kwargs) 21 22 return wrapper 23 24 25 # Class encapsulate the get result from direct actor. 26 # Case 1: plasma_fallback_id=None, result=<Object> 27 # Case 2: plasma_fallback_id=ObjectID, result=None 28 AsyncGetResponse = namedtuple("AsyncGetResponse", 29 ["plasma_fallback_id", "result"]) 30 31 32 def get_async(object_id): 33 """Asyncio compatible version of ray.get""" 34 # Delayed import because raylet import this file and 35 # it creates circular imports. 36 from ray.experimental.async_api import init as async_api_init, as_future 37 from ray.experimental.async_plasma import PlasmaObjectFuture 38 39 assert isinstance(object_id, ray.ObjectID), "Batched get is not supported." 40 41 # Setup 42 async_api_init() 43 loop = asyncio.get_event_loop() 44 core_worker = ray.worker.global_worker.core_worker 45 46 # Here's the callback used to implement async get logic. 47 # What we want: 48 # - If direct call, first try to get it from in memory store. 49 # If the object if promoted to plasma, retry it from plasma API. 50 # - If not direct call, directly use plasma API to get it. 51 user_future = loop.create_future() 52 53 # We have three future objects here. 54 # user_future is directly returned to the user from this function. 55 # and it will be eventually fulfilled by the final result. 56 # inner_future is the first attempt to retrieve the object. It can be 57 # fulfilled by either core_worker.get_async or plasma_api.as_future. 58 # When inner_future completes, done_callback will be invoked. This 59 # callback set the final object in user_future if the object hasn't 60 # been promoted by plasma, otherwise it will retry from plasma. 61 # retry_plasma_future is only created when we are getting objects that's 62 # promoted to plasma. It will also invoke the done_callback when it's 63 # fulfilled. 64 65 def done_callback(future): 66 result = future.result() 67 # Result from async plasma, transparently pass it to user future 68 if isinstance(future, PlasmaObjectFuture): 69 if isinstance(result, ray.exceptions.RayTaskError): 70 ray.worker.last_task_error_raise_time = time.time() 71 user_future.set_exception(result.as_instanceof_cause()) 72 else: 73 user_future.set_result(result) 74 else: 75 # Result from direct call. 76 assert isinstance(result, AsyncGetResponse), result 77 if result.plasma_fallback_id is None: 78 if isinstance(result.result, ray.exceptions.RayTaskError): 79 ray.worker.last_task_error_raise_time = time.time() 80 user_future.set_exception( 81 result.result.as_instanceof_cause()) 82 else: 83 user_future.set_result(result.result) 84 else: 85 # Schedule plasma to async get, use the the same callback. 86 retry_plasma_future = as_future(result.plasma_fallback_id) 87 retry_plasma_future.add_done_callback(done_callback) 88 # A hack to keep reference to the future so it doesn't get GC. 89 user_future.retry_plasma_future = retry_plasma_future 90 91 inner_future = loop.create_future() 92 # We must add the done_callback before sending to in_memory_store_get 93 inner_future.add_done_callback(done_callback) 94 core_worker.in_memory_store_get_async(object_id, inner_future) 95 # A hack to keep reference to inner_future so it doesn't get GC. 96 user_future.inner_future = inner_future 97 # A hack to keep a reference to the object ID for ref counting. 98 user_future.object_id = object_id 99 100 return user_future 101 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/python/ray/async_compat.py b/python/ray/async_compat.py --- a/python/ray/async_compat.py +++ b/python/ray/async_compat.py @@ -75,6 +75,11 @@ # Result from direct call. assert isinstance(result, AsyncGetResponse), result if result.plasma_fallback_id is None: + # If this future has result set already, we just need to + # skip the set result/exception procedure. + if user_future.done(): + return + if isinstance(result.result, ray.exceptions.RayTaskError): ray.worker.last_task_error_raise_time = time.time() user_future.set_exception(
{"golden_diff": "diff --git a/python/ray/async_compat.py b/python/ray/async_compat.py\n--- a/python/ray/async_compat.py\n+++ b/python/ray/async_compat.py\n@@ -75,6 +75,11 @@\n # Result from direct call.\n assert isinstance(result, AsyncGetResponse), result\n if result.plasma_fallback_id is None:\n+ # If this future has result set already, we just need to\n+ # skip the set result/exception procedure.\n+ if user_future.done():\n+ return\n+\n if isinstance(result.result, ray.exceptions.RayTaskError):\n ray.worker.last_task_error_raise_time = time.time()\n user_future.set_exception(\n", "issue": "[Asyncio] InvalidStateError when multiple awaits on same oid\n<!--Please include [tune], [rllib], [autoscaler] etc. in the issue title if relevant-->\r\n\r\n### What is the problem?\r\n\r\n*Ray version and other system information (Python version, TensorFlow version, OS):*\r\n\r\n### Reproduction (REQUIRED)\r\nPlease provide a script that can be run to reproduce the issue. The script should have **no external library dependencies** (i.e., use fake or mock data / environments):\r\n\r\nIf we cannot run your script, we cannot fix your issue.\r\n```python\r\nimport ray\r\nimport time\r\n\r\nray.init()\r\[email protected]\r\ndef f():\r\n time.sleep(5)\r\n\r\noid = f.remote()\r\nawait asyncio.wait_for(oid, timeout=1)\r\nawait asyncio.wait_for(oid, timeout=1)\r\n```\r\n\r\nOutput\r\n```\r\nException in callback get_async.<locals>.done_callback(<Future finis... result=None)>) at /Users/simonmo/Desktop/ray/ray/python/ray/async_compat.py:65\r\nhandle: <Handle get_async.<locals>.done_callback(<Future finis... result=None)>) at /Users/simonmo/Desktop/ray/ray/python/ray/async_compat.py:65>\r\nTraceback (most recent call last):\r\n File \"/Users/simonmo/miniconda3/lib/python3.6/asyncio/events.py\", line 145, in _run\r\n self._callback(*self._args)\r\n File \"/Users/simonmo/Desktop/ray/ray/python/ray/async_compat.py\", line 83, in done_callback\r\n user_future.set_result(result.result)\r\nasyncio.base_futures.InvalidStateError: invalid state\r\n```\r\n- [ ] I have verified my script runs in a clean environment and reproduces the issue.\r\n- [ ] I have verified the issue also occurs with the [latest wheels](https://docs.ray.io/en/latest/installation.html).\r\n\n", "before_files": [{"content": "\"\"\"\nThis file should only be imported from Python 3.\nIt will raise SyntaxError when importing from Python 2.\n\"\"\"\nimport asyncio\nfrom collections import namedtuple\nimport time\nimport inspect\n\nimport ray\n\n\ndef sync_to_async(func):\n \"\"\"Convert a blocking function to async function\"\"\"\n\n if inspect.iscoroutinefunction(func):\n return func\n\n async def wrapper(*args, **kwargs):\n return func(*args, **kwargs)\n\n return wrapper\n\n\n# Class encapsulate the get result from direct actor.\n# Case 1: plasma_fallback_id=None, result=<Object>\n# Case 2: plasma_fallback_id=ObjectID, result=None\nAsyncGetResponse = namedtuple(\"AsyncGetResponse\",\n [\"plasma_fallback_id\", \"result\"])\n\n\ndef get_async(object_id):\n \"\"\"Asyncio compatible version of ray.get\"\"\"\n # Delayed import because raylet import this file and\n # it creates circular imports.\n from ray.experimental.async_api import init as async_api_init, as_future\n from ray.experimental.async_plasma import PlasmaObjectFuture\n\n assert isinstance(object_id, ray.ObjectID), \"Batched get is not supported.\"\n\n # Setup\n async_api_init()\n loop = asyncio.get_event_loop()\n core_worker = ray.worker.global_worker.core_worker\n\n # Here's the callback used to implement async get logic.\n # What we want:\n # - If direct call, first try to get it from in memory store.\n # If the object if promoted to plasma, retry it from plasma API.\n # - If not direct call, directly use plasma API to get it.\n user_future = loop.create_future()\n\n # We have three future objects here.\n # user_future is directly returned to the user from this function.\n # and it will be eventually fulfilled by the final result.\n # inner_future is the first attempt to retrieve the object. It can be\n # fulfilled by either core_worker.get_async or plasma_api.as_future.\n # When inner_future completes, done_callback will be invoked. This\n # callback set the final object in user_future if the object hasn't\n # been promoted by plasma, otherwise it will retry from plasma.\n # retry_plasma_future is only created when we are getting objects that's\n # promoted to plasma. It will also invoke the done_callback when it's\n # fulfilled.\n\n def done_callback(future):\n result = future.result()\n # Result from async plasma, transparently pass it to user future\n if isinstance(future, PlasmaObjectFuture):\n if isinstance(result, ray.exceptions.RayTaskError):\n ray.worker.last_task_error_raise_time = time.time()\n user_future.set_exception(result.as_instanceof_cause())\n else:\n user_future.set_result(result)\n else:\n # Result from direct call.\n assert isinstance(result, AsyncGetResponse), result\n if result.plasma_fallback_id is None:\n if isinstance(result.result, ray.exceptions.RayTaskError):\n ray.worker.last_task_error_raise_time = time.time()\n user_future.set_exception(\n result.result.as_instanceof_cause())\n else:\n user_future.set_result(result.result)\n else:\n # Schedule plasma to async get, use the the same callback.\n retry_plasma_future = as_future(result.plasma_fallback_id)\n retry_plasma_future.add_done_callback(done_callback)\n # A hack to keep reference to the future so it doesn't get GC.\n user_future.retry_plasma_future = retry_plasma_future\n\n inner_future = loop.create_future()\n # We must add the done_callback before sending to in_memory_store_get\n inner_future.add_done_callback(done_callback)\n core_worker.in_memory_store_get_async(object_id, inner_future)\n # A hack to keep reference to inner_future so it doesn't get GC.\n user_future.inner_future = inner_future\n # A hack to keep a reference to the object ID for ref counting.\n user_future.object_id = object_id\n\n return user_future\n", "path": "python/ray/async_compat.py"}], "after_files": [{"content": "\"\"\"\nThis file should only be imported from Python 3.\nIt will raise SyntaxError when importing from Python 2.\n\"\"\"\nimport asyncio\nfrom collections import namedtuple\nimport time\nimport inspect\n\nimport ray\n\n\ndef sync_to_async(func):\n \"\"\"Convert a blocking function to async function\"\"\"\n\n if inspect.iscoroutinefunction(func):\n return func\n\n async def wrapper(*args, **kwargs):\n return func(*args, **kwargs)\n\n return wrapper\n\n\n# Class encapsulate the get result from direct actor.\n# Case 1: plasma_fallback_id=None, result=<Object>\n# Case 2: plasma_fallback_id=ObjectID, result=None\nAsyncGetResponse = namedtuple(\"AsyncGetResponse\",\n [\"plasma_fallback_id\", \"result\"])\n\n\ndef get_async(object_id):\n \"\"\"Asyncio compatible version of ray.get\"\"\"\n # Delayed import because raylet import this file and\n # it creates circular imports.\n from ray.experimental.async_api import init as async_api_init, as_future\n from ray.experimental.async_plasma import PlasmaObjectFuture\n\n assert isinstance(object_id, ray.ObjectID), \"Batched get is not supported.\"\n\n # Setup\n async_api_init()\n loop = asyncio.get_event_loop()\n core_worker = ray.worker.global_worker.core_worker\n\n # Here's the callback used to implement async get logic.\n # What we want:\n # - If direct call, first try to get it from in memory store.\n # If the object if promoted to plasma, retry it from plasma API.\n # - If not direct call, directly use plasma API to get it.\n user_future = loop.create_future()\n\n # We have three future objects here.\n # user_future is directly returned to the user from this function.\n # and it will be eventually fulfilled by the final result.\n # inner_future is the first attempt to retrieve the object. It can be\n # fulfilled by either core_worker.get_async or plasma_api.as_future.\n # When inner_future completes, done_callback will be invoked. This\n # callback set the final object in user_future if the object hasn't\n # been promoted by plasma, otherwise it will retry from plasma.\n # retry_plasma_future is only created when we are getting objects that's\n # promoted to plasma. It will also invoke the done_callback when it's\n # fulfilled.\n\n def done_callback(future):\n result = future.result()\n # Result from async plasma, transparently pass it to user future\n if isinstance(future, PlasmaObjectFuture):\n if isinstance(result, ray.exceptions.RayTaskError):\n ray.worker.last_task_error_raise_time = time.time()\n user_future.set_exception(result.as_instanceof_cause())\n else:\n user_future.set_result(result)\n else:\n # Result from direct call.\n assert isinstance(result, AsyncGetResponse), result\n if result.plasma_fallback_id is None:\n # If this future has result set already, we just need to\n # skip the set result/exception procedure.\n if user_future.done():\n return\n\n if isinstance(result.result, ray.exceptions.RayTaskError):\n ray.worker.last_task_error_raise_time = time.time()\n user_future.set_exception(\n result.result.as_instanceof_cause())\n else:\n user_future.set_result(result.result)\n else:\n # Schedule plasma to async get, use the the same callback.\n retry_plasma_future = as_future(result.plasma_fallback_id)\n retry_plasma_future.add_done_callback(done_callback)\n # A hack to keep reference to the future so it doesn't get GC.\n user_future.retry_plasma_future = retry_plasma_future\n\n inner_future = loop.create_future()\n # We must add the done_callback before sending to in_memory_store_get\n inner_future.add_done_callback(done_callback)\n core_worker.in_memory_store_get_async(object_id, inner_future)\n # A hack to keep reference to inner_future so it doesn't get GC.\n user_future.inner_future = inner_future\n # A hack to keep a reference to the object ID for ref counting.\n user_future.object_id = object_id\n\n return user_future\n", "path": "python/ray/async_compat.py"}]}
1,726
152
gh_patches_debug_675
rasdani/github-patches
git_diff
joke2k__faker-1423
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Faker adds path objects to sys.path_importer_cache, breaking other packages * Faker version: 6.6.3 * OS: Gentoo Linux After importing `faker`, entries with `PosixPath` objects are added as keys to `sys.path_importer_cache`. However, the keys are supposed to be regular `str`s there, and the wrong type breaks software relying on `str` methods being available, e.g. astroid: ``` ___________________________________________ ClassNodeTest.test_slots_added_dynamically_still_inferred ____________________________________________ self = <tests.unittest_scoped_nodes.ClassNodeTest testMethod=test_slots_added_dynamically_still_inferred> def tearDown(self): del sys.path[0] datadir = find("") for key in list(sys.path_importer_cache): > if key.startswith(datadir): E AttributeError: 'PosixPath' object has no attribute 'startswith' tests/resources.py:41: AttributeError ``` Note that since Faker installs a pytest plugin, it is autoloaded by default in all programs' test suites. ### Steps to reproduce ``` import sys import faker print(sys.path_importer_cache) ``` ### Expected behavior The printed dict should only contain `str` keys. ### Actual behavior ``` [...] PosixPath('/usr/lib/python3.9/site-packages/faker/providers/address'): FileFinder(PosixPath('/usr/lib/python3.9/site-packages/faker/providers/address')), PosixPath('/usr/lib/python3.9/site-packages/faker/providers/automotive'): FileFinder(PosixPath('/usr/lib/python3.9/site-packages/faker/providers/automotive')), [...] ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `faker/utils/loading.py` Content: ``` 1 import pkgutil 2 import sys 3 4 from importlib import import_module 5 from pathlib import Path 6 from types import ModuleType 7 from typing import List, Set 8 9 10 def get_path(module: ModuleType) -> str: 11 if getattr(sys, 'frozen', False): 12 # frozen 13 14 if getattr(sys, '_MEIPASS', False): 15 # PyInstaller 16 lib_dir = Path(getattr(sys, '_MEIPASS')) 17 else: 18 # others 19 lib_dir = Path(sys.executable).parent / 'lib' 20 21 path = lib_dir.joinpath(*module.__package__.split(".")) 22 else: 23 # unfrozen 24 path = Path(module.__file__).parent 25 return path 26 27 28 def list_module(module: ModuleType) -> List[str]: 29 path = get_path(module) 30 31 if getattr(sys, '_MEIPASS', False): 32 # PyInstaller 33 return [file.parent.name for file in Path(path).glob('*/__init__.py')] 34 else: 35 return [name for _, name, is_pkg in pkgutil.iter_modules([path]) if is_pkg] 36 37 38 def find_available_locales(providers: List[str]) -> List[str]: 39 available_locales: Set[str] = set() 40 41 for provider_path in providers: 42 43 provider_module = import_module(provider_path) 44 if getattr(provider_module, 'localized', False): 45 langs = list_module(provider_module) 46 available_locales.update(langs) 47 available_locales: List[str] = sorted(available_locales) 48 return available_locales 49 50 51 def find_available_providers(modules: List[ModuleType]) -> List[str]: 52 available_providers = set() 53 for providers_mod in modules: 54 if providers_mod.__package__: 55 providers = [ 56 '.'.join([providers_mod.__package__, mod]) 57 for mod in list_module(providers_mod) if mod != '__pycache__' 58 ] 59 available_providers.update(providers) 60 return sorted(available_providers) 61 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/faker/utils/loading.py b/faker/utils/loading.py --- a/faker/utils/loading.py +++ b/faker/utils/loading.py @@ -22,7 +22,7 @@ else: # unfrozen path = Path(module.__file__).parent - return path + return str(path) def list_module(module: ModuleType) -> List[str]:
{"golden_diff": "diff --git a/faker/utils/loading.py b/faker/utils/loading.py\n--- a/faker/utils/loading.py\n+++ b/faker/utils/loading.py\n@@ -22,7 +22,7 @@\n else:\n # unfrozen\n path = Path(module.__file__).parent\n- return path\n+ return str(path)\n \n \n def list_module(module: ModuleType) -> List[str]:\n", "issue": "Faker adds path objects to sys.path_importer_cache, breaking other packages\n* Faker version: 6.6.3\r\n* OS: Gentoo Linux\r\n\r\nAfter importing `faker`, entries with `PosixPath` objects are added as keys to `sys.path_importer_cache`. However, the keys are supposed to be regular `str`s there, and the wrong type breaks software relying on `str` methods being available, e.g. astroid:\r\n\r\n```\r\n___________________________________________ ClassNodeTest.test_slots_added_dynamically_still_inferred ____________________________________________\r\n\r\nself = <tests.unittest_scoped_nodes.ClassNodeTest testMethod=test_slots_added_dynamically_still_inferred>\r\n\r\n def tearDown(self):\r\n del sys.path[0]\r\n datadir = find(\"\")\r\n for key in list(sys.path_importer_cache):\r\n> if key.startswith(datadir):\r\nE AttributeError: 'PosixPath' object has no attribute 'startswith'\r\n\r\ntests/resources.py:41: AttributeError\r\n```\r\n\r\nNote that since Faker installs a pytest plugin, it is autoloaded by default in all programs' test suites.\r\n\r\n### Steps to reproduce\r\n\r\n```\r\nimport sys\r\nimport faker\r\nprint(sys.path_importer_cache)\r\n```\r\n\r\n### Expected behavior\r\n\r\nThe printed dict should only contain `str` keys.\r\n\r\n### Actual behavior\r\n\r\n```\r\n[...] PosixPath('/usr/lib/python3.9/site-packages/faker/providers/address'): FileFinder(PosixPath('/usr/lib/python3.9/site-packages/faker/providers/address')), PosixPath('/usr/lib/python3.9/site-packages/faker/providers/automotive'): FileFinder(PosixPath('/usr/lib/python3.9/site-packages/faker/providers/automotive')), [...]\r\n```\r\n\n", "before_files": [{"content": "import pkgutil\nimport sys\n\nfrom importlib import import_module\nfrom pathlib import Path\nfrom types import ModuleType\nfrom typing import List, Set\n\n\ndef get_path(module: ModuleType) -> str:\n if getattr(sys, 'frozen', False):\n # frozen\n\n if getattr(sys, '_MEIPASS', False):\n # PyInstaller\n lib_dir = Path(getattr(sys, '_MEIPASS'))\n else:\n # others\n lib_dir = Path(sys.executable).parent / 'lib'\n\n path = lib_dir.joinpath(*module.__package__.split(\".\"))\n else:\n # unfrozen\n path = Path(module.__file__).parent\n return path\n\n\ndef list_module(module: ModuleType) -> List[str]:\n path = get_path(module)\n\n if getattr(sys, '_MEIPASS', False):\n # PyInstaller\n return [file.parent.name for file in Path(path).glob('*/__init__.py')]\n else:\n return [name for _, name, is_pkg in pkgutil.iter_modules([path]) if is_pkg]\n\n\ndef find_available_locales(providers: List[str]) -> List[str]:\n available_locales: Set[str] = set()\n\n for provider_path in providers:\n\n provider_module = import_module(provider_path)\n if getattr(provider_module, 'localized', False):\n langs = list_module(provider_module)\n available_locales.update(langs)\n available_locales: List[str] = sorted(available_locales)\n return available_locales\n\n\ndef find_available_providers(modules: List[ModuleType]) -> List[str]:\n available_providers = set()\n for providers_mod in modules:\n if providers_mod.__package__:\n providers = [\n '.'.join([providers_mod.__package__, mod])\n for mod in list_module(providers_mod) if mod != '__pycache__'\n ]\n available_providers.update(providers)\n return sorted(available_providers)\n", "path": "faker/utils/loading.py"}], "after_files": [{"content": "import pkgutil\nimport sys\n\nfrom importlib import import_module\nfrom pathlib import Path\nfrom types import ModuleType\nfrom typing import List, Set\n\n\ndef get_path(module: ModuleType) -> str:\n if getattr(sys, 'frozen', False):\n # frozen\n\n if getattr(sys, '_MEIPASS', False):\n # PyInstaller\n lib_dir = Path(getattr(sys, '_MEIPASS'))\n else:\n # others\n lib_dir = Path(sys.executable).parent / 'lib'\n\n path = lib_dir.joinpath(*module.__package__.split(\".\"))\n else:\n # unfrozen\n path = Path(module.__file__).parent\n return str(path)\n\n\ndef list_module(module: ModuleType) -> List[str]:\n path = get_path(module)\n\n if getattr(sys, '_MEIPASS', False):\n # PyInstaller\n return [file.parent.name for file in Path(path).glob('*/__init__.py')]\n else:\n return [name for _, name, is_pkg in pkgutil.iter_modules([path]) if is_pkg]\n\n\ndef find_available_locales(providers: List[str]) -> List[str]:\n available_locales: Set[str] = set()\n\n for provider_path in providers:\n\n provider_module = import_module(provider_path)\n if getattr(provider_module, 'localized', False):\n langs = list_module(provider_module)\n available_locales.update(langs)\n available_locales: List[str] = sorted(available_locales)\n return available_locales\n\n\ndef find_available_providers(modules: List[ModuleType]) -> List[str]:\n available_providers = set()\n for providers_mod in modules:\n if providers_mod.__package__:\n providers = [\n '.'.join([providers_mod.__package__, mod])\n for mod in list_module(providers_mod) if mod != '__pycache__'\n ]\n available_providers.update(providers)\n return sorted(available_providers)\n", "path": "faker/utils/loading.py"}]}
1,148
86
gh_patches_debug_33060
rasdani/github-patches
git_diff
pypa__virtualenv-1794
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- activate_this.py failed for python2 virtualenvs **Issue** It seems recently pipenv introduced a new type of activate_this.py. On windows the content of activate_this.py has something like this: ``` prev_length = len(sys.path) for lib in "'..\\Lib\\site-packages".split(os.pathsep): path = os.path.realpath(os.path.join(bin_dir, lib)) site.addsitedir(path.decode("utf-8") if "'yes" else path) sys.path[:] = sys.path[prev_length:] + sys.path[0:prev_length] ``` As you can see the "'..\\Lib\\site-packages" is obviously wrong. **Environment** Provide at least: - OS: Windows 10 - ``pip list`` of the host python where ``virtualenv`` is installed: ```console virtualenv 20.0.18 virtualenv-clone 0.5.4 ``` **Output of the virtual environment creation** As I'm using virtualenv through pipenv, so I failed to grab the virtualenv output ``` [ ==] Creating virtual environment...created virtual environment CPython2.7.17.final.0-64 in 641ms creator CPython2Windows(dest=C:\Users\win10\.virtualenvs\win10-obmjl69F, clear=False, global=False) seeder FromAppData(download=False, pip=latest, setuptools=latest, wheel=latest, via=copy, app_data_dir=C:\Users\win10\AppData\Local\pypa\virtualenv\seed-app-data\v1.0.1) activators BashActivator,BatchActivator,FishActivator,PowerShellActivator,PythonActivator ``` However I've located the related code and wrote out its runtime variable information: The following is the output of _repr_unicode function in ```src/virtualenv/activation/python/__init__.py``` ``` '(win10) 'C:\\Users\\win10\\.virtualenvs\\win10-obmjl69F 'win10-obmjl69F 'Scripts '; '..\\Lib\\site-packages 'yes ``` As you can see, there's an additional ' before each item. I've done a small experiment on python 3.6 and 3.7: ``` >>> value = "..\\123456" >>> repr(value.encode("utf-8")) "b'..\\\\123456'" >>> repr(value.encode("utf-8"))[1:-1] "'..\\\\123456" >>> ``` I believe there's something wrong with this function. This function is introduced in PR #1503 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/virtualenv/activation/python/__init__.py` Content: ``` 1 from __future__ import absolute_import, unicode_literals 2 3 import os 4 from collections import OrderedDict 5 6 from virtualenv.util.path import Path 7 from virtualenv.util.six import ensure_text 8 9 from ..via_template import ViaTemplateActivator 10 11 12 class PythonActivator(ViaTemplateActivator): 13 def templates(self): 14 yield Path("activate_this.py") 15 16 def replacements(self, creator, dest_folder): 17 replacements = super(PythonActivator, self).replacements(creator, dest_folder) 18 lib_folders = OrderedDict((os.path.relpath(str(i), str(dest_folder)), None) for i in creator.libs) 19 win_py2 = creator.interpreter.platform == "win32" and creator.interpreter.version_info.major == 2 20 replacements.update( 21 { 22 "__LIB_FOLDERS__": ensure_text(os.pathsep.join(lib_folders.keys())), 23 "__DECODE_PATH__": ("yes" if win_py2 else ""), 24 } 25 ) 26 return replacements 27 28 @staticmethod 29 def _repr_unicode(creator, value): 30 py2 = creator.interpreter.version_info.major == 2 31 if py2: # on Python 2 we need to encode this into explicit utf-8, py3 supports unicode literals 32 value = ensure_text(repr(value.encode("utf-8"))[1:-1]) 33 return value 34 ``` Path: `src/virtualenv/activation/via_template.py` Content: ``` 1 from __future__ import absolute_import, unicode_literals 2 3 import os 4 import sys 5 from abc import ABCMeta, abstractmethod 6 7 from six import add_metaclass 8 9 from virtualenv.util.six import ensure_text 10 11 from .activator import Activator 12 13 if sys.version_info >= (3, 7): 14 from importlib.resources import read_text 15 else: 16 from importlib_resources import read_text 17 18 19 @add_metaclass(ABCMeta) 20 class ViaTemplateActivator(Activator): 21 @abstractmethod 22 def templates(self): 23 raise NotImplementedError 24 25 def generate(self, creator): 26 dest_folder = creator.bin_dir 27 replacements = self.replacements(creator, dest_folder) 28 self._generate(replacements, self.templates(), dest_folder, creator) 29 if self.flag_prompt is not None: 30 creator.pyenv_cfg["prompt"] = self.flag_prompt 31 32 def replacements(self, creator, dest_folder): 33 return { 34 "__VIRTUAL_PROMPT__": "" if self.flag_prompt is None else self.flag_prompt, 35 "__VIRTUAL_ENV__": ensure_text(str(creator.dest)), 36 "__VIRTUAL_NAME__": creator.env_name, 37 "__BIN_NAME__": ensure_text(str(creator.bin_dir.relative_to(creator.dest))), 38 "__PATH_SEP__": ensure_text(os.pathsep), 39 } 40 41 def _generate(self, replacements, templates, to_folder, creator): 42 for template in templates: 43 text = self.instantiate_template(replacements, template, creator) 44 dest = to_folder / self.as_name(template) 45 dest.write_text(text, encoding="utf-8") 46 47 def as_name(self, template): 48 return template.name 49 50 def instantiate_template(self, replacements, template, creator): 51 # read text and do replacements 52 text = read_text(self.__module__, str(template), encoding="utf-8", errors="strict") 53 for key, value in replacements.items(): 54 value = self._repr_unicode(creator, value) 55 text = text.replace(key, value) 56 return text 57 58 @staticmethod 59 def _repr_unicode(creator, value): 60 # by default we just let it be unicode 61 return value 62 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/virtualenv/activation/python/__init__.py b/src/virtualenv/activation/python/__init__.py --- a/src/virtualenv/activation/python/__init__.py +++ b/src/virtualenv/activation/python/__init__.py @@ -1,6 +1,7 @@ from __future__ import absolute_import, unicode_literals import os +import sys from collections import OrderedDict from virtualenv.util.path import Path @@ -29,5 +30,5 @@ def _repr_unicode(creator, value): py2 = creator.interpreter.version_info.major == 2 if py2: # on Python 2 we need to encode this into explicit utf-8, py3 supports unicode literals - value = ensure_text(repr(value.encode("utf-8"))[1:-1]) + value = ensure_text(repr(value.encode("utf-8"))[2 if sys.version_info[0] == 3 else 1 : -1]) return value diff --git a/src/virtualenv/activation/via_template.py b/src/virtualenv/activation/via_template.py --- a/src/virtualenv/activation/via_template.py +++ b/src/virtualenv/activation/via_template.py @@ -25,9 +25,10 @@ def generate(self, creator): dest_folder = creator.bin_dir replacements = self.replacements(creator, dest_folder) - self._generate(replacements, self.templates(), dest_folder, creator) + at_path = self._generate(replacements, self.templates(), dest_folder, creator) if self.flag_prompt is not None: creator.pyenv_cfg["prompt"] = self.flag_prompt + return at_path def replacements(self, creator, dest_folder): return { @@ -43,6 +44,7 @@ text = self.instantiate_template(replacements, template, creator) dest = to_folder / self.as_name(template) dest.write_text(text, encoding="utf-8") + return dest def as_name(self, template): return template.name
{"golden_diff": "diff --git a/src/virtualenv/activation/python/__init__.py b/src/virtualenv/activation/python/__init__.py\n--- a/src/virtualenv/activation/python/__init__.py\n+++ b/src/virtualenv/activation/python/__init__.py\n@@ -1,6 +1,7 @@\n from __future__ import absolute_import, unicode_literals\n \n import os\n+import sys\n from collections import OrderedDict\n \n from virtualenv.util.path import Path\n@@ -29,5 +30,5 @@\n def _repr_unicode(creator, value):\n py2 = creator.interpreter.version_info.major == 2\n if py2: # on Python 2 we need to encode this into explicit utf-8, py3 supports unicode literals\n- value = ensure_text(repr(value.encode(\"utf-8\"))[1:-1])\n+ value = ensure_text(repr(value.encode(\"utf-8\"))[2 if sys.version_info[0] == 3 else 1 : -1])\n return value\ndiff --git a/src/virtualenv/activation/via_template.py b/src/virtualenv/activation/via_template.py\n--- a/src/virtualenv/activation/via_template.py\n+++ b/src/virtualenv/activation/via_template.py\n@@ -25,9 +25,10 @@\n def generate(self, creator):\n dest_folder = creator.bin_dir\n replacements = self.replacements(creator, dest_folder)\n- self._generate(replacements, self.templates(), dest_folder, creator)\n+ at_path = self._generate(replacements, self.templates(), dest_folder, creator)\n if self.flag_prompt is not None:\n creator.pyenv_cfg[\"prompt\"] = self.flag_prompt\n+ return at_path\n \n def replacements(self, creator, dest_folder):\n return {\n@@ -43,6 +44,7 @@\n text = self.instantiate_template(replacements, template, creator)\n dest = to_folder / self.as_name(template)\n dest.write_text(text, encoding=\"utf-8\")\n+ return dest\n \n def as_name(self, template):\n return template.name\n", "issue": "activate_this.py failed for python2 virtualenvs\n**Issue**\r\n\r\nIt seems recently pipenv introduced a new type of activate_this.py. On windows the content of activate_this.py has something like this:\r\n```\r\nprev_length = len(sys.path)\r\nfor lib in \"'..\\\\Lib\\\\site-packages\".split(os.pathsep):\r\n path = os.path.realpath(os.path.join(bin_dir, lib))\r\n site.addsitedir(path.decode(\"utf-8\") if \"'yes\" else path)\r\nsys.path[:] = sys.path[prev_length:] + sys.path[0:prev_length]\r\n```\r\nAs you can see the \"'..\\\\Lib\\\\site-packages\" is obviously wrong.\r\n\r\n**Environment**\r\n\r\nProvide at least:\r\n- OS: Windows 10\r\n- ``pip list`` of the host python where ``virtualenv`` is installed:\r\n\r\n ```console\r\n virtualenv 20.0.18\r\n virtualenv-clone 0.5.4\r\n ```\r\n\r\n**Output of the virtual environment creation**\r\n\r\nAs I'm using virtualenv through pipenv, so I failed to grab the virtualenv output\r\n\r\n```\r\n[ ==] Creating virtual environment...created virtual environment CPython2.7.17.final.0-64 in 641ms\r\n\r\n creator CPython2Windows(dest=C:\\Users\\win10\\.virtualenvs\\win10-obmjl69F, clear=False, global=False)\r\n\r\n seeder FromAppData(download=False, pip=latest, setuptools=latest, wheel=latest, via=copy, app_data_dir=C:\\Users\\win10\\AppData\\Local\\pypa\\virtualenv\\seed-app-data\\v1.0.1)\r\n\r\n activators BashActivator,BatchActivator,FishActivator,PowerShellActivator,PythonActivator\r\n```\r\n\r\nHowever I've located the related code and wrote out its runtime variable information:\r\nThe following is the output of _repr_unicode function in ```src/virtualenv/activation/python/__init__.py```\r\n\r\n```\r\n'(win10) \r\n'C:\\\\Users\\\\win10\\\\.virtualenvs\\\\win10-obmjl69F\r\n'win10-obmjl69F\r\n'Scripts\r\n';\r\n'..\\\\Lib\\\\site-packages\r\n'yes\r\n```\r\nAs you can see, there's an additional ' before each item. I've done a small experiment on python 3.6 and 3.7:\r\n```\r\n>>> value = \"..\\\\123456\"\r\n>>> repr(value.encode(\"utf-8\"))\r\n\"b'..\\\\\\\\123456'\"\r\n>>> repr(value.encode(\"utf-8\"))[1:-1]\r\n\"'..\\\\\\\\123456\"\r\n>>>\r\n```\r\nI believe there's something wrong with this function. This function is introduced in PR #1503 \n", "before_files": [{"content": "from __future__ import absolute_import, unicode_literals\n\nimport os\nfrom collections import OrderedDict\n\nfrom virtualenv.util.path import Path\nfrom virtualenv.util.six import ensure_text\n\nfrom ..via_template import ViaTemplateActivator\n\n\nclass PythonActivator(ViaTemplateActivator):\n def templates(self):\n yield Path(\"activate_this.py\")\n\n def replacements(self, creator, dest_folder):\n replacements = super(PythonActivator, self).replacements(creator, dest_folder)\n lib_folders = OrderedDict((os.path.relpath(str(i), str(dest_folder)), None) for i in creator.libs)\n win_py2 = creator.interpreter.platform == \"win32\" and creator.interpreter.version_info.major == 2\n replacements.update(\n {\n \"__LIB_FOLDERS__\": ensure_text(os.pathsep.join(lib_folders.keys())),\n \"__DECODE_PATH__\": (\"yes\" if win_py2 else \"\"),\n }\n )\n return replacements\n\n @staticmethod\n def _repr_unicode(creator, value):\n py2 = creator.interpreter.version_info.major == 2\n if py2: # on Python 2 we need to encode this into explicit utf-8, py3 supports unicode literals\n value = ensure_text(repr(value.encode(\"utf-8\"))[1:-1])\n return value\n", "path": "src/virtualenv/activation/python/__init__.py"}, {"content": "from __future__ import absolute_import, unicode_literals\n\nimport os\nimport sys\nfrom abc import ABCMeta, abstractmethod\n\nfrom six import add_metaclass\n\nfrom virtualenv.util.six import ensure_text\n\nfrom .activator import Activator\n\nif sys.version_info >= (3, 7):\n from importlib.resources import read_text\nelse:\n from importlib_resources import read_text\n\n\n@add_metaclass(ABCMeta)\nclass ViaTemplateActivator(Activator):\n @abstractmethod\n def templates(self):\n raise NotImplementedError\n\n def generate(self, creator):\n dest_folder = creator.bin_dir\n replacements = self.replacements(creator, dest_folder)\n self._generate(replacements, self.templates(), dest_folder, creator)\n if self.flag_prompt is not None:\n creator.pyenv_cfg[\"prompt\"] = self.flag_prompt\n\n def replacements(self, creator, dest_folder):\n return {\n \"__VIRTUAL_PROMPT__\": \"\" if self.flag_prompt is None else self.flag_prompt,\n \"__VIRTUAL_ENV__\": ensure_text(str(creator.dest)),\n \"__VIRTUAL_NAME__\": creator.env_name,\n \"__BIN_NAME__\": ensure_text(str(creator.bin_dir.relative_to(creator.dest))),\n \"__PATH_SEP__\": ensure_text(os.pathsep),\n }\n\n def _generate(self, replacements, templates, to_folder, creator):\n for template in templates:\n text = self.instantiate_template(replacements, template, creator)\n dest = to_folder / self.as_name(template)\n dest.write_text(text, encoding=\"utf-8\")\n\n def as_name(self, template):\n return template.name\n\n def instantiate_template(self, replacements, template, creator):\n # read text and do replacements\n text = read_text(self.__module__, str(template), encoding=\"utf-8\", errors=\"strict\")\n for key, value in replacements.items():\n value = self._repr_unicode(creator, value)\n text = text.replace(key, value)\n return text\n\n @staticmethod\n def _repr_unicode(creator, value):\n # by default we just let it be unicode\n return value\n", "path": "src/virtualenv/activation/via_template.py"}], "after_files": [{"content": "from __future__ import absolute_import, unicode_literals\n\nimport os\nimport sys\nfrom collections import OrderedDict\n\nfrom virtualenv.util.path import Path\nfrom virtualenv.util.six import ensure_text\n\nfrom ..via_template import ViaTemplateActivator\n\n\nclass PythonActivator(ViaTemplateActivator):\n def templates(self):\n yield Path(\"activate_this.py\")\n\n def replacements(self, creator, dest_folder):\n replacements = super(PythonActivator, self).replacements(creator, dest_folder)\n lib_folders = OrderedDict((os.path.relpath(str(i), str(dest_folder)), None) for i in creator.libs)\n win_py2 = creator.interpreter.platform == \"win32\" and creator.interpreter.version_info.major == 2\n replacements.update(\n {\n \"__LIB_FOLDERS__\": ensure_text(os.pathsep.join(lib_folders.keys())),\n \"__DECODE_PATH__\": (\"yes\" if win_py2 else \"\"),\n }\n )\n return replacements\n\n @staticmethod\n def _repr_unicode(creator, value):\n py2 = creator.interpreter.version_info.major == 2\n if py2: # on Python 2 we need to encode this into explicit utf-8, py3 supports unicode literals\n value = ensure_text(repr(value.encode(\"utf-8\"))[2 if sys.version_info[0] == 3 else 1 : -1])\n return value\n", "path": "src/virtualenv/activation/python/__init__.py"}, {"content": "from __future__ import absolute_import, unicode_literals\n\nimport os\nimport sys\nfrom abc import ABCMeta, abstractmethod\n\nfrom six import add_metaclass\n\nfrom virtualenv.util.six import ensure_text\n\nfrom .activator import Activator\n\nif sys.version_info >= (3, 7):\n from importlib.resources import read_text\nelse:\n from importlib_resources import read_text\n\n\n@add_metaclass(ABCMeta)\nclass ViaTemplateActivator(Activator):\n @abstractmethod\n def templates(self):\n raise NotImplementedError\n\n def generate(self, creator):\n dest_folder = creator.bin_dir\n replacements = self.replacements(creator, dest_folder)\n at_path = self._generate(replacements, self.templates(), dest_folder, creator)\n if self.flag_prompt is not None:\n creator.pyenv_cfg[\"prompt\"] = self.flag_prompt\n return at_path\n\n def replacements(self, creator, dest_folder):\n return {\n \"__VIRTUAL_PROMPT__\": \"\" if self.flag_prompt is None else self.flag_prompt,\n \"__VIRTUAL_ENV__\": ensure_text(str(creator.dest)),\n \"__VIRTUAL_NAME__\": creator.env_name,\n \"__BIN_NAME__\": ensure_text(str(creator.bin_dir.relative_to(creator.dest))),\n \"__PATH_SEP__\": ensure_text(os.pathsep),\n }\n\n def _generate(self, replacements, templates, to_folder, creator):\n for template in templates:\n text = self.instantiate_template(replacements, template, creator)\n dest = to_folder / self.as_name(template)\n dest.write_text(text, encoding=\"utf-8\")\n return dest\n\n def as_name(self, template):\n return template.name\n\n def instantiate_template(self, replacements, template, creator):\n # read text and do replacements\n text = read_text(self.__module__, str(template), encoding=\"utf-8\", errors=\"strict\")\n for key, value in replacements.items():\n value = self._repr_unicode(creator, value)\n text = text.replace(key, value)\n return text\n\n @staticmethod\n def _repr_unicode(creator, value):\n # by default we just let it be unicode\n return value\n", "path": "src/virtualenv/activation/via_template.py"}]}
1,799
454
gh_patches_debug_49907
rasdani/github-patches
git_diff
CTFd__CTFd-1101
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- CTFd crash right after install ## Git + python 3.7.2 (3b1b82b9a0fbcb8731d7a3a3bbac99499c466c99) [![asciicast](https://asciinema.org/a/iHDMVNRWSYJDTZUgIy4wIBsOK.svg)](https://asciinema.org/a/iHDMVNRWSYJDTZUgIy4wIBsOK) ## Git + python 2.7.15 (3b1b82b9a0fbcb8731d7a3a3bbac99499c466c99) [![asciicast](https://asciinema.org/a/JRqfe0rMl0QeZAEklyvV1LQX9.svg)](https://asciinema.org/a/JRqfe0rMl0QeZAEklyvV1LQX9) ## 2.1.4 + python 3.7.2 [![asciicast](https://asciinema.org/a/wErebw8ZN2HFER9P71tLW4FBv.svg)](https://asciinema.org/a/wErebw8ZN2HFER9P71tLW4FBv) **Environment**: - CTFd Version/Commit: see titles - Operating System: ArchLinux - Web Browser and Version: Firefox **What happened?** CTFd crash right after install. **What did you expect to happen?** Working or at least a more comprehensible error. **How to reproduce your issue** See the asciinema, but basically - Download CTFd - Install dependencies - run flask - crash PS : with the untouched default config **Any associated stack traces or error logs** See the asciinema --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `wsgi.py` Content: ``` 1 from gevent import monkey 2 monkey.patch_all() 3 from CTFd import create_app 4 5 app = create_app() 6 7 if __name__ == '__main__': 8 app.run(debug=True, threaded=True, host="127.0.0.1", port=4000) 9 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/wsgi.py b/wsgi.py --- a/wsgi.py +++ b/wsgi.py @@ -1,8 +1,14 @@ -from gevent import monkey -monkey.patch_all() +import os + +# Detect if we're running via `flask run` and don't monkey patch +if not os.getenv("FLASK_RUN_FROM_CLI"): + from gevent import monkey + + monkey.patch_all() + from CTFd import create_app app = create_app() -if __name__ == '__main__': +if __name__ == "__main__": app.run(debug=True, threaded=True, host="127.0.0.1", port=4000)
{"golden_diff": "diff --git a/wsgi.py b/wsgi.py\n--- a/wsgi.py\n+++ b/wsgi.py\n@@ -1,8 +1,14 @@\n-from gevent import monkey\n-monkey.patch_all()\n+import os\n+\n+# Detect if we're running via `flask run` and don't monkey patch\n+if not os.getenv(\"FLASK_RUN_FROM_CLI\"):\n+ from gevent import monkey\n+\n+ monkey.patch_all()\n+\n from CTFd import create_app\n \n app = create_app()\n \n-if __name__ == '__main__':\n+if __name__ == \"__main__\":\n app.run(debug=True, threaded=True, host=\"127.0.0.1\", port=4000)\n", "issue": "CTFd crash right after install\n## Git + python 3.7.2 (3b1b82b9a0fbcb8731d7a3a3bbac99499c466c99)\r\n\r\n[![asciicast](https://asciinema.org/a/iHDMVNRWSYJDTZUgIy4wIBsOK.svg)](https://asciinema.org/a/iHDMVNRWSYJDTZUgIy4wIBsOK)\r\n\r\n## Git + python 2.7.15 (3b1b82b9a0fbcb8731d7a3a3bbac99499c466c99)\r\n\r\n[![asciicast](https://asciinema.org/a/JRqfe0rMl0QeZAEklyvV1LQX9.svg)](https://asciinema.org/a/JRqfe0rMl0QeZAEklyvV1LQX9)\r\n\r\n## 2.1.4 + python 3.7.2\r\n\r\n[![asciicast](https://asciinema.org/a/wErebw8ZN2HFER9P71tLW4FBv.svg)](https://asciinema.org/a/wErebw8ZN2HFER9P71tLW4FBv)\r\n\r\n**Environment**:\r\n\r\n - CTFd Version/Commit: see titles\r\n - Operating System: ArchLinux\r\n - Web Browser and Version: Firefox\r\n\r\n**What happened?**\r\n\r\nCTFd crash right after install.\r\n\r\n**What did you expect to happen?**\r\n\r\nWorking or at least a more comprehensible error.\r\n\r\n**How to reproduce your issue**\r\n\r\nSee the asciinema, but basically\r\n\r\n- Download CTFd\r\n- Install dependencies\r\n- run flask\r\n- crash\r\n\r\nPS : with the untouched default config\r\n\r\n**Any associated stack traces or error logs**\r\n\r\nSee the asciinema\n", "before_files": [{"content": "from gevent import monkey\nmonkey.patch_all()\nfrom CTFd import create_app\n\napp = create_app()\n\nif __name__ == '__main__':\n app.run(debug=True, threaded=True, host=\"127.0.0.1\", port=4000)\n", "path": "wsgi.py"}], "after_files": [{"content": "import os\n\n# Detect if we're running via `flask run` and don't monkey patch\nif not os.getenv(\"FLASK_RUN_FROM_CLI\"):\n from gevent import monkey\n\n monkey.patch_all()\n\nfrom CTFd import create_app\n\napp = create_app()\n\nif __name__ == \"__main__\":\n app.run(debug=True, threaded=True, host=\"127.0.0.1\", port=4000)\n", "path": "wsgi.py"}]}
763
156
gh_patches_debug_31995
rasdani/github-patches
git_diff
nvaccess__nvda-10182
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Screen curtain reveals mouse position Note, this was reported to me by my wife, so it would help to have at least one sighted person who can verify this. ### Steps to reproduce: 1. Enable the screen curtain by assigning a custom gesture to the global command in the vision category 2. Look at the screen ### Actual behavior: The screen is black, but the mouse is still shown. ### Expected behavior: Screen is entirely black ### Proposed fix There is a function in the magnification API that hides the system cursor, so that can be implemented for this. Having said that, I don't consider this very important. ### System configuration #### NVDA installed/portable/running from source: Installed #### NVDA version: alpha-18574 #### Windows version: Windows 10 1903 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `source/visionEnhancementProviders/screenCurtain.py` Content: ``` 1 # A part of NonVisual Desktop Access (NVDA) 2 # This file is covered by the GNU General Public License. 3 # See the file COPYING for more details. 4 # Copyright (C) 2018-2019 NV Access Limited, Babbage B.V., Leonard de Ruijter 5 6 """Screen curtain implementation based on the windows magnification API. 7 This implementation only works on Windows 8 and above. 8 """ 9 10 import vision 11 import winVersion 12 from ctypes import Structure, windll, c_float, POINTER, WINFUNCTYPE, WinError 13 from ctypes.wintypes import BOOL 14 15 16 class MAGCOLOREFFECT(Structure): 17 _fields_ = (("transform", c_float * 5 * 5),) 18 19 20 TRANSFORM_BLACK = MAGCOLOREFFECT() 21 TRANSFORM_BLACK.transform[4][4] = 1.0 22 23 24 def _errCheck(result, func, args): 25 if result == 0: 26 raise WinError() 27 return args 28 29 30 class Magnification: 31 """Static class that wraps necessary functions from the Windows magnification API.""" 32 33 _magnification = windll.Magnification 34 35 _MagInitializeFuncType = WINFUNCTYPE(BOOL) 36 _MagUninitializeFuncType = WINFUNCTYPE(BOOL) 37 _MagSetFullscreenColorEffectFuncType = WINFUNCTYPE(BOOL, POINTER(MAGCOLOREFFECT)) 38 _MagSetFullscreenColorEffectArgTypes = ((1, "effect"),) 39 _MagGetFullscreenColorEffectFuncType = WINFUNCTYPE(BOOL, POINTER(MAGCOLOREFFECT)) 40 _MagGetFullscreenColorEffectArgTypes = ((2, "effect"),) 41 42 MagInitialize = _MagInitializeFuncType(("MagInitialize", _magnification)) 43 MagInitialize.errcheck = _errCheck 44 MagUninitialize = _MagUninitializeFuncType(("MagUninitialize", _magnification)) 45 MagUninitialize.errcheck = _errCheck 46 try: 47 MagSetFullscreenColorEffect = _MagSetFullscreenColorEffectFuncType( 48 ("MagSetFullscreenColorEffect", _magnification), 49 _MagSetFullscreenColorEffectArgTypes 50 ) 51 MagSetFullscreenColorEffect.errcheck = _errCheck 52 MagGetFullscreenColorEffect = _MagGetFullscreenColorEffectFuncType( 53 ("MagGetFullscreenColorEffect", _magnification), 54 _MagGetFullscreenColorEffectArgTypes 55 ) 56 MagGetFullscreenColorEffect.errcheck = _errCheck 57 except AttributeError: 58 MagSetFullscreenColorEffect = None 59 MagGetFullscreenColorEffect = None 60 61 62 class VisionEnhancementProvider(vision.providerBase.VisionEnhancementProvider): 63 name = "screenCurtain" 64 # Translators: Description of a vision enhancement provider that disables output to the screen, 65 # making it black. 66 description = _("Screen Curtain") 67 supportedRoles = frozenset([vision.constants.Role.COLORENHANCER]) 68 69 @classmethod 70 def canStart(cls): 71 return winVersion.isFullScreenMagnificationAvailable() 72 73 def __init__(self): 74 super(VisionEnhancementProvider, self).__init__() 75 Magnification.MagInitialize() 76 Magnification.MagSetFullscreenColorEffect(TRANSFORM_BLACK) 77 78 def terminate(self): 79 super(VisionEnhancementProvider, self).terminate() 80 Magnification.MagUninitialize() 81 82 def registerEventExtensionPoints(self, extensionPoints): 83 # The screen curtain isn't interested in any events 84 pass 85 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/source/visionEnhancementProviders/screenCurtain.py b/source/visionEnhancementProviders/screenCurtain.py --- a/source/visionEnhancementProviders/screenCurtain.py +++ b/source/visionEnhancementProviders/screenCurtain.py @@ -38,6 +38,8 @@ _MagSetFullscreenColorEffectArgTypes = ((1, "effect"),) _MagGetFullscreenColorEffectFuncType = WINFUNCTYPE(BOOL, POINTER(MAGCOLOREFFECT)) _MagGetFullscreenColorEffectArgTypes = ((2, "effect"),) + _MagShowSystemCursorFuncType = WINFUNCTYPE(BOOL, BOOL) + _MagShowSystemCursorArgTypes = ((1, "showCursor"),) MagInitialize = _MagInitializeFuncType(("MagInitialize", _magnification)) MagInitialize.errcheck = _errCheck @@ -57,6 +59,11 @@ except AttributeError: MagSetFullscreenColorEffect = None MagGetFullscreenColorEffect = None + MagShowSystemCursor = _MagShowSystemCursorFuncType( + ("MagShowSystemCursor", _magnification), + _MagShowSystemCursorArgTypes + ) + MagShowSystemCursor.errcheck = _errCheck class VisionEnhancementProvider(vision.providerBase.VisionEnhancementProvider): @@ -73,10 +80,12 @@ def __init__(self): super(VisionEnhancementProvider, self).__init__() Magnification.MagInitialize() + Magnification.MagShowSystemCursor(False) Magnification.MagSetFullscreenColorEffect(TRANSFORM_BLACK) def terminate(self): super(VisionEnhancementProvider, self).terminate() + Magnification.MagShowSystemCursor(True) Magnification.MagUninitialize() def registerEventExtensionPoints(self, extensionPoints):
{"golden_diff": "diff --git a/source/visionEnhancementProviders/screenCurtain.py b/source/visionEnhancementProviders/screenCurtain.py\n--- a/source/visionEnhancementProviders/screenCurtain.py\n+++ b/source/visionEnhancementProviders/screenCurtain.py\n@@ -38,6 +38,8 @@\n \t_MagSetFullscreenColorEffectArgTypes = ((1, \"effect\"),)\r\n \t_MagGetFullscreenColorEffectFuncType = WINFUNCTYPE(BOOL, POINTER(MAGCOLOREFFECT))\r\n \t_MagGetFullscreenColorEffectArgTypes = ((2, \"effect\"),)\r\n+\t_MagShowSystemCursorFuncType = WINFUNCTYPE(BOOL, BOOL)\r\n+\t_MagShowSystemCursorArgTypes = ((1, \"showCursor\"),)\r\n \r\n \tMagInitialize = _MagInitializeFuncType((\"MagInitialize\", _magnification))\r\n \tMagInitialize.errcheck = _errCheck\r\n@@ -57,6 +59,11 @@\n \texcept AttributeError:\r\n \t\tMagSetFullscreenColorEffect = None\r\n \t\tMagGetFullscreenColorEffect = None\r\n+\tMagShowSystemCursor = _MagShowSystemCursorFuncType(\r\n+\t\t(\"MagShowSystemCursor\", _magnification),\r\n+\t\t_MagShowSystemCursorArgTypes\r\n+\t)\r\n+\tMagShowSystemCursor.errcheck = _errCheck\r\n \r\n \r\n class VisionEnhancementProvider(vision.providerBase.VisionEnhancementProvider):\r\n@@ -73,10 +80,12 @@\n \tdef __init__(self):\r\n \t\tsuper(VisionEnhancementProvider, self).__init__()\r\n \t\tMagnification.MagInitialize()\r\n+\t\tMagnification.MagShowSystemCursor(False)\r\n \t\tMagnification.MagSetFullscreenColorEffect(TRANSFORM_BLACK)\r\n \r\n \tdef terminate(self):\r\n \t\tsuper(VisionEnhancementProvider, self).terminate()\r\n+\t\tMagnification.MagShowSystemCursor(True)\r\n \t\tMagnification.MagUninitialize()\r\n \r\n \tdef registerEventExtensionPoints(self, extensionPoints):\n", "issue": "Screen curtain reveals mouse position\nNote, this was reported to me by my wife, so it would help to have at least one sighted person who can verify this.\r\n\r\n### Steps to reproduce:\r\n1. Enable the screen curtain by assigning a custom gesture to the global command in the vision category\r\n2. Look at the screen\r\n\r\n### Actual behavior:\r\nThe screen is black, but the mouse is still shown.\r\n\r\n### Expected behavior:\r\nScreen is entirely black\r\n\r\n### Proposed fix\r\nThere is a function in the magnification API that hides the system cursor, so that can be implemented for this. Having said that, I don't consider this very important.\r\n\r\n### System configuration\r\n#### NVDA installed/portable/running from source:\r\nInstalled\r\n\r\n#### NVDA version:\r\nalpha-18574\r\n\r\n#### Windows version:\r\nWindows 10 1903\r\n\n", "before_files": [{"content": "# A part of NonVisual Desktop Access (NVDA)\r\n# This file is covered by the GNU General Public License.\r\n# See the file COPYING for more details.\r\n# Copyright (C) 2018-2019 NV Access Limited, Babbage B.V., Leonard de Ruijter\r\n\r\n\"\"\"Screen curtain implementation based on the windows magnification API.\r\nThis implementation only works on Windows 8 and above.\r\n\"\"\"\r\n\r\nimport vision\r\nimport winVersion\r\nfrom ctypes import Structure, windll, c_float, POINTER, WINFUNCTYPE, WinError\r\nfrom ctypes.wintypes import BOOL\r\n\r\n\r\nclass MAGCOLOREFFECT(Structure):\r\n\t_fields_ = ((\"transform\", c_float * 5 * 5),)\r\n\r\n\r\nTRANSFORM_BLACK = MAGCOLOREFFECT()\r\nTRANSFORM_BLACK.transform[4][4] = 1.0\r\n\r\n\r\ndef _errCheck(result, func, args):\r\n\tif result == 0:\r\n\t\traise WinError()\r\n\treturn args\r\n\r\n\r\nclass Magnification:\r\n\t\"\"\"Static class that wraps necessary functions from the Windows magnification API.\"\"\"\r\n\r\n\t_magnification = windll.Magnification\r\n\r\n\t_MagInitializeFuncType = WINFUNCTYPE(BOOL)\r\n\t_MagUninitializeFuncType = WINFUNCTYPE(BOOL)\r\n\t_MagSetFullscreenColorEffectFuncType = WINFUNCTYPE(BOOL, POINTER(MAGCOLOREFFECT))\r\n\t_MagSetFullscreenColorEffectArgTypes = ((1, \"effect\"),)\r\n\t_MagGetFullscreenColorEffectFuncType = WINFUNCTYPE(BOOL, POINTER(MAGCOLOREFFECT))\r\n\t_MagGetFullscreenColorEffectArgTypes = ((2, \"effect\"),)\r\n\r\n\tMagInitialize = _MagInitializeFuncType((\"MagInitialize\", _magnification))\r\n\tMagInitialize.errcheck = _errCheck\r\n\tMagUninitialize = _MagUninitializeFuncType((\"MagUninitialize\", _magnification))\r\n\tMagUninitialize.errcheck = _errCheck\r\n\ttry:\r\n\t\tMagSetFullscreenColorEffect = _MagSetFullscreenColorEffectFuncType(\r\n\t\t\t(\"MagSetFullscreenColorEffect\", _magnification),\r\n\t\t\t_MagSetFullscreenColorEffectArgTypes\r\n\t\t)\r\n\t\tMagSetFullscreenColorEffect.errcheck = _errCheck\r\n\t\tMagGetFullscreenColorEffect = _MagGetFullscreenColorEffectFuncType(\r\n\t\t\t(\"MagGetFullscreenColorEffect\", _magnification),\r\n\t\t\t_MagGetFullscreenColorEffectArgTypes\r\n\t\t)\r\n\t\tMagGetFullscreenColorEffect.errcheck = _errCheck\r\n\texcept AttributeError:\r\n\t\tMagSetFullscreenColorEffect = None\r\n\t\tMagGetFullscreenColorEffect = None\r\n\r\n\r\nclass VisionEnhancementProvider(vision.providerBase.VisionEnhancementProvider):\r\n\tname = \"screenCurtain\"\r\n\t# Translators: Description of a vision enhancement provider that disables output to the screen,\r\n\t# making it black.\r\n\tdescription = _(\"Screen Curtain\")\r\n\tsupportedRoles = frozenset([vision.constants.Role.COLORENHANCER])\r\n\r\n\t@classmethod\r\n\tdef canStart(cls):\r\n\t\treturn winVersion.isFullScreenMagnificationAvailable()\r\n\r\n\tdef __init__(self):\r\n\t\tsuper(VisionEnhancementProvider, self).__init__()\r\n\t\tMagnification.MagInitialize()\r\n\t\tMagnification.MagSetFullscreenColorEffect(TRANSFORM_BLACK)\r\n\r\n\tdef terminate(self):\r\n\t\tsuper(VisionEnhancementProvider, self).terminate()\r\n\t\tMagnification.MagUninitialize()\r\n\r\n\tdef registerEventExtensionPoints(self, extensionPoints):\r\n\t\t# The screen curtain isn't interested in any events\r\n\t\tpass\r\n", "path": "source/visionEnhancementProviders/screenCurtain.py"}], "after_files": [{"content": "# A part of NonVisual Desktop Access (NVDA)\r\n# This file is covered by the GNU General Public License.\r\n# See the file COPYING for more details.\r\n# Copyright (C) 2018-2019 NV Access Limited, Babbage B.V., Leonard de Ruijter\r\n\r\n\"\"\"Screen curtain implementation based on the windows magnification API.\r\nThis implementation only works on Windows 8 and above.\r\n\"\"\"\r\n\r\nimport vision\r\nimport winVersion\r\nfrom ctypes import Structure, windll, c_float, POINTER, WINFUNCTYPE, WinError\r\nfrom ctypes.wintypes import BOOL\r\n\r\n\r\nclass MAGCOLOREFFECT(Structure):\r\n\t_fields_ = ((\"transform\", c_float * 5 * 5),)\r\n\r\n\r\nTRANSFORM_BLACK = MAGCOLOREFFECT()\r\nTRANSFORM_BLACK.transform[4][4] = 1.0\r\n\r\n\r\ndef _errCheck(result, func, args):\r\n\tif result == 0:\r\n\t\traise WinError()\r\n\treturn args\r\n\r\n\r\nclass Magnification:\r\n\t\"\"\"Static class that wraps necessary functions from the Windows magnification API.\"\"\"\r\n\r\n\t_magnification = windll.Magnification\r\n\r\n\t_MagInitializeFuncType = WINFUNCTYPE(BOOL)\r\n\t_MagUninitializeFuncType = WINFUNCTYPE(BOOL)\r\n\t_MagSetFullscreenColorEffectFuncType = WINFUNCTYPE(BOOL, POINTER(MAGCOLOREFFECT))\r\n\t_MagSetFullscreenColorEffectArgTypes = ((1, \"effect\"),)\r\n\t_MagGetFullscreenColorEffectFuncType = WINFUNCTYPE(BOOL, POINTER(MAGCOLOREFFECT))\r\n\t_MagGetFullscreenColorEffectArgTypes = ((2, \"effect\"),)\r\n\t_MagShowSystemCursorFuncType = WINFUNCTYPE(BOOL, BOOL)\r\n\t_MagShowSystemCursorArgTypes = ((1, \"showCursor\"),)\r\n\r\n\tMagInitialize = _MagInitializeFuncType((\"MagInitialize\", _magnification))\r\n\tMagInitialize.errcheck = _errCheck\r\n\tMagUninitialize = _MagUninitializeFuncType((\"MagUninitialize\", _magnification))\r\n\tMagUninitialize.errcheck = _errCheck\r\n\ttry:\r\n\t\tMagSetFullscreenColorEffect = _MagSetFullscreenColorEffectFuncType(\r\n\t\t\t(\"MagSetFullscreenColorEffect\", _magnification),\r\n\t\t\t_MagSetFullscreenColorEffectArgTypes\r\n\t\t)\r\n\t\tMagSetFullscreenColorEffect.errcheck = _errCheck\r\n\t\tMagGetFullscreenColorEffect = _MagGetFullscreenColorEffectFuncType(\r\n\t\t\t(\"MagGetFullscreenColorEffect\", _magnification),\r\n\t\t\t_MagGetFullscreenColorEffectArgTypes\r\n\t\t)\r\n\t\tMagGetFullscreenColorEffect.errcheck = _errCheck\r\n\texcept AttributeError:\r\n\t\tMagSetFullscreenColorEffect = None\r\n\t\tMagGetFullscreenColorEffect = None\r\n\tMagShowSystemCursor = _MagShowSystemCursorFuncType(\r\n\t\t(\"MagShowSystemCursor\", _magnification),\r\n\t\t_MagShowSystemCursorArgTypes\r\n\t)\r\n\tMagShowSystemCursor.errcheck = _errCheck\r\n\r\n\r\nclass VisionEnhancementProvider(vision.providerBase.VisionEnhancementProvider):\r\n\tname = \"screenCurtain\"\r\n\t# Translators: Description of a vision enhancement provider that disables output to the screen,\r\n\t# making it black.\r\n\tdescription = _(\"Screen Curtain\")\r\n\tsupportedRoles = frozenset([vision.constants.Role.COLORENHANCER])\r\n\r\n\t@classmethod\r\n\tdef canStart(cls):\r\n\t\treturn winVersion.isFullScreenMagnificationAvailable()\r\n\r\n\tdef __init__(self):\r\n\t\tsuper(VisionEnhancementProvider, self).__init__()\r\n\t\tMagnification.MagInitialize()\r\n\t\tMagnification.MagShowSystemCursor(False)\r\n\t\tMagnification.MagSetFullscreenColorEffect(TRANSFORM_BLACK)\r\n\r\n\tdef terminate(self):\r\n\t\tsuper(VisionEnhancementProvider, self).terminate()\r\n\t\tMagnification.MagShowSystemCursor(True)\r\n\t\tMagnification.MagUninitialize()\r\n\r\n\tdef registerEventExtensionPoints(self, extensionPoints):\r\n\t\t# The screen curtain isn't interested in any events\r\n\t\tpass\r\n", "path": "source/visionEnhancementProviders/screenCurtain.py"}]}
1,372
425
gh_patches_debug_12450
rasdani/github-patches
git_diff
mathesar-foundation__mathesar-3265
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add OpenAPI Specification for /queries/ endpoint and fix the incorrect specs This is a part of the API Documentation project #2888 ## Problem To ensure the accuracy of the specs generated by drf-spectacular, we will generate an OpenAPI Specification specifically for a particular endpoint. This will allow us to verify its correctness by comparing it with the existing test cases and the DRF browsable API page. This issue aims to generate spec for /queries/ endpoint. ## Proposed solution * Add /queries/ to the custom preprocessing hook function to filter out all endpoints except for the /queries/ endpoint. * Generate the spec file using the _./manage.py spectacular --color --file schema.yml_ command --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `config/settings/openapi.py` Content: ``` 1 def custom_preprocessing_hook(endpoints): 2 filtered = [] 3 for (path, path_regex, method, callback) in endpoints: 4 # Remove all but DRF API endpoints 5 if path.startswith("/api/db/v0/databases/") or path.startswith("/api/db/v0/data_files/") or path.startswith("/api/db/v0/schemas/") or path.startswith("/api/db/v0/tables/"): 6 filtered.append((path, path_regex, method, callback)) 7 return filtered 8 9 10 def remove_url_prefix_hook(result, **kwargs): 11 # Remove namespace and version URL prefix from the operation Id of the generated API schema 12 for path, path_info in result['paths'].items(): 13 for method, operation in path_info.items(): 14 operation_id = operation.get('operationId') 15 if operation_id: 16 if path.startswith('/api/db/v0/'): 17 operation['operationId'] = operation_id.replace('db_v0_', '') 18 elif path.startswith('/api/ui/v0/'): 19 operation['operationId'] = operation_id.replace('ui_v0_', '') 20 21 return result 22 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/config/settings/openapi.py b/config/settings/openapi.py --- a/config/settings/openapi.py +++ b/config/settings/openapi.py @@ -2,7 +2,7 @@ filtered = [] for (path, path_regex, method, callback) in endpoints: # Remove all but DRF API endpoints - if path.startswith("/api/db/v0/databases/") or path.startswith("/api/db/v0/data_files/") or path.startswith("/api/db/v0/schemas/") or path.startswith("/api/db/v0/tables/"): + if path.startswith("/api/db/v0/databases/") or path.startswith("/api/db/v0/data_files/") or path.startswith("/api/db/v0/schemas/") or path.startswith("/api/db/v0/tables/") or path.startswith("/api/db/v0/links/") or path.startswith("/api/db/v0/queries/"): filtered.append((path, path_regex, method, callback)) return filtered
{"golden_diff": "diff --git a/config/settings/openapi.py b/config/settings/openapi.py\n--- a/config/settings/openapi.py\n+++ b/config/settings/openapi.py\n@@ -2,7 +2,7 @@\n filtered = []\n for (path, path_regex, method, callback) in endpoints:\n # Remove all but DRF API endpoints\n- if path.startswith(\"/api/db/v0/databases/\") or path.startswith(\"/api/db/v0/data_files/\") or path.startswith(\"/api/db/v0/schemas/\") or path.startswith(\"/api/db/v0/tables/\"):\n+ if path.startswith(\"/api/db/v0/databases/\") or path.startswith(\"/api/db/v0/data_files/\") or path.startswith(\"/api/db/v0/schemas/\") or path.startswith(\"/api/db/v0/tables/\") or path.startswith(\"/api/db/v0/links/\") or path.startswith(\"/api/db/v0/queries/\"):\n filtered.append((path, path_regex, method, callback))\n return filtered\n", "issue": "Add OpenAPI Specification for /queries/ endpoint and fix the incorrect specs\nThis is a part of the API Documentation project #2888 \r\n## Problem\r\nTo ensure the accuracy of the specs generated by drf-spectacular, we will generate an OpenAPI Specification specifically for a particular endpoint. This will allow us to verify its correctness by comparing it with the existing test cases and the DRF browsable API page. This issue aims to generate spec for /queries/ endpoint.\r\n\r\n## Proposed solution\r\n* Add /queries/ to the custom preprocessing hook function to filter out all endpoints except for the /queries/ endpoint. \r\n* Generate the spec file using the _./manage.py spectacular --color --file schema.yml_ command\n", "before_files": [{"content": "def custom_preprocessing_hook(endpoints):\n filtered = []\n for (path, path_regex, method, callback) in endpoints:\n # Remove all but DRF API endpoints\n if path.startswith(\"/api/db/v0/databases/\") or path.startswith(\"/api/db/v0/data_files/\") or path.startswith(\"/api/db/v0/schemas/\") or path.startswith(\"/api/db/v0/tables/\"):\n filtered.append((path, path_regex, method, callback))\n return filtered\n\n\ndef remove_url_prefix_hook(result, **kwargs):\n # Remove namespace and version URL prefix from the operation Id of the generated API schema\n for path, path_info in result['paths'].items():\n for method, operation in path_info.items():\n operation_id = operation.get('operationId')\n if operation_id:\n if path.startswith('/api/db/v0/'):\n operation['operationId'] = operation_id.replace('db_v0_', '')\n elif path.startswith('/api/ui/v0/'):\n operation['operationId'] = operation_id.replace('ui_v0_', '')\n\n return result\n", "path": "config/settings/openapi.py"}], "after_files": [{"content": "def custom_preprocessing_hook(endpoints):\n filtered = []\n for (path, path_regex, method, callback) in endpoints:\n # Remove all but DRF API endpoints\n if path.startswith(\"/api/db/v0/databases/\") or path.startswith(\"/api/db/v0/data_files/\") or path.startswith(\"/api/db/v0/schemas/\") or path.startswith(\"/api/db/v0/tables/\") or path.startswith(\"/api/db/v0/links/\") or path.startswith(\"/api/db/v0/queries/\"):\n filtered.append((path, path_regex, method, callback))\n return filtered\n\n\ndef remove_url_prefix_hook(result, **kwargs):\n # Remove namespace and version URL prefix from the operation Id of the generated API schema\n for path, path_info in result['paths'].items():\n for method, operation in path_info.items():\n operation_id = operation.get('operationId')\n if operation_id:\n if path.startswith('/api/db/v0/'):\n operation['operationId'] = operation_id.replace('db_v0_', '')\n elif path.startswith('/api/ui/v0/'):\n operation['operationId'] = operation_id.replace('ui_v0_', '')\n\n return result\n", "path": "config/settings/openapi.py"}]}
670
204
gh_patches_debug_10650
rasdani/github-patches
git_diff
enthought__chaco-499
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Demo noninteractive.py, Errors after <ENTER> **Problem Description** Errors after <ENTER> **Reproduction Steps:** Run noninteractive.py and hit ENTER **Expected behavior:** ``` Please enter a path in which to place generated plots. Press <ENTER> to generate in the current directory. Path: Traceback (most recent call last): File "noninteractive.py", line 113, in <module> draw_plot(get_directory('noninteractive.png'), size=(800, 600)) File "noninteractive.py", line 90, in get_directory path = input('Path: ').strip() File "<string>", line 0 ^ SyntaxError: unexpected EOF while parsing ``` **OS, Python version:** OSX, Python 2.7 xref: #385 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `examples/demo/noninteractive.py` Content: ``` 1 #!/usr/bin/env python 2 """ 3 This demonstrates how to create a plot offscreen and save it to an image 4 file on disk. 5 """ 6 from __future__ import print_function 7 8 # Standard library imports 9 import os 10 import sys 11 12 # Major library imports 13 from numpy import linspace 14 from scipy.special import jn 15 16 # Enthought library imports 17 from traits.etsconfig.api import ETSConfig 18 19 # Chaco imports 20 from chaco.api import ArrayPlotData, Plot, PlotGraphicsContext 21 from chaco.example_support import COLOR_PALETTE 22 23 DPI = 72.0 24 25 # This is a bit of a hack, to work around the fact that line widths don't scale 26 # with the GraphicsContext's CTM. 27 dpi_scale = DPI / 72.0 28 29 30 def create_plot(): 31 numpoints = 100 32 low = -5 33 high = 15.0 34 x = linspace(low, high, numpoints) 35 pd = ArrayPlotData(index=x) 36 p = Plot(pd, bgcolor="oldlace", padding=50, border_visible=True) 37 for i in range(10): 38 pd.set_data("y" + str(i), jn(i, x)) 39 p.plot(("index", "y" + str(i)), color=tuple(COLOR_PALETTE[i]), 40 width=2.0 * dpi_scale) 41 p.x_grid.visible = True 42 p.x_grid.line_width *= dpi_scale 43 p.y_grid.visible = True 44 p.y_grid.line_width *= dpi_scale 45 p.legend.visible = True 46 return p 47 48 49 def draw_plot(filename, size=(800, 600)): 50 container = create_plot() 51 container.outer_bounds = list(size) 52 container.do_layout(force=True) 53 gc = PlotGraphicsContext(size, dpi=DPI) 54 gc.render_component(container) 55 gc.save(filename) 56 return 57 58 59 def draw_svg(filename, size=(800, 600)): 60 from chaco.svg_graphics_context import SVGGraphicsContext 61 container = create_plot() 62 container.bounds = list(size) 63 container.do_layout(force=True) 64 gc = SVGGraphicsContext(size) 65 gc.render_component(container) 66 gc.save(filename) 67 68 69 def draw_pdf(filename, size=(800, 600)): 70 from chaco.pdf_graphics_context import PdfPlotGraphicsContext 71 container = create_plot() 72 container.outer_bounds = list(size) 73 container.do_layout(force=True) 74 gc = PdfPlotGraphicsContext(filename=filename, 75 dest_box=(0.5, 0.5, 5.0, 5.0)) 76 77 for i in range(2): 78 # draw the plot 79 gc.render_component(container) 80 81 #Start a new page for subsequent draw commands. 82 gc.add_page() 83 84 gc.save() 85 86 87 def get_directory(filename): 88 print('Please enter a path in which to place generated plots.') 89 print('Press <ENTER> to generate in the current directory.') 90 path = input('Path: ').strip() 91 92 if len(path) > 0 and not os.path.exists(path): 93 print('The given path does not exist.') 94 sys.exit() 95 96 if not os.path.isabs(path): 97 print('Creating image: ' + os.path.join(os.getcwd(), path, filename)) 98 99 else: 100 print('Creating image: ' + os.path.join(path, filename)) 101 102 return os.path.join(path, filename) 103 104 105 if __name__ == "__main__": 106 if ETSConfig.kiva_backend == 'svg': 107 # Render the plot as a SVG 108 draw_svg(get_directory('noninteractive.svg'), size=(800, 600)) 109 elif ETSConfig.kiva_backend == 'pdf': 110 # Render the plot as a PDF, requires on ReportLab 111 draw_pdf(get_directory('noninteractive.pdf')) 112 else: 113 draw_plot(get_directory('noninteractive.png'), size=(800, 600)) 114 115 # EOF 116 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/examples/demo/noninteractive.py b/examples/demo/noninteractive.py --- a/examples/demo/noninteractive.py +++ b/examples/demo/noninteractive.py @@ -87,7 +87,14 @@ def get_directory(filename): print('Please enter a path in which to place generated plots.') print('Press <ENTER> to generate in the current directory.') - path = input('Path: ').strip() + + # If python 2.7, use raw_input to parse empty string correctly + try: + get_input = raw_input + except NameError: + get_input = input + + path = get_input('Path: ').strip() if len(path) > 0 and not os.path.exists(path): print('The given path does not exist.')
{"golden_diff": "diff --git a/examples/demo/noninteractive.py b/examples/demo/noninteractive.py\n--- a/examples/demo/noninteractive.py\n+++ b/examples/demo/noninteractive.py\n@@ -87,7 +87,14 @@\n def get_directory(filename):\n print('Please enter a path in which to place generated plots.')\n print('Press <ENTER> to generate in the current directory.')\n- path = input('Path: ').strip()\n+\n+ # If python 2.7, use raw_input to parse empty string correctly\n+ try:\n+ get_input = raw_input\n+ except NameError:\n+ get_input = input\n+\n+ path = get_input('Path: ').strip()\n \n if len(path) > 0 and not os.path.exists(path):\n print('The given path does not exist.')\n", "issue": "Demo noninteractive.py, Errors after <ENTER>\n**Problem Description**\r\nErrors after <ENTER>\r\n**Reproduction Steps:**\r\n\r\nRun noninteractive.py and hit ENTER\r\n\r\n**Expected behavior:**\r\n```\r\n Please enter a path in which to place generated plots.\r\nPress <ENTER> to generate in the current directory.\r\nPath:\r\nTraceback (most recent call last):\r\n File \"noninteractive.py\", line 113, in <module>\r\n draw_plot(get_directory('noninteractive.png'), size=(800, 600))\r\n File \"noninteractive.py\", line 90, in get_directory\r\n path = input('Path: ').strip()\r\n File \"<string>\", line 0\r\n\r\n ^\r\nSyntaxError: unexpected EOF while parsing\r\n```\r\n**OS, Python version:**\r\nOSX, Python 2.7\r\n\r\nxref: #385 \n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"\nThis demonstrates how to create a plot offscreen and save it to an image\nfile on disk.\n\"\"\"\nfrom __future__ import print_function\n\n# Standard library imports\nimport os\nimport sys\n\n# Major library imports\nfrom numpy import linspace\nfrom scipy.special import jn\n\n# Enthought library imports\nfrom traits.etsconfig.api import ETSConfig\n\n# Chaco imports\nfrom chaco.api import ArrayPlotData, Plot, PlotGraphicsContext\nfrom chaco.example_support import COLOR_PALETTE\n\nDPI = 72.0\n\n# This is a bit of a hack, to work around the fact that line widths don't scale\n# with the GraphicsContext's CTM.\ndpi_scale = DPI / 72.0\n\n\ndef create_plot():\n numpoints = 100\n low = -5\n high = 15.0\n x = linspace(low, high, numpoints)\n pd = ArrayPlotData(index=x)\n p = Plot(pd, bgcolor=\"oldlace\", padding=50, border_visible=True)\n for i in range(10):\n pd.set_data(\"y\" + str(i), jn(i, x))\n p.plot((\"index\", \"y\" + str(i)), color=tuple(COLOR_PALETTE[i]),\n width=2.0 * dpi_scale)\n p.x_grid.visible = True\n p.x_grid.line_width *= dpi_scale\n p.y_grid.visible = True\n p.y_grid.line_width *= dpi_scale\n p.legend.visible = True\n return p\n\n\ndef draw_plot(filename, size=(800, 600)):\n container = create_plot()\n container.outer_bounds = list(size)\n container.do_layout(force=True)\n gc = PlotGraphicsContext(size, dpi=DPI)\n gc.render_component(container)\n gc.save(filename)\n return\n\n\ndef draw_svg(filename, size=(800, 600)):\n from chaco.svg_graphics_context import SVGGraphicsContext\n container = create_plot()\n container.bounds = list(size)\n container.do_layout(force=True)\n gc = SVGGraphicsContext(size)\n gc.render_component(container)\n gc.save(filename)\n\n\ndef draw_pdf(filename, size=(800, 600)):\n from chaco.pdf_graphics_context import PdfPlotGraphicsContext\n container = create_plot()\n container.outer_bounds = list(size)\n container.do_layout(force=True)\n gc = PdfPlotGraphicsContext(filename=filename,\n dest_box=(0.5, 0.5, 5.0, 5.0))\n\n for i in range(2):\n # draw the plot\n gc.render_component(container)\n\n #Start a new page for subsequent draw commands.\n gc.add_page()\n\n gc.save()\n\n\ndef get_directory(filename):\n print('Please enter a path in which to place generated plots.')\n print('Press <ENTER> to generate in the current directory.')\n path = input('Path: ').strip()\n\n if len(path) > 0 and not os.path.exists(path):\n print('The given path does not exist.')\n sys.exit()\n\n if not os.path.isabs(path):\n print('Creating image: ' + os.path.join(os.getcwd(), path, filename))\n\n else:\n print('Creating image: ' + os.path.join(path, filename))\n\n return os.path.join(path, filename)\n\n\nif __name__ == \"__main__\":\n if ETSConfig.kiva_backend == 'svg':\n # Render the plot as a SVG\n draw_svg(get_directory('noninteractive.svg'), size=(800, 600))\n elif ETSConfig.kiva_backend == 'pdf':\n # Render the plot as a PDF, requires on ReportLab\n draw_pdf(get_directory('noninteractive.pdf'))\n else:\n draw_plot(get_directory('noninteractive.png'), size=(800, 600))\n\n# EOF\n", "path": "examples/demo/noninteractive.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\"\"\"\nThis demonstrates how to create a plot offscreen and save it to an image\nfile on disk.\n\"\"\"\nfrom __future__ import print_function\n\n# Standard library imports\nimport os\nimport sys\n\n# Major library imports\nfrom numpy import linspace\nfrom scipy.special import jn\n\n# Enthought library imports\nfrom traits.etsconfig.api import ETSConfig\n\n# Chaco imports\nfrom chaco.api import ArrayPlotData, Plot, PlotGraphicsContext\nfrom chaco.example_support import COLOR_PALETTE\n\nDPI = 72.0\n\n# This is a bit of a hack, to work around the fact that line widths don't scale\n# with the GraphicsContext's CTM.\ndpi_scale = DPI / 72.0\n\n\ndef create_plot():\n numpoints = 100\n low = -5\n high = 15.0\n x = linspace(low, high, numpoints)\n pd = ArrayPlotData(index=x)\n p = Plot(pd, bgcolor=\"oldlace\", padding=50, border_visible=True)\n for i in range(10):\n pd.set_data(\"y\" + str(i), jn(i, x))\n p.plot((\"index\", \"y\" + str(i)), color=tuple(COLOR_PALETTE[i]),\n width=2.0 * dpi_scale)\n p.x_grid.visible = True\n p.x_grid.line_width *= dpi_scale\n p.y_grid.visible = True\n p.y_grid.line_width *= dpi_scale\n p.legend.visible = True\n return p\n\n\ndef draw_plot(filename, size=(800, 600)):\n container = create_plot()\n container.outer_bounds = list(size)\n container.do_layout(force=True)\n gc = PlotGraphicsContext(size, dpi=DPI)\n gc.render_component(container)\n gc.save(filename)\n return\n\n\ndef draw_svg(filename, size=(800, 600)):\n from chaco.svg_graphics_context import SVGGraphicsContext\n container = create_plot()\n container.bounds = list(size)\n container.do_layout(force=True)\n gc = SVGGraphicsContext(size)\n gc.render_component(container)\n gc.save(filename)\n\n\ndef draw_pdf(filename, size=(800, 600)):\n from chaco.pdf_graphics_context import PdfPlotGraphicsContext\n container = create_plot()\n container.outer_bounds = list(size)\n container.do_layout(force=True)\n gc = PdfPlotGraphicsContext(filename=filename,\n dest_box=(0.5, 0.5, 5.0, 5.0))\n\n for i in range(2):\n # draw the plot\n gc.render_component(container)\n\n #Start a new page for subsequent draw commands.\n gc.add_page()\n\n gc.save()\n\n\ndef get_directory(filename):\n print('Please enter a path in which to place generated plots.')\n print('Press <ENTER> to generate in the current directory.')\n\n # If python 2.7, use raw_input to parse empty string correctly\n try:\n get_input = raw_input\n except NameError:\n get_input = input\n\n path = get_input('Path: ').strip()\n\n if len(path) > 0 and not os.path.exists(path):\n print('The given path does not exist.')\n sys.exit()\n\n if not os.path.isabs(path):\n print('Creating image: ' + os.path.join(os.getcwd(), path, filename))\n\n else:\n print('Creating image: ' + os.path.join(path, filename))\n\n return os.path.join(path, filename)\n\n\nif __name__ == \"__main__\":\n if ETSConfig.kiva_backend == 'svg':\n # Render the plot as a SVG\n draw_svg(get_directory('noninteractive.svg'), size=(800, 600))\n elif ETSConfig.kiva_backend == 'pdf':\n # Render the plot as a PDF, requires on ReportLab\n draw_pdf(get_directory('noninteractive.pdf'))\n else:\n draw_plot(get_directory('noninteractive.png'), size=(800, 600))\n\n# EOF\n", "path": "examples/demo/noninteractive.py"}]}
1,535
173
gh_patches_debug_172
rasdani/github-patches
git_diff
ManageIQ__integration_tests-471
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Paginator returns wrong rec_end() result When record is last one on it's own on the last page, rec_end() incorrectly shows 1, instead of rec_total() value. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `cfme/web_ui/paginator.py` Content: ``` 1 """A set of functions for dealing with the paginator controls.""" 2 import cfme.fixtures.pytest_selenium as sel 3 import re 4 5 _locator = '(//div[@id="paging_div"] | //div[@id="records_div"])' 6 _next = '//img[@alt="Next"]' 7 _previous = '//img[@alt="Previous"]' 8 _first = '//img[@alt="First"]' 9 _last = '//img[@alt="Last"]' 10 _num_results = '//select[@id="ppsetting" or @id="perpage_setting1"]' 11 _sort_by = '//select[@id="sort_choice"]' 12 _page_cell = '//td//td[contains(., " of ")]' 13 _check_all = '//input[@id="masterToggle"]' 14 15 16 def _page_nums(): 17 return sel.element(_locator + _page_cell).text 18 19 20 def check_all(): 21 """ Returns the Check All locator.""" 22 return sel.element(_locator + _check_all) 23 24 25 def next(): 26 """ Returns the Next button locator.""" 27 btn = sel.element(_locator + _next) 28 return btn 29 30 31 def previous(): 32 """ Returns the Previous button locator.""" 33 btn = sel.element(_locator + _previous) 34 return btn 35 36 37 def first(): 38 """ Returns the First button locator.""" 39 btn = sel.element(_locator + _first) 40 return btn 41 42 43 def last(): 44 """ Returns the Last button locator.""" 45 btn = sel.element(_locator + _last) 46 return btn 47 48 49 def results_per_page(num): 50 """ Changes the number of results on a page. 51 52 Args: 53 num: A string, or a tuple of (type, value). 54 """ 55 select = sel.element(_locator + _num_results) 56 sel.select(select, num) 57 58 59 def sort_by(sort): 60 """ Changes the sort by field. 61 62 Args: 63 num: A string, or a tuple of (type, value). 64 """ 65 select = sel.element(_locator + _sort_by) 66 sel.select(select, sort) 67 68 69 def rec_offset(): 70 """ Returns the first record offset.""" 71 offset = re.search('\((Item|Items)*\s*(\d+)', _page_nums()) 72 return offset.groups()[1] 73 74 75 def rec_end(): 76 """ Returns the record set index.""" 77 offset = re.search('-(\d+)', _page_nums()) 78 if offset: 79 return offset.groups()[0] 80 else: 81 return '1' 82 83 84 def rec_total(): 85 """ Returns the total number of records.""" 86 offset = re.search('(\d+)\)', _page_nums()) 87 return offset.groups()[0] 88 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/cfme/web_ui/paginator.py b/cfme/web_ui/paginator.py --- a/cfme/web_ui/paginator.py +++ b/cfme/web_ui/paginator.py @@ -78,7 +78,7 @@ if offset: return offset.groups()[0] else: - return '1' + return rec_total() def rec_total():
{"golden_diff": "diff --git a/cfme/web_ui/paginator.py b/cfme/web_ui/paginator.py\n--- a/cfme/web_ui/paginator.py\n+++ b/cfme/web_ui/paginator.py\n@@ -78,7 +78,7 @@\n if offset:\n return offset.groups()[0]\n else:\n- return '1'\n+ return rec_total()\n \n \n def rec_total():\n", "issue": "Paginator returns wrong rec_end() result\nWhen record is last one on it's own on the last page, rec_end() incorrectly shows 1, instead of rec_total() value.\n\n", "before_files": [{"content": "\"\"\"A set of functions for dealing with the paginator controls.\"\"\"\nimport cfme.fixtures.pytest_selenium as sel\nimport re\n\n_locator = '(//div[@id=\"paging_div\"] | //div[@id=\"records_div\"])'\n_next = '//img[@alt=\"Next\"]'\n_previous = '//img[@alt=\"Previous\"]'\n_first = '//img[@alt=\"First\"]'\n_last = '//img[@alt=\"Last\"]'\n_num_results = '//select[@id=\"ppsetting\" or @id=\"perpage_setting1\"]'\n_sort_by = '//select[@id=\"sort_choice\"]'\n_page_cell = '//td//td[contains(., \" of \")]'\n_check_all = '//input[@id=\"masterToggle\"]'\n\n\ndef _page_nums():\n return sel.element(_locator + _page_cell).text\n\n\ndef check_all():\n \"\"\" Returns the Check All locator.\"\"\"\n return sel.element(_locator + _check_all)\n\n\ndef next():\n \"\"\" Returns the Next button locator.\"\"\"\n btn = sel.element(_locator + _next)\n return btn\n\n\ndef previous():\n \"\"\" Returns the Previous button locator.\"\"\"\n btn = sel.element(_locator + _previous)\n return btn\n\n\ndef first():\n \"\"\" Returns the First button locator.\"\"\"\n btn = sel.element(_locator + _first)\n return btn\n\n\ndef last():\n \"\"\" Returns the Last button locator.\"\"\"\n btn = sel.element(_locator + _last)\n return btn\n\n\ndef results_per_page(num):\n \"\"\" Changes the number of results on a page.\n\n Args:\n num: A string, or a tuple of (type, value).\n \"\"\"\n select = sel.element(_locator + _num_results)\n sel.select(select, num)\n\n\ndef sort_by(sort):\n \"\"\" Changes the sort by field.\n\n Args:\n num: A string, or a tuple of (type, value).\n \"\"\"\n select = sel.element(_locator + _sort_by)\n sel.select(select, sort)\n\n\ndef rec_offset():\n \"\"\" Returns the first record offset.\"\"\"\n offset = re.search('\\((Item|Items)*\\s*(\\d+)', _page_nums())\n return offset.groups()[1]\n\n\ndef rec_end():\n \"\"\" Returns the record set index.\"\"\"\n offset = re.search('-(\\d+)', _page_nums())\n if offset:\n return offset.groups()[0]\n else:\n return '1'\n\n\ndef rec_total():\n \"\"\" Returns the total number of records.\"\"\"\n offset = re.search('(\\d+)\\)', _page_nums())\n return offset.groups()[0]\n", "path": "cfme/web_ui/paginator.py"}], "after_files": [{"content": "\"\"\"A set of functions for dealing with the paginator controls.\"\"\"\nimport cfme.fixtures.pytest_selenium as sel\nimport re\n\n_locator = '(//div[@id=\"paging_div\"] | //div[@id=\"records_div\"])'\n_next = '//img[@alt=\"Next\"]'\n_previous = '//img[@alt=\"Previous\"]'\n_first = '//img[@alt=\"First\"]'\n_last = '//img[@alt=\"Last\"]'\n_num_results = '//select[@id=\"ppsetting\" or @id=\"perpage_setting1\"]'\n_sort_by = '//select[@id=\"sort_choice\"]'\n_page_cell = '//td//td[contains(., \" of \")]'\n_check_all = '//input[@id=\"masterToggle\"]'\n\n\ndef _page_nums():\n return sel.element(_locator + _page_cell).text\n\n\ndef check_all():\n \"\"\" Returns the Check All locator.\"\"\"\n return sel.element(_locator + _check_all)\n\n\ndef next():\n \"\"\" Returns the Next button locator.\"\"\"\n btn = sel.element(_locator + _next)\n return btn\n\n\ndef previous():\n \"\"\" Returns the Previous button locator.\"\"\"\n btn = sel.element(_locator + _previous)\n return btn\n\n\ndef first():\n \"\"\" Returns the First button locator.\"\"\"\n btn = sel.element(_locator + _first)\n return btn\n\n\ndef last():\n \"\"\" Returns the Last button locator.\"\"\"\n btn = sel.element(_locator + _last)\n return btn\n\n\ndef results_per_page(num):\n \"\"\" Changes the number of results on a page.\n\n Args:\n num: A string, or a tuple of (type, value).\n \"\"\"\n select = sel.element(_locator + _num_results)\n sel.select(select, num)\n\n\ndef sort_by(sort):\n \"\"\" Changes the sort by field.\n\n Args:\n num: A string, or a tuple of (type, value).\n \"\"\"\n select = sel.element(_locator + _sort_by)\n sel.select(select, sort)\n\n\ndef rec_offset():\n \"\"\" Returns the first record offset.\"\"\"\n offset = re.search('\\((Item|Items)*\\s*(\\d+)', _page_nums())\n return offset.groups()[1]\n\n\ndef rec_end():\n \"\"\" Returns the record set index.\"\"\"\n offset = re.search('-(\\d+)', _page_nums())\n if offset:\n return offset.groups()[0]\n else:\n return rec_total()\n\n\ndef rec_total():\n \"\"\" Returns the total number of records.\"\"\"\n offset = re.search('(\\d+)\\)', _page_nums())\n return offset.groups()[0]\n", "path": "cfme/web_ui/paginator.py"}]}
1,021
87
gh_patches_debug_37039
rasdani/github-patches
git_diff
microsoft__nni-1640
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- nnictl package install not working for venv It seems that `nnictl package install` makes the package going to user level, which makes it not accessible from a venv. Warning: ``` Installing collected packages: ConfigSpaceNNI, docutils, pynisher, pyrfr, sphinxcontrib-htmlhelp, sphinxcontrib-qthelp, sphinxcontrib-devhelp, imagesize, Jinja2, sphinxcontrib-applehelp, alabaster, sphinxcontrib-jsmath, Pygments, snowballstemmer, sphinxcontrib-serializinghtml, packaging, sphinx, sphinx-rtd-theme, smac Running setup.py install for ConfigSpaceNNI ... done The script pygmentize is installed in '/home/zhangyuge/.local/bin' which is not on PATH. Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location. Running setup.py install for snowballstemmer ... done The scripts sphinx-apidoc, sphinx-autogen, sphinx-build and sphinx-quickstart are installed in '/home/zhangyuge/.local/bin' which is not on PATH. Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location. Running setup.py install for smac ... done ``` Corresponding code: https://github.com/microsoft/nni/blob/958efabf066b975a5cdcde293c9de33ec1430cb2/tools/nni_cmd/command_utils.py#L49 We could consider removing this `--user`. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `tools/nni_cmd/command_utils.py` Content: ``` 1 from subprocess import call, check_output 2 import sys 3 import os 4 import signal 5 import psutil 6 from .common_utils import print_error, print_normal, print_warning 7 8 def check_output_command(file_path, head=None, tail=None): 9 '''call check_output command to read content from a file''' 10 if os.path.exists(file_path): 11 if sys.platform == 'win32': 12 cmds = ['powershell.exe', 'type', file_path] 13 if head: 14 cmds += ['|', 'select', '-first', str(head)] 15 elif tail: 16 cmds += ['|', 'select', '-last', str(tail)] 17 return check_output(cmds, shell=True).decode('utf-8') 18 else: 19 cmds = ['cat', file_path] 20 if head: 21 cmds = ['head', '-' + str(head), file_path] 22 elif tail: 23 cmds = ['tail', '-' + str(tail), file_path] 24 return check_output(cmds, shell=False).decode('utf-8') 25 else: 26 print_error('{0} does not exist!'.format(file_path)) 27 exit(1) 28 29 def kill_command(pid): 30 '''kill command''' 31 if sys.platform == 'win32': 32 process = psutil.Process(pid=pid) 33 process.send_signal(signal.CTRL_BREAK_EVENT) 34 else: 35 cmds = ['kill', str(pid)] 36 call(cmds) 37 38 def install_package_command(package_name): 39 '''install python package from pip''' 40 #TODO refactor python logic 41 if sys.platform == "win32": 42 cmds = 'python -m pip install --user {0}'.format(package_name) 43 else: 44 cmds = 'python3 -m pip install --user {0}'.format(package_name) 45 call(cmds, shell=True) 46 47 def install_requirements_command(requirements_path): 48 '''install requirements.txt''' 49 cmds = 'cd ' + requirements_path + ' && {0} -m pip install --user -r requirements.txt' 50 #TODO refactor python logic 51 if sys.platform == "win32": 52 cmds = cmds.format('python') 53 else: 54 cmds = cmds.format('python3') 55 call(cmds, shell=True) 56 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/tools/nni_cmd/command_utils.py b/tools/nni_cmd/command_utils.py --- a/tools/nni_cmd/command_utils.py +++ b/tools/nni_cmd/command_utils.py @@ -3,10 +3,11 @@ import os import signal import psutil -from .common_utils import print_error, print_normal, print_warning +from .common_utils import print_error, print_normal, print_warning + def check_output_command(file_path, head=None, tail=None): - '''call check_output command to read content from a file''' + """call check_output command to read content from a file""" if os.path.exists(file_path): if sys.platform == 'win32': cmds = ['powershell.exe', 'type', file_path] @@ -26,8 +27,9 @@ print_error('{0} does not exist!'.format(file_path)) exit(1) + def kill_command(pid): - '''kill command''' + """kill command""" if sys.platform == 'win32': process = psutil.Process(pid=pid) process.send_signal(signal.CTRL_BREAK_EVENT) @@ -35,21 +37,35 @@ cmds = ['kill', str(pid)] call(cmds) + def install_package_command(package_name): - '''install python package from pip''' - #TODO refactor python logic - if sys.platform == "win32": - cmds = 'python -m pip install --user {0}'.format(package_name) - else: - cmds = 'python3 -m pip install --user {0}'.format(package_name) - call(cmds, shell=True) + """ + Install python package from pip. + + Parameters + ---------- + package_name: str + The name of package to be installed. + """ + call(_get_pip_install() + [package_name], shell=False) + def install_requirements_command(requirements_path): - '''install requirements.txt''' - cmds = 'cd ' + requirements_path + ' && {0} -m pip install --user -r requirements.txt' - #TODO refactor python logic - if sys.platform == "win32": - cmds = cmds.format('python') - else: - cmds = cmds.format('python3') - call(cmds, shell=True) + """ + Install packages from `requirements.txt` in `requirements_path`. + + Parameters + ---------- + requirements_path: str + Path to the directory that contains `requirements.txt`. + """ + call(_get_pip_install() + ["-r", os.path.join(requirements_path, "requirements.txt")], shell=False) + + +def _get_pip_install(): + python = "python" if sys.platform == "win32" else "python3" + ret = [python, "-m", "pip", "install"] + if "CONDA_DEFAULT_ENV" not in os.environ and "VIRTUAL_ENV" not in os.environ and \ + (sys.platform != "win32" and os.getuid() != 0): # on unix and not running in root + ret.append("--user") # not in virtualenv or conda + return ret
{"golden_diff": "diff --git a/tools/nni_cmd/command_utils.py b/tools/nni_cmd/command_utils.py\n--- a/tools/nni_cmd/command_utils.py\n+++ b/tools/nni_cmd/command_utils.py\n@@ -3,10 +3,11 @@\n import os\n import signal\n import psutil\n-from .common_utils import print_error, print_normal, print_warning\n+from .common_utils import print_error, print_normal, print_warning\n+\n \n def check_output_command(file_path, head=None, tail=None):\n- '''call check_output command to read content from a file'''\n+ \"\"\"call check_output command to read content from a file\"\"\"\n if os.path.exists(file_path):\n if sys.platform == 'win32':\n cmds = ['powershell.exe', 'type', file_path]\n@@ -26,8 +27,9 @@\n print_error('{0} does not exist!'.format(file_path))\n exit(1)\n \n+\n def kill_command(pid):\n- '''kill command'''\n+ \"\"\"kill command\"\"\"\n if sys.platform == 'win32':\n process = psutil.Process(pid=pid)\n process.send_signal(signal.CTRL_BREAK_EVENT)\n@@ -35,21 +37,35 @@\n cmds = ['kill', str(pid)]\n call(cmds)\n \n+\n def install_package_command(package_name):\n- '''install python package from pip'''\n- #TODO refactor python logic\n- if sys.platform == \"win32\":\n- cmds = 'python -m pip install --user {0}'.format(package_name)\n- else:\n- cmds = 'python3 -m pip install --user {0}'.format(package_name)\n- call(cmds, shell=True)\n+ \"\"\"\n+ Install python package from pip.\n+\n+ Parameters\n+ ----------\n+ package_name: str\n+ The name of package to be installed.\n+ \"\"\"\n+ call(_get_pip_install() + [package_name], shell=False)\n+\n \n def install_requirements_command(requirements_path):\n- '''install requirements.txt'''\n- cmds = 'cd ' + requirements_path + ' && {0} -m pip install --user -r requirements.txt'\n- #TODO refactor python logic\n- if sys.platform == \"win32\":\n- cmds = cmds.format('python')\n- else:\n- cmds = cmds.format('python3')\n- call(cmds, shell=True)\n+ \"\"\"\n+ Install packages from `requirements.txt` in `requirements_path`.\n+\n+ Parameters\n+ ----------\n+ requirements_path: str\n+ Path to the directory that contains `requirements.txt`.\n+ \"\"\"\n+ call(_get_pip_install() + [\"-r\", os.path.join(requirements_path, \"requirements.txt\")], shell=False)\n+\n+\n+def _get_pip_install():\n+ python = \"python\" if sys.platform == \"win32\" else \"python3\"\n+ ret = [python, \"-m\", \"pip\", \"install\"]\n+ if \"CONDA_DEFAULT_ENV\" not in os.environ and \"VIRTUAL_ENV\" not in os.environ and \\\n+ (sys.platform != \"win32\" and os.getuid() != 0): # on unix and not running in root\n+ ret.append(\"--user\") # not in virtualenv or conda\n+ return ret\n", "issue": "nnictl package install not working for venv\nIt seems that `nnictl package install` makes the package going to user level, which makes it not accessible from a venv.\r\n\r\nWarning:\r\n\r\n```\r\nInstalling collected packages: ConfigSpaceNNI, docutils, pynisher, pyrfr, sphinxcontrib-htmlhelp, sphinxcontrib-qthelp, sphinxcontrib-devhelp, imagesize, Jinja2, sphinxcontrib-applehelp, alabaster, sphinxcontrib-jsmath, Pygments, snowballstemmer, sphinxcontrib-serializinghtml, packaging, sphinx, sphinx-rtd-theme, smac\r\n Running setup.py install for ConfigSpaceNNI ... done\r\n The script pygmentize is installed in '/home/zhangyuge/.local/bin' which is not on PATH.\r\n Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.\r\n Running setup.py install for snowballstemmer ... done\r\n The scripts sphinx-apidoc, sphinx-autogen, sphinx-build and sphinx-quickstart are installed in '/home/zhangyuge/.local/bin' which is not on PATH.\r\n Consider adding this directory to PATH or, if you prefer to suppress this warning, use --no-warn-script-location.\r\n Running setup.py install for smac ... done\r\n```\r\n\r\nCorresponding code:\r\n\r\nhttps://github.com/microsoft/nni/blob/958efabf066b975a5cdcde293c9de33ec1430cb2/tools/nni_cmd/command_utils.py#L49\r\n\r\nWe could consider removing this `--user`.\n", "before_files": [{"content": "from subprocess import call, check_output\nimport sys\nimport os\nimport signal\nimport psutil\nfrom .common_utils import print_error, print_normal, print_warning\n\ndef check_output_command(file_path, head=None, tail=None):\n '''call check_output command to read content from a file'''\n if os.path.exists(file_path):\n if sys.platform == 'win32':\n cmds = ['powershell.exe', 'type', file_path]\n if head:\n cmds += ['|', 'select', '-first', str(head)]\n elif tail:\n cmds += ['|', 'select', '-last', str(tail)]\n return check_output(cmds, shell=True).decode('utf-8')\n else:\n cmds = ['cat', file_path]\n if head:\n cmds = ['head', '-' + str(head), file_path]\n elif tail:\n cmds = ['tail', '-' + str(tail), file_path]\n return check_output(cmds, shell=False).decode('utf-8')\n else:\n print_error('{0} does not exist!'.format(file_path))\n exit(1)\n\ndef kill_command(pid):\n '''kill command'''\n if sys.platform == 'win32':\n process = psutil.Process(pid=pid)\n process.send_signal(signal.CTRL_BREAK_EVENT)\n else:\n cmds = ['kill', str(pid)]\n call(cmds)\n\ndef install_package_command(package_name):\n '''install python package from pip'''\n #TODO refactor python logic\n if sys.platform == \"win32\":\n cmds = 'python -m pip install --user {0}'.format(package_name)\n else:\n cmds = 'python3 -m pip install --user {0}'.format(package_name)\n call(cmds, shell=True)\n\ndef install_requirements_command(requirements_path):\n '''install requirements.txt'''\n cmds = 'cd ' + requirements_path + ' && {0} -m pip install --user -r requirements.txt'\n #TODO refactor python logic\n if sys.platform == \"win32\":\n cmds = cmds.format('python')\n else:\n cmds = cmds.format('python3')\n call(cmds, shell=True)\n", "path": "tools/nni_cmd/command_utils.py"}], "after_files": [{"content": "from subprocess import call, check_output\nimport sys\nimport os\nimport signal\nimport psutil\nfrom .common_utils import print_error, print_normal, print_warning\n\n\ndef check_output_command(file_path, head=None, tail=None):\n \"\"\"call check_output command to read content from a file\"\"\"\n if os.path.exists(file_path):\n if sys.platform == 'win32':\n cmds = ['powershell.exe', 'type', file_path]\n if head:\n cmds += ['|', 'select', '-first', str(head)]\n elif tail:\n cmds += ['|', 'select', '-last', str(tail)]\n return check_output(cmds, shell=True).decode('utf-8')\n else:\n cmds = ['cat', file_path]\n if head:\n cmds = ['head', '-' + str(head), file_path]\n elif tail:\n cmds = ['tail', '-' + str(tail), file_path]\n return check_output(cmds, shell=False).decode('utf-8')\n else:\n print_error('{0} does not exist!'.format(file_path))\n exit(1)\n\n\ndef kill_command(pid):\n \"\"\"kill command\"\"\"\n if sys.platform == 'win32':\n process = psutil.Process(pid=pid)\n process.send_signal(signal.CTRL_BREAK_EVENT)\n else:\n cmds = ['kill', str(pid)]\n call(cmds)\n\n\ndef install_package_command(package_name):\n \"\"\"\n Install python package from pip.\n\n Parameters\n ----------\n package_name: str\n The name of package to be installed.\n \"\"\"\n call(_get_pip_install() + [package_name], shell=False)\n\n\ndef install_requirements_command(requirements_path):\n \"\"\"\n Install packages from `requirements.txt` in `requirements_path`.\n\n Parameters\n ----------\n requirements_path: str\n Path to the directory that contains `requirements.txt`.\n \"\"\"\n call(_get_pip_install() + [\"-r\", os.path.join(requirements_path, \"requirements.txt\")], shell=False)\n\n\ndef _get_pip_install():\n python = \"python\" if sys.platform == \"win32\" else \"python3\"\n ret = [python, \"-m\", \"pip\", \"install\"]\n if \"CONDA_DEFAULT_ENV\" not in os.environ and \"VIRTUAL_ENV\" not in os.environ and \\\n (sys.platform != \"win32\" and os.getuid() != 0): # on unix and not running in root\n ret.append(\"--user\") # not in virtualenv or conda\n return ret\n", "path": "tools/nni_cmd/command_utils.py"}]}
1,190
719
gh_patches_debug_10219
rasdani/github-patches
git_diff
docker__docker-py-1736
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- exec_start does not return command output I encountered this problem in my work and managed to write an isolated exposing test case attached to this issue. This program works well on Linux Mint 18.3 and Docker version 17.05.0-ce, build 89658be but fails on Centos 7.3 with Docker version 1.13.1, build b303bf6/1.13.1 I investigated this issue deeper and wrote an equivalent program using docker REST API and requests_unixsocket library. The program + strace shows that the body in the reply for REST endpoint associated to exec_start if interpreted in the wrong way. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `docker/utils/socket.py` Content: ``` 1 import errno 2 import os 3 import select 4 import struct 5 6 import six 7 8 try: 9 from ..transport import NpipeSocket 10 except ImportError: 11 NpipeSocket = type(None) 12 13 14 class SocketError(Exception): 15 pass 16 17 18 def read(socket, n=4096): 19 """ 20 Reads at most n bytes from socket 21 """ 22 23 recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK) 24 25 # wait for data to become available 26 if not isinstance(socket, NpipeSocket): 27 select.select([socket], [], []) 28 29 try: 30 if hasattr(socket, 'recv'): 31 return socket.recv(n) 32 return os.read(socket.fileno(), n) 33 except EnvironmentError as e: 34 if e.errno not in recoverable_errors: 35 raise 36 37 38 def read_exactly(socket, n): 39 """ 40 Reads exactly n bytes from socket 41 Raises SocketError if there isn't enough data 42 """ 43 data = six.binary_type() 44 while len(data) < n: 45 next_data = read(socket, n - len(data)) 46 if not next_data: 47 raise SocketError("Unexpected EOF") 48 data += next_data 49 return data 50 51 52 def next_frame_size(socket): 53 """ 54 Returns the size of the next frame of data waiting to be read from socket, 55 according to the protocol defined here: 56 57 https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/attach-to-a-container 58 """ 59 try: 60 data = read_exactly(socket, 8) 61 except SocketError: 62 return 0 63 64 _, actual = struct.unpack('>BxxxL', data) 65 return actual 66 67 68 def frames_iter(socket): 69 """ 70 Returns a generator of frames read from socket 71 """ 72 while True: 73 n = next_frame_size(socket) 74 if n == 0: 75 break 76 while n > 0: 77 result = read(socket, n) 78 if result is None: 79 continue 80 data_length = len(result) 81 if data_length == 0: 82 # We have reached EOF 83 return 84 n -= data_length 85 yield result 86 87 88 def socket_raw_iter(socket): 89 """ 90 Returns a generator of data read from the socket. 91 This is used for non-multiplexed streams. 92 """ 93 while True: 94 result = read(socket) 95 if len(result) == 0: 96 # We have reached EOF 97 return 98 yield result 99 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/docker/utils/socket.py b/docker/utils/socket.py --- a/docker/utils/socket.py +++ b/docker/utils/socket.py @@ -59,7 +59,7 @@ try: data = read_exactly(socket, 8) except SocketError: - return 0 + return -1 _, actual = struct.unpack('>BxxxL', data) return actual @@ -71,7 +71,7 @@ """ while True: n = next_frame_size(socket) - if n == 0: + if n < 0: break while n > 0: result = read(socket, n)
{"golden_diff": "diff --git a/docker/utils/socket.py b/docker/utils/socket.py\n--- a/docker/utils/socket.py\n+++ b/docker/utils/socket.py\n@@ -59,7 +59,7 @@\n try:\n data = read_exactly(socket, 8)\n except SocketError:\n- return 0\n+ return -1\n \n _, actual = struct.unpack('>BxxxL', data)\n return actual\n@@ -71,7 +71,7 @@\n \"\"\"\n while True:\n n = next_frame_size(socket)\n- if n == 0:\n+ if n < 0:\n break\n while n > 0:\n result = read(socket, n)\n", "issue": "exec_start does not return command output\nI encountered this problem in my work and managed to write an isolated exposing test case attached to this issue.\r\n\r\nThis program works well on Linux Mint 18.3 and Docker version 17.05.0-ce, build 89658be\r\nbut fails on Centos 7.3 with Docker version 1.13.1, build b303bf6/1.13.1\r\n\r\nI investigated this issue deeper and wrote an equivalent program using docker REST API and requests_unixsocket library. The program + strace shows that the body in the reply for REST endpoint associated to exec_start if interpreted in the wrong way.\n", "before_files": [{"content": "import errno\nimport os\nimport select\nimport struct\n\nimport six\n\ntry:\n from ..transport import NpipeSocket\nexcept ImportError:\n NpipeSocket = type(None)\n\n\nclass SocketError(Exception):\n pass\n\n\ndef read(socket, n=4096):\n \"\"\"\n Reads at most n bytes from socket\n \"\"\"\n\n recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)\n\n # wait for data to become available\n if not isinstance(socket, NpipeSocket):\n select.select([socket], [], [])\n\n try:\n if hasattr(socket, 'recv'):\n return socket.recv(n)\n return os.read(socket.fileno(), n)\n except EnvironmentError as e:\n if e.errno not in recoverable_errors:\n raise\n\n\ndef read_exactly(socket, n):\n \"\"\"\n Reads exactly n bytes from socket\n Raises SocketError if there isn't enough data\n \"\"\"\n data = six.binary_type()\n while len(data) < n:\n next_data = read(socket, n - len(data))\n if not next_data:\n raise SocketError(\"Unexpected EOF\")\n data += next_data\n return data\n\n\ndef next_frame_size(socket):\n \"\"\"\n Returns the size of the next frame of data waiting to be read from socket,\n according to the protocol defined here:\n\n https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/attach-to-a-container\n \"\"\"\n try:\n data = read_exactly(socket, 8)\n except SocketError:\n return 0\n\n _, actual = struct.unpack('>BxxxL', data)\n return actual\n\n\ndef frames_iter(socket):\n \"\"\"\n Returns a generator of frames read from socket\n \"\"\"\n while True:\n n = next_frame_size(socket)\n if n == 0:\n break\n while n > 0:\n result = read(socket, n)\n if result is None:\n continue\n data_length = len(result)\n if data_length == 0:\n # We have reached EOF\n return\n n -= data_length\n yield result\n\n\ndef socket_raw_iter(socket):\n \"\"\"\n Returns a generator of data read from the socket.\n This is used for non-multiplexed streams.\n \"\"\"\n while True:\n result = read(socket)\n if len(result) == 0:\n # We have reached EOF\n return\n yield result\n", "path": "docker/utils/socket.py"}], "after_files": [{"content": "import errno\nimport os\nimport select\nimport struct\n\nimport six\n\ntry:\n from ..transport import NpipeSocket\nexcept ImportError:\n NpipeSocket = type(None)\n\n\nclass SocketError(Exception):\n pass\n\n\ndef read(socket, n=4096):\n \"\"\"\n Reads at most n bytes from socket\n \"\"\"\n\n recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)\n\n # wait for data to become available\n if not isinstance(socket, NpipeSocket):\n select.select([socket], [], [])\n\n try:\n if hasattr(socket, 'recv'):\n return socket.recv(n)\n return os.read(socket.fileno(), n)\n except EnvironmentError as e:\n if e.errno not in recoverable_errors:\n raise\n\n\ndef read_exactly(socket, n):\n \"\"\"\n Reads exactly n bytes from socket\n Raises SocketError if there isn't enough data\n \"\"\"\n data = six.binary_type()\n while len(data) < n:\n next_data = read(socket, n - len(data))\n if not next_data:\n raise SocketError(\"Unexpected EOF\")\n data += next_data\n return data\n\n\ndef next_frame_size(socket):\n \"\"\"\n Returns the size of the next frame of data waiting to be read from socket,\n according to the protocol defined here:\n\n https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/attach-to-a-container\n \"\"\"\n try:\n data = read_exactly(socket, 8)\n except SocketError:\n return -1\n\n _, actual = struct.unpack('>BxxxL', data)\n return actual\n\n\ndef frames_iter(socket):\n \"\"\"\n Returns a generator of frames read from socket\n \"\"\"\n while True:\n n = next_frame_size(socket)\n if n < 0:\n break\n while n > 0:\n result = read(socket, n)\n if result is None:\n continue\n data_length = len(result)\n if data_length == 0:\n # We have reached EOF\n return\n n -= data_length\n yield result\n\n\ndef socket_raw_iter(socket):\n \"\"\"\n Returns a generator of data read from the socket.\n This is used for non-multiplexed streams.\n \"\"\"\n while True:\n result = read(socket)\n if len(result) == 0:\n # We have reached EOF\n return\n yield result\n", "path": "docker/utils/socket.py"}]}
1,125
148
gh_patches_debug_36428
rasdani/github-patches
git_diff
googleapis__python-bigquery-258
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Improve guidance for streaming inserts Suggesting we update the insert_rows.py code to showcase how to explicitly define a schema to avoid excessive calls to get_table(). --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `samples/table_insert_rows.py` Content: ``` 1 # Copyright 2019 Google LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # https://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 16 def table_insert_rows(table_id): 17 18 # [START bigquery_table_insert_rows] 19 20 from google.cloud import bigquery 21 22 # Construct a BigQuery client object. 23 client = bigquery.Client() 24 25 # TODO(developer): Set table_id to the ID of the model to fetch. 26 # table_id = "your-project.your_dataset.your_table" 27 28 table = client.get_table(table_id) # Make an API request. 29 rows_to_insert = [(u"Phred Phlyntstone", 32), (u"Wylma Phlyntstone", 29)] 30 31 errors = client.insert_rows(table, rows_to_insert) # Make an API request. 32 if errors == []: 33 print("New rows have been added.") 34 # [END bigquery_table_insert_rows] 35 ``` Path: `samples/table_insert_rows_explicit_none_insert_ids.py` Content: ``` 1 # Copyright 2019 Google LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # https://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 16 def table_insert_rows_explicit_none_insert_ids(table_id): 17 18 # [START bigquery_table_insert_rows_explicit_none_insert_ids] 19 20 from google.cloud import bigquery 21 22 # Construct a BigQuery client object. 23 client = bigquery.Client() 24 25 # TODO(developer): Set table_id to the ID of the model to fetch. 26 # table_id = "your-project.your_dataset.your_table" 27 28 table = client.get_table(table_id) # Make an API request. 29 rows_to_insert = [(u"Phred Phlyntstone", 32), (u"Wylma Phlyntstone", 29)] 30 31 errors = client.insert_rows( 32 table, rows_to_insert, row_ids=[None] * len(rows_to_insert) 33 ) # Make an API request. 34 if errors == []: 35 print("New rows have been added.") 36 # [END bigquery_table_insert_rows_explicit_none_insert_ids] 37 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/samples/table_insert_rows.py b/samples/table_insert_rows.py --- a/samples/table_insert_rows.py +++ b/samples/table_insert_rows.py @@ -16,19 +16,22 @@ def table_insert_rows(table_id): # [START bigquery_table_insert_rows] - from google.cloud import bigquery # Construct a BigQuery client object. client = bigquery.Client() - # TODO(developer): Set table_id to the ID of the model to fetch. + # TODO(developer): Set table_id to the ID of table to append to. # table_id = "your-project.your_dataset.your_table" - table = client.get_table(table_id) # Make an API request. - rows_to_insert = [(u"Phred Phlyntstone", 32), (u"Wylma Phlyntstone", 29)] + rows_to_insert = [ + {u"full_name": u"Phred Phlyntstone", u"age": 32}, + {u"full_name": u"Wylma Phlyntstone", u"age": 29}, + ] - errors = client.insert_rows(table, rows_to_insert) # Make an API request. + errors = client.insert_rows_json(table_id, rows_to_insert) # Make an API request. if errors == []: print("New rows have been added.") + else: + print("Encountered errors while inserting rows: {}".format(errors)) # [END bigquery_table_insert_rows] diff --git a/samples/table_insert_rows_explicit_none_insert_ids.py b/samples/table_insert_rows_explicit_none_insert_ids.py --- a/samples/table_insert_rows_explicit_none_insert_ids.py +++ b/samples/table_insert_rows_explicit_none_insert_ids.py @@ -16,21 +16,24 @@ def table_insert_rows_explicit_none_insert_ids(table_id): # [START bigquery_table_insert_rows_explicit_none_insert_ids] - from google.cloud import bigquery # Construct a BigQuery client object. client = bigquery.Client() - # TODO(developer): Set table_id to the ID of the model to fetch. + # TODO(developer): Set table_id to the ID of table to append to. # table_id = "your-project.your_dataset.your_table" - table = client.get_table(table_id) # Make an API request. - rows_to_insert = [(u"Phred Phlyntstone", 32), (u"Wylma Phlyntstone", 29)] + rows_to_insert = [ + {u"full_name": u"Phred Phlyntstone", u"age": 32}, + {u"full_name": u"Wylma Phlyntstone", u"age": 29}, + ] - errors = client.insert_rows( - table, rows_to_insert, row_ids=[None] * len(rows_to_insert) + errors = client.insert_rows_json( + table_id, rows_to_insert, row_ids=[None] * len(rows_to_insert) ) # Make an API request. if errors == []: print("New rows have been added.") + else: + print("Encountered errors while inserting rows: {}".format(errors)) # [END bigquery_table_insert_rows_explicit_none_insert_ids]
{"golden_diff": "diff --git a/samples/table_insert_rows.py b/samples/table_insert_rows.py\n--- a/samples/table_insert_rows.py\n+++ b/samples/table_insert_rows.py\n@@ -16,19 +16,22 @@\n def table_insert_rows(table_id):\n \n # [START bigquery_table_insert_rows]\n-\n from google.cloud import bigquery\n \n # Construct a BigQuery client object.\n client = bigquery.Client()\n \n- # TODO(developer): Set table_id to the ID of the model to fetch.\n+ # TODO(developer): Set table_id to the ID of table to append to.\n # table_id = \"your-project.your_dataset.your_table\"\n \n- table = client.get_table(table_id) # Make an API request.\n- rows_to_insert = [(u\"Phred Phlyntstone\", 32), (u\"Wylma Phlyntstone\", 29)]\n+ rows_to_insert = [\n+ {u\"full_name\": u\"Phred Phlyntstone\", u\"age\": 32},\n+ {u\"full_name\": u\"Wylma Phlyntstone\", u\"age\": 29},\n+ ]\n \n- errors = client.insert_rows(table, rows_to_insert) # Make an API request.\n+ errors = client.insert_rows_json(table_id, rows_to_insert) # Make an API request.\n if errors == []:\n print(\"New rows have been added.\")\n+ else:\n+ print(\"Encountered errors while inserting rows: {}\".format(errors))\n # [END bigquery_table_insert_rows]\ndiff --git a/samples/table_insert_rows_explicit_none_insert_ids.py b/samples/table_insert_rows_explicit_none_insert_ids.py\n--- a/samples/table_insert_rows_explicit_none_insert_ids.py\n+++ b/samples/table_insert_rows_explicit_none_insert_ids.py\n@@ -16,21 +16,24 @@\n def table_insert_rows_explicit_none_insert_ids(table_id):\n \n # [START bigquery_table_insert_rows_explicit_none_insert_ids]\n-\n from google.cloud import bigquery\n \n # Construct a BigQuery client object.\n client = bigquery.Client()\n \n- # TODO(developer): Set table_id to the ID of the model to fetch.\n+ # TODO(developer): Set table_id to the ID of table to append to.\n # table_id = \"your-project.your_dataset.your_table\"\n \n- table = client.get_table(table_id) # Make an API request.\n- rows_to_insert = [(u\"Phred Phlyntstone\", 32), (u\"Wylma Phlyntstone\", 29)]\n+ rows_to_insert = [\n+ {u\"full_name\": u\"Phred Phlyntstone\", u\"age\": 32},\n+ {u\"full_name\": u\"Wylma Phlyntstone\", u\"age\": 29},\n+ ]\n \n- errors = client.insert_rows(\n- table, rows_to_insert, row_ids=[None] * len(rows_to_insert)\n+ errors = client.insert_rows_json(\n+ table_id, rows_to_insert, row_ids=[None] * len(rows_to_insert)\n ) # Make an API request.\n if errors == []:\n print(\"New rows have been added.\")\n+ else:\n+ print(\"Encountered errors while inserting rows: {}\".format(errors))\n # [END bigquery_table_insert_rows_explicit_none_insert_ids]\n", "issue": "Improve guidance for streaming inserts\nSuggesting we update the insert_rows.py code to showcase how to explicitly define a schema to avoid excessive calls to get_table().\n", "before_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ndef table_insert_rows(table_id):\n\n # [START bigquery_table_insert_rows]\n\n from google.cloud import bigquery\n\n # Construct a BigQuery client object.\n client = bigquery.Client()\n\n # TODO(developer): Set table_id to the ID of the model to fetch.\n # table_id = \"your-project.your_dataset.your_table\"\n\n table = client.get_table(table_id) # Make an API request.\n rows_to_insert = [(u\"Phred Phlyntstone\", 32), (u\"Wylma Phlyntstone\", 29)]\n\n errors = client.insert_rows(table, rows_to_insert) # Make an API request.\n if errors == []:\n print(\"New rows have been added.\")\n # [END bigquery_table_insert_rows]\n", "path": "samples/table_insert_rows.py"}, {"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ndef table_insert_rows_explicit_none_insert_ids(table_id):\n\n # [START bigquery_table_insert_rows_explicit_none_insert_ids]\n\n from google.cloud import bigquery\n\n # Construct a BigQuery client object.\n client = bigquery.Client()\n\n # TODO(developer): Set table_id to the ID of the model to fetch.\n # table_id = \"your-project.your_dataset.your_table\"\n\n table = client.get_table(table_id) # Make an API request.\n rows_to_insert = [(u\"Phred Phlyntstone\", 32), (u\"Wylma Phlyntstone\", 29)]\n\n errors = client.insert_rows(\n table, rows_to_insert, row_ids=[None] * len(rows_to_insert)\n ) # Make an API request.\n if errors == []:\n print(\"New rows have been added.\")\n # [END bigquery_table_insert_rows_explicit_none_insert_ids]\n", "path": "samples/table_insert_rows_explicit_none_insert_ids.py"}], "after_files": [{"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ndef table_insert_rows(table_id):\n\n # [START bigquery_table_insert_rows]\n from google.cloud import bigquery\n\n # Construct a BigQuery client object.\n client = bigquery.Client()\n\n # TODO(developer): Set table_id to the ID of table to append to.\n # table_id = \"your-project.your_dataset.your_table\"\n\n rows_to_insert = [\n {u\"full_name\": u\"Phred Phlyntstone\", u\"age\": 32},\n {u\"full_name\": u\"Wylma Phlyntstone\", u\"age\": 29},\n ]\n\n errors = client.insert_rows_json(table_id, rows_to_insert) # Make an API request.\n if errors == []:\n print(\"New rows have been added.\")\n else:\n print(\"Encountered errors while inserting rows: {}\".format(errors))\n # [END bigquery_table_insert_rows]\n", "path": "samples/table_insert_rows.py"}, {"content": "# Copyright 2019 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\ndef table_insert_rows_explicit_none_insert_ids(table_id):\n\n # [START bigquery_table_insert_rows_explicit_none_insert_ids]\n from google.cloud import bigquery\n\n # Construct a BigQuery client object.\n client = bigquery.Client()\n\n # TODO(developer): Set table_id to the ID of table to append to.\n # table_id = \"your-project.your_dataset.your_table\"\n\n rows_to_insert = [\n {u\"full_name\": u\"Phred Phlyntstone\", u\"age\": 32},\n {u\"full_name\": u\"Wylma Phlyntstone\", u\"age\": 29},\n ]\n\n errors = client.insert_rows_json(\n table_id, rows_to_insert, row_ids=[None] * len(rows_to_insert)\n ) # Make an API request.\n if errors == []:\n print(\"New rows have been added.\")\n else:\n print(\"Encountered errors while inserting rows: {}\".format(errors))\n # [END bigquery_table_insert_rows_explicit_none_insert_ids]\n", "path": "samples/table_insert_rows_explicit_none_insert_ids.py"}]}
1,078
750
gh_patches_debug_1717
rasdani/github-patches
git_diff
open-telemetry__opentelemetry-python-2016
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Update OTLP HTTP port As per spec change here: https://github.com/open-telemetry/opentelemetry-specification/pull/1839 The OTLP HTTP port should be 4318. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/http/trace_exporter/__init__.py` Content: ``` 1 # Copyright The OpenTelemetry Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import gzip 16 import logging 17 import zlib 18 from io import BytesIO 19 from os import environ 20 from typing import Dict, Optional 21 from time import sleep 22 23 import requests 24 from backoff import expo 25 26 from opentelemetry.sdk.environment_variables import ( 27 OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE, 28 OTEL_EXPORTER_OTLP_TRACES_COMPRESSION, 29 OTEL_EXPORTER_OTLP_TRACES_ENDPOINT, 30 OTEL_EXPORTER_OTLP_TRACES_HEADERS, 31 OTEL_EXPORTER_OTLP_TRACES_TIMEOUT, 32 OTEL_EXPORTER_OTLP_CERTIFICATE, 33 OTEL_EXPORTER_OTLP_COMPRESSION, 34 OTEL_EXPORTER_OTLP_ENDPOINT, 35 OTEL_EXPORTER_OTLP_HEADERS, 36 OTEL_EXPORTER_OTLP_TIMEOUT, 37 ) 38 from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult 39 from opentelemetry.exporter.otlp.proto.http import Compression 40 from opentelemetry.exporter.otlp.proto.http.trace_exporter.encoder import ( 41 _ProtobufEncoder, 42 ) 43 from opentelemetry.util.re import parse_headers 44 45 46 _logger = logging.getLogger(__name__) 47 48 49 DEFAULT_COMPRESSION = Compression.NoCompression 50 DEFAULT_ENDPOINT = "http://localhost:55681/v1/traces" 51 DEFAULT_TIMEOUT = 10 # in seconds 52 53 54 class OTLPSpanExporter(SpanExporter): 55 56 _MAX_RETRY_TIMEOUT = 64 57 58 def __init__( 59 self, 60 endpoint: Optional[str] = None, 61 certificate_file: Optional[str] = None, 62 headers: Optional[Dict[str, str]] = None, 63 timeout: Optional[int] = None, 64 compression: Optional[Compression] = None, 65 ): 66 self._endpoint = endpoint or environ.get( 67 OTEL_EXPORTER_OTLP_TRACES_ENDPOINT, 68 environ.get(OTEL_EXPORTER_OTLP_ENDPOINT, DEFAULT_ENDPOINT), 69 ) 70 self._certificate_file = certificate_file or environ.get( 71 OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE, 72 environ.get(OTEL_EXPORTER_OTLP_CERTIFICATE, True), 73 ) 74 headers_string = environ.get( 75 OTEL_EXPORTER_OTLP_TRACES_HEADERS, 76 environ.get(OTEL_EXPORTER_OTLP_HEADERS, ""), 77 ) 78 self._headers = headers or parse_headers(headers_string) 79 self._timeout = timeout or int( 80 environ.get( 81 OTEL_EXPORTER_OTLP_TRACES_TIMEOUT, 82 environ.get(OTEL_EXPORTER_OTLP_TIMEOUT, DEFAULT_TIMEOUT), 83 ) 84 ) 85 self._compression = compression or _compression_from_env() 86 self._session = requests.Session() 87 self._session.headers.update(self._headers) 88 self._session.headers.update( 89 {"Content-Type": _ProtobufEncoder._CONTENT_TYPE} 90 ) 91 if self._compression is not Compression.NoCompression: 92 self._session.headers.update( 93 {"Content-Encoding": self._compression.value} 94 ) 95 self._shutdown = False 96 97 def _export(self, serialized_data: str): 98 data = serialized_data 99 if self._compression == Compression.Gzip: 100 gzip_data = BytesIO() 101 with gzip.GzipFile(fileobj=gzip_data, mode="w") as gzip_stream: 102 gzip_stream.write(serialized_data) 103 data = gzip_data.getvalue() 104 elif self._compression == Compression.Deflate: 105 data = zlib.compress(bytes(serialized_data)) 106 107 return self._session.post( 108 url=self._endpoint, 109 data=data, 110 verify=self._certificate_file, 111 timeout=self._timeout, 112 ) 113 114 @staticmethod 115 def _retryable(resp: requests.Response) -> bool: 116 if resp.status_code == 408: 117 return True 118 if resp.status_code >= 500 and resp.status_code <= 599: 119 return True 120 return False 121 122 def export(self, spans) -> SpanExportResult: 123 # After the call to Shutdown subsequent calls to Export are 124 # not allowed and should return a Failure result. 125 if self._shutdown: 126 _logger.warning("Exporter already shutdown, ignoring batch") 127 return SpanExportResult.FAILURE 128 129 serialized_data = _ProtobufEncoder.serialize(spans) 130 131 for delay in expo(max_value=self._MAX_RETRY_TIMEOUT): 132 133 if delay == self._MAX_RETRY_TIMEOUT: 134 return SpanExportResult.FAILURE 135 136 resp = self._export(serialized_data) 137 # pylint: disable=no-else-return 138 if resp.status_code in (200, 202): 139 return SpanExportResult.SUCCESS 140 elif self._retryable(resp): 141 _logger.debug( 142 "Waiting %ss before retrying export of span", delay 143 ) 144 sleep(delay) 145 continue 146 else: 147 _logger.warning( 148 "Failed to export batch code: %s, reason: %s", 149 resp.status_code, 150 resp.text, 151 ) 152 return SpanExportResult.FAILURE 153 return SpanExportResult.FAILURE 154 155 def shutdown(self): 156 if self._shutdown: 157 _logger.warning("Exporter already shutdown, ignoring call") 158 return 159 self._session.close() 160 self._shutdown = True 161 162 163 def _compression_from_env() -> Compression: 164 compression = ( 165 environ.get( 166 OTEL_EXPORTER_OTLP_TRACES_COMPRESSION, 167 environ.get(OTEL_EXPORTER_OTLP_COMPRESSION, "none"), 168 ) 169 .lower() 170 .strip() 171 ) 172 return Compression(compression) 173 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/http/trace_exporter/__init__.py b/exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/http/trace_exporter/__init__.py --- a/exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/http/trace_exporter/__init__.py +++ b/exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/http/trace_exporter/__init__.py @@ -47,7 +47,7 @@ DEFAULT_COMPRESSION = Compression.NoCompression -DEFAULT_ENDPOINT = "http://localhost:55681/v1/traces" +DEFAULT_ENDPOINT = "http://localhost:4318/v1/traces" DEFAULT_TIMEOUT = 10 # in seconds
{"golden_diff": "diff --git a/exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/http/trace_exporter/__init__.py b/exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/http/trace_exporter/__init__.py\n--- a/exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/http/trace_exporter/__init__.py\n+++ b/exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/http/trace_exporter/__init__.py\n@@ -47,7 +47,7 @@\n \n \n DEFAULT_COMPRESSION = Compression.NoCompression\n-DEFAULT_ENDPOINT = \"http://localhost:55681/v1/traces\"\n+DEFAULT_ENDPOINT = \"http://localhost:4318/v1/traces\"\n DEFAULT_TIMEOUT = 10 # in seconds\n", "issue": "Update OTLP HTTP port\nAs per spec change here:\r\n\r\nhttps://github.com/open-telemetry/opentelemetry-specification/pull/1839\r\n\r\nThe OTLP HTTP port should be 4318.\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport gzip\nimport logging\nimport zlib\nfrom io import BytesIO\nfrom os import environ\nfrom typing import Dict, Optional\nfrom time import sleep\n\nimport requests\nfrom backoff import expo\n\nfrom opentelemetry.sdk.environment_variables import (\n OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE,\n OTEL_EXPORTER_OTLP_TRACES_COMPRESSION,\n OTEL_EXPORTER_OTLP_TRACES_ENDPOINT,\n OTEL_EXPORTER_OTLP_TRACES_HEADERS,\n OTEL_EXPORTER_OTLP_TRACES_TIMEOUT,\n OTEL_EXPORTER_OTLP_CERTIFICATE,\n OTEL_EXPORTER_OTLP_COMPRESSION,\n OTEL_EXPORTER_OTLP_ENDPOINT,\n OTEL_EXPORTER_OTLP_HEADERS,\n OTEL_EXPORTER_OTLP_TIMEOUT,\n)\nfrom opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult\nfrom opentelemetry.exporter.otlp.proto.http import Compression\nfrom opentelemetry.exporter.otlp.proto.http.trace_exporter.encoder import (\n _ProtobufEncoder,\n)\nfrom opentelemetry.util.re import parse_headers\n\n\n_logger = logging.getLogger(__name__)\n\n\nDEFAULT_COMPRESSION = Compression.NoCompression\nDEFAULT_ENDPOINT = \"http://localhost:55681/v1/traces\"\nDEFAULT_TIMEOUT = 10 # in seconds\n\n\nclass OTLPSpanExporter(SpanExporter):\n\n _MAX_RETRY_TIMEOUT = 64\n\n def __init__(\n self,\n endpoint: Optional[str] = None,\n certificate_file: Optional[str] = None,\n headers: Optional[Dict[str, str]] = None,\n timeout: Optional[int] = None,\n compression: Optional[Compression] = None,\n ):\n self._endpoint = endpoint or environ.get(\n OTEL_EXPORTER_OTLP_TRACES_ENDPOINT,\n environ.get(OTEL_EXPORTER_OTLP_ENDPOINT, DEFAULT_ENDPOINT),\n )\n self._certificate_file = certificate_file or environ.get(\n OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE,\n environ.get(OTEL_EXPORTER_OTLP_CERTIFICATE, True),\n )\n headers_string = environ.get(\n OTEL_EXPORTER_OTLP_TRACES_HEADERS,\n environ.get(OTEL_EXPORTER_OTLP_HEADERS, \"\"),\n )\n self._headers = headers or parse_headers(headers_string)\n self._timeout = timeout or int(\n environ.get(\n OTEL_EXPORTER_OTLP_TRACES_TIMEOUT,\n environ.get(OTEL_EXPORTER_OTLP_TIMEOUT, DEFAULT_TIMEOUT),\n )\n )\n self._compression = compression or _compression_from_env()\n self._session = requests.Session()\n self._session.headers.update(self._headers)\n self._session.headers.update(\n {\"Content-Type\": _ProtobufEncoder._CONTENT_TYPE}\n )\n if self._compression is not Compression.NoCompression:\n self._session.headers.update(\n {\"Content-Encoding\": self._compression.value}\n )\n self._shutdown = False\n\n def _export(self, serialized_data: str):\n data = serialized_data\n if self._compression == Compression.Gzip:\n gzip_data = BytesIO()\n with gzip.GzipFile(fileobj=gzip_data, mode=\"w\") as gzip_stream:\n gzip_stream.write(serialized_data)\n data = gzip_data.getvalue()\n elif self._compression == Compression.Deflate:\n data = zlib.compress(bytes(serialized_data))\n\n return self._session.post(\n url=self._endpoint,\n data=data,\n verify=self._certificate_file,\n timeout=self._timeout,\n )\n\n @staticmethod\n def _retryable(resp: requests.Response) -> bool:\n if resp.status_code == 408:\n return True\n if resp.status_code >= 500 and resp.status_code <= 599:\n return True\n return False\n\n def export(self, spans) -> SpanExportResult:\n # After the call to Shutdown subsequent calls to Export are\n # not allowed and should return a Failure result.\n if self._shutdown:\n _logger.warning(\"Exporter already shutdown, ignoring batch\")\n return SpanExportResult.FAILURE\n\n serialized_data = _ProtobufEncoder.serialize(spans)\n\n for delay in expo(max_value=self._MAX_RETRY_TIMEOUT):\n\n if delay == self._MAX_RETRY_TIMEOUT:\n return SpanExportResult.FAILURE\n\n resp = self._export(serialized_data)\n # pylint: disable=no-else-return\n if resp.status_code in (200, 202):\n return SpanExportResult.SUCCESS\n elif self._retryable(resp):\n _logger.debug(\n \"Waiting %ss before retrying export of span\", delay\n )\n sleep(delay)\n continue\n else:\n _logger.warning(\n \"Failed to export batch code: %s, reason: %s\",\n resp.status_code,\n resp.text,\n )\n return SpanExportResult.FAILURE\n return SpanExportResult.FAILURE\n\n def shutdown(self):\n if self._shutdown:\n _logger.warning(\"Exporter already shutdown, ignoring call\")\n return\n self._session.close()\n self._shutdown = True\n\n\ndef _compression_from_env() -> Compression:\n compression = (\n environ.get(\n OTEL_EXPORTER_OTLP_TRACES_COMPRESSION,\n environ.get(OTEL_EXPORTER_OTLP_COMPRESSION, \"none\"),\n )\n .lower()\n .strip()\n )\n return Compression(compression)\n", "path": "exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/http/trace_exporter/__init__.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport gzip\nimport logging\nimport zlib\nfrom io import BytesIO\nfrom os import environ\nfrom typing import Dict, Optional\nfrom time import sleep\n\nimport requests\nfrom backoff import expo\n\nfrom opentelemetry.sdk.environment_variables import (\n OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE,\n OTEL_EXPORTER_OTLP_TRACES_COMPRESSION,\n OTEL_EXPORTER_OTLP_TRACES_ENDPOINT,\n OTEL_EXPORTER_OTLP_TRACES_HEADERS,\n OTEL_EXPORTER_OTLP_TRACES_TIMEOUT,\n OTEL_EXPORTER_OTLP_CERTIFICATE,\n OTEL_EXPORTER_OTLP_COMPRESSION,\n OTEL_EXPORTER_OTLP_ENDPOINT,\n OTEL_EXPORTER_OTLP_HEADERS,\n OTEL_EXPORTER_OTLP_TIMEOUT,\n)\nfrom opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult\nfrom opentelemetry.exporter.otlp.proto.http import Compression\nfrom opentelemetry.exporter.otlp.proto.http.trace_exporter.encoder import (\n _ProtobufEncoder,\n)\nfrom opentelemetry.util.re import parse_headers\n\n\n_logger = logging.getLogger(__name__)\n\n\nDEFAULT_COMPRESSION = Compression.NoCompression\nDEFAULT_ENDPOINT = \"http://localhost:4318/v1/traces\"\nDEFAULT_TIMEOUT = 10 # in seconds\n\n\nclass OTLPSpanExporter(SpanExporter):\n\n _MAX_RETRY_TIMEOUT = 64\n\n def __init__(\n self,\n endpoint: Optional[str] = None,\n certificate_file: Optional[str] = None,\n headers: Optional[Dict[str, str]] = None,\n timeout: Optional[int] = None,\n compression: Optional[Compression] = None,\n ):\n self._endpoint = endpoint or environ.get(\n OTEL_EXPORTER_OTLP_TRACES_ENDPOINT,\n environ.get(OTEL_EXPORTER_OTLP_ENDPOINT, DEFAULT_ENDPOINT),\n )\n self._certificate_file = certificate_file or environ.get(\n OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE,\n environ.get(OTEL_EXPORTER_OTLP_CERTIFICATE, True),\n )\n headers_string = environ.get(\n OTEL_EXPORTER_OTLP_TRACES_HEADERS,\n environ.get(OTEL_EXPORTER_OTLP_HEADERS, \"\"),\n )\n self._headers = headers or parse_headers(headers_string)\n self._timeout = timeout or int(\n environ.get(\n OTEL_EXPORTER_OTLP_TRACES_TIMEOUT,\n environ.get(OTEL_EXPORTER_OTLP_TIMEOUT, DEFAULT_TIMEOUT),\n )\n )\n self._compression = compression or _compression_from_env()\n self._session = requests.Session()\n self._session.headers.update(self._headers)\n self._session.headers.update(\n {\"Content-Type\": _ProtobufEncoder._CONTENT_TYPE}\n )\n if self._compression is not Compression.NoCompression:\n self._session.headers.update(\n {\"Content-Encoding\": self._compression.value}\n )\n self._shutdown = False\n\n def _export(self, serialized_data: str):\n data = serialized_data\n if self._compression == Compression.Gzip:\n gzip_data = BytesIO()\n with gzip.GzipFile(fileobj=gzip_data, mode=\"w\") as gzip_stream:\n gzip_stream.write(serialized_data)\n data = gzip_data.getvalue()\n elif self._compression == Compression.Deflate:\n data = zlib.compress(bytes(serialized_data))\n\n return self._session.post(\n url=self._endpoint,\n data=data,\n verify=self._certificate_file,\n timeout=self._timeout,\n )\n\n @staticmethod\n def _retryable(resp: requests.Response) -> bool:\n if resp.status_code == 408:\n return True\n if resp.status_code >= 500 and resp.status_code <= 599:\n return True\n return False\n\n def export(self, spans) -> SpanExportResult:\n # After the call to Shutdown subsequent calls to Export are\n # not allowed and should return a Failure result.\n if self._shutdown:\n _logger.warning(\"Exporter already shutdown, ignoring batch\")\n return SpanExportResult.FAILURE\n\n serialized_data = _ProtobufEncoder.serialize(spans)\n\n for delay in expo(max_value=self._MAX_RETRY_TIMEOUT):\n\n if delay == self._MAX_RETRY_TIMEOUT:\n return SpanExportResult.FAILURE\n\n resp = self._export(serialized_data)\n # pylint: disable=no-else-return\n if resp.status_code in (200, 202):\n return SpanExportResult.SUCCESS\n elif self._retryable(resp):\n _logger.debug(\n \"Waiting %ss before retrying export of span\", delay\n )\n sleep(delay)\n continue\n else:\n _logger.warning(\n \"Failed to export batch code: %s, reason: %s\",\n resp.status_code,\n resp.text,\n )\n return SpanExportResult.FAILURE\n return SpanExportResult.FAILURE\n\n def shutdown(self):\n if self._shutdown:\n _logger.warning(\"Exporter already shutdown, ignoring call\")\n return\n self._session.close()\n self._shutdown = True\n\n\ndef _compression_from_env() -> Compression:\n compression = (\n environ.get(\n OTEL_EXPORTER_OTLP_TRACES_COMPRESSION,\n environ.get(OTEL_EXPORTER_OTLP_COMPRESSION, \"none\"),\n )\n .lower()\n .strip()\n )\n return Compression(compression)\n", "path": "exporter/opentelemetry-exporter-otlp-proto-http/src/opentelemetry/exporter/otlp/proto/http/trace_exporter/__init__.py"}]}
2,008
209
gh_patches_debug_31931
rasdani/github-patches
git_diff
Azure__azure-cli-extensions-1181
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- app-insights az cli extension seems not working properly in AzureChinaCloud (China East 2) - If the issue is to do with Azure CLI 2.0 in-particular, create an issue here at [Azure/azure-cli](https://github.com/Azure/azure-cli/issues) ### Extension name (the extension in question) application-insights ### Description of issue (in as much detail as possible) I have properly logged in with a certain subscription in AzureChinaCloud (China East2) ``` az account show { "environmentName": "AzureChinaCloud", "id": "5b5d5f37-8aca-49f8-8682-7ed6d2f00424", "isDefault": true, "name": "Project Vienna PROD - ChinaEast2", "state": "Enabled", "tenantId": "a55a4d5b-9241-49b1-b4ff-befa8db00269", "user": { "name": "[email protected]", "type": "user" } } ``` However run any az monitor app-insights command always failed with error "The subscription '5b5d5f37-8aca-49f8-8682-7ed6d2f00424' could not be found" for example, api-key show ``` az monitor app-insights api-key show --app model-monitoring-chinaeast2 --resource-group model-mgmt-chinaeast2 --api-key MMSModelMonitoringApiKey The subscription '5b5d5f37-8aca-49f8-8682-7ed6d2f00424' could not be found. ``` I doubted application-insights is not working in AzureChinaCloud, any insight of it? Appreciate --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/application-insights/setup.py` Content: ``` 1 #!/usr/bin/env python 2 3 # -------------------------------------------------------------------------------------------- 4 # Copyright (c) Microsoft Corporation. All rights reserved. 5 # Licensed under the MIT License. See License.txt in the project root for license information. 6 # -------------------------------------------------------------------------------------------- 7 8 from codecs import open 9 from setuptools import setup, find_packages 10 11 VERSION = "0.1.1" 12 13 CLASSIFIERS = [ 14 'Development Status :: 4 - Beta', 15 'Intended Audience :: Developers', 16 'Intended Audience :: System Administrators', 17 'Programming Language :: Python', 18 'Programming Language :: Python :: 2', 19 'Programming Language :: Python :: 2.7', 20 'Programming Language :: Python :: 3', 21 'Programming Language :: Python :: 3.4', 22 'Programming Language :: Python :: 3.5', 23 'Programming Language :: Python :: 3.6', 24 'License :: OSI Approved :: MIT License', 25 ] 26 27 DEPENDENCIES = [] 28 29 with open('README.rst', 'r', encoding='utf-8') as f: 30 README = f.read() 31 with open('HISTORY.rst', 'r', encoding='utf-8') as f: 32 HISTORY = f.read() 33 34 setup( 35 name='application-insights', 36 version=VERSION, 37 description='Support for managing Application Insights components and querying metrics, events, and logs from such components.', 38 long_description=README + '\n\n' + HISTORY, 39 license='MIT', 40 author='Ace Eldeib', 41 author_email='[email protected]', 42 url='https://github.com/Azure/azure-cli-extensions/tree/master/src/application-insights', 43 classifiers=CLASSIFIERS, 44 packages=find_packages(exclude=["tests"]), 45 package_data={'azext_applicationinsights': ['azext_metadata.json']}, 46 install_requires=DEPENDENCIES 47 ) 48 ``` Path: `src/application-insights/azext_applicationinsights/_client_factory.py` Content: ``` 1 # -------------------------------------------------------------------------------------------- 2 # Copyright (c) Microsoft Corporation. All rights reserved. 3 # Licensed under the MIT License. See License.txt in the project root for license information. 4 # -------------------------------------------------------------------------------------------- 5 6 7 def applicationinsights_data_plane_client(cli_ctx, _, subscription=None): 8 """Initialize Log Analytics data client for use with CLI.""" 9 from .vendored_sdks.applicationinsights import ApplicationInsightsDataClient 10 from azure.cli.core._profile import Profile 11 profile = Profile(cli_ctx=cli_ctx) 12 cred, _, _ = profile.get_login_credentials( 13 resource="https://api.applicationinsights.io", 14 subscription_id=subscription 15 ) 16 return ApplicationInsightsDataClient(cred) 17 18 19 def applicationinsights_mgmt_plane_client(cli_ctx, _, subscription=None): 20 """Initialize Log Analytics mgmt client for use with CLI.""" 21 from .vendored_sdks.mgmt_applicationinsights import ApplicationInsightsManagementClient 22 from azure.cli.core._profile import Profile 23 profile = Profile(cli_ctx=cli_ctx) 24 # Use subscription from resource_id where possible, otherwise use login. 25 if subscription: 26 cred, _, _ = profile.get_login_credentials(subscription_id=subscription) 27 return ApplicationInsightsManagementClient( 28 cred, 29 subscription 30 ) 31 cred, sub_id, _ = profile.get_login_credentials() 32 return ApplicationInsightsManagementClient( 33 cred, 34 sub_id 35 ) 36 37 38 def cf_query(cli_ctx, _, subscription=None): 39 return applicationinsights_data_plane_client(cli_ctx, _, subscription=subscription).query 40 41 42 def cf_metrics(cli_ctx, _, subscription=None): 43 return applicationinsights_data_plane_client(cli_ctx, _, subscription=subscription).metrics 44 45 46 def cf_events(cli_ctx, _, subscription=None): 47 return applicationinsights_data_plane_client(cli_ctx, _, subscription=subscription).events 48 49 50 def cf_components(cli_ctx, _, subscription=None): 51 return applicationinsights_mgmt_plane_client(cli_ctx, _, subscription=subscription).components 52 53 54 def cf_api_key(cli_ctx, _, subscription=None): 55 return applicationinsights_mgmt_plane_client(cli_ctx, _, subscription=subscription).api_keys 56 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/application-insights/azext_applicationinsights/_client_factory.py b/src/application-insights/azext_applicationinsights/_client_factory.py --- a/src/application-insights/azext_applicationinsights/_client_factory.py +++ b/src/application-insights/azext_applicationinsights/_client_factory.py @@ -10,7 +10,7 @@ from azure.cli.core._profile import Profile profile = Profile(cli_ctx=cli_ctx) cred, _, _ = profile.get_login_credentials( - resource="https://api.applicationinsights.io", + resource=cli_ctx.cloud.endpoints.app_insights_resource_id, subscription_id=subscription ) return ApplicationInsightsDataClient(cred) @@ -19,20 +19,8 @@ def applicationinsights_mgmt_plane_client(cli_ctx, _, subscription=None): """Initialize Log Analytics mgmt client for use with CLI.""" from .vendored_sdks.mgmt_applicationinsights import ApplicationInsightsManagementClient - from azure.cli.core._profile import Profile - profile = Profile(cli_ctx=cli_ctx) - # Use subscription from resource_id where possible, otherwise use login. - if subscription: - cred, _, _ = profile.get_login_credentials(subscription_id=subscription) - return ApplicationInsightsManagementClient( - cred, - subscription - ) - cred, sub_id, _ = profile.get_login_credentials() - return ApplicationInsightsManagementClient( - cred, - sub_id - ) + from azure.cli.core.commands.client_factory import get_mgmt_service_client + return get_mgmt_service_client(cli_ctx, ApplicationInsightsManagementClient, subscription_id=subscription) def cf_query(cli_ctx, _, subscription=None): diff --git a/src/application-insights/setup.py b/src/application-insights/setup.py --- a/src/application-insights/setup.py +++ b/src/application-insights/setup.py @@ -8,7 +8,7 @@ from codecs import open from setuptools import setup, find_packages -VERSION = "0.1.1" +VERSION = "0.1.2" CLASSIFIERS = [ 'Development Status :: 4 - Beta',
{"golden_diff": "diff --git a/src/application-insights/azext_applicationinsights/_client_factory.py b/src/application-insights/azext_applicationinsights/_client_factory.py\n--- a/src/application-insights/azext_applicationinsights/_client_factory.py\n+++ b/src/application-insights/azext_applicationinsights/_client_factory.py\n@@ -10,7 +10,7 @@\n from azure.cli.core._profile import Profile\n profile = Profile(cli_ctx=cli_ctx)\n cred, _, _ = profile.get_login_credentials(\n- resource=\"https://api.applicationinsights.io\",\n+ resource=cli_ctx.cloud.endpoints.app_insights_resource_id,\n subscription_id=subscription\n )\n return ApplicationInsightsDataClient(cred)\n@@ -19,20 +19,8 @@\n def applicationinsights_mgmt_plane_client(cli_ctx, _, subscription=None):\n \"\"\"Initialize Log Analytics mgmt client for use with CLI.\"\"\"\n from .vendored_sdks.mgmt_applicationinsights import ApplicationInsightsManagementClient\n- from azure.cli.core._profile import Profile\n- profile = Profile(cli_ctx=cli_ctx)\n- # Use subscription from resource_id where possible, otherwise use login.\n- if subscription:\n- cred, _, _ = profile.get_login_credentials(subscription_id=subscription)\n- return ApplicationInsightsManagementClient(\n- cred,\n- subscription\n- )\n- cred, sub_id, _ = profile.get_login_credentials()\n- return ApplicationInsightsManagementClient(\n- cred,\n- sub_id\n- )\n+ from azure.cli.core.commands.client_factory import get_mgmt_service_client\n+ return get_mgmt_service_client(cli_ctx, ApplicationInsightsManagementClient, subscription_id=subscription)\n \n \n def cf_query(cli_ctx, _, subscription=None):\ndiff --git a/src/application-insights/setup.py b/src/application-insights/setup.py\n--- a/src/application-insights/setup.py\n+++ b/src/application-insights/setup.py\n@@ -8,7 +8,7 @@\n from codecs import open\n from setuptools import setup, find_packages\n \n-VERSION = \"0.1.1\"\n+VERSION = \"0.1.2\"\n \n CLASSIFIERS = [\n 'Development Status :: 4 - Beta',\n", "issue": "app-insights az cli extension seems not working properly in AzureChinaCloud (China East 2)\n- If the issue is to do with Azure CLI 2.0 in-particular, create an issue here at [Azure/azure-cli](https://github.com/Azure/azure-cli/issues)\r\n\r\n### Extension name (the extension in question)\r\napplication-insights\r\n\r\n### Description of issue (in as much detail as possible)\r\nI have properly logged in with a certain subscription in AzureChinaCloud (China East2)\r\n```\r\naz account show\r\n{\r\n \"environmentName\": \"AzureChinaCloud\",\r\n \"id\": \"5b5d5f37-8aca-49f8-8682-7ed6d2f00424\",\r\n \"isDefault\": true,\r\n \"name\": \"Project Vienna PROD - ChinaEast2\",\r\n \"state\": \"Enabled\",\r\n \"tenantId\": \"a55a4d5b-9241-49b1-b4ff-befa8db00269\",\r\n \"user\": {\r\n \"name\": \"[email protected]\",\r\n \"type\": \"user\"\r\n }\r\n}\r\n```\r\nHowever run any az monitor app-insights command always failed with error \"The subscription '5b5d5f37-8aca-49f8-8682-7ed6d2f00424' could not be found\" \r\n\r\nfor example, api-key show\r\n```\r\naz monitor app-insights api-key show --app model-monitoring-chinaeast2 --resource-group model-mgmt-chinaeast2 --api-key MMSModelMonitoringApiKey\r\nThe subscription '5b5d5f37-8aca-49f8-8682-7ed6d2f00424' could not be found.\r\n```\r\n\r\nI doubted application-insights is not working in AzureChinaCloud, any insight of it? Appreciate\n", "before_files": [{"content": "#!/usr/bin/env python\n\n# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nfrom codecs import open\nfrom setuptools import setup, find_packages\n\nVERSION = \"0.1.1\"\n\nCLASSIFIERS = [\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'License :: OSI Approved :: MIT License',\n]\n\nDEPENDENCIES = []\n\nwith open('README.rst', 'r', encoding='utf-8') as f:\n README = f.read()\nwith open('HISTORY.rst', 'r', encoding='utf-8') as f:\n HISTORY = f.read()\n\nsetup(\n name='application-insights',\n version=VERSION,\n description='Support for managing Application Insights components and querying metrics, events, and logs from such components.',\n long_description=README + '\\n\\n' + HISTORY,\n license='MIT',\n author='Ace Eldeib',\n author_email='[email protected]',\n url='https://github.com/Azure/azure-cli-extensions/tree/master/src/application-insights',\n classifiers=CLASSIFIERS,\n packages=find_packages(exclude=[\"tests\"]),\n package_data={'azext_applicationinsights': ['azext_metadata.json']},\n install_requires=DEPENDENCIES\n)\n", "path": "src/application-insights/setup.py"}, {"content": "# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\n\ndef applicationinsights_data_plane_client(cli_ctx, _, subscription=None):\n \"\"\"Initialize Log Analytics data client for use with CLI.\"\"\"\n from .vendored_sdks.applicationinsights import ApplicationInsightsDataClient\n from azure.cli.core._profile import Profile\n profile = Profile(cli_ctx=cli_ctx)\n cred, _, _ = profile.get_login_credentials(\n resource=\"https://api.applicationinsights.io\",\n subscription_id=subscription\n )\n return ApplicationInsightsDataClient(cred)\n\n\ndef applicationinsights_mgmt_plane_client(cli_ctx, _, subscription=None):\n \"\"\"Initialize Log Analytics mgmt client for use with CLI.\"\"\"\n from .vendored_sdks.mgmt_applicationinsights import ApplicationInsightsManagementClient\n from azure.cli.core._profile import Profile\n profile = Profile(cli_ctx=cli_ctx)\n # Use subscription from resource_id where possible, otherwise use login.\n if subscription:\n cred, _, _ = profile.get_login_credentials(subscription_id=subscription)\n return ApplicationInsightsManagementClient(\n cred,\n subscription\n )\n cred, sub_id, _ = profile.get_login_credentials()\n return ApplicationInsightsManagementClient(\n cred,\n sub_id\n )\n\n\ndef cf_query(cli_ctx, _, subscription=None):\n return applicationinsights_data_plane_client(cli_ctx, _, subscription=subscription).query\n\n\ndef cf_metrics(cli_ctx, _, subscription=None):\n return applicationinsights_data_plane_client(cli_ctx, _, subscription=subscription).metrics\n\n\ndef cf_events(cli_ctx, _, subscription=None):\n return applicationinsights_data_plane_client(cli_ctx, _, subscription=subscription).events\n\n\ndef cf_components(cli_ctx, _, subscription=None):\n return applicationinsights_mgmt_plane_client(cli_ctx, _, subscription=subscription).components\n\n\ndef cf_api_key(cli_ctx, _, subscription=None):\n return applicationinsights_mgmt_plane_client(cli_ctx, _, subscription=subscription).api_keys\n", "path": "src/application-insights/azext_applicationinsights/_client_factory.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\nfrom codecs import open\nfrom setuptools import setup, find_packages\n\nVERSION = \"0.1.2\"\n\nCLASSIFIERS = [\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'License :: OSI Approved :: MIT License',\n]\n\nDEPENDENCIES = []\n\nwith open('README.rst', 'r', encoding='utf-8') as f:\n README = f.read()\nwith open('HISTORY.rst', 'r', encoding='utf-8') as f:\n HISTORY = f.read()\n\nsetup(\n name='application-insights',\n version=VERSION,\n description='Support for managing Application Insights components and querying metrics, events, and logs from such components.',\n long_description=README + '\\n\\n' + HISTORY,\n license='MIT',\n author='Ace Eldeib',\n author_email='[email protected]',\n url='https://github.com/Azure/azure-cli-extensions/tree/master/src/application-insights',\n classifiers=CLASSIFIERS,\n packages=find_packages(exclude=[\"tests\"]),\n package_data={'azext_applicationinsights': ['azext_metadata.json']},\n install_requires=DEPENDENCIES\n)\n", "path": "src/application-insights/setup.py"}, {"content": "# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\n\ndef applicationinsights_data_plane_client(cli_ctx, _, subscription=None):\n \"\"\"Initialize Log Analytics data client for use with CLI.\"\"\"\n from .vendored_sdks.applicationinsights import ApplicationInsightsDataClient\n from azure.cli.core._profile import Profile\n profile = Profile(cli_ctx=cli_ctx)\n cred, _, _ = profile.get_login_credentials(\n resource=cli_ctx.cloud.endpoints.app_insights_resource_id,\n subscription_id=subscription\n )\n return ApplicationInsightsDataClient(cred)\n\n\ndef applicationinsights_mgmt_plane_client(cli_ctx, _, subscription=None):\n \"\"\"Initialize Log Analytics mgmt client for use with CLI.\"\"\"\n from .vendored_sdks.mgmt_applicationinsights import ApplicationInsightsManagementClient\n from azure.cli.core.commands.client_factory import get_mgmt_service_client\n return get_mgmt_service_client(cli_ctx, ApplicationInsightsManagementClient, subscription_id=subscription)\n\n\ndef cf_query(cli_ctx, _, subscription=None):\n return applicationinsights_data_plane_client(cli_ctx, _, subscription=subscription).query\n\n\ndef cf_metrics(cli_ctx, _, subscription=None):\n return applicationinsights_data_plane_client(cli_ctx, _, subscription=subscription).metrics\n\n\ndef cf_events(cli_ctx, _, subscription=None):\n return applicationinsights_data_plane_client(cli_ctx, _, subscription=subscription).events\n\n\ndef cf_components(cli_ctx, _, subscription=None):\n return applicationinsights_mgmt_plane_client(cli_ctx, _, subscription=subscription).components\n\n\ndef cf_api_key(cli_ctx, _, subscription=None):\n return applicationinsights_mgmt_plane_client(cli_ctx, _, subscription=subscription).api_keys\n", "path": "src/application-insights/azext_applicationinsights/_client_factory.py"}]}
1,726
472
gh_patches_debug_64261
rasdani/github-patches
git_diff
pyjanitor-devs__pyjanitor-574
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [INF] Allow `import_message()` to be Python distribution flexible # Brief Description <!-- Please provide a brief description of what you'd like to propose. --> Currently, if a user attempts to user a feature of an optional external package (`rdkit`, `biopython`, `unyt`, `pyspark`) which is not installed, the user receives an error that directs them on how to install it. The error message is displayed by `import_message()` which passes instructions on how to install it. Ex: ``` To use the janitor submodule spark, you need to install pyspark. To do so, use the following command: conda install -c conda-forge pyspark ``` With the exception of `rdkit`, I think all of these packages are `pip` installable. It would be nice if this message could decide whether to provide `conda` vs `pip` instructions to the user. Or tell them that the package can only be installed with `conda`. This is how the function is currently called: ```python import_message(submodule="spark", package="pyspark", installation="conda install -c conda-forge pyspark") ``` Not all `conda` installs will use the same channel. One option is to provide both `conda` and `pip` instructions as arguments in the call, and it figures out which to send to the user. If either are `None`, then it is understood to be `pip` or `conda` only. # Example API One verbose option would be to extend what currently exists: ```python import_message(submodule="spark", package="pyspark", conda_installation="conda install -c conda-forge pyspark", pip_installation="pip install pyspark") ``` A more succinct version could be: ```python import_message(submodule="spark", package="pyspark", conda_channel="conda-forge", pip_install=True) ``` which would use the provided `package` argument, and `conda_channel` could be `None` if it doesn't exist on `conda`. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `janitor/spark/functions.py` Content: ``` 1 """ 2 General purpose data cleaning functions for pyspark. 3 """ 4 5 import re 6 7 from .. import functions as janitor_func 8 from .. import utils as janitor_utils 9 from . import backend 10 11 try: 12 from pyspark.sql import DataFrame 13 except ImportError: 14 import_message( 15 submodule="spark", 16 package="pyspark", 17 conda_channel="conda-forge", 18 pip_install=True, 19 ) 20 21 22 @backend.register_dataframe_method 23 def clean_names( 24 df: DataFrame, 25 case_type: str = "lower", 26 remove_special: bool = False, 27 strip_underscores: str = None, 28 ) -> DataFrame: 29 """ 30 Clean column names for pyspark dataframe. 31 32 Takes all column names, converts them to lowercase, then replaces all 33 spaces with underscores. 34 35 This method does not mutate the original DataFrame. 36 37 Functional usage example: 38 39 .. code-block:: python 40 41 df = clean_names(df) 42 43 Method chaining example: 44 45 .. code-block:: python 46 47 from pyspark.sql import DataFrame 48 import janitor.spark 49 df = DataFrame(...).clean_names() 50 51 :Example of transformation: 52 53 .. code-block:: python 54 55 Columns before: First Name, Last Name, Employee Status, Subject 56 Columns after: first_name, last_name, employee_status, subject 57 58 :param df: Spark DataFrame object. 59 :param strip_underscores: (optional) Removes the outer underscores from all 60 column names. Default None keeps outer underscores. Values can be 61 either 'left', 'right' or 'both' or the respective shorthand 'l', 'r' 62 and True. 63 :param case_type: (optional) Whether to make columns lower or uppercase. 64 Current case may be preserved with 'preserve', 65 while snake case conversion (from CamelCase or camelCase only) 66 can be turned on using "snake". 67 Default 'lower' makes all characters lowercase. 68 :param remove_special: (optional) Remove special characters from columns. 69 Only letters, numbers and underscores are preserved. 70 :returns: A Spark DataFrame. 71 """ 72 73 cols = df.columns 74 75 cols = [janitor_func._change_case(col, case_type) for col in cols] 76 77 cols = [janitor_func._normalize_1(col) for col in cols] 78 79 if remove_special: 80 cols = [janitor_func._remove_special(col) for col in cols] 81 82 cols = [re.sub("_+", "_", col) for col in cols] 83 84 cols = [ 85 janitor_utils._strip_underscores_func(col, strip_underscores) 86 for col in cols 87 ] 88 89 cols = [ 90 f"`{col}` AS `{new_col}`" for col, new_col in zip(df.columns, cols) 91 ] 92 93 return df.selectExpr(*cols) 94 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/janitor/spark/functions.py b/janitor/spark/functions.py --- a/janitor/spark/functions.py +++ b/janitor/spark/functions.py @@ -11,7 +11,7 @@ try: from pyspark.sql import DataFrame except ImportError: - import_message( + janitor_utils.import_message( submodule="spark", package="pyspark", conda_channel="conda-forge",
{"golden_diff": "diff --git a/janitor/spark/functions.py b/janitor/spark/functions.py\n--- a/janitor/spark/functions.py\n+++ b/janitor/spark/functions.py\n@@ -11,7 +11,7 @@\n try:\n from pyspark.sql import DataFrame\n except ImportError:\n- import_message(\n+ janitor_utils.import_message(\n submodule=\"spark\",\n package=\"pyspark\",\n conda_channel=\"conda-forge\",\n", "issue": "[INF] Allow `import_message()` to be Python distribution flexible\n# Brief Description\r\n\r\n<!-- Please provide a brief description of what you'd like to propose. -->\r\n\r\nCurrently, if a user attempts to user a feature of an optional external package (`rdkit`, `biopython`, `unyt`, `pyspark`) which is not installed, the user receives an error that directs them on how to install it. The error message is displayed by `import_message()` which passes instructions on how to install it. Ex:\r\n```\r\nTo use the janitor submodule spark, you need to install pyspark.\r\nTo do so, use the following command:\r\n conda install -c conda-forge pyspark\r\n```\r\nWith the exception of `rdkit`, I think all of these packages are `pip` installable. It would be nice if this message could decide whether to provide `conda` vs `pip` instructions to the user. Or tell them that the package can only be installed with `conda`. \r\n\r\nThis is how the function is currently called:\r\n```python\r\nimport_message(submodule=\"spark\", package=\"pyspark\", \r\n installation=\"conda install -c conda-forge pyspark\")\r\n```\r\n\r\nNot all `conda` installs will use the same channel. One option is to provide both `conda` and `pip` instructions as arguments in the call, and it figures out which to send to the user. If either are `None`, then it is understood to be `pip` or `conda` only.\r\n\r\n# Example API\r\n\r\nOne verbose option would be to extend what currently exists:\r\n```python\r\nimport_message(submodule=\"spark\", package=\"pyspark\", \r\n conda_installation=\"conda install -c conda-forge pyspark\", \r\n pip_installation=\"pip install pyspark\")\r\n```\r\n\r\nA more succinct version could be:\r\n```python\r\nimport_message(submodule=\"spark\", package=\"pyspark\", \r\n conda_channel=\"conda-forge\", pip_install=True)\r\n```\r\nwhich would use the provided `package` argument, and `conda_channel` could be `None` if it doesn't exist on `conda`.\n", "before_files": [{"content": "\"\"\"\nGeneral purpose data cleaning functions for pyspark.\n\"\"\"\n\nimport re\n\nfrom .. import functions as janitor_func\nfrom .. import utils as janitor_utils\nfrom . import backend\n\ntry:\n from pyspark.sql import DataFrame\nexcept ImportError:\n import_message(\n submodule=\"spark\",\n package=\"pyspark\",\n conda_channel=\"conda-forge\",\n pip_install=True,\n )\n\n\[email protected]_dataframe_method\ndef clean_names(\n df: DataFrame,\n case_type: str = \"lower\",\n remove_special: bool = False,\n strip_underscores: str = None,\n) -> DataFrame:\n \"\"\"\n Clean column names for pyspark dataframe.\n\n Takes all column names, converts them to lowercase, then replaces all\n spaces with underscores.\n\n This method does not mutate the original DataFrame.\n\n Functional usage example:\n\n .. code-block:: python\n\n df = clean_names(df)\n\n Method chaining example:\n\n .. code-block:: python\n\n from pyspark.sql import DataFrame\n import janitor.spark\n df = DataFrame(...).clean_names()\n\n :Example of transformation:\n\n .. code-block:: python\n\n Columns before: First Name, Last Name, Employee Status, Subject\n Columns after: first_name, last_name, employee_status, subject\n\n :param df: Spark DataFrame object.\n :param strip_underscores: (optional) Removes the outer underscores from all\n column names. Default None keeps outer underscores. Values can be\n either 'left', 'right' or 'both' or the respective shorthand 'l', 'r'\n and True.\n :param case_type: (optional) Whether to make columns lower or uppercase.\n Current case may be preserved with 'preserve',\n while snake case conversion (from CamelCase or camelCase only)\n can be turned on using \"snake\".\n Default 'lower' makes all characters lowercase.\n :param remove_special: (optional) Remove special characters from columns.\n Only letters, numbers and underscores are preserved.\n :returns: A Spark DataFrame.\n \"\"\"\n\n cols = df.columns\n\n cols = [janitor_func._change_case(col, case_type) for col in cols]\n\n cols = [janitor_func._normalize_1(col) for col in cols]\n\n if remove_special:\n cols = [janitor_func._remove_special(col) for col in cols]\n\n cols = [re.sub(\"_+\", \"_\", col) for col in cols]\n\n cols = [\n janitor_utils._strip_underscores_func(col, strip_underscores)\n for col in cols\n ]\n\n cols = [\n f\"`{col}` AS `{new_col}`\" for col, new_col in zip(df.columns, cols)\n ]\n\n return df.selectExpr(*cols)\n", "path": "janitor/spark/functions.py"}], "after_files": [{"content": "\"\"\"\nGeneral purpose data cleaning functions for pyspark.\n\"\"\"\n\nimport re\n\nfrom .. import functions as janitor_func\nfrom .. import utils as janitor_utils\nfrom . import backend\n\ntry:\n from pyspark.sql import DataFrame\nexcept ImportError:\n janitor_utils.import_message(\n submodule=\"spark\",\n package=\"pyspark\",\n conda_channel=\"conda-forge\",\n pip_install=True,\n )\n\n\[email protected]_dataframe_method\ndef clean_names(\n df: DataFrame,\n case_type: str = \"lower\",\n remove_special: bool = False,\n strip_underscores: str = None,\n) -> DataFrame:\n \"\"\"\n Clean column names for pyspark dataframe.\n\n Takes all column names, converts them to lowercase, then replaces all\n spaces with underscores.\n\n This method does not mutate the original DataFrame.\n\n Functional usage example:\n\n .. code-block:: python\n\n df = clean_names(df)\n\n Method chaining example:\n\n .. code-block:: python\n\n from pyspark.sql import DataFrame\n import janitor.spark\n df = DataFrame(...).clean_names()\n\n :Example of transformation:\n\n .. code-block:: python\n\n Columns before: First Name, Last Name, Employee Status, Subject\n Columns after: first_name, last_name, employee_status, subject\n\n :param df: Spark DataFrame object.\n :param strip_underscores: (optional) Removes the outer underscores from all\n column names. Default None keeps outer underscores. Values can be\n either 'left', 'right' or 'both' or the respective shorthand 'l', 'r'\n and True.\n :param case_type: (optional) Whether to make columns lower or uppercase.\n Current case may be preserved with 'preserve',\n while snake case conversion (from CamelCase or camelCase only)\n can be turned on using \"snake\".\n Default 'lower' makes all characters lowercase.\n :param remove_special: (optional) Remove special characters from columns.\n Only letters, numbers and underscores are preserved.\n :returns: A Spark DataFrame.\n \"\"\"\n\n cols = df.columns\n\n cols = [janitor_func._change_case(col, case_type) for col in cols]\n\n cols = [janitor_func._normalize_1(col) for col in cols]\n\n if remove_special:\n cols = [janitor_func._remove_special(col) for col in cols]\n\n cols = [re.sub(\"_+\", \"_\", col) for col in cols]\n\n cols = [\n janitor_utils._strip_underscores_func(col, strip_underscores)\n for col in cols\n ]\n\n cols = [\n f\"`{col}` AS `{new_col}`\" for col, new_col in zip(df.columns, cols)\n ]\n\n return df.selectExpr(*cols)\n", "path": "janitor/spark/functions.py"}]}
1,485
99
gh_patches_debug_737
rasdani/github-patches
git_diff
graspologic-org__graspologic-176
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- change semipar and nonpar names? What do people think? @jovo brought up that the current names are uninformative. I agree, but don't really have a strong opinion on it --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `graspy/inference/__init__.py` Content: ``` 1 from .semipar import SemiparametricTest 2 from .nonpar import NonparametricTest 3 4 __all__ = ["SemiparametricTest", "NonparametricTest"] 5 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/graspy/inference/__init__.py b/graspy/inference/__init__.py --- a/graspy/inference/__init__.py +++ b/graspy/inference/__init__.py @@ -1,4 +1,4 @@ -from .semipar import SemiparametricTest -from .nonpar import NonparametricTest +from .latent_position_test import LatentPositionTest +from .latent_distribution_test import LatentDistributionTest -__all__ = ["SemiparametricTest", "NonparametricTest"] +__all__ = ["LatentPositionTest", "LatentDistributionTest"]
{"golden_diff": "diff --git a/graspy/inference/__init__.py b/graspy/inference/__init__.py\n--- a/graspy/inference/__init__.py\n+++ b/graspy/inference/__init__.py\n@@ -1,4 +1,4 @@\n-from .semipar import SemiparametricTest\n-from .nonpar import NonparametricTest\n+from .latent_position_test import LatentPositionTest\n+from .latent_distribution_test import LatentDistributionTest\n \n-__all__ = [\"SemiparametricTest\", \"NonparametricTest\"]\n+__all__ = [\"LatentPositionTest\", \"LatentDistributionTest\"]\n", "issue": "change semipar and nonpar names?\nWhat do people think? @jovo brought up that the current names are uninformative. I agree, but don't really have a strong opinion on it \n", "before_files": [{"content": "from .semipar import SemiparametricTest\nfrom .nonpar import NonparametricTest\n\n__all__ = [\"SemiparametricTest\", \"NonparametricTest\"]\n", "path": "graspy/inference/__init__.py"}], "after_files": [{"content": "from .latent_position_test import LatentPositionTest\nfrom .latent_distribution_test import LatentDistributionTest\n\n__all__ = [\"LatentPositionTest\", \"LatentDistributionTest\"]\n", "path": "graspy/inference/__init__.py"}]}
350
139
gh_patches_debug_57590
rasdani/github-patches
git_diff
joke2k__faker-305
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Fail to run the tests Another problem I have when building the RPM is when I try to run the tests. This is in a minimal build environment, so maybe some library is missing. I have tried to use PYTHONPATH like in issue #291 but with no success. Could you help me? ``` + /usr/bin/python2 setup.py test running test running egg_info writing requirements to fake_factory.egg-info/requires.txt writing fake_factory.egg-info/PKG-INFO writing top-level names to fake_factory.egg-info/top_level.txt writing dependency_links to fake_factory.egg-info/dependency_links.txt writing entry points to fake_factory.egg-info/entry_points.txt reading manifest file 'fake_factory.egg-info/SOURCES.txt' reading manifest template 'MANIFEST.in' warning: no files found matching '*.md' writing manifest file 'fake_factory.egg-info/SOURCES.txt' running build_ext Traceback (most recent call last): File "setup.py", line 58, in <module> ':python_version=="3.0"': ['importlib'], File "/usr/lib64/python2.7/distutils/core.py", line 151, in setup dist.run_commands() File "/usr/lib64/python2.7/distutils/dist.py", line 953, in run_commands self.run_command(cmd) File "/usr/lib64/python2.7/distutils/dist.py", line 972, in run_command cmd_obj.run() File "/usr/lib/python2.7/site-packages/setuptools/command/test.py", line 142, in run self.with_project_on_sys_path(self.run_tests) File "/usr/lib/python2.7/site-packages/setuptools/command/test.py", line 122, in with_project_on_sys_path func() File "/usr/lib/python2.7/site-packages/setuptools/command/test.py", line 163, in run_tests testRunner=self._resolve_as_ep(self.test_runner), File "/usr/lib64/python2.7/unittest/main.py", line 94, in __init__ self.parseArgs(argv) File "/usr/lib64/python2.7/unittest/main.py", line 149, in parseArgs self.createTests() File "/usr/lib64/python2.7/unittest/main.py", line 158, in createTests self.module) File "/usr/lib64/python2.7/unittest/loader.py", line 130, in loadTestsFromNames suites = [self.loadTestsFromName(name, module) for name in names] File "/usr/lib64/python2.7/unittest/loader.py", line 100, in loadTestsFromName parent, obj = obj, getattr(obj, part) AttributeError: 'module' object has no attribute 'tests' ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 #!/usr/bin/env python 2 # coding=utf-8 3 4 import os 5 import io 6 from setuptools import setup, find_packages 7 8 here = os.path.abspath(os.path.dirname(__file__)) 9 README = io.open(os.path.join(here, 'README.rst'), encoding="utf8").read() 10 NEWS = io.open(os.path.join(here, 'CHANGELOG.rst'), encoding="utf8").read() 11 12 13 version = '0.5.3' 14 15 # this module can be zip-safe if the zipimporter implements iter_modules or if 16 # pkgutil.iter_importer_modules has registered a dispatch for the zipimporter. 17 try: 18 import pkgutil 19 import zipimport 20 zip_safe = hasattr(zipimport.zipimporter, "iter_modules") or \ 21 zipimport.zipimporter in pkgutil.iter_importer_modules.registry.keys() 22 except (ImportError, AttributeError): 23 zip_safe = False 24 25 setup( 26 name='fake-factory', 27 version=version, 28 description="Faker is a Python package that generates fake data for you.", 29 long_description=README + '\n\n' + NEWS, 30 entry_points={ 31 'console_scripts': ['faker=faker.cli:execute_from_command_line'], 32 }, 33 classifiers=[ 34 # See https://pypi.python.org/pypi?%3Aaction=list_classifiers 35 'Development Status :: 3 - Alpha', 36 'Environment :: Console', 37 'Intended Audience :: Developers', 38 'Programming Language :: Python', 39 'Programming Language :: Python :: 2', 40 'Programming Language :: Python :: 3', 41 'Programming Language :: Python :: 3.4', 42 'Topic :: Software Development :: Libraries :: Python Modules', 43 'Topic :: Software Development :: Testing', 44 'Topic :: Utilities', 45 'License :: OSI Approved :: MIT License' 46 ], 47 keywords='faker fixtures data test mock generator', 48 author='joke2k', 49 author_email='[email protected]', 50 url='http://github.com/joke2k/faker', 51 license='MIT License', 52 packages=find_packages(exclude=['*.tests']), 53 platforms=["any"], 54 test_suite='faker.tests', 55 zip_safe=zip_safe, 56 extras_require={ 57 ':python_version=="2.6"': ['importlib'], 58 ':python_version=="3.0"': ['importlib'], 59 } 60 ) 61 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -49,7 +49,7 @@ author_email='[email protected]', url='http://github.com/joke2k/faker', license='MIT License', - packages=find_packages(exclude=['*.tests']), + packages=find_packages(), platforms=["any"], test_suite='faker.tests', zip_safe=zip_safe,
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -49,7 +49,7 @@\n author_email='[email protected]',\n url='http://github.com/joke2k/faker',\n license='MIT License',\n- packages=find_packages(exclude=['*.tests']),\n+ packages=find_packages(),\n platforms=[\"any\"],\n test_suite='faker.tests',\n zip_safe=zip_safe,\n", "issue": "Fail to run the tests\nAnother problem I have when building the RPM is when I try to run the tests. This is in a minimal build environment, so maybe some library is missing. I have tried to use PYTHONPATH like in issue #291 but with no success.\n\nCould you help me?\n\n```\n+ /usr/bin/python2 setup.py test\nrunning test\nrunning egg_info\nwriting requirements to fake_factory.egg-info/requires.txt\nwriting fake_factory.egg-info/PKG-INFO\nwriting top-level names to fake_factory.egg-info/top_level.txt\nwriting dependency_links to fake_factory.egg-info/dependency_links.txt\nwriting entry points to fake_factory.egg-info/entry_points.txt\nreading manifest file 'fake_factory.egg-info/SOURCES.txt'\nreading manifest template 'MANIFEST.in'\nwarning: no files found matching '*.md'\nwriting manifest file 'fake_factory.egg-info/SOURCES.txt'\nrunning build_ext\nTraceback (most recent call last):\n File \"setup.py\", line 58, in <module>\n ':python_version==\"3.0\"': ['importlib'],\n File \"/usr/lib64/python2.7/distutils/core.py\", line 151, in setup\n dist.run_commands()\n File \"/usr/lib64/python2.7/distutils/dist.py\", line 953, in run_commands\n self.run_command(cmd)\n File \"/usr/lib64/python2.7/distutils/dist.py\", line 972, in run_command\n cmd_obj.run()\n File \"/usr/lib/python2.7/site-packages/setuptools/command/test.py\", line 142, in run\n self.with_project_on_sys_path(self.run_tests)\n File \"/usr/lib/python2.7/site-packages/setuptools/command/test.py\", line 122, in with_project_on_sys_path\n func()\n File \"/usr/lib/python2.7/site-packages/setuptools/command/test.py\", line 163, in run_tests\n testRunner=self._resolve_as_ep(self.test_runner),\n File \"/usr/lib64/python2.7/unittest/main.py\", line 94, in __init__\n self.parseArgs(argv)\n File \"/usr/lib64/python2.7/unittest/main.py\", line 149, in parseArgs\n self.createTests()\n File \"/usr/lib64/python2.7/unittest/main.py\", line 158, in createTests\n self.module)\n File \"/usr/lib64/python2.7/unittest/loader.py\", line 130, in loadTestsFromNames\n suites = [self.loadTestsFromName(name, module) for name in names]\n File \"/usr/lib64/python2.7/unittest/loader.py\", line 100, in loadTestsFromName\n parent, obj = obj, getattr(obj, part)\nAttributeError: 'module' object has no attribute 'tests'\n```\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# coding=utf-8\n\nimport os\nimport io\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\nREADME = io.open(os.path.join(here, 'README.rst'), encoding=\"utf8\").read()\nNEWS = io.open(os.path.join(here, 'CHANGELOG.rst'), encoding=\"utf8\").read()\n\n\nversion = '0.5.3'\n\n# this module can be zip-safe if the zipimporter implements iter_modules or if\n# pkgutil.iter_importer_modules has registered a dispatch for the zipimporter.\ntry:\n import pkgutil\n import zipimport\n zip_safe = hasattr(zipimport.zipimporter, \"iter_modules\") or \\\n zipimport.zipimporter in pkgutil.iter_importer_modules.registry.keys()\nexcept (ImportError, AttributeError):\n zip_safe = False\n\nsetup(\n name='fake-factory',\n version=version,\n description=\"Faker is a Python package that generates fake data for you.\",\n long_description=README + '\\n\\n' + NEWS,\n entry_points={\n 'console_scripts': ['faker=faker.cli:execute_from_command_line'],\n },\n classifiers=[\n # See https://pypi.python.org/pypi?%3Aaction=list_classifiers\n 'Development Status :: 3 - Alpha',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Software Development :: Testing',\n 'Topic :: Utilities',\n 'License :: OSI Approved :: MIT License'\n ],\n keywords='faker fixtures data test mock generator',\n author='joke2k',\n author_email='[email protected]',\n url='http://github.com/joke2k/faker',\n license='MIT License',\n packages=find_packages(exclude=['*.tests']),\n platforms=[\"any\"],\n test_suite='faker.tests',\n zip_safe=zip_safe,\n extras_require={\n ':python_version==\"2.6\"': ['importlib'],\n ':python_version==\"3.0\"': ['importlib'],\n }\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# coding=utf-8\n\nimport os\nimport io\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\nREADME = io.open(os.path.join(here, 'README.rst'), encoding=\"utf8\").read()\nNEWS = io.open(os.path.join(here, 'CHANGELOG.rst'), encoding=\"utf8\").read()\n\n\nversion = '0.5.3'\n\n# this module can be zip-safe if the zipimporter implements iter_modules or if\n# pkgutil.iter_importer_modules has registered a dispatch for the zipimporter.\ntry:\n import pkgutil\n import zipimport\n zip_safe = hasattr(zipimport.zipimporter, \"iter_modules\") or \\\n zipimport.zipimporter in pkgutil.iter_importer_modules.registry.keys()\nexcept (ImportError, AttributeError):\n zip_safe = False\n\nsetup(\n name='fake-factory',\n version=version,\n description=\"Faker is a Python package that generates fake data for you.\",\n long_description=README + '\\n\\n' + NEWS,\n entry_points={\n 'console_scripts': ['faker=faker.cli:execute_from_command_line'],\n },\n classifiers=[\n # See https://pypi.python.org/pypi?%3Aaction=list_classifiers\n 'Development Status :: 3 - Alpha',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Software Development :: Testing',\n 'Topic :: Utilities',\n 'License :: OSI Approved :: MIT License'\n ],\n keywords='faker fixtures data test mock generator',\n author='joke2k',\n author_email='[email protected]',\n url='http://github.com/joke2k/faker',\n license='MIT License',\n packages=find_packages(),\n platforms=[\"any\"],\n test_suite='faker.tests',\n zip_safe=zip_safe,\n extras_require={\n ':python_version==\"2.6\"': ['importlib'],\n ':python_version==\"3.0\"': ['importlib'],\n }\n)\n", "path": "setup.py"}]}
1,488
99
gh_patches_debug_54121
rasdani/github-patches
git_diff
open-telemetry__opentelemetry-python-contrib-1439
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- System metrics instrumentation not working with custom defined configuration System metric instrumentation is not functional if configuration on which metrics to be exported is explicitly provided. As a minimal example, this code ```python from opentelemetry.metrics import set_meter_provider from opentelemetry.instrumentation.system_metrics import SystemMetricsInstrumentor from opentelemetry.sdk.metrics import MeterProvider from opentelemetry.sdk.metrics.export import ( ConsoleMetricExporter, PeriodicExportingMetricReader, ) exporter = ConsoleMetricExporter() set_meter_provider(MeterProvider([PeriodicExportingMetricReader(exporter)])) configuration = { "runtime.memory": ["rss", "vms"], "runtime.cpu.time": ["user", "system"], } SystemMetricsInstrumentor(config=configuration).instrument() ``` results in ``` Traceback (most recent call last): File ".../test.py", line 15, in <module> SystemMetricsInstrumentor(config=configuration).instrument() File ".../lib/python3.10/site-packages/opentelemetry/instrumentation/instrumentor.py", line 51, in __new__ cls._instance = object.__new__(cls, *args, **kwargs) TypeError: object.__new__() takes exactly one argument (the type to instantiate) ``` I am happy to look into fixing this. Removing `*args` and `**kwargs` in `opentelemetry/instrumentation/instrumentor.py:51` actually solves the issue here but I'd like to understand the implications as this implies changing the interface class. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `opentelemetry-instrumentation/src/opentelemetry/instrumentation/instrumentor.py` Content: ``` 1 # Copyright The OpenTelemetry Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 # type: ignore 15 16 """ 17 OpenTelemetry Base Instrumentor 18 """ 19 20 from abc import ABC, abstractmethod 21 from logging import getLogger 22 from typing import Collection, Optional 23 24 from opentelemetry.instrumentation.dependencies import ( 25 DependencyConflict, 26 get_dependency_conflicts, 27 ) 28 29 _LOG = getLogger(__name__) 30 31 32 class BaseInstrumentor(ABC): 33 """An ABC for instrumentors 34 35 Child classes of this ABC should instrument specific third 36 party libraries or frameworks either by using the 37 ``opentelemetry-instrument`` command or by calling their methods 38 directly. 39 40 Since every third party library or framework is different and has different 41 instrumentation needs, more methods can be added to the child classes as 42 needed to provide practical instrumentation to the end user. 43 """ 44 45 _instance = None 46 _is_instrumented_by_opentelemetry = False 47 48 def __new__(cls, *args, **kwargs): 49 if cls._instance is None: 50 cls._instance = object.__new__(cls, *args, **kwargs) 51 52 return cls._instance 53 54 @property 55 def is_instrumented_by_opentelemetry(self): 56 return self._is_instrumented_by_opentelemetry 57 58 @abstractmethod 59 def instrumentation_dependencies(self) -> Collection[str]: 60 """Return a list of python packages with versions that the will be instrumented. 61 62 The format should be the same as used in requirements.txt or pyproject.toml. 63 64 For example, if an instrumentation instruments requests 1.x, this method should look 65 like: 66 67 def instrumentation_dependencies(self) -> Collection[str]: 68 return ['requests ~= 1.0'] 69 70 This will ensure that the instrumentation will only be used when the specified library 71 is present in the environment. 72 """ 73 74 def _instrument(self, **kwargs): 75 """Instrument the library""" 76 77 @abstractmethod 78 def _uninstrument(self, **kwargs): 79 """Uninstrument the library""" 80 81 def _check_dependency_conflicts(self) -> Optional[DependencyConflict]: 82 dependencies = self.instrumentation_dependencies() 83 return get_dependency_conflicts(dependencies) 84 85 def instrument(self, **kwargs): 86 """Instrument the library 87 88 This method will be called without any optional arguments by the 89 ``opentelemetry-instrument`` command. 90 91 This means that calling this method directly without passing any 92 optional values should do the very same thing that the 93 ``opentelemetry-instrument`` command does. 94 """ 95 96 if self._is_instrumented_by_opentelemetry: 97 _LOG.warning("Attempting to instrument while already instrumented") 98 return None 99 100 # check if instrumentor has any missing or conflicting dependencies 101 skip_dep_check = kwargs.pop("skip_dep_check", False) 102 if not skip_dep_check: 103 conflict = self._check_dependency_conflicts() 104 if conflict: 105 _LOG.error(conflict) 106 return None 107 108 result = self._instrument( # pylint: disable=assignment-from-no-return 109 **kwargs 110 ) 111 self._is_instrumented_by_opentelemetry = True 112 return result 113 114 def uninstrument(self, **kwargs): 115 """Uninstrument the library 116 117 See ``BaseInstrumentor.instrument`` for more information regarding the 118 usage of ``kwargs``. 119 """ 120 121 if self._is_instrumented_by_opentelemetry: 122 result = self._uninstrument(**kwargs) 123 self._is_instrumented_by_opentelemetry = False 124 return result 125 126 _LOG.warning("Attempting to uninstrument while already uninstrumented") 127 128 return None 129 130 131 __all__ = ["BaseInstrumentor"] 132 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/instrumentor.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/instrumentor.py --- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/instrumentor.py +++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/instrumentor.py @@ -47,7 +47,7 @@ def __new__(cls, *args, **kwargs): if cls._instance is None: - cls._instance = object.__new__(cls, *args, **kwargs) + cls._instance = object.__new__(cls) return cls._instance
{"golden_diff": "diff --git a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/instrumentor.py b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/instrumentor.py\n--- a/opentelemetry-instrumentation/src/opentelemetry/instrumentation/instrumentor.py\n+++ b/opentelemetry-instrumentation/src/opentelemetry/instrumentation/instrumentor.py\n@@ -47,7 +47,7 @@\n \n def __new__(cls, *args, **kwargs):\n if cls._instance is None:\n- cls._instance = object.__new__(cls, *args, **kwargs)\n+ cls._instance = object.__new__(cls)\n \n return cls._instance\n", "issue": "System metrics instrumentation not working with custom defined configuration\nSystem metric instrumentation is not functional if configuration on which metrics to be exported is explicitly provided. As a minimal example, this code\r\n\r\n```python\r\nfrom opentelemetry.metrics import set_meter_provider\r\nfrom opentelemetry.instrumentation.system_metrics import SystemMetricsInstrumentor\r\nfrom opentelemetry.sdk.metrics import MeterProvider\r\nfrom opentelemetry.sdk.metrics.export import (\r\n ConsoleMetricExporter,\r\n PeriodicExportingMetricReader,\r\n)\r\n\r\nexporter = ConsoleMetricExporter()\r\nset_meter_provider(MeterProvider([PeriodicExportingMetricReader(exporter)]))\r\n\r\nconfiguration = {\r\n \"runtime.memory\": [\"rss\", \"vms\"],\r\n \"runtime.cpu.time\": [\"user\", \"system\"],\r\n}\r\n\r\nSystemMetricsInstrumentor(config=configuration).instrument()\r\n```\r\n\r\nresults in\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \".../test.py\", line 15, in <module>\r\n SystemMetricsInstrumentor(config=configuration).instrument()\r\n File \".../lib/python3.10/site-packages/opentelemetry/instrumentation/instrumentor.py\", line 51, in __new__\r\n cls._instance = object.__new__(cls, *args, **kwargs)\r\nTypeError: object.__new__() takes exactly one argument (the type to instantiate)\r\n```\r\n\r\nI am happy to look into fixing this. Removing `*args` and `**kwargs` in `opentelemetry/instrumentation/instrumentor.py:51` actually solves the issue here but I'd like to understand the implications as this implies changing the interface class.\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# type: ignore\n\n\"\"\"\nOpenTelemetry Base Instrumentor\n\"\"\"\n\nfrom abc import ABC, abstractmethod\nfrom logging import getLogger\nfrom typing import Collection, Optional\n\nfrom opentelemetry.instrumentation.dependencies import (\n DependencyConflict,\n get_dependency_conflicts,\n)\n\n_LOG = getLogger(__name__)\n\n\nclass BaseInstrumentor(ABC):\n \"\"\"An ABC for instrumentors\n\n Child classes of this ABC should instrument specific third\n party libraries or frameworks either by using the\n ``opentelemetry-instrument`` command or by calling their methods\n directly.\n\n Since every third party library or framework is different and has different\n instrumentation needs, more methods can be added to the child classes as\n needed to provide practical instrumentation to the end user.\n \"\"\"\n\n _instance = None\n _is_instrumented_by_opentelemetry = False\n\n def __new__(cls, *args, **kwargs):\n if cls._instance is None:\n cls._instance = object.__new__(cls, *args, **kwargs)\n\n return cls._instance\n\n @property\n def is_instrumented_by_opentelemetry(self):\n return self._is_instrumented_by_opentelemetry\n\n @abstractmethod\n def instrumentation_dependencies(self) -> Collection[str]:\n \"\"\"Return a list of python packages with versions that the will be instrumented.\n\n The format should be the same as used in requirements.txt or pyproject.toml.\n\n For example, if an instrumentation instruments requests 1.x, this method should look\n like:\n\n def instrumentation_dependencies(self) -> Collection[str]:\n return ['requests ~= 1.0']\n\n This will ensure that the instrumentation will only be used when the specified library\n is present in the environment.\n \"\"\"\n\n def _instrument(self, **kwargs):\n \"\"\"Instrument the library\"\"\"\n\n @abstractmethod\n def _uninstrument(self, **kwargs):\n \"\"\"Uninstrument the library\"\"\"\n\n def _check_dependency_conflicts(self) -> Optional[DependencyConflict]:\n dependencies = self.instrumentation_dependencies()\n return get_dependency_conflicts(dependencies)\n\n def instrument(self, **kwargs):\n \"\"\"Instrument the library\n\n This method will be called without any optional arguments by the\n ``opentelemetry-instrument`` command.\n\n This means that calling this method directly without passing any\n optional values should do the very same thing that the\n ``opentelemetry-instrument`` command does.\n \"\"\"\n\n if self._is_instrumented_by_opentelemetry:\n _LOG.warning(\"Attempting to instrument while already instrumented\")\n return None\n\n # check if instrumentor has any missing or conflicting dependencies\n skip_dep_check = kwargs.pop(\"skip_dep_check\", False)\n if not skip_dep_check:\n conflict = self._check_dependency_conflicts()\n if conflict:\n _LOG.error(conflict)\n return None\n\n result = self._instrument( # pylint: disable=assignment-from-no-return\n **kwargs\n )\n self._is_instrumented_by_opentelemetry = True\n return result\n\n def uninstrument(self, **kwargs):\n \"\"\"Uninstrument the library\n\n See ``BaseInstrumentor.instrument`` for more information regarding the\n usage of ``kwargs``.\n \"\"\"\n\n if self._is_instrumented_by_opentelemetry:\n result = self._uninstrument(**kwargs)\n self._is_instrumented_by_opentelemetry = False\n return result\n\n _LOG.warning(\"Attempting to uninstrument while already uninstrumented\")\n\n return None\n\n\n__all__ = [\"BaseInstrumentor\"]\n", "path": "opentelemetry-instrumentation/src/opentelemetry/instrumentation/instrumentor.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# type: ignore\n\n\"\"\"\nOpenTelemetry Base Instrumentor\n\"\"\"\n\nfrom abc import ABC, abstractmethod\nfrom logging import getLogger\nfrom typing import Collection, Optional\n\nfrom opentelemetry.instrumentation.dependencies import (\n DependencyConflict,\n get_dependency_conflicts,\n)\n\n_LOG = getLogger(__name__)\n\n\nclass BaseInstrumentor(ABC):\n \"\"\"An ABC for instrumentors\n\n Child classes of this ABC should instrument specific third\n party libraries or frameworks either by using the\n ``opentelemetry-instrument`` command or by calling their methods\n directly.\n\n Since every third party library or framework is different and has different\n instrumentation needs, more methods can be added to the child classes as\n needed to provide practical instrumentation to the end user.\n \"\"\"\n\n _instance = None\n _is_instrumented_by_opentelemetry = False\n\n def __new__(cls, *args, **kwargs):\n if cls._instance is None:\n cls._instance = object.__new__(cls)\n\n return cls._instance\n\n @property\n def is_instrumented_by_opentelemetry(self):\n return self._is_instrumented_by_opentelemetry\n\n @abstractmethod\n def instrumentation_dependencies(self) -> Collection[str]:\n \"\"\"Return a list of python packages with versions that the will be instrumented.\n\n The format should be the same as used in requirements.txt or pyproject.toml.\n\n For example, if an instrumentation instruments requests 1.x, this method should look\n like:\n\n def instrumentation_dependencies(self) -> Collection[str]:\n return ['requests ~= 1.0']\n\n This will ensure that the instrumentation will only be used when the specified library\n is present in the environment.\n \"\"\"\n\n def _instrument(self, **kwargs):\n \"\"\"Instrument the library\"\"\"\n\n @abstractmethod\n def _uninstrument(self, **kwargs):\n \"\"\"Uninstrument the library\"\"\"\n\n def _check_dependency_conflicts(self) -> Optional[DependencyConflict]:\n dependencies = self.instrumentation_dependencies()\n return get_dependency_conflicts(dependencies)\n\n def instrument(self, **kwargs):\n \"\"\"Instrument the library\n\n This method will be called without any optional arguments by the\n ``opentelemetry-instrument`` command.\n\n This means that calling this method directly without passing any\n optional values should do the very same thing that the\n ``opentelemetry-instrument`` command does.\n \"\"\"\n\n if self._is_instrumented_by_opentelemetry:\n _LOG.warning(\"Attempting to instrument while already instrumented\")\n return None\n\n # check if instrumentor has any missing or conflicting dependencies\n skip_dep_check = kwargs.pop(\"skip_dep_check\", False)\n if not skip_dep_check:\n conflict = self._check_dependency_conflicts()\n if conflict:\n _LOG.error(conflict)\n return None\n\n result = self._instrument( # pylint: disable=assignment-from-no-return\n **kwargs\n )\n self._is_instrumented_by_opentelemetry = True\n return result\n\n def uninstrument(self, **kwargs):\n \"\"\"Uninstrument the library\n\n See ``BaseInstrumentor.instrument`` for more information regarding the\n usage of ``kwargs``.\n \"\"\"\n\n if self._is_instrumented_by_opentelemetry:\n result = self._uninstrument(**kwargs)\n self._is_instrumented_by_opentelemetry = False\n return result\n\n _LOG.warning(\"Attempting to uninstrument while already uninstrumented\")\n\n return None\n\n\n__all__ = [\"BaseInstrumentor\"]\n", "path": "opentelemetry-instrumentation/src/opentelemetry/instrumentation/instrumentor.py"}]}
1,789
151
gh_patches_debug_1319
rasdani/github-patches
git_diff
zenml-io__zenml-2271
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Update `sklearn` Integration to Support Versions >1.3.0 and Resolve MLflow Autologging Issues ## Open Source Contributors Welcomed! Please comment below if you would like to work on this issue! ### Contact Details [Optional] [email protected] ### What happened? The current ZenML Sklearn integration is restricted to versions of Sklearn <1.3.0, as defined in `src/zenml/integrations/sklearn/__init__.py`. However, the release of Sklearn 1.3.0 necessitates an update to this constraint. Additionally, this Sklearn version upgrade appears to cause issues with MLflow autologging, likely due to compatibility conflicts. ### Task Description Update the Sklearn integration in ZenML to support Sklearn versions >1.3.0. Additionally, identify and resolve any issues arising in MLflow autologging due to this version update. ### Expected Outcome - The Sklearn integration in ZenML should allow for the use of Sklearn versions >1.3.0. - Any compatibility issues, especially with MLflow autologging, should be identified and resolved. - Ensure that all tests, including CI pipelines, pass with the updated Sklearn version. ### Steps to Implement - Modify the Sklearn version constraint in src/zenml/integrations/sklearn/__init__.py to allow for versions >1.3.0. - Investigate and identify the root cause of the issues with MLflow autologging when using Sklearn 1.3.0. - Implement necessary fixes or updates to ensure compatibility with the new Sklearn version. - Thoroughly test the changes, especially focusing on MLflow autologging functionality. - Update documentation and examples as necessary to reflect the support for the new Sklearn version. ### Additional Context This update is crucial for keeping ZenML compatible with the latest machine learning tools and libraries, ensuring that users can leverage the newest features and improvements in Sklearn. ### Code of Conduct - [ ] I agree to follow this project's Code of Conduct --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/zenml/integrations/sklearn/__init__.py` Content: ``` 1 # Copyright (c) ZenML GmbH 2021. All Rights Reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at: 6 # 7 # https://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express 12 # or implied. See the License for the specific language governing 13 # permissions and limitations under the License. 14 """Initialization of the sklearn integration.""" 15 16 from zenml.integrations.constants import SKLEARN 17 from zenml.integrations.integration import Integration 18 19 20 class SklearnIntegration(Integration): 21 """Definition of sklearn integration for ZenML.""" 22 23 NAME = SKLEARN 24 REQUIREMENTS = ["scikit-learn<1.3"] 25 26 @classmethod 27 def activate(cls) -> None: 28 """Activates the integration.""" 29 from zenml.integrations.sklearn import materializers # noqa 30 31 32 SklearnIntegration.check_installation() 33 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/zenml/integrations/sklearn/__init__.py b/src/zenml/integrations/sklearn/__init__.py --- a/src/zenml/integrations/sklearn/__init__.py +++ b/src/zenml/integrations/sklearn/__init__.py @@ -21,7 +21,7 @@ """Definition of sklearn integration for ZenML.""" NAME = SKLEARN - REQUIREMENTS = ["scikit-learn<1.3"] + REQUIREMENTS = ["scikit-learn>1.3"] @classmethod def activate(cls) -> None:
{"golden_diff": "diff --git a/src/zenml/integrations/sklearn/__init__.py b/src/zenml/integrations/sklearn/__init__.py\n--- a/src/zenml/integrations/sklearn/__init__.py\n+++ b/src/zenml/integrations/sklearn/__init__.py\n@@ -21,7 +21,7 @@\n \"\"\"Definition of sklearn integration for ZenML.\"\"\"\n \n NAME = SKLEARN\n- REQUIREMENTS = [\"scikit-learn<1.3\"]\n+ REQUIREMENTS = [\"scikit-learn>1.3\"]\n \n @classmethod\n def activate(cls) -> None:\n", "issue": "Update `sklearn` Integration to Support Versions >1.3.0 and Resolve MLflow Autologging Issues\n## Open Source Contributors Welcomed!\r\nPlease comment below if you would like to work on this issue!\r\n\r\n### Contact Details [Optional]\r\n\r\[email protected]\r\n\r\n### What happened?\r\nThe current ZenML Sklearn integration is restricted to versions of Sklearn <1.3.0, as defined in `src/zenml/integrations/sklearn/__init__.py`. However, the release of Sklearn 1.3.0 necessitates an update to this constraint. Additionally, this Sklearn version upgrade appears to cause issues with MLflow autologging, likely due to compatibility conflicts.\r\n\r\n### Task Description\r\nUpdate the Sklearn integration in ZenML to support Sklearn versions >1.3.0. Additionally, identify and resolve any issues arising in MLflow autologging due to this version update.\r\n\r\n### Expected Outcome\r\n- The Sklearn integration in ZenML should allow for the use of Sklearn versions >1.3.0.\r\n- Any compatibility issues, especially with MLflow autologging, should be identified and resolved.\r\n- Ensure that all tests, including CI pipelines, pass with the updated Sklearn version.\r\n\r\n### Steps to Implement\r\n\r\n- Modify the Sklearn version constraint in src/zenml/integrations/sklearn/__init__.py to allow for versions >1.3.0.\r\n- Investigate and identify the root cause of the issues with MLflow autologging when using Sklearn 1.3.0.\r\n- Implement necessary fixes or updates to ensure compatibility with the new Sklearn version.\r\n- Thoroughly test the changes, especially focusing on MLflow autologging functionality.\r\n- Update documentation and examples as necessary to reflect the support for the new Sklearn version.\r\n\r\n### Additional Context\r\nThis update is crucial for keeping ZenML compatible with the latest machine learning tools and libraries, ensuring that users can leverage the newest features and improvements in Sklearn.\r\n\r\n### Code of Conduct\r\n- [ ] I agree to follow this project's Code of Conduct\n", "before_files": [{"content": "# Copyright (c) ZenML GmbH 2021. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at:\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express\n# or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\"\"\"Initialization of the sklearn integration.\"\"\"\n\nfrom zenml.integrations.constants import SKLEARN\nfrom zenml.integrations.integration import Integration\n\n\nclass SklearnIntegration(Integration):\n \"\"\"Definition of sklearn integration for ZenML.\"\"\"\n\n NAME = SKLEARN\n REQUIREMENTS = [\"scikit-learn<1.3\"]\n\n @classmethod\n def activate(cls) -> None:\n \"\"\"Activates the integration.\"\"\"\n from zenml.integrations.sklearn import materializers # noqa\n\n\nSklearnIntegration.check_installation()\n", "path": "src/zenml/integrations/sklearn/__init__.py"}], "after_files": [{"content": "# Copyright (c) ZenML GmbH 2021. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at:\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express\n# or implied. See the License for the specific language governing\n# permissions and limitations under the License.\n\"\"\"Initialization of the sklearn integration.\"\"\"\n\nfrom zenml.integrations.constants import SKLEARN\nfrom zenml.integrations.integration import Integration\n\n\nclass SklearnIntegration(Integration):\n \"\"\"Definition of sklearn integration for ZenML.\"\"\"\n\n NAME = SKLEARN\n REQUIREMENTS = [\"scikit-learn>1.3\"]\n\n @classmethod\n def activate(cls) -> None:\n \"\"\"Activates the integration.\"\"\"\n from zenml.integrations.sklearn import materializers # noqa\n\n\nSklearnIntegration.check_installation()\n", "path": "src/zenml/integrations/sklearn/__init__.py"}]}
1,017
141
gh_patches_debug_1995
rasdani/github-patches
git_diff
RedHatInsights__insights-core-1641
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- RedhatRelease parser failed to parse minor release version in some scenarios In few cases where redhat_release content is something similar to below, RedhatRelease parser fails to get the minor version extracted from it Run: ``` >>> from insights.parsers.redhat_release import RedhatRelease >>> from insights.tests import context_wrap >>> RedhatRelease(context_wrap("Red Hat Enterprise Linux release 7.5-0.14")).major 7 >>> RedhatRelease(context_wrap("Red Hat Enterprise Linux release 7.5-0.14")).minor Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/opt/insights/insights-core/insights/parsers/redhat_release.py", line 59, in minor return int(s[1]) ValueError: invalid literal for int() with base 10: '5-0' >>> RedhatRelease(context_wrap("Red Hat Enterprise Linux release 7.5-0.14")).version '7.5-0.14' ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `insights/parsers/redhat_release.py` Content: ``` 1 """ 2 redhat-release - File ``/etc/redhat-release`` 3 ============================================= 4 5 This module provides plugins access to file ``/etc/redhat-release`` 6 7 Typical content of file ``/etc/redhat-release`` is:: 8 9 Red Hat Enterprise Linux Server release 7.2 (Maipo) 10 11 This module parses the file content and stores data in the dict ``self.parsed``. 12 The version info can also be get via ``obj.major`` and ``obj.minor``. 13 Property ``is_rhel`` and ``is_hypervisor`` specifies the host type. 14 15 Examples: 16 >>> rh_rls_content = ''' 17 ... Red Hat Enterprise Linux Server release 7.2 (Maipo) 18 ... '''.strip() 19 >>> from insights.tests import context_wrap 20 >>> shared = {RedhatRelease: RedhatRelease(context_wrap(rh_rls_content))} 21 >>> release = shared[RedhatRelease] 22 >>> assert release.raw == rh_rls_content 23 >>> assert release.major == 7 24 >>> assert release.minor == 2 25 >>> assert release.version == "7.2" 26 >>> assert release.is_rhel 27 >>> assert release.product == "Red Hat Enterprise Linux Server" 28 """ 29 from .. import Parser, parser 30 from ..specs import Specs 31 32 33 @parser(Specs.redhat_release) 34 class RedhatRelease(Parser): 35 """Parses the content of file ``/etc/redhat-release``.""" 36 37 def parse_content(self, content): 38 self.raw = content[0] 39 product, _, version_name = [v.strip() for v in content[0].partition("release")] 40 version_name_split = [v.strip() for v in version_name.split(None, 1)] 41 code_name = (version_name_split[1].strip("()") 42 if len(version_name_split) > 1 else None) 43 self.parsed = { 44 "product": product, 45 "version": version_name_split[0], 46 "code_name": code_name 47 } 48 49 @property 50 def major(self): 51 """int: the major version of this OS.""" 52 return int(self.parsed["version"].split(".")[0]) 53 54 @property 55 def minor(self): 56 """int: the minor version of this OS.""" 57 s = self.parsed["version"].split(".") 58 if len(s) > 1: 59 return int(s[1]) 60 61 @property 62 def version(self): 63 """string: version of this OS.""" 64 return self.parsed["version"] 65 66 @property 67 def is_rhel(self): 68 """bool: True if this OS belong to RHEL, else False.""" 69 return "Red Hat Enterprise Linux" in self.parsed["product"] 70 71 @property 72 def product(self): 73 """string: product of this OS.""" 74 return self.parsed["product"] 75 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/insights/parsers/redhat_release.py b/insights/parsers/redhat_release.py --- a/insights/parsers/redhat_release.py +++ b/insights/parsers/redhat_release.py @@ -54,7 +54,7 @@ @property def minor(self): """int: the minor version of this OS.""" - s = self.parsed["version"].split(".") + s = self.parsed["version"].split("-", 1)[0].split(".") if len(s) > 1: return int(s[1])
{"golden_diff": "diff --git a/insights/parsers/redhat_release.py b/insights/parsers/redhat_release.py\n--- a/insights/parsers/redhat_release.py\n+++ b/insights/parsers/redhat_release.py\n@@ -54,7 +54,7 @@\n @property\n def minor(self):\n \"\"\"int: the minor version of this OS.\"\"\"\n- s = self.parsed[\"version\"].split(\".\")\n+ s = self.parsed[\"version\"].split(\"-\", 1)[0].split(\".\")\n if len(s) > 1:\n return int(s[1])\n", "issue": "RedhatRelease parser failed to parse minor release version in some scenarios\nIn few cases where redhat_release content is something similar to below, RedhatRelease parser fails to get the minor version extracted from it\r\n\r\nRun:\r\n```\r\n>>> from insights.parsers.redhat_release import RedhatRelease\r\n>>> from insights.tests import context_wrap\r\n>>> RedhatRelease(context_wrap(\"Red Hat Enterprise Linux release 7.5-0.14\")).major\r\n7\r\n>>> RedhatRelease(context_wrap(\"Red Hat Enterprise Linux release 7.5-0.14\")).minor\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/opt/insights/insights-core/insights/parsers/redhat_release.py\", line 59, in minor\r\n return int(s[1])\r\nValueError: invalid literal for int() with base 10: '5-0'\r\n>>> RedhatRelease(context_wrap(\"Red Hat Enterprise Linux release 7.5-0.14\")).version\r\n'7.5-0.14'\r\n```\n", "before_files": [{"content": "\"\"\"\nredhat-release - File ``/etc/redhat-release``\n=============================================\n\nThis module provides plugins access to file ``/etc/redhat-release``\n\nTypical content of file ``/etc/redhat-release`` is::\n\n Red Hat Enterprise Linux Server release 7.2 (Maipo)\n\nThis module parses the file content and stores data in the dict ``self.parsed``.\nThe version info can also be get via ``obj.major`` and ``obj.minor``.\nProperty ``is_rhel`` and ``is_hypervisor`` specifies the host type.\n\nExamples:\n >>> rh_rls_content = '''\n ... Red Hat Enterprise Linux Server release 7.2 (Maipo)\n ... '''.strip()\n >>> from insights.tests import context_wrap\n >>> shared = {RedhatRelease: RedhatRelease(context_wrap(rh_rls_content))}\n >>> release = shared[RedhatRelease]\n >>> assert release.raw == rh_rls_content\n >>> assert release.major == 7\n >>> assert release.minor == 2\n >>> assert release.version == \"7.2\"\n >>> assert release.is_rhel\n >>> assert release.product == \"Red Hat Enterprise Linux Server\"\n\"\"\"\nfrom .. import Parser, parser\nfrom ..specs import Specs\n\n\n@parser(Specs.redhat_release)\nclass RedhatRelease(Parser):\n \"\"\"Parses the content of file ``/etc/redhat-release``.\"\"\"\n\n def parse_content(self, content):\n self.raw = content[0]\n product, _, version_name = [v.strip() for v in content[0].partition(\"release\")]\n version_name_split = [v.strip() for v in version_name.split(None, 1)]\n code_name = (version_name_split[1].strip(\"()\")\n if len(version_name_split) > 1 else None)\n self.parsed = {\n \"product\": product,\n \"version\": version_name_split[0],\n \"code_name\": code_name\n }\n\n @property\n def major(self):\n \"\"\"int: the major version of this OS.\"\"\"\n return int(self.parsed[\"version\"].split(\".\")[0])\n\n @property\n def minor(self):\n \"\"\"int: the minor version of this OS.\"\"\"\n s = self.parsed[\"version\"].split(\".\")\n if len(s) > 1:\n return int(s[1])\n\n @property\n def version(self):\n \"\"\"string: version of this OS.\"\"\"\n return self.parsed[\"version\"]\n\n @property\n def is_rhel(self):\n \"\"\"bool: True if this OS belong to RHEL, else False.\"\"\"\n return \"Red Hat Enterprise Linux\" in self.parsed[\"product\"]\n\n @property\n def product(self):\n \"\"\"string: product of this OS.\"\"\"\n return self.parsed[\"product\"]\n", "path": "insights/parsers/redhat_release.py"}], "after_files": [{"content": "\"\"\"\nredhat-release - File ``/etc/redhat-release``\n=============================================\n\nThis module provides plugins access to file ``/etc/redhat-release``\n\nTypical content of file ``/etc/redhat-release`` is::\n\n Red Hat Enterprise Linux Server release 7.2 (Maipo)\n\nThis module parses the file content and stores data in the dict ``self.parsed``.\nThe version info can also be get via ``obj.major`` and ``obj.minor``.\nProperty ``is_rhel`` and ``is_hypervisor`` specifies the host type.\n\nExamples:\n >>> rh_rls_content = '''\n ... Red Hat Enterprise Linux Server release 7.2 (Maipo)\n ... '''.strip()\n >>> from insights.tests import context_wrap\n >>> shared = {RedhatRelease: RedhatRelease(context_wrap(rh_rls_content))}\n >>> release = shared[RedhatRelease]\n >>> assert release.raw == rh_rls_content\n >>> assert release.major == 7\n >>> assert release.minor == 2\n >>> assert release.version == \"7.2\"\n >>> assert release.is_rhel\n >>> assert release.product == \"Red Hat Enterprise Linux Server\"\n\"\"\"\nfrom .. import Parser, parser\nfrom ..specs import Specs\n\n\n@parser(Specs.redhat_release)\nclass RedhatRelease(Parser):\n \"\"\"Parses the content of file ``/etc/redhat-release``.\"\"\"\n\n def parse_content(self, content):\n self.raw = content[0]\n product, _, version_name = [v.strip() for v in content[0].partition(\"release\")]\n version_name_split = [v.strip() for v in version_name.split(None, 1)]\n code_name = (version_name_split[1].strip(\"()\")\n if len(version_name_split) > 1 else None)\n self.parsed = {\n \"product\": product,\n \"version\": version_name_split[0],\n \"code_name\": code_name\n }\n\n @property\n def major(self):\n \"\"\"int: the major version of this OS.\"\"\"\n return int(self.parsed[\"version\"].split(\".\")[0])\n\n @property\n def minor(self):\n \"\"\"int: the minor version of this OS.\"\"\"\n s = self.parsed[\"version\"].split(\"-\", 1)[0].split(\".\")\n if len(s) > 1:\n return int(s[1])\n\n @property\n def version(self):\n \"\"\"string: version of this OS.\"\"\"\n return self.parsed[\"version\"]\n\n @property\n def is_rhel(self):\n \"\"\"bool: True if this OS belong to RHEL, else False.\"\"\"\n return \"Red Hat Enterprise Linux\" in self.parsed[\"product\"]\n\n @property\n def product(self):\n \"\"\"string: product of this OS.\"\"\"\n return self.parsed[\"product\"]\n", "path": "insights/parsers/redhat_release.py"}]}
1,228
129
gh_patches_debug_10667
rasdani/github-patches
git_diff
saleor__saleor-730
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- no_query_found method is missing. Hello, the user gets an error if he submits the search form without a query. There is not "no_query_found" method inside SearchForm. https://github.com/mirumee/saleor/blob/35956e20a85b66abc95e08491f38fa69d74d5f12/saleor/search/views.py#L28 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `saleor/search/views.py` Content: ``` 1 from __future__ import unicode_literals 2 3 from django.core.paginator import Paginator, InvalidPage 4 from django.conf import settings 5 from django.http import Http404 6 from django.shortcuts import render 7 from .forms import SearchForm 8 from ..product.utils import products_with_details 9 10 11 def paginate_results(results, get_data, paginate_by=25): 12 paginator = Paginator(results, paginate_by) 13 page_number = get_data.get('page', 1) 14 try: 15 page = paginator.page(page_number) 16 except InvalidPage: 17 raise Http404('No such page!') 18 return page 19 20 21 def search(request): 22 form = SearchForm(data=request.GET or None) 23 if form.is_valid(): 24 visible_products = products_with_details(request.user) 25 results = form.search(model_or_queryset=visible_products) 26 page = paginate_results(results, request.GET, settings.PAGINATE_BY) 27 else: 28 page = form.no_query_found() 29 query = form.cleaned_data['q'] 30 ctx = { 31 'query': query, 32 'results': page, 33 'query_string': '?q=%s' % query} 34 return render(request, 'search/results.html', ctx) 35 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/saleor/search/views.py b/saleor/search/views.py --- a/saleor/search/views.py +++ b/saleor/search/views.py @@ -23,10 +23,11 @@ if form.is_valid(): visible_products = products_with_details(request.user) results = form.search(model_or_queryset=visible_products) - page = paginate_results(results, request.GET, settings.PAGINATE_BY) + query = form.cleaned_data.get('q', '') else: - page = form.no_query_found() - query = form.cleaned_data['q'] + results = [] + query = '' + page = paginate_results(results, request.GET, settings.PAGINATE_BY) ctx = { 'query': query, 'results': page,
{"golden_diff": "diff --git a/saleor/search/views.py b/saleor/search/views.py\n--- a/saleor/search/views.py\n+++ b/saleor/search/views.py\n@@ -23,10 +23,11 @@\n if form.is_valid():\n visible_products = products_with_details(request.user)\n results = form.search(model_or_queryset=visible_products)\n- page = paginate_results(results, request.GET, settings.PAGINATE_BY)\n+ query = form.cleaned_data.get('q', '')\n else:\n- page = form.no_query_found()\n- query = form.cleaned_data['q']\n+ results = []\n+ query = ''\n+ page = paginate_results(results, request.GET, settings.PAGINATE_BY)\n ctx = {\n 'query': query,\n 'results': page,\n", "issue": "no_query_found method is missing.\nHello,\r\nthe user gets an error if he submits the search form without a query.\r\nThere is not \"no_query_found\" method inside SearchForm.\r\n\r\nhttps://github.com/mirumee/saleor/blob/35956e20a85b66abc95e08491f38fa69d74d5f12/saleor/search/views.py#L28\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nfrom django.core.paginator import Paginator, InvalidPage\nfrom django.conf import settings\nfrom django.http import Http404\nfrom django.shortcuts import render\nfrom .forms import SearchForm\nfrom ..product.utils import products_with_details\n\n\ndef paginate_results(results, get_data, paginate_by=25):\n paginator = Paginator(results, paginate_by)\n page_number = get_data.get('page', 1)\n try:\n page = paginator.page(page_number)\n except InvalidPage:\n raise Http404('No such page!')\n return page\n\n\ndef search(request):\n form = SearchForm(data=request.GET or None)\n if form.is_valid():\n visible_products = products_with_details(request.user)\n results = form.search(model_or_queryset=visible_products)\n page = paginate_results(results, request.GET, settings.PAGINATE_BY)\n else:\n page = form.no_query_found()\n query = form.cleaned_data['q']\n ctx = {\n 'query': query,\n 'results': page,\n 'query_string': '?q=%s' % query}\n return render(request, 'search/results.html', ctx)\n", "path": "saleor/search/views.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nfrom django.core.paginator import Paginator, InvalidPage\nfrom django.conf import settings\nfrom django.http import Http404\nfrom django.shortcuts import render\nfrom .forms import SearchForm\nfrom ..product.utils import products_with_details\n\n\ndef paginate_results(results, get_data, paginate_by=25):\n paginator = Paginator(results, paginate_by)\n page_number = get_data.get('page', 1)\n try:\n page = paginator.page(page_number)\n except InvalidPage:\n raise Http404('No such page!')\n return page\n\n\ndef search(request):\n form = SearchForm(data=request.GET or None)\n if form.is_valid():\n visible_products = products_with_details(request.user)\n results = form.search(model_or_queryset=visible_products)\n query = form.cleaned_data.get('q', '')\n else:\n results = []\n query = ''\n page = paginate_results(results, request.GET, settings.PAGINATE_BY)\n ctx = {\n 'query': query,\n 'results': page,\n 'query_string': '?q=%s' % query}\n return render(request, 'search/results.html', ctx)\n", "path": "saleor/search/views.py"}]}
668
174
gh_patches_debug_11214
rasdani/github-patches
git_diff
hpcaitech__ColossalAI-2737
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [tensor] fix some unittests [tensor] fix some unittests [tensor] fix some unittests --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `colossalai/auto_parallel/tensor_shard/deprecated/graph_analysis.py` Content: ``` 1 from dataclasses import dataclass 2 from torch.fx.node import Node 3 from torch.fx.graph import Graph 4 from torch.fx.graph_module import GraphModule 5 from collections import OrderedDict as ODict 6 from typing import List, OrderedDict, Union, Any 7 from colossalai.fx.passes.utils import get_node_module 8 9 __all__ = ['LiveVariable', 'LiveVariableVector', 'LiveStage', 'GraphAnalyser'] 10 11 12 @dataclass 13 class LiveVariable: 14 """ 15 LiveVariable is a data structure to store the meta information of a variable for liveness analysis. 16 """ 17 name: str 18 node: Node 19 is_inplace: bool 20 21 22 class LiveVariableVector(list): 23 """ 24 LiveVariableVector is a data structure to store the list of LiveVariable objects. 25 """ 26 27 def exists(self, name) -> bool: 28 """ 29 Check if a variable has already existed in the current list by name. 30 """ 31 for var in self: 32 if name == var.name: 33 return True 34 return False 35 36 def get(self, name) -> LiveVariable: 37 for var in self: 38 if name == var.name: 39 return var 40 raise KeyError(f"Variable {name} is not found") 41 42 def copy(self) -> "LiveVariableVector": 43 """ 44 Create a copy of this vector 45 """ 46 vector = LiveVariableVector() 47 for var in self: 48 vector.append(var) 49 return vector 50 51 52 @dataclass 53 class LiveStage: 54 """ 55 LiveStage is a data structure to record the living variables at this current node. 56 """ 57 name: str 58 node: Node 59 all_live_vars: LiveVariableVector 60 unique_live_vars: LiveVariableVector 61 62 63 class GraphAnalyser: 64 65 def __init__(self, gm: GraphModule): 66 self._gm = gm 67 self._graph = gm.graph 68 69 @property 70 def gm(self) -> GraphModule: 71 """ 72 Return the GraphModule object associated with this analyser. 73 """ 74 return self._gm 75 76 @property 77 def graph(self) -> Graph: 78 """ 79 Return the Graph object associated with this analyser. 80 """ 81 return self._graph 82 83 def liveness_analysis(self) -> List[LiveStage]: 84 """ 85 Analyse the graph to obtain the variable liveness information. This function returns 86 an ordered dictionary where the key is the compute stage ID and the value is a LivenessStage object. 87 """ 88 compute_nodes = self.graph.nodes 89 liveness_list = [] 90 91 # checked: record all variables created since the first stage 92 # all: record the live variables only exist until the current stage. 93 # this can be different from the `checked list`` as some varialbes may be destroyed prior to this stage. 94 # unique: record the unique live variables only exist until the current stage. 95 # this is different from `all list` as some variables are duplicated. 96 checked_variables = LiveVariableVector() 97 all_live_variables = LiveVariableVector() 98 unique_live_vars = LiveVariableVector() 99 100 for idx, node in enumerate(compute_nodes): 101 ############################# 102 # find new living variables # 103 ############################# 104 # detect whether the current op is an in-place op 105 # if it is an in-place op, we would deem it as a duplciate var 106 is_inplace = False 107 if node.op == 'call_function': 108 # check if this is an inplace op such as torch.nn.functional.relu(x, inplace=True) 109 if node.kwargs.get('inplace', False): 110 is_inplace = True 111 elif node.op == 'call_module': 112 # to check if this is an inplace op such as torch.nn.Relu(inplace=True) 113 module = get_node_module(node) 114 if getattr(module, 'inplace', False): 115 is_inplace = True 116 117 # add the output var 118 meta = getattr(node, '_meta_data', None) 119 live_var = LiveVariable(name=node.name, node=node, is_inplace=is_inplace) 120 if not is_inplace: 121 unique_live_vars.append(live_var) 122 checked_variables.append(live_var) 123 all_live_variables.append(live_var) 124 125 # check if any input is not checked yet 126 for arg in node.args: 127 if not isinstance(arg, Node): 128 continue 129 arg_name = arg.name 130 if not checked_variables.exists(arg_name): 131 live_var_from_arg = LiveVariable(name=arg_name, node=node, is_inplace=False) 132 all_live_variables.append(live_var_from_arg) 133 checked_variables.append(live_var_from_arg) 134 unique_live_vars.append(live_var_from_arg) 135 136 # TODO: add the logic to remove live variables 137 # this should be completed if we are able to trace the backward compute graph 138 139 # add this stage to liveness dict 140 stage = LiveStage(name=node.name, 141 node=node, 142 all_live_vars=all_live_variables.copy(), 143 unique_live_vars=unique_live_vars.copy()) 144 # if a LiveStage is covered by another LiveStage, we just keep the larger one. 145 replace = False 146 for index, prev_stage in enumerate(liveness_list): 147 all_covered = True 148 for ele in prev_stage.unique_live_vars: 149 if ele not in stage.unique_live_vars: 150 all_covered = False 151 break 152 if all_covered: 153 replace = True 154 break 155 if replace: 156 liveness_list[index] = stage 157 else: 158 liveness_list.append(stage) 159 160 return liveness_list 161 162 def get_alias_set(self): 163 pass 164 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/colossalai/auto_parallel/tensor_shard/deprecated/graph_analysis.py b/colossalai/auto_parallel/tensor_shard/deprecated/graph_analysis.py --- a/colossalai/auto_parallel/tensor_shard/deprecated/graph_analysis.py +++ b/colossalai/auto_parallel/tensor_shard/deprecated/graph_analysis.py @@ -1,9 +1,11 @@ +from collections import OrderedDict as ODict from dataclasses import dataclass -from torch.fx.node import Node +from typing import Any, List, OrderedDict, Union + from torch.fx.graph import Graph from torch.fx.graph_module import GraphModule -from collections import OrderedDict as ODict -from typing import List, OrderedDict, Union, Any +from torch.fx.node import Node + from colossalai.fx.passes.utils import get_node_module __all__ = ['LiveVariable', 'LiveVariableVector', 'LiveStage', 'GraphAnalyser']
{"golden_diff": "diff --git a/colossalai/auto_parallel/tensor_shard/deprecated/graph_analysis.py b/colossalai/auto_parallel/tensor_shard/deprecated/graph_analysis.py\n--- a/colossalai/auto_parallel/tensor_shard/deprecated/graph_analysis.py\n+++ b/colossalai/auto_parallel/tensor_shard/deprecated/graph_analysis.py\n@@ -1,9 +1,11 @@\n+from collections import OrderedDict as ODict\n from dataclasses import dataclass\n-from torch.fx.node import Node\n+from typing import Any, List, OrderedDict, Union\n+\n from torch.fx.graph import Graph\n from torch.fx.graph_module import GraphModule\n-from collections import OrderedDict as ODict\n-from typing import List, OrderedDict, Union, Any\n+from torch.fx.node import Node\n+\n from colossalai.fx.passes.utils import get_node_module\n \n __all__ = ['LiveVariable', 'LiveVariableVector', 'LiveStage', 'GraphAnalyser']\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "from dataclasses import dataclass\nfrom torch.fx.node import Node\nfrom torch.fx.graph import Graph\nfrom torch.fx.graph_module import GraphModule\nfrom collections import OrderedDict as ODict\nfrom typing import List, OrderedDict, Union, Any\nfrom colossalai.fx.passes.utils import get_node_module\n\n__all__ = ['LiveVariable', 'LiveVariableVector', 'LiveStage', 'GraphAnalyser']\n\n\n@dataclass\nclass LiveVariable:\n \"\"\"\n LiveVariable is a data structure to store the meta information of a variable for liveness analysis.\n \"\"\"\n name: str\n node: Node\n is_inplace: bool\n\n\nclass LiveVariableVector(list):\n \"\"\"\n LiveVariableVector is a data structure to store the list of LiveVariable objects.\n \"\"\"\n\n def exists(self, name) -> bool:\n \"\"\"\n Check if a variable has already existed in the current list by name.\n \"\"\"\n for var in self:\n if name == var.name:\n return True\n return False\n\n def get(self, name) -> LiveVariable:\n for var in self:\n if name == var.name:\n return var\n raise KeyError(f\"Variable {name} is not found\")\n\n def copy(self) -> \"LiveVariableVector\":\n \"\"\"\n Create a copy of this vector\n \"\"\"\n vector = LiveVariableVector()\n for var in self:\n vector.append(var)\n return vector\n\n\n@dataclass\nclass LiveStage:\n \"\"\"\n LiveStage is a data structure to record the living variables at this current node.\n \"\"\"\n name: str\n node: Node\n all_live_vars: LiveVariableVector\n unique_live_vars: LiveVariableVector\n\n\nclass GraphAnalyser:\n\n def __init__(self, gm: GraphModule):\n self._gm = gm\n self._graph = gm.graph\n\n @property\n def gm(self) -> GraphModule:\n \"\"\"\n Return the GraphModule object associated with this analyser.\n \"\"\"\n return self._gm\n\n @property\n def graph(self) -> Graph:\n \"\"\"\n Return the Graph object associated with this analyser.\n \"\"\"\n return self._graph\n\n def liveness_analysis(self) -> List[LiveStage]:\n \"\"\"\n Analyse the graph to obtain the variable liveness information. This function returns\n an ordered dictionary where the key is the compute stage ID and the value is a LivenessStage object.\n \"\"\"\n compute_nodes = self.graph.nodes\n liveness_list = []\n\n # checked: record all variables created since the first stage\n # all: record the live variables only exist until the current stage.\n # this can be different from the `checked list`` as some varialbes may be destroyed prior to this stage.\n # unique: record the unique live variables only exist until the current stage.\n # this is different from `all list` as some variables are duplicated.\n checked_variables = LiveVariableVector()\n all_live_variables = LiveVariableVector()\n unique_live_vars = LiveVariableVector()\n\n for idx, node in enumerate(compute_nodes):\n #############################\n # find new living variables #\n #############################\n # detect whether the current op is an in-place op\n # if it is an in-place op, we would deem it as a duplciate var\n is_inplace = False\n if node.op == 'call_function':\n # check if this is an inplace op such as torch.nn.functional.relu(x, inplace=True)\n if node.kwargs.get('inplace', False):\n is_inplace = True\n elif node.op == 'call_module':\n # to check if this is an inplace op such as torch.nn.Relu(inplace=True)\n module = get_node_module(node)\n if getattr(module, 'inplace', False):\n is_inplace = True\n\n # add the output var\n meta = getattr(node, '_meta_data', None)\n live_var = LiveVariable(name=node.name, node=node, is_inplace=is_inplace)\n if not is_inplace:\n unique_live_vars.append(live_var)\n checked_variables.append(live_var)\n all_live_variables.append(live_var)\n\n # check if any input is not checked yet\n for arg in node.args:\n if not isinstance(arg, Node):\n continue\n arg_name = arg.name\n if not checked_variables.exists(arg_name):\n live_var_from_arg = LiveVariable(name=arg_name, node=node, is_inplace=False)\n all_live_variables.append(live_var_from_arg)\n checked_variables.append(live_var_from_arg)\n unique_live_vars.append(live_var_from_arg)\n\n # TODO: add the logic to remove live variables\n # this should be completed if we are able to trace the backward compute graph\n\n # add this stage to liveness dict\n stage = LiveStage(name=node.name,\n node=node,\n all_live_vars=all_live_variables.copy(),\n unique_live_vars=unique_live_vars.copy())\n # if a LiveStage is covered by another LiveStage, we just keep the larger one.\n replace = False\n for index, prev_stage in enumerate(liveness_list):\n all_covered = True\n for ele in prev_stage.unique_live_vars:\n if ele not in stage.unique_live_vars:\n all_covered = False\n break\n if all_covered:\n replace = True\n break\n if replace:\n liveness_list[index] = stage\n else:\n liveness_list.append(stage)\n\n return liveness_list\n\n def get_alias_set(self):\n pass\n", "path": "colossalai/auto_parallel/tensor_shard/deprecated/graph_analysis.py"}], "after_files": [{"content": "from collections import OrderedDict as ODict\nfrom dataclasses import dataclass\nfrom typing import Any, List, OrderedDict, Union\n\nfrom torch.fx.graph import Graph\nfrom torch.fx.graph_module import GraphModule\nfrom torch.fx.node import Node\n\nfrom colossalai.fx.passes.utils import get_node_module\n\n__all__ = ['LiveVariable', 'LiveVariableVector', 'LiveStage', 'GraphAnalyser']\n\n\n@dataclass\nclass LiveVariable:\n \"\"\"\n LiveVariable is a data structure to store the meta information of a variable for liveness analysis.\n \"\"\"\n name: str\n node: Node\n is_inplace: bool\n\n\nclass LiveVariableVector(list):\n \"\"\"\n LiveVariableVector is a data structure to store the list of LiveVariable objects.\n \"\"\"\n\n def exists(self, name) -> bool:\n \"\"\"\n Check if a variable has already existed in the current list by name.\n \"\"\"\n for var in self:\n if name == var.name:\n return True\n return False\n\n def get(self, name) -> LiveVariable:\n for var in self:\n if name == var.name:\n return var\n raise KeyError(f\"Variable {name} is not found\")\n\n def copy(self) -> \"LiveVariableVector\":\n \"\"\"\n Create a copy of this vector\n \"\"\"\n vector = LiveVariableVector()\n for var in self:\n vector.append(var)\n return vector\n\n\n@dataclass\nclass LiveStage:\n \"\"\"\n LiveStage is a data structure to record the living variables at this current node.\n \"\"\"\n name: str\n node: Node\n all_live_vars: LiveVariableVector\n unique_live_vars: LiveVariableVector\n\n\nclass GraphAnalyser:\n\n def __init__(self, gm: GraphModule):\n self._gm = gm\n self._graph = gm.graph\n\n @property\n def gm(self) -> GraphModule:\n \"\"\"\n Return the GraphModule object associated with this analyser.\n \"\"\"\n return self._gm\n\n @property\n def graph(self) -> Graph:\n \"\"\"\n Return the Graph object associated with this analyser.\n \"\"\"\n return self._graph\n\n def liveness_analysis(self) -> List[LiveStage]:\n \"\"\"\n Analyse the graph to obtain the variable liveness information. This function returns\n an ordered dictionary where the key is the compute stage ID and the value is a LivenessStage object.\n \"\"\"\n compute_nodes = self.graph.nodes\n liveness_list = []\n\n # checked: record all variables created since the first stage\n # all: record the live variables only exist until the current stage.\n # this can be different from the `checked list`` as some varialbes may be destroyed prior to this stage.\n # unique: record the unique live variables only exist until the current stage.\n # this is different from `all list` as some variables are duplicated.\n checked_variables = LiveVariableVector()\n all_live_variables = LiveVariableVector()\n unique_live_vars = LiveVariableVector()\n\n for idx, node in enumerate(compute_nodes):\n #############################\n # find new living variables #\n #############################\n # detect whether the current op is an in-place op\n # if it is an in-place op, we would deem it as a duplciate var\n is_inplace = False\n if node.op == 'call_function':\n # check if this is an inplace op such as torch.nn.functional.relu(x, inplace=True)\n if node.kwargs.get('inplace', False):\n is_inplace = True\n elif node.op == 'call_module':\n # to check if this is an inplace op such as torch.nn.Relu(inplace=True)\n module = get_node_module(node)\n if getattr(module, 'inplace', False):\n is_inplace = True\n\n # add the output var\n meta = getattr(node, '_meta_data', None)\n live_var = LiveVariable(name=node.name, node=node, is_inplace=is_inplace)\n if not is_inplace:\n unique_live_vars.append(live_var)\n checked_variables.append(live_var)\n all_live_variables.append(live_var)\n\n # check if any input is not checked yet\n for arg in node.args:\n if not isinstance(arg, Node):\n continue\n arg_name = arg.name\n if not checked_variables.exists(arg_name):\n live_var_from_arg = LiveVariable(name=arg_name, node=node, is_inplace=False)\n all_live_variables.append(live_var_from_arg)\n checked_variables.append(live_var_from_arg)\n unique_live_vars.append(live_var_from_arg)\n\n # TODO: add the logic to remove live variables\n # this should be completed if we are able to trace the backward compute graph\n\n # add this stage to liveness dict\n stage = LiveStage(name=node.name,\n node=node,\n all_live_vars=all_live_variables.copy(),\n unique_live_vars=unique_live_vars.copy())\n # if a LiveStage is covered by another LiveStage, we just keep the larger one.\n replace = False\n for index, prev_stage in enumerate(liveness_list):\n all_covered = True\n for ele in prev_stage.unique_live_vars:\n if ele not in stage.unique_live_vars:\n all_covered = False\n break\n if all_covered:\n replace = True\n break\n if replace:\n liveness_list[index] = stage\n else:\n liveness_list.append(stage)\n\n return liveness_list\n\n def get_alias_set(self):\n pass\n", "path": "colossalai/auto_parallel/tensor_shard/deprecated/graph_analysis.py"}]}
1,872
197
gh_patches_debug_36427
rasdani/github-patches
git_diff
strawberry-graphql__strawberry-1116
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Type resolver not called when extending from pydantic model I have 2 types, Category and Articles, and I want the Category type to return its related Articles(I have tried with other simpler types too), but when defining a resolver in the Category type its not being called: ```python def articles_helper(root): return [] # <-whatever I return here, it always shows null @strawberry.experimental.pydantic.type( model=Category, fields=[ 'title', 'description', 'content', 'template', 'slug', 'date_published', 'date_updated', ]) class CategoryType(Category): id: int articles: Optional[List[ArticleType]] = strawberry.field(resolver=articles_helper) # <- resolver not called ``` Query type: ```python @strawberry.type() class Query: category: Optional[CategoryType] = field(resolver=get_category) # this resolver is OK ``` Always returns `null`: ``` Query: { category(categoryId: 1) { id title articles { title } } } Response: { "data": { "category": { "id": 1, "title": "test", "articles": null <- always } } } ``` **I works when I don't extend from a pydantic model** --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `strawberry/experimental/pydantic/object_type.py` Content: ``` 1 import builtins 2 import dataclasses 3 from functools import partial 4 from typing import Any, Dict, List, Optional, Type 5 6 from pydantic import BaseModel 7 from pydantic.fields import ModelField 8 9 from strawberry.arguments import UNSET 10 from strawberry.experimental.pydantic.conversion import ( 11 convert_pydantic_model_to_strawberry_class, 12 ) 13 from strawberry.experimental.pydantic.fields import get_basic_type 14 from strawberry.field import StrawberryField 15 from strawberry.object_type import _process_type 16 from strawberry.types.types import FederationTypeParams, TypeDefinition 17 18 from .exceptions import MissingFieldsListError, UnregisteredTypeException 19 20 21 def replace_pydantic_types(type_: Any): 22 if hasattr(type_, "__args__"): 23 new_type = type_.copy_with( 24 tuple(replace_pydantic_types(t) for t in type_.__args__) 25 ) 26 27 if isinstance(new_type, TypeDefinition): 28 # TODO: Not sure if this is necessary. No coverage in tests 29 # TODO: Unnecessary with StrawberryObject 30 31 new_type = builtins.type( 32 new_type.name, 33 (), 34 {"_type_definition": new_type}, 35 ) 36 37 return new_type 38 39 if issubclass(type_, BaseModel): 40 if hasattr(type_, "_strawberry_type"): 41 return type_._strawberry_type 42 else: 43 raise UnregisteredTypeException(type_) 44 45 return type_ 46 47 48 def get_type_for_field(field: ModelField): 49 type_ = field.outer_type_ 50 type_ = get_basic_type(type_) 51 type_ = replace_pydantic_types(type_) 52 53 if not field.required: 54 type_ = Optional[type_] 55 56 return type_ 57 58 59 def type( 60 model: Type[BaseModel], 61 *, 62 fields: List[str], 63 name: Optional[str] = None, 64 is_input: bool = False, 65 is_interface: bool = False, 66 description: Optional[str] = None, 67 federation: Optional[FederationTypeParams] = None, 68 ): 69 def wrap(cls): 70 if not fields: 71 raise MissingFieldsListError(model) 72 73 model_fields = model.__fields__ 74 fields_set = set(fields) 75 76 all_fields = [ 77 ( 78 name, 79 get_type_for_field(field), 80 StrawberryField( 81 python_name=field.name, 82 graphql_name=field.alias if field.has_alias else None, 83 default=field.default if not field.required else UNSET, 84 default_factory=( 85 field.default_factory if field.default_factory else UNSET 86 ), 87 type_annotation=get_type_for_field(field), 88 ), 89 ) 90 for name, field in model_fields.items() 91 if name in fields_set 92 ] 93 94 cls_annotations = getattr(cls, "__annotations__", {}) 95 all_fields.extend( 96 ( 97 ( 98 name, 99 type_, 100 StrawberryField( 101 python_name=name, 102 graphql_name=None, 103 type_annotation=type_, 104 # we need a default value when adding additional fields 105 # on top of a type generated from Pydantic, this is because 106 # Pydantic Optional fields always have None as default value 107 # which breaks dataclasses generation; as we can't define 108 # a field without a default value after one with a default value 109 # adding fields at the beginning won't work as we will also 110 # support default values on them (so the problem will be just 111 # shifted around) 112 default=None, 113 ), 114 ) 115 for name, type_ in cls_annotations.items() 116 ) 117 ) 118 119 cls = dataclasses.make_dataclass( 120 cls.__name__, 121 all_fields, 122 ) 123 124 _process_type( 125 cls, 126 name=name, 127 is_input=is_input, 128 is_interface=is_interface, 129 description=description, 130 federation=federation, 131 ) 132 133 model._strawberry_type = cls # type: ignore 134 135 def from_pydantic(instance: Any, extra: Dict[str, Any] = None) -> Any: 136 return convert_pydantic_model_to_strawberry_class( 137 cls=cls, model_instance=instance, extra=extra 138 ) 139 140 def to_pydantic(self) -> Any: 141 instance_kwargs = dataclasses.asdict(self) 142 143 return model(**instance_kwargs) 144 145 cls.from_pydantic = staticmethod(from_pydantic) 146 cls.to_pydantic = to_pydantic 147 148 return cls 149 150 return wrap 151 152 153 input = partial(type, is_input=True) 154 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/strawberry/experimental/pydantic/object_type.py b/strawberry/experimental/pydantic/object_type.py --- a/strawberry/experimental/pydantic/object_type.py +++ b/strawberry/experimental/pydantic/object_type.py @@ -12,7 +12,8 @@ ) from strawberry.experimental.pydantic.fields import get_basic_type from strawberry.field import StrawberryField -from strawberry.object_type import _process_type +from strawberry.object_type import _process_type, _wrap_dataclass +from strawberry.types.type_resolver import _get_fields from strawberry.types.types import FederationTypeParams, TypeDefinition from .exceptions import MissingFieldsListError, UnregisteredTypeException @@ -91,34 +92,36 @@ if name in fields_set ] - cls_annotations = getattr(cls, "__annotations__", {}) + wrapped = _wrap_dataclass(cls) + extra_fields = _get_fields(wrapped) + all_fields.extend( ( ( - name, - type_, - StrawberryField( - python_name=name, - graphql_name=None, - type_annotation=type_, - # we need a default value when adding additional fields - # on top of a type generated from Pydantic, this is because - # Pydantic Optional fields always have None as default value - # which breaks dataclasses generation; as we can't define - # a field without a default value after one with a default value - # adding fields at the beginning won't work as we will also - # support default values on them (so the problem will be just - # shifted around) - default=None, - ), + field.name, + field.type, + field, ) - for name, type_ in cls_annotations.items() + for field in extra_fields ) ) + # Sort fields so that fields with missing defaults go first + # because dataclasses require that fields with no defaults are defined + # first + missing_default = [] + has_default = [] + for field in all_fields: + if field[2].default is dataclasses.MISSING: + missing_default.append(field) + else: + has_default.append(field) + + sorted_fields = missing_default + has_default + cls = dataclasses.make_dataclass( cls.__name__, - all_fields, + sorted_fields, ) _process_type(
{"golden_diff": "diff --git a/strawberry/experimental/pydantic/object_type.py b/strawberry/experimental/pydantic/object_type.py\n--- a/strawberry/experimental/pydantic/object_type.py\n+++ b/strawberry/experimental/pydantic/object_type.py\n@@ -12,7 +12,8 @@\n )\n from strawberry.experimental.pydantic.fields import get_basic_type\n from strawberry.field import StrawberryField\n-from strawberry.object_type import _process_type\n+from strawberry.object_type import _process_type, _wrap_dataclass\n+from strawberry.types.type_resolver import _get_fields\n from strawberry.types.types import FederationTypeParams, TypeDefinition\n \n from .exceptions import MissingFieldsListError, UnregisteredTypeException\n@@ -91,34 +92,36 @@\n if name in fields_set\n ]\n \n- cls_annotations = getattr(cls, \"__annotations__\", {})\n+ wrapped = _wrap_dataclass(cls)\n+ extra_fields = _get_fields(wrapped)\n+\n all_fields.extend(\n (\n (\n- name,\n- type_,\n- StrawberryField(\n- python_name=name,\n- graphql_name=None,\n- type_annotation=type_,\n- # we need a default value when adding additional fields\n- # on top of a type generated from Pydantic, this is because\n- # Pydantic Optional fields always have None as default value\n- # which breaks dataclasses generation; as we can't define\n- # a field without a default value after one with a default value\n- # adding fields at the beginning won't work as we will also\n- # support default values on them (so the problem will be just\n- # shifted around)\n- default=None,\n- ),\n+ field.name,\n+ field.type,\n+ field,\n )\n- for name, type_ in cls_annotations.items()\n+ for field in extra_fields\n )\n )\n \n+ # Sort fields so that fields with missing defaults go first\n+ # because dataclasses require that fields with no defaults are defined\n+ # first\n+ missing_default = []\n+ has_default = []\n+ for field in all_fields:\n+ if field[2].default is dataclasses.MISSING:\n+ missing_default.append(field)\n+ else:\n+ has_default.append(field)\n+\n+ sorted_fields = missing_default + has_default\n+\n cls = dataclasses.make_dataclass(\n cls.__name__,\n- all_fields,\n+ sorted_fields,\n )\n \n _process_type(\n", "issue": "Type resolver not called when extending from pydantic model\nI have 2 types, Category and Articles, and I want the Category type to return its related Articles(I have tried with other simpler types too), but when defining a resolver in the Category type its not being called:\r\n\r\n```python\r\ndef articles_helper(root): \r\n return [] # <-whatever I return here, it always shows null\r\n \r\n \r\[email protected](\r\n model=Category,\r\n fields=[\r\n 'title',\r\n 'description',\r\n 'content',\r\n 'template',\r\n 'slug',\r\n 'date_published',\r\n 'date_updated',\r\n ])\r\nclass CategoryType(Category):\r\n id: int\r\n articles: Optional[List[ArticleType]] = strawberry.field(resolver=articles_helper) # <- resolver not called\r\n``` \r\n\r\nQuery type:\r\n```python\r\[email protected]()\r\nclass Query:\r\n category: Optional[CategoryType] = field(resolver=get_category) # this resolver is OK\r\n```\r\n\r\nAlways returns `null`:\r\n```\r\nQuery:\r\n\r\n{\r\n category(categoryId: 1) {\r\n id\r\n title\r\n articles {\r\n title\r\n }\r\n }\r\n}\r\n\r\nResponse:\r\n\r\n{\r\n \"data\": {\r\n \"category\": {\r\n \"id\": 1,\r\n \"title\": \"test\",\r\n \"articles\": null <- always\r\n }\r\n }\r\n}\r\n```\r\n\r\n**I works when I don't extend from a pydantic model**\n", "before_files": [{"content": "import builtins\nimport dataclasses\nfrom functools import partial\nfrom typing import Any, Dict, List, Optional, Type\n\nfrom pydantic import BaseModel\nfrom pydantic.fields import ModelField\n\nfrom strawberry.arguments import UNSET\nfrom strawberry.experimental.pydantic.conversion import (\n convert_pydantic_model_to_strawberry_class,\n)\nfrom strawberry.experimental.pydantic.fields import get_basic_type\nfrom strawberry.field import StrawberryField\nfrom strawberry.object_type import _process_type\nfrom strawberry.types.types import FederationTypeParams, TypeDefinition\n\nfrom .exceptions import MissingFieldsListError, UnregisteredTypeException\n\n\ndef replace_pydantic_types(type_: Any):\n if hasattr(type_, \"__args__\"):\n new_type = type_.copy_with(\n tuple(replace_pydantic_types(t) for t in type_.__args__)\n )\n\n if isinstance(new_type, TypeDefinition):\n # TODO: Not sure if this is necessary. No coverage in tests\n # TODO: Unnecessary with StrawberryObject\n\n new_type = builtins.type(\n new_type.name,\n (),\n {\"_type_definition\": new_type},\n )\n\n return new_type\n\n if issubclass(type_, BaseModel):\n if hasattr(type_, \"_strawberry_type\"):\n return type_._strawberry_type\n else:\n raise UnregisteredTypeException(type_)\n\n return type_\n\n\ndef get_type_for_field(field: ModelField):\n type_ = field.outer_type_\n type_ = get_basic_type(type_)\n type_ = replace_pydantic_types(type_)\n\n if not field.required:\n type_ = Optional[type_]\n\n return type_\n\n\ndef type(\n model: Type[BaseModel],\n *,\n fields: List[str],\n name: Optional[str] = None,\n is_input: bool = False,\n is_interface: bool = False,\n description: Optional[str] = None,\n federation: Optional[FederationTypeParams] = None,\n):\n def wrap(cls):\n if not fields:\n raise MissingFieldsListError(model)\n\n model_fields = model.__fields__\n fields_set = set(fields)\n\n all_fields = [\n (\n name,\n get_type_for_field(field),\n StrawberryField(\n python_name=field.name,\n graphql_name=field.alias if field.has_alias else None,\n default=field.default if not field.required else UNSET,\n default_factory=(\n field.default_factory if field.default_factory else UNSET\n ),\n type_annotation=get_type_for_field(field),\n ),\n )\n for name, field in model_fields.items()\n if name in fields_set\n ]\n\n cls_annotations = getattr(cls, \"__annotations__\", {})\n all_fields.extend(\n (\n (\n name,\n type_,\n StrawberryField(\n python_name=name,\n graphql_name=None,\n type_annotation=type_,\n # we need a default value when adding additional fields\n # on top of a type generated from Pydantic, this is because\n # Pydantic Optional fields always have None as default value\n # which breaks dataclasses generation; as we can't define\n # a field without a default value after one with a default value\n # adding fields at the beginning won't work as we will also\n # support default values on them (so the problem will be just\n # shifted around)\n default=None,\n ),\n )\n for name, type_ in cls_annotations.items()\n )\n )\n\n cls = dataclasses.make_dataclass(\n cls.__name__,\n all_fields,\n )\n\n _process_type(\n cls,\n name=name,\n is_input=is_input,\n is_interface=is_interface,\n description=description,\n federation=federation,\n )\n\n model._strawberry_type = cls # type: ignore\n\n def from_pydantic(instance: Any, extra: Dict[str, Any] = None) -> Any:\n return convert_pydantic_model_to_strawberry_class(\n cls=cls, model_instance=instance, extra=extra\n )\n\n def to_pydantic(self) -> Any:\n instance_kwargs = dataclasses.asdict(self)\n\n return model(**instance_kwargs)\n\n cls.from_pydantic = staticmethod(from_pydantic)\n cls.to_pydantic = to_pydantic\n\n return cls\n\n return wrap\n\n\ninput = partial(type, is_input=True)\n", "path": "strawberry/experimental/pydantic/object_type.py"}], "after_files": [{"content": "import builtins\nimport dataclasses\nfrom functools import partial\nfrom typing import Any, Dict, List, Optional, Type\n\nfrom pydantic import BaseModel\nfrom pydantic.fields import ModelField\n\nfrom strawberry.arguments import UNSET\nfrom strawberry.experimental.pydantic.conversion import (\n convert_pydantic_model_to_strawberry_class,\n)\nfrom strawberry.experimental.pydantic.fields import get_basic_type\nfrom strawberry.field import StrawberryField\nfrom strawberry.object_type import _process_type, _wrap_dataclass\nfrom strawberry.types.type_resolver import _get_fields\nfrom strawberry.types.types import FederationTypeParams, TypeDefinition\n\nfrom .exceptions import MissingFieldsListError, UnregisteredTypeException\n\n\ndef replace_pydantic_types(type_: Any):\n if hasattr(type_, \"__args__\"):\n new_type = type_.copy_with(\n tuple(replace_pydantic_types(t) for t in type_.__args__)\n )\n\n if isinstance(new_type, TypeDefinition):\n # TODO: Not sure if this is necessary. No coverage in tests\n # TODO: Unnecessary with StrawberryObject\n\n new_type = builtins.type(\n new_type.name,\n (),\n {\"_type_definition\": new_type},\n )\n\n return new_type\n\n if issubclass(type_, BaseModel):\n if hasattr(type_, \"_strawberry_type\"):\n return type_._strawberry_type\n else:\n raise UnregisteredTypeException(type_)\n\n return type_\n\n\ndef get_type_for_field(field: ModelField):\n type_ = field.outer_type_\n type_ = get_basic_type(type_)\n type_ = replace_pydantic_types(type_)\n\n if not field.required:\n type_ = Optional[type_]\n\n return type_\n\n\ndef type(\n model: Type[BaseModel],\n *,\n fields: List[str],\n name: Optional[str] = None,\n is_input: bool = False,\n is_interface: bool = False,\n description: Optional[str] = None,\n federation: Optional[FederationTypeParams] = None,\n):\n def wrap(cls):\n if not fields:\n raise MissingFieldsListError(model)\n\n model_fields = model.__fields__\n fields_set = set(fields)\n\n all_fields = [\n (\n name,\n get_type_for_field(field),\n StrawberryField(\n python_name=field.name,\n graphql_name=field.alias if field.has_alias else None,\n default=field.default if not field.required else UNSET,\n default_factory=(\n field.default_factory if field.default_factory else UNSET\n ),\n type_annotation=get_type_for_field(field),\n ),\n )\n for name, field in model_fields.items()\n if name in fields_set\n ]\n\n wrapped = _wrap_dataclass(cls)\n extra_fields = _get_fields(wrapped)\n\n all_fields.extend(\n (\n (\n field.name,\n field.type,\n field,\n )\n for field in extra_fields\n )\n )\n\n # Sort fields so that fields with missing defaults go first\n # because dataclasses require that fields with no defaults are defined\n # first\n missing_default = []\n has_default = []\n for field in all_fields:\n if field[2].default is dataclasses.MISSING:\n missing_default.append(field)\n else:\n has_default.append(field)\n\n sorted_fields = missing_default + has_default\n\n cls = dataclasses.make_dataclass(\n cls.__name__,\n sorted_fields,\n )\n\n _process_type(\n cls,\n name=name,\n is_input=is_input,\n is_interface=is_interface,\n description=description,\n federation=federation,\n )\n\n model._strawberry_type = cls # type: ignore\n\n def from_pydantic(instance: Any, extra: Dict[str, Any] = None) -> Any:\n return convert_pydantic_model_to_strawberry_class(\n cls=cls, model_instance=instance, extra=extra\n )\n\n def to_pydantic(self) -> Any:\n instance_kwargs = dataclasses.asdict(self)\n\n return model(**instance_kwargs)\n\n cls.from_pydantic = staticmethod(from_pydantic)\n cls.to_pydantic = to_pydantic\n\n return cls\n\n return wrap\n\n\ninput = partial(type, is_input=True)\n", "path": "strawberry/experimental/pydantic/object_type.py"}]}
1,870
549
gh_patches_debug_17800
rasdani/github-patches
git_diff
alltheplaces__alltheplaces-3618
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Spider western_family is broken During the global build at 2021-08-04-14-42-45, spider **western_family** failed with **0 features** and **0 errors**. Here's [the log](https://data.alltheplaces.xyz/runs/2021-08-04-14-42-45/logs/western_family.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-08-04-14-42-45/output/western_family.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-08-04-14-42-45/output/western_family.geojson)) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `locations/spiders/western_family.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 import scrapy 3 4 from locations.items import GeojsonPointItem 5 6 7 class WesternFamilySpider(scrapy.Spider): 8 9 name = "western_family" 10 item_attributes = {"brand": "Western Family"} 11 allowed_domains = ["www.westernfamily.com"] 12 start_urls = ( 13 "http://www.westernfamily.com/wp-admin/admin-ajax.php?action=store_search&lat=45.5230622&lng=-122.67648159999999&max_results=2500&search_radius=50000&autoload=1", 14 ) 15 16 def parse(self, response): 17 results = response.json() 18 for data in results: 19 properties = { 20 "ref": data["id"], 21 "name": data["store"], 22 "lat": data["lat"], 23 "lon": data["lng"], 24 "addr_full": data["address"], 25 "city": data["city"], 26 "state": data["state"], 27 "postcode": data["zip"], 28 "country": data["country"], 29 "phone": data["phone"], 30 "website": data["url"], 31 } 32 33 yield GeojsonPointItem(**properties) 34 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/locations/spiders/western_family.py b/locations/spiders/western_family.py deleted file mode 100644 --- a/locations/spiders/western_family.py +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -import scrapy - -from locations.items import GeojsonPointItem - - -class WesternFamilySpider(scrapy.Spider): - - name = "western_family" - item_attributes = {"brand": "Western Family"} - allowed_domains = ["www.westernfamily.com"] - start_urls = ( - "http://www.westernfamily.com/wp-admin/admin-ajax.php?action=store_search&lat=45.5230622&lng=-122.67648159999999&max_results=2500&search_radius=50000&autoload=1", - ) - - def parse(self, response): - results = response.json() - for data in results: - properties = { - "ref": data["id"], - "name": data["store"], - "lat": data["lat"], - "lon": data["lng"], - "addr_full": data["address"], - "city": data["city"], - "state": data["state"], - "postcode": data["zip"], - "country": data["country"], - "phone": data["phone"], - "website": data["url"], - } - - yield GeojsonPointItem(**properties)
{"golden_diff": "diff --git a/locations/spiders/western_family.py b/locations/spiders/western_family.py\ndeleted file mode 100644\n--- a/locations/spiders/western_family.py\n+++ /dev/null\n@@ -1,33 +0,0 @@\n-# -*- coding: utf-8 -*-\n-import scrapy\n-\n-from locations.items import GeojsonPointItem\n-\n-\n-class WesternFamilySpider(scrapy.Spider):\n-\n- name = \"western_family\"\n- item_attributes = {\"brand\": \"Western Family\"}\n- allowed_domains = [\"www.westernfamily.com\"]\n- start_urls = (\n- \"http://www.westernfamily.com/wp-admin/admin-ajax.php?action=store_search&lat=45.5230622&lng=-122.67648159999999&max_results=2500&search_radius=50000&autoload=1\",\n- )\n-\n- def parse(self, response):\n- results = response.json()\n- for data in results:\n- properties = {\n- \"ref\": data[\"id\"],\n- \"name\": data[\"store\"],\n- \"lat\": data[\"lat\"],\n- \"lon\": data[\"lng\"],\n- \"addr_full\": data[\"address\"],\n- \"city\": data[\"city\"],\n- \"state\": data[\"state\"],\n- \"postcode\": data[\"zip\"],\n- \"country\": data[\"country\"],\n- \"phone\": data[\"phone\"],\n- \"website\": data[\"url\"],\n- }\n-\n- yield GeojsonPointItem(**properties)\n", "issue": "Spider western_family is broken\nDuring the global build at 2021-08-04-14-42-45, spider **western_family** failed with **0 features** and **0 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-08-04-14-42-45/logs/western_family.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-08-04-14-42-45/output/western_family.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-08-04-14-42-45/output/western_family.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\n\nfrom locations.items import GeojsonPointItem\n\n\nclass WesternFamilySpider(scrapy.Spider):\n\n name = \"western_family\"\n item_attributes = {\"brand\": \"Western Family\"}\n allowed_domains = [\"www.westernfamily.com\"]\n start_urls = (\n \"http://www.westernfamily.com/wp-admin/admin-ajax.php?action=store_search&lat=45.5230622&lng=-122.67648159999999&max_results=2500&search_radius=50000&autoload=1\",\n )\n\n def parse(self, response):\n results = response.json()\n for data in results:\n properties = {\n \"ref\": data[\"id\"],\n \"name\": data[\"store\"],\n \"lat\": data[\"lat\"],\n \"lon\": data[\"lng\"],\n \"addr_full\": data[\"address\"],\n \"city\": data[\"city\"],\n \"state\": data[\"state\"],\n \"postcode\": data[\"zip\"],\n \"country\": data[\"country\"],\n \"phone\": data[\"phone\"],\n \"website\": data[\"url\"],\n }\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/western_family.py"}], "after_files": [{"content": null, "path": "locations/spiders/western_family.py"}]}
777
357
gh_patches_debug_49884
rasdani/github-patches
git_diff
scikit-hep__awkward-895
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Typo in `identifier.py` https://github.com/scikit-hep/awkward-1.0/blob/a0ec3bcacacc81a47fe61a1d99b0bc512a8bb3cf/src/awkward/_v2/identifier.py#L30 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/awkward/_v2/identifier.py` Content: ``` 1 # BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE 2 3 from __future__ import absolute_import 4 5 import awkward as ak 6 7 np = ak.nplike.NumpyMetadata.instance() 8 9 10 class Identifier(object): 11 _numrefs = 0 12 13 @staticmethod 14 def newref(): 15 out = Identifier._numrefs 16 Identifier._numrefs += 1 17 return out 18 19 def __init__(self, ref, fieldloc, data): 20 self._ref = ref 21 self._fieldloc = fieldloc 22 if not isinstance(fieldloc, dict) or not all( 23 isinstance(k, int) and isinstance(v, str) for k, v in fieldloc.items() 24 ): 25 raise TypeError("Identifier fieldloc must be a dict of int -> str") 26 self._nplike = ak.nplike.of(data) 27 28 self._data = self._nplike.asarray(data, order="C") 29 if len(self._data.shape) != 2: 30 raise TypeError("Identifer data must be 2-dimensional") 31 32 # TypeError for unsupported types? 33 self._T = self._data.dtype 34 if self._T not in (np.dtype(np.int32), np.dtype(np.int64)): 35 raise TypeError("Identifier data must be int32, int64") 36 37 @classmethod 38 # cpp takes width, length? 39 def zeros(cls, ref, fieldloc, length, width, nplike, dtype): 40 return Identifier(ref, fieldloc, nplike.zeros((length, width), dtype=dtype)) 41 42 @classmethod 43 def empty(cls, ref, fieldloc, length, width, nplike, dtype): 44 return Identifier(ref, fieldloc, nplike.empty((length, width), dtype=dtype)) 45 46 @property 47 def ref(self): 48 return self._ref 49 50 @property 51 def filedloc(self): 52 return self._fieldloc 53 54 @property 55 def data(self): 56 return self._data 57 58 @property 59 def nplike(self): 60 return self._nplike 61 62 def __len__(self): 63 return len(self._data) 64 65 def width(self): 66 return self._data.shape[1] 67 68 def to64(self): 69 return Identifier(self._ref, self._fieldloc, self._data.astype(np.int64)) 70 71 def __getitem__(self, where): 72 return self._data[where] 73 74 def __copy__(self): 75 return Identifier(self._ref, self._fieldloc, self._data.copy()) 76 77 def __repr__(self): 78 return self._repr("", "", "") 79 80 def _repr(self, indent, pre, post): 81 out = [indent, pre, "<Identifier ref=" + repr(str(self._ref)) + " fieldloc="] 82 out.append(repr(str(self._fieldloc))) 83 out.append(" length=") 84 out.append(repr(str(len(self._data)))) 85 out.append(" width=") 86 out.append(repr(str(self._data.shape[1]))) 87 out.append(" at=") 88 out.append(repr(hex(self._data.ctypes.data))) 89 out.append(">\n") 90 out.append(indent + " ") 91 out.append( 92 self._nplike.array_str(self._data, max_line_width=30).replace( 93 "\n", "\n" + indent + " " 94 ) 95 ) 96 out.append("\n") 97 out.append(indent) 98 out.append("</Identifier>") 99 out.append(post) 100 return "".join(out) 101 102 def convert_to(self, nplike): 103 return Identifier(self._ref, self._fieldloc, nplike.asarray(self._data)) 104 105 def referentially_equal(self, other): 106 return ( 107 self._ref == other._ref 108 and self._fieldloc == other._fieldloc 109 and self._data.ctypes.data == other._data.ctypes.data 110 and self._data.shape == other._data.shape 111 and self._data.strides == other._data.strides 112 and self._data.dtype == other._data.dtype 113 ) 114 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/awkward/_v2/identifier.py b/src/awkward/_v2/identifier.py --- a/src/awkward/_v2/identifier.py +++ b/src/awkward/_v2/identifier.py @@ -27,7 +27,7 @@ self._data = self._nplike.asarray(data, order="C") if len(self._data.shape) != 2: - raise TypeError("Identifer data must be 2-dimensional") + raise TypeError("Identifier data must be 2-dimensional") # TypeError for unsupported types? self._T = self._data.dtype
{"golden_diff": "diff --git a/src/awkward/_v2/identifier.py b/src/awkward/_v2/identifier.py\n--- a/src/awkward/_v2/identifier.py\n+++ b/src/awkward/_v2/identifier.py\n@@ -27,7 +27,7 @@\n \n self._data = self._nplike.asarray(data, order=\"C\")\n if len(self._data.shape) != 2:\n- raise TypeError(\"Identifer data must be 2-dimensional\")\n+ raise TypeError(\"Identifier data must be 2-dimensional\")\n \n # TypeError for unsupported types?\n self._T = self._data.dtype\n", "issue": "Typo in `identifier.py`\nhttps://github.com/scikit-hep/awkward-1.0/blob/a0ec3bcacacc81a47fe61a1d99b0bc512a8bb3cf/src/awkward/_v2/identifier.py#L30\n", "before_files": [{"content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\nfrom __future__ import absolute_import\n\nimport awkward as ak\n\nnp = ak.nplike.NumpyMetadata.instance()\n\n\nclass Identifier(object):\n _numrefs = 0\n\n @staticmethod\n def newref():\n out = Identifier._numrefs\n Identifier._numrefs += 1\n return out\n\n def __init__(self, ref, fieldloc, data):\n self._ref = ref\n self._fieldloc = fieldloc\n if not isinstance(fieldloc, dict) or not all(\n isinstance(k, int) and isinstance(v, str) for k, v in fieldloc.items()\n ):\n raise TypeError(\"Identifier fieldloc must be a dict of int -> str\")\n self._nplike = ak.nplike.of(data)\n\n self._data = self._nplike.asarray(data, order=\"C\")\n if len(self._data.shape) != 2:\n raise TypeError(\"Identifer data must be 2-dimensional\")\n\n # TypeError for unsupported types?\n self._T = self._data.dtype\n if self._T not in (np.dtype(np.int32), np.dtype(np.int64)):\n raise TypeError(\"Identifier data must be int32, int64\")\n\n @classmethod\n # cpp takes width, length?\n def zeros(cls, ref, fieldloc, length, width, nplike, dtype):\n return Identifier(ref, fieldloc, nplike.zeros((length, width), dtype=dtype))\n\n @classmethod\n def empty(cls, ref, fieldloc, length, width, nplike, dtype):\n return Identifier(ref, fieldloc, nplike.empty((length, width), dtype=dtype))\n\n @property\n def ref(self):\n return self._ref\n\n @property\n def filedloc(self):\n return self._fieldloc\n\n @property\n def data(self):\n return self._data\n\n @property\n def nplike(self):\n return self._nplike\n\n def __len__(self):\n return len(self._data)\n\n def width(self):\n return self._data.shape[1]\n\n def to64(self):\n return Identifier(self._ref, self._fieldloc, self._data.astype(np.int64))\n\n def __getitem__(self, where):\n return self._data[where]\n\n def __copy__(self):\n return Identifier(self._ref, self._fieldloc, self._data.copy())\n\n def __repr__(self):\n return self._repr(\"\", \"\", \"\")\n\n def _repr(self, indent, pre, post):\n out = [indent, pre, \"<Identifier ref=\" + repr(str(self._ref)) + \" fieldloc=\"]\n out.append(repr(str(self._fieldloc)))\n out.append(\" length=\")\n out.append(repr(str(len(self._data))))\n out.append(\" width=\")\n out.append(repr(str(self._data.shape[1])))\n out.append(\" at=\")\n out.append(repr(hex(self._data.ctypes.data)))\n out.append(\">\\n\")\n out.append(indent + \" \")\n out.append(\n self._nplike.array_str(self._data, max_line_width=30).replace(\n \"\\n\", \"\\n\" + indent + \" \"\n )\n )\n out.append(\"\\n\")\n out.append(indent)\n out.append(\"</Identifier>\")\n out.append(post)\n return \"\".join(out)\n\n def convert_to(self, nplike):\n return Identifier(self._ref, self._fieldloc, nplike.asarray(self._data))\n\n def referentially_equal(self, other):\n return (\n self._ref == other._ref\n and self._fieldloc == other._fieldloc\n and self._data.ctypes.data == other._data.ctypes.data\n and self._data.shape == other._data.shape\n and self._data.strides == other._data.strides\n and self._data.dtype == other._data.dtype\n )\n", "path": "src/awkward/_v2/identifier.py"}], "after_files": [{"content": "# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE\n\nfrom __future__ import absolute_import\n\nimport awkward as ak\n\nnp = ak.nplike.NumpyMetadata.instance()\n\n\nclass Identifier(object):\n _numrefs = 0\n\n @staticmethod\n def newref():\n out = Identifier._numrefs\n Identifier._numrefs += 1\n return out\n\n def __init__(self, ref, fieldloc, data):\n self._ref = ref\n self._fieldloc = fieldloc\n if not isinstance(fieldloc, dict) or not all(\n isinstance(k, int) and isinstance(v, str) for k, v in fieldloc.items()\n ):\n raise TypeError(\"Identifier fieldloc must be a dict of int -> str\")\n self._nplike = ak.nplike.of(data)\n\n self._data = self._nplike.asarray(data, order=\"C\")\n if len(self._data.shape) != 2:\n raise TypeError(\"Identifier data must be 2-dimensional\")\n\n # TypeError for unsupported types?\n self._T = self._data.dtype\n if self._T not in (np.dtype(np.int32), np.dtype(np.int64)):\n raise TypeError(\"Identifier data must be int32, int64\")\n\n @classmethod\n # cpp takes width, length?\n def zeros(cls, ref, fieldloc, length, width, nplike, dtype):\n return Identifier(ref, fieldloc, nplike.zeros((length, width), dtype=dtype))\n\n @classmethod\n def empty(cls, ref, fieldloc, length, width, nplike, dtype):\n return Identifier(ref, fieldloc, nplike.empty((length, width), dtype=dtype))\n\n @property\n def ref(self):\n return self._ref\n\n @property\n def filedloc(self):\n return self._fieldloc\n\n @property\n def data(self):\n return self._data\n\n @property\n def nplike(self):\n return self._nplike\n\n def __len__(self):\n return len(self._data)\n\n def width(self):\n return self._data.shape[1]\n\n def to64(self):\n return Identifier(self._ref, self._fieldloc, self._data.astype(np.int64))\n\n def __getitem__(self, where):\n return self._data[where]\n\n def __copy__(self):\n return Identifier(self._ref, self._fieldloc, self._data.copy())\n\n def __repr__(self):\n return self._repr(\"\", \"\", \"\")\n\n def _repr(self, indent, pre, post):\n out = [indent, pre, \"<Identifier ref=\" + repr(str(self._ref)) + \" fieldloc=\"]\n out.append(repr(str(self._fieldloc)))\n out.append(\" length=\")\n out.append(repr(str(len(self._data))))\n out.append(\" width=\")\n out.append(repr(str(self._data.shape[1])))\n out.append(\" at=\")\n out.append(repr(hex(self._data.ctypes.data)))\n out.append(\">\\n\")\n out.append(indent + \" \")\n out.append(\n self._nplike.array_str(self._data, max_line_width=30).replace(\n \"\\n\", \"\\n\" + indent + \" \"\n )\n )\n out.append(\"\\n\")\n out.append(indent)\n out.append(\"</Identifier>\")\n out.append(post)\n return \"\".join(out)\n\n def convert_to(self, nplike):\n return Identifier(self._ref, self._fieldloc, nplike.asarray(self._data))\n\n def referentially_equal(self, other):\n return (\n self._ref == other._ref\n and self._fieldloc == other._fieldloc\n and self._data.ctypes.data == other._data.ctypes.data\n and self._data.shape == other._data.shape\n and self._data.strides == other._data.strides\n and self._data.dtype == other._data.dtype\n )\n", "path": "src/awkward/_v2/identifier.py"}]}
1,463
139
gh_patches_debug_37345
rasdani/github-patches
git_diff
kymatio__kymatio-1001
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- JTFS needs a JAX frontend v0.4.0 Already addressed by #1001 (@cyrusvahidi) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `kymatio/scattering1d/frontend/jax_frontend.py` Content: ``` 1 from ...frontend.jax_frontend import ScatteringJax 2 from .numpy_frontend import ScatteringNumPy1D 3 from .base_frontend import ScatteringBase1D 4 5 class ScatteringJax1D(ScatteringJax, ScatteringNumPy1D): 6 # This class inherits the attribute "frontend" from ScatteringJax 7 # It overrides the __init__ function present in ScatteringNumPy1D 8 # in order to add the default argument for backend and call the 9 # ScatteringJax.__init__ 10 # Through ScatteringBase1D._instantiate_backend the jax backend will 11 # be loaded 12 13 14 def __init__(self, J, shape, Q=1, T=None, stride=None, max_order=2, 15 oversampling=0, out_type='array', backend='jax'): 16 17 ScatteringJax.__init__(self) 18 ScatteringBase1D.__init__(self, J, shape, Q, T, stride, max_order, 19 oversampling, out_type, backend) 20 ScatteringBase1D._instantiate_backend(self, 'kymatio.scattering1d.backend.') 21 ScatteringBase1D.build(self) 22 ScatteringBase1D.create_filters(self) 23 24 ScatteringJax1D._document() 25 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/kymatio/scattering1d/frontend/jax_frontend.py b/kymatio/scattering1d/frontend/jax_frontend.py --- a/kymatio/scattering1d/frontend/jax_frontend.py +++ b/kymatio/scattering1d/frontend/jax_frontend.py @@ -1,6 +1,7 @@ from ...frontend.jax_frontend import ScatteringJax -from .numpy_frontend import ScatteringNumPy1D -from .base_frontend import ScatteringBase1D +from .numpy_frontend import ScatteringNumPy1D, TimeFrequencyScatteringNumPy +from .base_frontend import ScatteringBase1D, TimeFrequencyScatteringBase + class ScatteringJax1D(ScatteringJax, ScatteringNumPy1D): # This class inherits the attribute "frontend" from ScatteringJax @@ -10,15 +11,77 @@ # Through ScatteringBase1D._instantiate_backend the jax backend will # be loaded - - def __init__(self, J, shape, Q=1, T=None, stride=None, max_order=2, - oversampling=0, out_type='array', backend='jax'): + def __init__( + self, + J, + shape, + Q=1, + T=None, + stride=None, + max_order=2, + oversampling=0, + out_type="array", + backend="jax", + ): ScatteringJax.__init__(self) - ScatteringBase1D.__init__(self, J, shape, Q, T, stride, max_order, - oversampling, out_type, backend) - ScatteringBase1D._instantiate_backend(self, 'kymatio.scattering1d.backend.') + ScatteringBase1D.__init__( + self, J, shape, Q, T, stride, max_order, oversampling, out_type, backend + ) + ScatteringBase1D._instantiate_backend(self, "kymatio.scattering1d.backend.") ScatteringBase1D.build(self) ScatteringBase1D.create_filters(self) + ScatteringJax1D._document() + + +class TimeFrequencyScatteringJax(ScatteringJax, TimeFrequencyScatteringNumPy): + # This class inherits the attribute "frontend" from ScatteringJax + # It overrides the __init__ function present in TimeFrequencyScatteringNumPy + # in order to add the default argument for backend and call the + # ScatteringJax.__init__ + # Through TimeFrequencyScatteringBase._instantiate_backend the jax backend will + # be loaded + + def __init__( + self, + *, + J, + J_fr, + shape, + Q, + T=None, + stride=None, + Q_fr=1, + F=None, + stride_fr=None, + out_type="array", + format="joint", + backend="jax" + ): + + ScatteringJax.__init__(self) + TimeFrequencyScatteringBase.__init__( + self, + J=J, + J_fr=J_fr, + shape=shape, + Q=Q, + T=T, + stride=stride, + Q_fr=Q_fr, + F=F, + stride_fr=stride_fr, + out_type=out_type, + format=format, + backend=backend, + ) + ScatteringBase1D._instantiate_backend(self, "kymatio.scattering1d.backend.") + TimeFrequencyScatteringBase.build(self) + TimeFrequencyScatteringBase.create_filters(self) + + +TimeFrequencyScatteringJax._document() + +__all__ = ["ScatteringJax1D", "TimeFrequencyScatteringJax"]
{"golden_diff": "diff --git a/kymatio/scattering1d/frontend/jax_frontend.py b/kymatio/scattering1d/frontend/jax_frontend.py\n--- a/kymatio/scattering1d/frontend/jax_frontend.py\n+++ b/kymatio/scattering1d/frontend/jax_frontend.py\n@@ -1,6 +1,7 @@\n from ...frontend.jax_frontend import ScatteringJax\n-from .numpy_frontend import ScatteringNumPy1D\n-from .base_frontend import ScatteringBase1D\n+from .numpy_frontend import ScatteringNumPy1D, TimeFrequencyScatteringNumPy\n+from .base_frontend import ScatteringBase1D, TimeFrequencyScatteringBase\n+\n \n class ScatteringJax1D(ScatteringJax, ScatteringNumPy1D):\n # This class inherits the attribute \"frontend\" from ScatteringJax\n@@ -10,15 +11,77 @@\n # Through ScatteringBase1D._instantiate_backend the jax backend will\n # be loaded\n \n-\n- def __init__(self, J, shape, Q=1, T=None, stride=None, max_order=2,\n- oversampling=0, out_type='array', backend='jax'):\n+ def __init__(\n+ self,\n+ J,\n+ shape,\n+ Q=1,\n+ T=None,\n+ stride=None,\n+ max_order=2,\n+ oversampling=0,\n+ out_type=\"array\",\n+ backend=\"jax\",\n+ ):\n \n ScatteringJax.__init__(self)\n- ScatteringBase1D.__init__(self, J, shape, Q, T, stride, max_order,\n- oversampling, out_type, backend)\n- ScatteringBase1D._instantiate_backend(self, 'kymatio.scattering1d.backend.')\n+ ScatteringBase1D.__init__(\n+ self, J, shape, Q, T, stride, max_order, oversampling, out_type, backend\n+ )\n+ ScatteringBase1D._instantiate_backend(self, \"kymatio.scattering1d.backend.\")\n ScatteringBase1D.build(self)\n ScatteringBase1D.create_filters(self)\n \n+\n ScatteringJax1D._document()\n+\n+\n+class TimeFrequencyScatteringJax(ScatteringJax, TimeFrequencyScatteringNumPy):\n+ # This class inherits the attribute \"frontend\" from ScatteringJax\n+ # It overrides the __init__ function present in TimeFrequencyScatteringNumPy\n+ # in order to add the default argument for backend and call the\n+ # ScatteringJax.__init__\n+ # Through TimeFrequencyScatteringBase._instantiate_backend the jax backend will\n+ # be loaded\n+\n+ def __init__(\n+ self,\n+ *,\n+ J,\n+ J_fr,\n+ shape,\n+ Q,\n+ T=None,\n+ stride=None,\n+ Q_fr=1,\n+ F=None,\n+ stride_fr=None,\n+ out_type=\"array\",\n+ format=\"joint\",\n+ backend=\"jax\"\n+ ):\n+\n+ ScatteringJax.__init__(self)\n+ TimeFrequencyScatteringBase.__init__(\n+ self,\n+ J=J,\n+ J_fr=J_fr,\n+ shape=shape,\n+ Q=Q,\n+ T=T,\n+ stride=stride,\n+ Q_fr=Q_fr,\n+ F=F,\n+ stride_fr=stride_fr,\n+ out_type=out_type,\n+ format=format,\n+ backend=backend,\n+ )\n+ ScatteringBase1D._instantiate_backend(self, \"kymatio.scattering1d.backend.\")\n+ TimeFrequencyScatteringBase.build(self)\n+ TimeFrequencyScatteringBase.create_filters(self)\n+\n+\n+TimeFrequencyScatteringJax._document()\n+\n+__all__ = [\"ScatteringJax1D\", \"TimeFrequencyScatteringJax\"]\n", "issue": "JTFS needs a JAX frontend\nv0.4.0\r\n\r\nAlready addressed by #1001 (@cyrusvahidi)\n", "before_files": [{"content": "from ...frontend.jax_frontend import ScatteringJax\nfrom .numpy_frontend import ScatteringNumPy1D\nfrom .base_frontend import ScatteringBase1D\n\nclass ScatteringJax1D(ScatteringJax, ScatteringNumPy1D):\n # This class inherits the attribute \"frontend\" from ScatteringJax\n # It overrides the __init__ function present in ScatteringNumPy1D\n # in order to add the default argument for backend and call the\n # ScatteringJax.__init__\n # Through ScatteringBase1D._instantiate_backend the jax backend will\n # be loaded\n\n\n def __init__(self, J, shape, Q=1, T=None, stride=None, max_order=2,\n oversampling=0, out_type='array', backend='jax'):\n\n ScatteringJax.__init__(self)\n ScatteringBase1D.__init__(self, J, shape, Q, T, stride, max_order,\n oversampling, out_type, backend)\n ScatteringBase1D._instantiate_backend(self, 'kymatio.scattering1d.backend.')\n ScatteringBase1D.build(self)\n ScatteringBase1D.create_filters(self)\n\nScatteringJax1D._document()\n", "path": "kymatio/scattering1d/frontend/jax_frontend.py"}], "after_files": [{"content": "from ...frontend.jax_frontend import ScatteringJax\nfrom .numpy_frontend import ScatteringNumPy1D, TimeFrequencyScatteringNumPy\nfrom .base_frontend import ScatteringBase1D, TimeFrequencyScatteringBase\n\n\nclass ScatteringJax1D(ScatteringJax, ScatteringNumPy1D):\n # This class inherits the attribute \"frontend\" from ScatteringJax\n # It overrides the __init__ function present in ScatteringNumPy1D\n # in order to add the default argument for backend and call the\n # ScatteringJax.__init__\n # Through ScatteringBase1D._instantiate_backend the jax backend will\n # be loaded\n\n def __init__(\n self,\n J,\n shape,\n Q=1,\n T=None,\n stride=None,\n max_order=2,\n oversampling=0,\n out_type=\"array\",\n backend=\"jax\",\n ):\n\n ScatteringJax.__init__(self)\n ScatteringBase1D.__init__(\n self, J, shape, Q, T, stride, max_order, oversampling, out_type, backend\n )\n ScatteringBase1D._instantiate_backend(self, \"kymatio.scattering1d.backend.\")\n ScatteringBase1D.build(self)\n ScatteringBase1D.create_filters(self)\n\n\nScatteringJax1D._document()\n\n\nclass TimeFrequencyScatteringJax(ScatteringJax, TimeFrequencyScatteringNumPy):\n # This class inherits the attribute \"frontend\" from ScatteringJax\n # It overrides the __init__ function present in TimeFrequencyScatteringNumPy\n # in order to add the default argument for backend and call the\n # ScatteringJax.__init__\n # Through TimeFrequencyScatteringBase._instantiate_backend the jax backend will\n # be loaded\n\n def __init__(\n self,\n *,\n J,\n J_fr,\n shape,\n Q,\n T=None,\n stride=None,\n Q_fr=1,\n F=None,\n stride_fr=None,\n out_type=\"array\",\n format=\"joint\",\n backend=\"jax\"\n ):\n\n ScatteringJax.__init__(self)\n TimeFrequencyScatteringBase.__init__(\n self,\n J=J,\n J_fr=J_fr,\n shape=shape,\n Q=Q,\n T=T,\n stride=stride,\n Q_fr=Q_fr,\n F=F,\n stride_fr=stride_fr,\n out_type=out_type,\n format=format,\n backend=backend,\n )\n ScatteringBase1D._instantiate_backend(self, \"kymatio.scattering1d.backend.\")\n TimeFrequencyScatteringBase.build(self)\n TimeFrequencyScatteringBase.create_filters(self)\n\n\nTimeFrequencyScatteringJax._document()\n\n__all__ = [\"ScatteringJax1D\", \"TimeFrequencyScatteringJax\"]\n", "path": "kymatio/scattering1d/frontend/jax_frontend.py"}]}
613
860
gh_patches_debug_30558
rasdani/github-patches
git_diff
ManimCommunity__manim-1847
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Allow `Cross` to be initialized without being based on an existing Mobject. ## Enhancement proposal <!-- Add a clear and concise description of your enhancement proposal. In particular, if your enhancement introduces changes to the API, illustrate them with (fictional) code examples. --> Currently Cross needs a mobject passed to it, I think it should be reworked to not necessarily require this. Also maybe easily specifying the length of the cross in the `__init__` too? Current `Cross` code: ```py class Cross(VGroup): def __init__(self, mobject, stroke_color=RED, stroke_width=6, **kwargs): VGroup.__init__( self, Line(UP + LEFT, DOWN + RIGHT), Line(UP + RIGHT, DOWN + LEFT), ) self.replace(mobject, stretch=True) self.set_stroke(color=stroke_color, width=stroke_width) ``` ## Additional comments <!-- Add further context that you think might be relevant. --> replacing `VGroup.__init__` with `super().__init__` too --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `manim/mobject/shape_matchers.py` Content: ``` 1 """Mobjects used to mark and annotate other mobjects.""" 2 3 __all__ = ["SurroundingRectangle", "BackgroundRectangle", "Cross", "Underline"] 4 5 6 from ..constants import * 7 from ..mobject.geometry import Line, Rectangle 8 from ..mobject.types.vectorized_mobject import VGroup, VMobject 9 from ..utils.color import BLACK, RED, YELLOW, Color 10 11 12 class SurroundingRectangle(Rectangle): 13 r"""A rectangle surrounding a :class:`~.Mobject` 14 15 Examples 16 -------- 17 18 .. manim:: SurroundingRectExample 19 :save_last_frame: 20 21 class SurroundingRectExample(Scene): 22 def construct(self): 23 title = Title("A Quote from Newton") 24 quote = Text( 25 "If I have seen further than others, \n" 26 "it is by standing upon the shoulders of giants.", 27 color=BLUE 28 ).scale(0.75) 29 box = SurroundingRectangle(quote, color=YELLOW, buff=MED_LARGE_BUFF) 30 self.add(title, box, quote) 31 32 """ 33 34 def __init__(self, mobject, color=YELLOW, buff=SMALL_BUFF, **kwargs): 35 self.color = color 36 self.buff = buff 37 Rectangle.__init__( 38 self, 39 color=color, 40 width=mobject.width + 2 * self.buff, 41 height=mobject.height + 2 * self.buff, 42 **kwargs 43 ) 44 self.move_to(mobject) 45 46 47 class BackgroundRectangle(SurroundingRectangle): 48 """A background rectangle 49 50 Examples 51 -------- 52 53 .. manim:: ExampleBackgroundRectangle 54 :save_last_frame: 55 56 class ExampleBackgroundRectangle(Scene): 57 def construct(self): 58 circle = Circle().shift(LEFT) 59 circle.set_stroke(color=GREEN, width=20) 60 triangle = Triangle().shift(2 * RIGHT) 61 triangle.set_fill(PINK, opacity=0.5) 62 backgroundRectangle1 = BackgroundRectangle(circle, color=WHITE, fill_opacity=0.15) 63 backgroundRectangle2 = BackgroundRectangle(triangle, color=WHITE, fill_opacity=0.15) 64 self.add(backgroundRectangle1) 65 self.add(backgroundRectangle2) 66 self.add(circle) 67 self.add(triangle) 68 self.play(Rotate(backgroundRectangle1, PI / 4)) 69 self.play(Rotate(backgroundRectangle2, PI / 2)) 70 """ 71 72 def __init__( 73 self, 74 mobject, 75 color=BLACK, 76 stroke_width=0, 77 stroke_opacity=0, 78 fill_opacity=0.75, 79 buff=0, 80 **kwargs 81 ): 82 SurroundingRectangle.__init__( 83 self, 84 mobject, 85 color=color, 86 stroke_width=stroke_width, 87 stroke_opacity=stroke_opacity, 88 fill_opacity=fill_opacity, 89 buff=buff, 90 **kwargs 91 ) 92 self.original_fill_opacity = self.fill_opacity 93 94 def pointwise_become_partial(self, mobject, a, b): 95 self.set_fill(opacity=b * self.original_fill_opacity) 96 return self 97 98 def set_style( 99 self, 100 stroke_color=None, 101 stroke_width=None, 102 fill_color=None, 103 fill_opacity=None, 104 family=True, 105 ): 106 # Unchangeable style, except for fill_opacity 107 super().set_style( 108 stroke_color=BLACK, 109 stroke_width=0, 110 fill_color=BLACK, 111 fill_opacity=fill_opacity, 112 ) 113 return self 114 115 def get_fill_color(self): 116 return Color(self.color) 117 118 119 class Cross(VGroup): 120 def __init__(self, mobject, stroke_color=RED, stroke_width=6, **kwargs): 121 VGroup.__init__( 122 self, 123 Line(UP + LEFT, DOWN + RIGHT), 124 Line(UP + RIGHT, DOWN + LEFT), 125 ) 126 self.replace(mobject, stretch=True) 127 self.set_stroke(color=stroke_color, width=stroke_width) 128 129 130 class Underline(Line): 131 """Creates an underline. 132 133 Parameters 134 ---------- 135 Line 136 The underline. 137 138 Examples 139 -------- 140 .. manim:: UnderLine 141 :save_last_frame: 142 143 class UnderLine(Scene): 144 def construct(self): 145 man = Tex("Manim") # Full Word 146 ul = Underline(man) # Underlining the word 147 self.add(man, ul) 148 """ 149 150 def __init__(self, mobject, buff=SMALL_BUFF, **kwargs): 151 super().__init__(LEFT, RIGHT, buff=buff, **kwargs) 152 self.match_width(mobject) 153 self.next_to(mobject, DOWN, buff=self.buff) 154 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/manim/mobject/shape_matchers.py b/manim/mobject/shape_matchers.py --- a/manim/mobject/shape_matchers.py +++ b/manim/mobject/shape_matchers.py @@ -2,10 +2,12 @@ __all__ = ["SurroundingRectangle", "BackgroundRectangle", "Cross", "Underline"] +from typing import Optional from ..constants import * from ..mobject.geometry import Line, Rectangle -from ..mobject.types.vectorized_mobject import VGroup, VMobject +from ..mobject.mobject import Mobject +from ..mobject.types.vectorized_mobject import VGroup from ..utils.color import BLACK, RED, YELLOW, Color @@ -117,13 +119,44 @@ class Cross(VGroup): - def __init__(self, mobject, stroke_color=RED, stroke_width=6, **kwargs): - VGroup.__init__( - self, - Line(UP + LEFT, DOWN + RIGHT), - Line(UP + RIGHT, DOWN + LEFT), + """Creates a cross. + + Parameters + ---------- + mobject + The mobject linked to this instance. It fits the mobject when specified. Defaults to None. + stroke_color + Specifies the color of the cross lines. Defaults to RED. + stroke_width + Specifies the width of the cross lines. Defaults to 6. + scale_factor + Scales the cross to the provided units. Defaults to 1. + + Examples + -------- + .. manim:: ExampleCross + :save_last_frame: + + class ExampleCross(Scene): + def construct(self): + cross = Cross() + self.add(cross) + """ + + def __init__( + self, + mobject: Optional["Mobject"] = None, + stroke_color: Color = RED, + stroke_width: float = 6, + scale_factor: float = 1, + **kwargs + ): + super().__init__( + Line(UP + LEFT, DOWN + RIGHT), Line(UP + RIGHT, DOWN + LEFT), **kwargs ) - self.replace(mobject, stretch=True) + if mobject is not None: + self.replace(mobject, stretch=True) + self.scale(scale_factor) self.set_stroke(color=stroke_color, width=stroke_width)
{"golden_diff": "diff --git a/manim/mobject/shape_matchers.py b/manim/mobject/shape_matchers.py\n--- a/manim/mobject/shape_matchers.py\n+++ b/manim/mobject/shape_matchers.py\n@@ -2,10 +2,12 @@\n \n __all__ = [\"SurroundingRectangle\", \"BackgroundRectangle\", \"Cross\", \"Underline\"]\n \n+from typing import Optional\n \n from ..constants import *\n from ..mobject.geometry import Line, Rectangle\n-from ..mobject.types.vectorized_mobject import VGroup, VMobject\n+from ..mobject.mobject import Mobject\n+from ..mobject.types.vectorized_mobject import VGroup\n from ..utils.color import BLACK, RED, YELLOW, Color\n \n \n@@ -117,13 +119,44 @@\n \n \n class Cross(VGroup):\n- def __init__(self, mobject, stroke_color=RED, stroke_width=6, **kwargs):\n- VGroup.__init__(\n- self,\n- Line(UP + LEFT, DOWN + RIGHT),\n- Line(UP + RIGHT, DOWN + LEFT),\n+ \"\"\"Creates a cross.\n+\n+ Parameters\n+ ----------\n+ mobject\n+ The mobject linked to this instance. It fits the mobject when specified. Defaults to None.\n+ stroke_color\n+ Specifies the color of the cross lines. Defaults to RED.\n+ stroke_width\n+ Specifies the width of the cross lines. Defaults to 6.\n+ scale_factor\n+ Scales the cross to the provided units. Defaults to 1.\n+\n+ Examples\n+ --------\n+ .. manim:: ExampleCross\n+ :save_last_frame:\n+\n+ class ExampleCross(Scene):\n+ def construct(self):\n+ cross = Cross()\n+ self.add(cross)\n+ \"\"\"\n+\n+ def __init__(\n+ self,\n+ mobject: Optional[\"Mobject\"] = None,\n+ stroke_color: Color = RED,\n+ stroke_width: float = 6,\n+ scale_factor: float = 1,\n+ **kwargs\n+ ):\n+ super().__init__(\n+ Line(UP + LEFT, DOWN + RIGHT), Line(UP + RIGHT, DOWN + LEFT), **kwargs\n )\n- self.replace(mobject, stretch=True)\n+ if mobject is not None:\n+ self.replace(mobject, stretch=True)\n+ self.scale(scale_factor)\n self.set_stroke(color=stroke_color, width=stroke_width)\n", "issue": "Allow `Cross` to be initialized without being based on an existing Mobject.\n## Enhancement proposal\r\n<!-- Add a clear and concise description of your enhancement proposal. In particular,\r\n if your enhancement introduces changes to the API, illustrate them with\r\n (fictional) code examples. -->\r\n\r\nCurrently Cross needs a mobject passed to it, I think it should be reworked to not necessarily require this. Also maybe easily specifying the length of the cross in the `__init__` too?\r\n\r\n\r\nCurrent `Cross` code:\r\n```py\r\nclass Cross(VGroup):\r\n def __init__(self, mobject, stroke_color=RED, stroke_width=6, **kwargs):\r\n VGroup.__init__(\r\n self,\r\n Line(UP + LEFT, DOWN + RIGHT),\r\n Line(UP + RIGHT, DOWN + LEFT),\r\n )\r\n self.replace(mobject, stretch=True)\r\n self.set_stroke(color=stroke_color, width=stroke_width)\r\n```\r\n\r\n## Additional comments\r\n<!-- Add further context that you think might be relevant. -->\r\nreplacing `VGroup.__init__` with `super().__init__` too \r\n\n", "before_files": [{"content": "\"\"\"Mobjects used to mark and annotate other mobjects.\"\"\"\n\n__all__ = [\"SurroundingRectangle\", \"BackgroundRectangle\", \"Cross\", \"Underline\"]\n\n\nfrom ..constants import *\nfrom ..mobject.geometry import Line, Rectangle\nfrom ..mobject.types.vectorized_mobject import VGroup, VMobject\nfrom ..utils.color import BLACK, RED, YELLOW, Color\n\n\nclass SurroundingRectangle(Rectangle):\n r\"\"\"A rectangle surrounding a :class:`~.Mobject`\n\n Examples\n --------\n\n .. manim:: SurroundingRectExample\n :save_last_frame:\n\n class SurroundingRectExample(Scene):\n def construct(self):\n title = Title(\"A Quote from Newton\")\n quote = Text(\n \"If I have seen further than others, \\n\"\n \"it is by standing upon the shoulders of giants.\",\n color=BLUE\n ).scale(0.75)\n box = SurroundingRectangle(quote, color=YELLOW, buff=MED_LARGE_BUFF)\n self.add(title, box, quote)\n\n \"\"\"\n\n def __init__(self, mobject, color=YELLOW, buff=SMALL_BUFF, **kwargs):\n self.color = color\n self.buff = buff\n Rectangle.__init__(\n self,\n color=color,\n width=mobject.width + 2 * self.buff,\n height=mobject.height + 2 * self.buff,\n **kwargs\n )\n self.move_to(mobject)\n\n\nclass BackgroundRectangle(SurroundingRectangle):\n \"\"\"A background rectangle\n\n Examples\n --------\n\n .. manim:: ExampleBackgroundRectangle\n :save_last_frame:\n\n class ExampleBackgroundRectangle(Scene):\n def construct(self):\n circle = Circle().shift(LEFT)\n circle.set_stroke(color=GREEN, width=20)\n triangle = Triangle().shift(2 * RIGHT)\n triangle.set_fill(PINK, opacity=0.5)\n backgroundRectangle1 = BackgroundRectangle(circle, color=WHITE, fill_opacity=0.15)\n backgroundRectangle2 = BackgroundRectangle(triangle, color=WHITE, fill_opacity=0.15)\n self.add(backgroundRectangle1)\n self.add(backgroundRectangle2)\n self.add(circle)\n self.add(triangle)\n self.play(Rotate(backgroundRectangle1, PI / 4))\n self.play(Rotate(backgroundRectangle2, PI / 2))\n \"\"\"\n\n def __init__(\n self,\n mobject,\n color=BLACK,\n stroke_width=0,\n stroke_opacity=0,\n fill_opacity=0.75,\n buff=0,\n **kwargs\n ):\n SurroundingRectangle.__init__(\n self,\n mobject,\n color=color,\n stroke_width=stroke_width,\n stroke_opacity=stroke_opacity,\n fill_opacity=fill_opacity,\n buff=buff,\n **kwargs\n )\n self.original_fill_opacity = self.fill_opacity\n\n def pointwise_become_partial(self, mobject, a, b):\n self.set_fill(opacity=b * self.original_fill_opacity)\n return self\n\n def set_style(\n self,\n stroke_color=None,\n stroke_width=None,\n fill_color=None,\n fill_opacity=None,\n family=True,\n ):\n # Unchangeable style, except for fill_opacity\n super().set_style(\n stroke_color=BLACK,\n stroke_width=0,\n fill_color=BLACK,\n fill_opacity=fill_opacity,\n )\n return self\n\n def get_fill_color(self):\n return Color(self.color)\n\n\nclass Cross(VGroup):\n def __init__(self, mobject, stroke_color=RED, stroke_width=6, **kwargs):\n VGroup.__init__(\n self,\n Line(UP + LEFT, DOWN + RIGHT),\n Line(UP + RIGHT, DOWN + LEFT),\n )\n self.replace(mobject, stretch=True)\n self.set_stroke(color=stroke_color, width=stroke_width)\n\n\nclass Underline(Line):\n \"\"\"Creates an underline.\n\n Parameters\n ----------\n Line\n The underline.\n\n Examples\n --------\n .. manim:: UnderLine\n :save_last_frame:\n\n class UnderLine(Scene):\n def construct(self):\n man = Tex(\"Manim\") # Full Word\n ul = Underline(man) # Underlining the word\n self.add(man, ul)\n \"\"\"\n\n def __init__(self, mobject, buff=SMALL_BUFF, **kwargs):\n super().__init__(LEFT, RIGHT, buff=buff, **kwargs)\n self.match_width(mobject)\n self.next_to(mobject, DOWN, buff=self.buff)\n", "path": "manim/mobject/shape_matchers.py"}], "after_files": [{"content": "\"\"\"Mobjects used to mark and annotate other mobjects.\"\"\"\n\n__all__ = [\"SurroundingRectangle\", \"BackgroundRectangle\", \"Cross\", \"Underline\"]\n\nfrom typing import Optional\n\nfrom ..constants import *\nfrom ..mobject.geometry import Line, Rectangle\nfrom ..mobject.mobject import Mobject\nfrom ..mobject.types.vectorized_mobject import VGroup\nfrom ..utils.color import BLACK, RED, YELLOW, Color\n\n\nclass SurroundingRectangle(Rectangle):\n r\"\"\"A rectangle surrounding a :class:`~.Mobject`\n\n Examples\n --------\n\n .. manim:: SurroundingRectExample\n :save_last_frame:\n\n class SurroundingRectExample(Scene):\n def construct(self):\n title = Title(\"A Quote from Newton\")\n quote = Text(\n \"If I have seen further than others, \\n\"\n \"it is by standing upon the shoulders of giants.\",\n color=BLUE\n ).scale(0.75)\n box = SurroundingRectangle(quote, color=YELLOW, buff=MED_LARGE_BUFF)\n self.add(title, box, quote)\n\n \"\"\"\n\n def __init__(self, mobject, color=YELLOW, buff=SMALL_BUFF, **kwargs):\n self.color = color\n self.buff = buff\n Rectangle.__init__(\n self,\n color=color,\n width=mobject.width + 2 * self.buff,\n height=mobject.height + 2 * self.buff,\n **kwargs\n )\n self.move_to(mobject)\n\n\nclass BackgroundRectangle(SurroundingRectangle):\n \"\"\"A background rectangle\n\n Examples\n --------\n\n .. manim:: ExampleBackgroundRectangle\n :save_last_frame:\n\n class ExampleBackgroundRectangle(Scene):\n def construct(self):\n circle = Circle().shift(LEFT)\n circle.set_stroke(color=GREEN, width=20)\n triangle = Triangle().shift(2 * RIGHT)\n triangle.set_fill(PINK, opacity=0.5)\n backgroundRectangle1 = BackgroundRectangle(circle, color=WHITE, fill_opacity=0.15)\n backgroundRectangle2 = BackgroundRectangle(triangle, color=WHITE, fill_opacity=0.15)\n self.add(backgroundRectangle1)\n self.add(backgroundRectangle2)\n self.add(circle)\n self.add(triangle)\n self.play(Rotate(backgroundRectangle1, PI / 4))\n self.play(Rotate(backgroundRectangle2, PI / 2))\n \"\"\"\n\n def __init__(\n self,\n mobject,\n color=BLACK,\n stroke_width=0,\n stroke_opacity=0,\n fill_opacity=0.75,\n buff=0,\n **kwargs\n ):\n SurroundingRectangle.__init__(\n self,\n mobject,\n color=color,\n stroke_width=stroke_width,\n stroke_opacity=stroke_opacity,\n fill_opacity=fill_opacity,\n buff=buff,\n **kwargs\n )\n self.original_fill_opacity = self.fill_opacity\n\n def pointwise_become_partial(self, mobject, a, b):\n self.set_fill(opacity=b * self.original_fill_opacity)\n return self\n\n def set_style(\n self,\n stroke_color=None,\n stroke_width=None,\n fill_color=None,\n fill_opacity=None,\n family=True,\n ):\n # Unchangeable style, except for fill_opacity\n super().set_style(\n stroke_color=BLACK,\n stroke_width=0,\n fill_color=BLACK,\n fill_opacity=fill_opacity,\n )\n return self\n\n def get_fill_color(self):\n return Color(self.color)\n\n\nclass Cross(VGroup):\n \"\"\"Creates a cross.\n\n Parameters\n ----------\n mobject\n The mobject linked to this instance. It fits the mobject when specified. Defaults to None.\n stroke_color\n Specifies the color of the cross lines. Defaults to RED.\n stroke_width\n Specifies the width of the cross lines. Defaults to 6.\n scale_factor\n Scales the cross to the provided units. Defaults to 1.\n\n Examples\n --------\n .. manim:: ExampleCross\n :save_last_frame:\n\n class ExampleCross(Scene):\n def construct(self):\n cross = Cross()\n self.add(cross)\n \"\"\"\n\n def __init__(\n self,\n mobject: Optional[\"Mobject\"] = None,\n stroke_color: Color = RED,\n stroke_width: float = 6,\n scale_factor: float = 1,\n **kwargs\n ):\n super().__init__(\n Line(UP + LEFT, DOWN + RIGHT), Line(UP + RIGHT, DOWN + LEFT), **kwargs\n )\n if mobject is not None:\n self.replace(mobject, stretch=True)\n self.scale(scale_factor)\n self.set_stroke(color=stroke_color, width=stroke_width)\n\n\nclass Underline(Line):\n \"\"\"Creates an underline.\n\n Parameters\n ----------\n Line\n The underline.\n\n Examples\n --------\n .. manim:: UnderLine\n :save_last_frame:\n\n class UnderLine(Scene):\n def construct(self):\n man = Tex(\"Manim\") # Full Word\n ul = Underline(man) # Underlining the word\n self.add(man, ul)\n \"\"\"\n\n def __init__(self, mobject, buff=SMALL_BUFF, **kwargs):\n super().__init__(LEFT, RIGHT, buff=buff, **kwargs)\n self.match_width(mobject)\n self.next_to(mobject, DOWN, buff=self.buff)\n", "path": "manim/mobject/shape_matchers.py"}]}
1,840
541
gh_patches_debug_15397
rasdani/github-patches
git_diff
crytic__slither-1945
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- similar variables detector is extremely slow This detector makes up the majority of runtime on large codebases ``` ncalls tottime percall cumtime percall filename:lineno(function) 1 0.000 0.000 41.286 41.286 /Users/alpharush/tob/slither/slither/__main__.py:81(process_all) 1 0.000 0.000 39.059 39.059 /Users/alpharush/tob/slither/slither/__main__.py:58(process_single) 1 0.000 0.000 33.319 33.319 /Users/alpharush/tob/slither/slither/__main__.py:111(_process) 1 0.000 0.000 33.317 33.317 /Users/alpharush/tob/slither/slither/slither.py:198(run_detectors) 1 0.000 0.000 33.317 33.317 /Users/alpharush/tob/slither/slither/slither.py:204(<listcomp>) 84 0.001 0.000 33.317 0.397 /Users/alpharush/tob/slither/slither/detectors/abstract_detector.py:176(detect) 1 0.000 0.000 31.215 31.215 /Users/alpharush/tob/slither/slither/detectors/variables/similar_variables.py:72(_detect) ``` https://github.com/crytic/slither/blob/master/slither/detectors/variables/similar_variables.py#L63-L66 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `slither/detectors/variables/similar_variables.py` Content: ``` 1 """ 2 Check for state variables too similar 3 Do not check contract inheritance 4 """ 5 import difflib 6 from typing import List, Set, Tuple 7 8 from slither.core.declarations.contract import Contract 9 from slither.core.variables.local_variable import LocalVariable 10 from slither.detectors.abstract_detector import ( 11 AbstractDetector, 12 DetectorClassification, 13 DETECTOR_INFO, 14 ) 15 from slither.utils.output import Output 16 17 18 class SimilarVarsDetection(AbstractDetector): 19 """ 20 Variable similar detector 21 """ 22 23 ARGUMENT = "similar-names" 24 HELP = "Variable names are too similar" 25 IMPACT = DetectorClassification.INFORMATIONAL 26 CONFIDENCE = DetectorClassification.MEDIUM 27 28 WIKI = ( 29 "https://github.com/crytic/slither/wiki/Detector-Documentation#variable-names-too-similar" 30 ) 31 32 WIKI_TITLE = "Variable names too similar" 33 WIKI_DESCRIPTION = "Detect variables with names that are too similar." 34 WIKI_EXPLOIT_SCENARIO = "Bob uses several variables with similar names. As a result, his code is difficult to review." 35 WIKI_RECOMMENDATION = "Prevent variables from having similar names." 36 37 @staticmethod 38 def similar(seq1: str, seq2: str) -> bool: 39 """Test the name similarity 40 41 Two name are similar if difflib.SequenceMatcher on the lowercase 42 version of the name is greater than 0.90 43 See: https://docs.python.org/2/library/difflib.html 44 Args: 45 seq1 (str): first name 46 seq2 (str): second name 47 Returns: 48 bool: true if names are similar 49 """ 50 if len(seq1) != len(seq2): 51 return False 52 val = difflib.SequenceMatcher(a=seq1.lower(), b=seq2.lower()).ratio() 53 ret = val > 0.90 54 return ret 55 56 @staticmethod 57 def detect_sim(contract: Contract) -> Set[Tuple[LocalVariable, LocalVariable]]: 58 """Detect variables with similar name 59 60 Returns: 61 bool: true if variables have similar name 62 """ 63 all_var = [x.variables for x in contract.functions] 64 all_var = [x for l in all_var for x in l] 65 66 contract_var = contract.variables 67 68 all_var = set(all_var + contract_var) 69 70 ret = [] 71 for v1 in all_var: 72 for v2 in all_var: 73 if v1.name.lower() != v2.name.lower(): 74 if SimilarVarsDetection.similar(v1.name, v2.name): 75 if (v2, v1) not in ret: 76 ret.append((v1, v2)) 77 78 return set(ret) 79 80 def _detect(self) -> List[Output]: 81 """Detect similar variables name 82 83 Returns: 84 list: {'vuln', 'filename,'contract','vars'} 85 """ 86 results = [] 87 for c in self.contracts: 88 allVars = self.detect_sim(c) 89 if allVars: 90 for (v1, v2) in sorted(allVars, key=lambda x: (x[0].name, x[1].name)): 91 v_left = v1 if v1.name < v2.name else v2 92 v_right = v2 if v_left == v1 else v1 93 info: DETECTOR_INFO = [ 94 "Variable ", 95 v_left, 96 " is too similar to ", 97 v_right, 98 "\n", 99 ] 100 json = self.generate_result(info) 101 results.append(json) 102 return results 103 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/slither/detectors/variables/similar_variables.py b/slither/detectors/variables/similar_variables.py --- a/slither/detectors/variables/similar_variables.py +++ b/slither/detectors/variables/similar_variables.py @@ -65,12 +65,16 @@ contract_var = contract.variables - all_var = set(all_var + contract_var) + all_var = list(set(all_var + contract_var)) ret = [] - for v1 in all_var: - for v2 in all_var: - if v1.name.lower() != v2.name.lower(): + # pylint: disable=consider-using-enumerate + for i in range(len(all_var)): + v1 = all_var[i] + _v1_name_lower = v1.name.lower() + for j in range(i, len(all_var)): + v2 = all_var[j] + if _v1_name_lower != v2.name.lower(): if SimilarVarsDetection.similar(v1.name, v2.name): if (v2, v1) not in ret: ret.append((v1, v2))
{"golden_diff": "diff --git a/slither/detectors/variables/similar_variables.py b/slither/detectors/variables/similar_variables.py\n--- a/slither/detectors/variables/similar_variables.py\n+++ b/slither/detectors/variables/similar_variables.py\n@@ -65,12 +65,16 @@\n \n contract_var = contract.variables\n \n- all_var = set(all_var + contract_var)\n+ all_var = list(set(all_var + contract_var))\n \n ret = []\n- for v1 in all_var:\n- for v2 in all_var:\n- if v1.name.lower() != v2.name.lower():\n+ # pylint: disable=consider-using-enumerate\n+ for i in range(len(all_var)):\n+ v1 = all_var[i]\n+ _v1_name_lower = v1.name.lower()\n+ for j in range(i, len(all_var)):\n+ v2 = all_var[j]\n+ if _v1_name_lower != v2.name.lower():\n if SimilarVarsDetection.similar(v1.name, v2.name):\n if (v2, v1) not in ret:\n ret.append((v1, v2))\n", "issue": " similar variables detector is extremely slow\n This detector makes up the majority of runtime on large codebases\r\n```\r\nncalls tottime percall cumtime percall filename:lineno(function)\r\n 1 0.000 0.000 41.286 41.286 /Users/alpharush/tob/slither/slither/__main__.py:81(process_all)\r\n 1 0.000 0.000 39.059 39.059 /Users/alpharush/tob/slither/slither/__main__.py:58(process_single)\r\n 1 0.000 0.000 33.319 33.319 /Users/alpharush/tob/slither/slither/__main__.py:111(_process)\r\n 1 0.000 0.000 33.317 33.317 /Users/alpharush/tob/slither/slither/slither.py:198(run_detectors)\r\n 1 0.000 0.000 33.317 33.317 /Users/alpharush/tob/slither/slither/slither.py:204(<listcomp>)\r\n 84 0.001 0.000 33.317 0.397 /Users/alpharush/tob/slither/slither/detectors/abstract_detector.py:176(detect)\r\n 1 0.000 0.000 31.215 31.215 /Users/alpharush/tob/slither/slither/detectors/variables/similar_variables.py:72(_detect)\r\n\r\n```\r\nhttps://github.com/crytic/slither/blob/master/slither/detectors/variables/similar_variables.py#L63-L66\n", "before_files": [{"content": "\"\"\"\nCheck for state variables too similar\nDo not check contract inheritance\n\"\"\"\nimport difflib\nfrom typing import List, Set, Tuple\n\nfrom slither.core.declarations.contract import Contract\nfrom slither.core.variables.local_variable import LocalVariable\nfrom slither.detectors.abstract_detector import (\n AbstractDetector,\n DetectorClassification,\n DETECTOR_INFO,\n)\nfrom slither.utils.output import Output\n\n\nclass SimilarVarsDetection(AbstractDetector):\n \"\"\"\n Variable similar detector\n \"\"\"\n\n ARGUMENT = \"similar-names\"\n HELP = \"Variable names are too similar\"\n IMPACT = DetectorClassification.INFORMATIONAL\n CONFIDENCE = DetectorClassification.MEDIUM\n\n WIKI = (\n \"https://github.com/crytic/slither/wiki/Detector-Documentation#variable-names-too-similar\"\n )\n\n WIKI_TITLE = \"Variable names too similar\"\n WIKI_DESCRIPTION = \"Detect variables with names that are too similar.\"\n WIKI_EXPLOIT_SCENARIO = \"Bob uses several variables with similar names. As a result, his code is difficult to review.\"\n WIKI_RECOMMENDATION = \"Prevent variables from having similar names.\"\n\n @staticmethod\n def similar(seq1: str, seq2: str) -> bool:\n \"\"\"Test the name similarity\n\n Two name are similar if difflib.SequenceMatcher on the lowercase\n version of the name is greater than 0.90\n See: https://docs.python.org/2/library/difflib.html\n Args:\n seq1 (str): first name\n seq2 (str): second name\n Returns:\n bool: true if names are similar\n \"\"\"\n if len(seq1) != len(seq2):\n return False\n val = difflib.SequenceMatcher(a=seq1.lower(), b=seq2.lower()).ratio()\n ret = val > 0.90\n return ret\n\n @staticmethod\n def detect_sim(contract: Contract) -> Set[Tuple[LocalVariable, LocalVariable]]:\n \"\"\"Detect variables with similar name\n\n Returns:\n bool: true if variables have similar name\n \"\"\"\n all_var = [x.variables for x in contract.functions]\n all_var = [x for l in all_var for x in l]\n\n contract_var = contract.variables\n\n all_var = set(all_var + contract_var)\n\n ret = []\n for v1 in all_var:\n for v2 in all_var:\n if v1.name.lower() != v2.name.lower():\n if SimilarVarsDetection.similar(v1.name, v2.name):\n if (v2, v1) not in ret:\n ret.append((v1, v2))\n\n return set(ret)\n\n def _detect(self) -> List[Output]:\n \"\"\"Detect similar variables name\n\n Returns:\n list: {'vuln', 'filename,'contract','vars'}\n \"\"\"\n results = []\n for c in self.contracts:\n allVars = self.detect_sim(c)\n if allVars:\n for (v1, v2) in sorted(allVars, key=lambda x: (x[0].name, x[1].name)):\n v_left = v1 if v1.name < v2.name else v2\n v_right = v2 if v_left == v1 else v1\n info: DETECTOR_INFO = [\n \"Variable \",\n v_left,\n \" is too similar to \",\n v_right,\n \"\\n\",\n ]\n json = self.generate_result(info)\n results.append(json)\n return results\n", "path": "slither/detectors/variables/similar_variables.py"}], "after_files": [{"content": "\"\"\"\nCheck for state variables too similar\nDo not check contract inheritance\n\"\"\"\nimport difflib\nfrom typing import List, Set, Tuple\n\nfrom slither.core.declarations.contract import Contract\nfrom slither.core.variables.local_variable import LocalVariable\nfrom slither.detectors.abstract_detector import (\n AbstractDetector,\n DetectorClassification,\n DETECTOR_INFO,\n)\nfrom slither.utils.output import Output\n\n\nclass SimilarVarsDetection(AbstractDetector):\n \"\"\"\n Variable similar detector\n \"\"\"\n\n ARGUMENT = \"similar-names\"\n HELP = \"Variable names are too similar\"\n IMPACT = DetectorClassification.INFORMATIONAL\n CONFIDENCE = DetectorClassification.MEDIUM\n\n WIKI = (\n \"https://github.com/crytic/slither/wiki/Detector-Documentation#variable-names-too-similar\"\n )\n\n WIKI_TITLE = \"Variable names too similar\"\n WIKI_DESCRIPTION = \"Detect variables with names that are too similar.\"\n WIKI_EXPLOIT_SCENARIO = \"Bob uses several variables with similar names. As a result, his code is difficult to review.\"\n WIKI_RECOMMENDATION = \"Prevent variables from having similar names.\"\n\n @staticmethod\n def similar(seq1: str, seq2: str) -> bool:\n \"\"\"Test the name similarity\n\n Two name are similar if difflib.SequenceMatcher on the lowercase\n version of the name is greater than 0.90\n See: https://docs.python.org/2/library/difflib.html\n Args:\n seq1 (str): first name\n seq2 (str): second name\n Returns:\n bool: true if names are similar\n \"\"\"\n if len(seq1) != len(seq2):\n return False\n val = difflib.SequenceMatcher(a=seq1.lower(), b=seq2.lower()).ratio()\n ret = val > 0.90\n return ret\n\n @staticmethod\n def detect_sim(contract: Contract) -> Set[Tuple[LocalVariable, LocalVariable]]:\n \"\"\"Detect variables with similar name\n\n Returns:\n bool: true if variables have similar name\n \"\"\"\n all_var = [x.variables for x in contract.functions]\n all_var = [x for l in all_var for x in l]\n\n contract_var = contract.variables\n\n all_var = list(set(all_var + contract_var))\n\n ret = []\n # pylint: disable=consider-using-enumerate\n for i in range(len(all_var)):\n v1 = all_var[i]\n _v1_name_lower = v1.name.lower()\n for j in range(i, len(all_var)):\n v2 = all_var[j]\n if _v1_name_lower != v2.name.lower():\n if SimilarVarsDetection.similar(v1.name, v2.name):\n if (v2, v1) not in ret:\n ret.append((v1, v2))\n\n return set(ret)\n\n def _detect(self) -> List[Output]:\n \"\"\"Detect similar variables name\n\n Returns:\n list: {'vuln', 'filename,'contract','vars'}\n \"\"\"\n results = []\n for c in self.contracts:\n allVars = self.detect_sim(c)\n if allVars:\n for (v1, v2) in sorted(allVars, key=lambda x: (x[0].name, x[1].name)):\n v_left = v1 if v1.name < v2.name else v2\n v_right = v2 if v_left == v1 else v1\n info: DETECTOR_INFO = [\n \"Variable \",\n v_left,\n \" is too similar to \",\n v_right,\n \"\\n\",\n ]\n json = self.generate_result(info)\n results.append(json)\n return results\n", "path": "slither/detectors/variables/similar_variables.py"}]}
1,704
258
gh_patches_debug_28132
rasdani/github-patches
git_diff
bokeh__bokeh-5457
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- If main.py is run by bokeh serve, warn about running with directory name instead Lots of reports of people running, e.g. ``` bokeh serve --show crossfilter/main.py ``` Which prevents all the features of "directory style" apps from being enabled. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `bokeh/command/util.py` Content: ``` 1 ''' Provide utility functions for implementing the Bokeh command. 2 3 ''' 4 from __future__ import print_function 5 6 import os 7 import sys 8 9 from bokeh.application import Application 10 from bokeh.application.handlers import ScriptHandler, DirectoryHandler, NotebookHandler 11 12 def die(message): 13 ''' Print an error message and exit. 14 15 Args: 16 message (str) : error message to print 17 18 ''' 19 print(message, file=sys.stderr) 20 sys.exit(1) 21 22 def build_single_handler_application(path, argv=None): 23 ''' Return a Bokeh application built using a single handler for a file 24 or directory. 25 26 Args: 27 path (str) : path to a file or directory for creating a Bokeh 28 application. 29 argv (seq[str], optional) : command line arguments to pass to the 30 application handler 31 32 Returns: 33 Application 34 35 Raises: 36 RuntimeError 37 38 ''' 39 argv = argv or [] 40 path = os.path.abspath(path) 41 if os.path.isdir(path): 42 handler = DirectoryHandler(filename=path, argv=argv) 43 else: 44 if path.endswith(".ipynb"): 45 handler = NotebookHandler(filename=path, argv=argv) 46 elif path.endswith(".py"): 47 handler = ScriptHandler(filename=path, argv=argv) 48 else: 49 raise ValueError("Expected a '.py' script or '.ipynb' notebook, got: '%s'" % path) 50 51 if handler.failed: 52 raise RuntimeError("Error loading %s:\n\n%s\n%s " % (path, handler.error, handler.error_detail)) 53 54 application = Application(handler) 55 56 return application 57 58 def build_single_handler_applications(paths, argvs=None): 59 ''' Return a dictionary mapping routes to Bokeh applications built using 60 single handlers, for specified files or directories. 61 62 Args: 63 path (seq[str]) : paths to files or directories for creating Bokeh 64 applications. 65 argvs (dict[str, list[str]], optional) : mapping of paths to command 66 line arguments to pass to the handler for each path 67 68 Returns: 69 dict[str, Application] 70 71 Raises: 72 RuntimeError 73 74 ''' 75 applications = {} 76 argvs = {} or argvs 77 78 for path in paths: 79 application = build_single_handler_application(path, argvs.get(path, [])) 80 81 route = application.handlers[0].url_path() 82 83 if not route: 84 if '/' in applications: 85 raise RuntimeError("Don't know the URL path to use for %s" % (path)) 86 route = '/' 87 applications[route] = application 88 89 return applications 90 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/bokeh/command/util.py b/bokeh/command/util.py --- a/bokeh/command/util.py +++ b/bokeh/command/util.py @@ -5,6 +5,7 @@ import os import sys +import warnings from bokeh.application import Application from bokeh.application.handlers import ScriptHandler, DirectoryHandler, NotebookHandler @@ -19,6 +20,16 @@ print(message, file=sys.stderr) sys.exit(1) +DIRSTYLE_MAIN_WARNING = """ +It looks like you might be running the main.py of a directory app directly. +If this is the case, to enable the features of directory style apps, you must +call "bokeh serve" on the directory instead. For example: + + bokeh serve my_app_dir/ + +If this is not the case, renaming main.py will supress this warning. +""" + def build_single_handler_application(path, argv=None): ''' Return a Bokeh application built using a single handler for a file or directory. @@ -44,6 +55,8 @@ if path.endswith(".ipynb"): handler = NotebookHandler(filename=path, argv=argv) elif path.endswith(".py"): + if path.endswith("main.py"): + warnings.warn(DIRSTYLE_MAIN_WARNING) handler = ScriptHandler(filename=path, argv=argv) else: raise ValueError("Expected a '.py' script or '.ipynb' notebook, got: '%s'" % path)
{"golden_diff": "diff --git a/bokeh/command/util.py b/bokeh/command/util.py\n--- a/bokeh/command/util.py\n+++ b/bokeh/command/util.py\n@@ -5,6 +5,7 @@\n \n import os\n import sys\n+import warnings\n \n from bokeh.application import Application\n from bokeh.application.handlers import ScriptHandler, DirectoryHandler, NotebookHandler\n@@ -19,6 +20,16 @@\n print(message, file=sys.stderr)\n sys.exit(1)\n \n+DIRSTYLE_MAIN_WARNING = \"\"\"\n+It looks like you might be running the main.py of a directory app directly.\n+If this is the case, to enable the features of directory style apps, you must\n+call \"bokeh serve\" on the directory instead. For example:\n+\n+ bokeh serve my_app_dir/\n+\n+If this is not the case, renaming main.py will supress this warning.\n+\"\"\"\n+\n def build_single_handler_application(path, argv=None):\n ''' Return a Bokeh application built using a single handler for a file\n or directory.\n@@ -44,6 +55,8 @@\n if path.endswith(\".ipynb\"):\n handler = NotebookHandler(filename=path, argv=argv)\n elif path.endswith(\".py\"):\n+ if path.endswith(\"main.py\"):\n+ warnings.warn(DIRSTYLE_MAIN_WARNING)\n handler = ScriptHandler(filename=path, argv=argv)\n else:\n raise ValueError(\"Expected a '.py' script or '.ipynb' notebook, got: '%s'\" % path)\n", "issue": "If main.py is run by bokeh serve, warn about running with directory name instead\nLots of reports of people running, e.g.\r\n```\r\nbokeh serve --show crossfilter/main.py\r\n```\r\nWhich prevents all the features of \"directory style\" apps from being enabled. \n", "before_files": [{"content": "''' Provide utility functions for implementing the Bokeh command.\n\n'''\nfrom __future__ import print_function\n\nimport os\nimport sys\n\nfrom bokeh.application import Application\nfrom bokeh.application.handlers import ScriptHandler, DirectoryHandler, NotebookHandler\n\ndef die(message):\n ''' Print an error message and exit.\n\n Args:\n message (str) : error message to print\n\n '''\n print(message, file=sys.stderr)\n sys.exit(1)\n\ndef build_single_handler_application(path, argv=None):\n ''' Return a Bokeh application built using a single handler for a file\n or directory.\n\n Args:\n path (str) : path to a file or directory for creating a Bokeh\n application.\n argv (seq[str], optional) : command line arguments to pass to the\n application handler\n\n Returns:\n Application\n\n Raises:\n RuntimeError\n\n '''\n argv = argv or []\n path = os.path.abspath(path)\n if os.path.isdir(path):\n handler = DirectoryHandler(filename=path, argv=argv)\n else:\n if path.endswith(\".ipynb\"):\n handler = NotebookHandler(filename=path, argv=argv)\n elif path.endswith(\".py\"):\n handler = ScriptHandler(filename=path, argv=argv)\n else:\n raise ValueError(\"Expected a '.py' script or '.ipynb' notebook, got: '%s'\" % path)\n\n if handler.failed:\n raise RuntimeError(\"Error loading %s:\\n\\n%s\\n%s \" % (path, handler.error, handler.error_detail))\n\n application = Application(handler)\n\n return application\n\ndef build_single_handler_applications(paths, argvs=None):\n ''' Return a dictionary mapping routes to Bokeh applications built using\n single handlers, for specified files or directories.\n\n Args:\n path (seq[str]) : paths to files or directories for creating Bokeh\n applications.\n argvs (dict[str, list[str]], optional) : mapping of paths to command\n line arguments to pass to the handler for each path\n\n Returns:\n dict[str, Application]\n\n Raises:\n RuntimeError\n\n '''\n applications = {}\n argvs = {} or argvs\n\n for path in paths:\n application = build_single_handler_application(path, argvs.get(path, []))\n\n route = application.handlers[0].url_path()\n\n if not route:\n if '/' in applications:\n raise RuntimeError(\"Don't know the URL path to use for %s\" % (path))\n route = '/'\n applications[route] = application\n\n return applications\n", "path": "bokeh/command/util.py"}], "after_files": [{"content": "''' Provide utility functions for implementing the Bokeh command.\n\n'''\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport warnings\n\nfrom bokeh.application import Application\nfrom bokeh.application.handlers import ScriptHandler, DirectoryHandler, NotebookHandler\n\ndef die(message):\n ''' Print an error message and exit.\n\n Args:\n message (str) : error message to print\n\n '''\n print(message, file=sys.stderr)\n sys.exit(1)\n\nDIRSTYLE_MAIN_WARNING = \"\"\"\nIt looks like you might be running the main.py of a directory app directly.\nIf this is the case, to enable the features of directory style apps, you must\ncall \"bokeh serve\" on the directory instead. For example:\n\n bokeh serve my_app_dir/\n\nIf this is not the case, renaming main.py will supress this warning.\n\"\"\"\n\ndef build_single_handler_application(path, argv=None):\n ''' Return a Bokeh application built using a single handler for a file\n or directory.\n\n Args:\n path (str) : path to a file or directory for creating a Bokeh\n application.\n argv (seq[str], optional) : command line arguments to pass to the\n application handler\n\n Returns:\n Application\n\n Raises:\n RuntimeError\n\n '''\n argv = argv or []\n path = os.path.abspath(path)\n if os.path.isdir(path):\n handler = DirectoryHandler(filename=path, argv=argv)\n else:\n if path.endswith(\".ipynb\"):\n handler = NotebookHandler(filename=path, argv=argv)\n elif path.endswith(\".py\"):\n if path.endswith(\"main.py\"):\n warnings.warn(DIRSTYLE_MAIN_WARNING)\n handler = ScriptHandler(filename=path, argv=argv)\n else:\n raise ValueError(\"Expected a '.py' script or '.ipynb' notebook, got: '%s'\" % path)\n\n if handler.failed:\n raise RuntimeError(\"Error loading %s:\\n\\n%s\\n%s \" % (path, handler.error, handler.error_detail))\n\n application = Application(handler)\n\n return application\n\ndef build_single_handler_applications(paths, argvs=None):\n ''' Return a dictionary mapping routes to Bokeh applications built using\n single handlers, for specified files or directories.\n\n Args:\n path (seq[str]) : paths to files or directories for creating Bokeh\n applications.\n argvs (dict[str, list[str]], optional) : mapping of paths to command\n line arguments to pass to the handler for each path\n\n Returns:\n dict[str, Application]\n\n Raises:\n RuntimeError\n\n '''\n applications = {}\n argvs = {} or argvs\n\n for path in paths:\n application = build_single_handler_application(path, argvs.get(path, []))\n\n route = application.handlers[0].url_path()\n\n if not route:\n if '/' in applications:\n raise RuntimeError(\"Don't know the URL path to use for %s\" % (path))\n route = '/'\n applications[route] = application\n\n return applications\n", "path": "bokeh/command/util.py"}]}
1,037
326
gh_patches_debug_28736
rasdani/github-patches
git_diff
opsdroid__opsdroid-183
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Change default log location Logs by default are written to `./opsdroid.log`. So they end up being written wherever you run the command. Logs should either be written to `/var/log/opsdroid.log` or as that may not be writeable by all users maybe `~/.opsdroid/opsdroid.log`. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `opsdroid/const.py` Content: ``` 1 """Constants used by OpsDroid.""" 2 import os 3 4 __version__ = "0.8.1" 5 6 LOG_FILENAME = 'output.log' 7 DEFAULT_GIT_URL = "https://github.com/opsdroid/" 8 MODULES_DIRECTORY = "opsdroid-modules" 9 DEFAULT_ROOT_PATH = os.path.join(os.path.expanduser("~"), ".opsdroid") 10 DEFAULT_MODULES_PATH = os.path.join(DEFAULT_ROOT_PATH, "modules") 11 DEFAULT_MODULE_DEPS_PATH = os.path.join(DEFAULT_ROOT_PATH, "site-packages") 12 DEFAULT_CONFIG_PATH = os.path.join(DEFAULT_ROOT_PATH, "configuration.yaml") 13 DEFAULT_MODULE_BRANCH = "master" 14 EXAMPLE_CONFIG_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), 15 "configuration/example_configuration.yaml") 16 ``` Path: `opsdroid/__main__.py` Content: ``` 1 """Starts opsdroid.""" 2 3 import sys 4 import logging 5 import argparse 6 7 from opsdroid.core import OpsDroid 8 from opsdroid.const import LOG_FILENAME, EXAMPLE_CONFIG_FILE 9 from opsdroid.web import Web 10 11 12 _LOGGER = logging.getLogger("opsdroid") 13 14 15 def configure_logging(config): 16 """Configure the root logger based on user config.""" 17 rootlogger = logging.getLogger() 18 while rootlogger.handlers: 19 rootlogger.handlers.pop() 20 21 try: 22 logfile_path = config["logging"]["path"] 23 except KeyError: 24 logfile_path = LOG_FILENAME 25 26 try: 27 log_level = get_logging_level( 28 config["logging"]["level"]) 29 except KeyError: 30 log_level = logging.INFO 31 32 rootlogger.setLevel(log_level) 33 formatter = logging.Formatter('%(levelname)s %(name)s: %(message)s') 34 35 console_handler = logging.StreamHandler() 36 console_handler.setLevel(log_level) 37 console_handler.setFormatter(formatter) 38 rootlogger.addHandler(console_handler) 39 40 try: 41 if not config["logging"]["console"]: 42 console_handler.setLevel(logging.CRITICAL) 43 except KeyError: 44 pass 45 46 if logfile_path: 47 file_handler = logging.FileHandler(logfile_path) 48 file_handler.setLevel(log_level) 49 file_handler.setFormatter(formatter) 50 rootlogger.addHandler(file_handler) 51 52 _LOGGER.info("="*40) 53 _LOGGER.info("Stated application") 54 55 56 def get_logging_level(logging_level): 57 """Get the logger level based on the user configuration.""" 58 if logging_level == 'critical': 59 return logging.CRITICAL 60 elif logging_level == 'error': 61 return logging.ERROR 62 elif logging_level == 'warning': 63 return logging.WARNING 64 elif logging_level == 'debug': 65 return logging.DEBUG 66 67 return logging.INFO 68 69 70 def parse_args(args): 71 """Parse command line arguments.""" 72 parser = argparse.ArgumentParser(description='Run opsdroid.') 73 parser.add_argument('--gen-config', action="store_true", 74 help='prints out an example configuration file') 75 return parser.parse_args(args) 76 77 78 def check_dependencies(): 79 """Check for system dependencies required by opsdroid.""" 80 if sys.version_info[0] < 3 or sys.version_info[1] < 5: 81 logging.critical("Whoops! opsdroid requires python 3.5 or above.") 82 sys.exit(1) 83 84 85 def main(): 86 """Enter the application here.""" 87 args = parse_args(sys.argv[1:]) 88 89 if args.gen_config: 90 with open(EXAMPLE_CONFIG_FILE, 'r') as conf: 91 print(conf.read()) 92 sys.exit(0) 93 94 check_dependencies() 95 96 restart = True 97 98 while restart: 99 with OpsDroid() as opsdroid: 100 opsdroid.load() 101 configure_logging(opsdroid.config) 102 opsdroid.web_server = Web(opsdroid) 103 opsdroid.start_loop() 104 restart = opsdroid.should_restart 105 106 107 if __name__ == "__main__": 108 main() 109 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/opsdroid/__main__.py b/opsdroid/__main__.py --- a/opsdroid/__main__.py +++ b/opsdroid/__main__.py @@ -1,11 +1,12 @@ """Starts opsdroid.""" +import os import sys import logging import argparse from opsdroid.core import OpsDroid -from opsdroid.const import LOG_FILENAME, EXAMPLE_CONFIG_FILE +from opsdroid.const import DEFAULT_LOG_FILENAME, EXAMPLE_CONFIG_FILE from opsdroid.web import Web @@ -19,9 +20,12 @@ rootlogger.handlers.pop() try: - logfile_path = config["logging"]["path"] + if config["logging"]["path"]: + logfile_path = os.path.expanduser(config["logging"]["path"]) + else: + logfile_path = config["logging"]["path"] except KeyError: - logfile_path = LOG_FILENAME + logfile_path = DEFAULT_LOG_FILENAME try: log_level = get_logging_level( diff --git a/opsdroid/const.py b/opsdroid/const.py --- a/opsdroid/const.py +++ b/opsdroid/const.py @@ -3,10 +3,10 @@ __version__ = "0.8.1" -LOG_FILENAME = 'output.log' DEFAULT_GIT_URL = "https://github.com/opsdroid/" MODULES_DIRECTORY = "opsdroid-modules" -DEFAULT_ROOT_PATH = os.path.join(os.path.expanduser("~"), ".opsdroid") +DEFAULT_ROOT_PATH = os.path.expanduser("~/.opsdroid") +DEFAULT_LOG_FILENAME = os.path.join(DEFAULT_ROOT_PATH, 'output.log') DEFAULT_MODULES_PATH = os.path.join(DEFAULT_ROOT_PATH, "modules") DEFAULT_MODULE_DEPS_PATH = os.path.join(DEFAULT_ROOT_PATH, "site-packages") DEFAULT_CONFIG_PATH = os.path.join(DEFAULT_ROOT_PATH, "configuration.yaml")
{"golden_diff": "diff --git a/opsdroid/__main__.py b/opsdroid/__main__.py\n--- a/opsdroid/__main__.py\n+++ b/opsdroid/__main__.py\n@@ -1,11 +1,12 @@\n \"\"\"Starts opsdroid.\"\"\"\n \n+import os\n import sys\n import logging\n import argparse\n \n from opsdroid.core import OpsDroid\n-from opsdroid.const import LOG_FILENAME, EXAMPLE_CONFIG_FILE\n+from opsdroid.const import DEFAULT_LOG_FILENAME, EXAMPLE_CONFIG_FILE\n from opsdroid.web import Web\n \n \n@@ -19,9 +20,12 @@\n rootlogger.handlers.pop()\n \n try:\n- logfile_path = config[\"logging\"][\"path\"]\n+ if config[\"logging\"][\"path\"]:\n+ logfile_path = os.path.expanduser(config[\"logging\"][\"path\"])\n+ else:\n+ logfile_path = config[\"logging\"][\"path\"]\n except KeyError:\n- logfile_path = LOG_FILENAME\n+ logfile_path = DEFAULT_LOG_FILENAME\n \n try:\n log_level = get_logging_level(\ndiff --git a/opsdroid/const.py b/opsdroid/const.py\n--- a/opsdroid/const.py\n+++ b/opsdroid/const.py\n@@ -3,10 +3,10 @@\n \n __version__ = \"0.8.1\"\n \n-LOG_FILENAME = 'output.log'\n DEFAULT_GIT_URL = \"https://github.com/opsdroid/\"\n MODULES_DIRECTORY = \"opsdroid-modules\"\n-DEFAULT_ROOT_PATH = os.path.join(os.path.expanduser(\"~\"), \".opsdroid\")\n+DEFAULT_ROOT_PATH = os.path.expanduser(\"~/.opsdroid\")\n+DEFAULT_LOG_FILENAME = os.path.join(DEFAULT_ROOT_PATH, 'output.log')\n DEFAULT_MODULES_PATH = os.path.join(DEFAULT_ROOT_PATH, \"modules\")\n DEFAULT_MODULE_DEPS_PATH = os.path.join(DEFAULT_ROOT_PATH, \"site-packages\")\n DEFAULT_CONFIG_PATH = os.path.join(DEFAULT_ROOT_PATH, \"configuration.yaml\")\n", "issue": "Change default log location\nLogs by default are written to `./opsdroid.log`. So they end up being written wherever you run the command.\r\n\r\nLogs should either be written to `/var/log/opsdroid.log` or as that may not be writeable by all users maybe `~/.opsdroid/opsdroid.log`.\n", "before_files": [{"content": "\"\"\"Constants used by OpsDroid.\"\"\"\nimport os\n\n__version__ = \"0.8.1\"\n\nLOG_FILENAME = 'output.log'\nDEFAULT_GIT_URL = \"https://github.com/opsdroid/\"\nMODULES_DIRECTORY = \"opsdroid-modules\"\nDEFAULT_ROOT_PATH = os.path.join(os.path.expanduser(\"~\"), \".opsdroid\")\nDEFAULT_MODULES_PATH = os.path.join(DEFAULT_ROOT_PATH, \"modules\")\nDEFAULT_MODULE_DEPS_PATH = os.path.join(DEFAULT_ROOT_PATH, \"site-packages\")\nDEFAULT_CONFIG_PATH = os.path.join(DEFAULT_ROOT_PATH, \"configuration.yaml\")\nDEFAULT_MODULE_BRANCH = \"master\"\nEXAMPLE_CONFIG_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),\n \"configuration/example_configuration.yaml\")\n", "path": "opsdroid/const.py"}, {"content": "\"\"\"Starts opsdroid.\"\"\"\n\nimport sys\nimport logging\nimport argparse\n\nfrom opsdroid.core import OpsDroid\nfrom opsdroid.const import LOG_FILENAME, EXAMPLE_CONFIG_FILE\nfrom opsdroid.web import Web\n\n\n_LOGGER = logging.getLogger(\"opsdroid\")\n\n\ndef configure_logging(config):\n \"\"\"Configure the root logger based on user config.\"\"\"\n rootlogger = logging.getLogger()\n while rootlogger.handlers:\n rootlogger.handlers.pop()\n\n try:\n logfile_path = config[\"logging\"][\"path\"]\n except KeyError:\n logfile_path = LOG_FILENAME\n\n try:\n log_level = get_logging_level(\n config[\"logging\"][\"level\"])\n except KeyError:\n log_level = logging.INFO\n\n rootlogger.setLevel(log_level)\n formatter = logging.Formatter('%(levelname)s %(name)s: %(message)s')\n\n console_handler = logging.StreamHandler()\n console_handler.setLevel(log_level)\n console_handler.setFormatter(formatter)\n rootlogger.addHandler(console_handler)\n\n try:\n if not config[\"logging\"][\"console\"]:\n console_handler.setLevel(logging.CRITICAL)\n except KeyError:\n pass\n\n if logfile_path:\n file_handler = logging.FileHandler(logfile_path)\n file_handler.setLevel(log_level)\n file_handler.setFormatter(formatter)\n rootlogger.addHandler(file_handler)\n\n _LOGGER.info(\"=\"*40)\n _LOGGER.info(\"Stated application\")\n\n\ndef get_logging_level(logging_level):\n \"\"\"Get the logger level based on the user configuration.\"\"\"\n if logging_level == 'critical':\n return logging.CRITICAL\n elif logging_level == 'error':\n return logging.ERROR\n elif logging_level == 'warning':\n return logging.WARNING\n elif logging_level == 'debug':\n return logging.DEBUG\n\n return logging.INFO\n\n\ndef parse_args(args):\n \"\"\"Parse command line arguments.\"\"\"\n parser = argparse.ArgumentParser(description='Run opsdroid.')\n parser.add_argument('--gen-config', action=\"store_true\",\n help='prints out an example configuration file')\n return parser.parse_args(args)\n\n\ndef check_dependencies():\n \"\"\"Check for system dependencies required by opsdroid.\"\"\"\n if sys.version_info[0] < 3 or sys.version_info[1] < 5:\n logging.critical(\"Whoops! opsdroid requires python 3.5 or above.\")\n sys.exit(1)\n\n\ndef main():\n \"\"\"Enter the application here.\"\"\"\n args = parse_args(sys.argv[1:])\n\n if args.gen_config:\n with open(EXAMPLE_CONFIG_FILE, 'r') as conf:\n print(conf.read())\n sys.exit(0)\n\n check_dependencies()\n\n restart = True\n\n while restart:\n with OpsDroid() as opsdroid:\n opsdroid.load()\n configure_logging(opsdroid.config)\n opsdroid.web_server = Web(opsdroid)\n opsdroid.start_loop()\n restart = opsdroid.should_restart\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "opsdroid/__main__.py"}], "after_files": [{"content": "\"\"\"Constants used by OpsDroid.\"\"\"\nimport os\n\n__version__ = \"0.8.1\"\n\nDEFAULT_GIT_URL = \"https://github.com/opsdroid/\"\nMODULES_DIRECTORY = \"opsdroid-modules\"\nDEFAULT_ROOT_PATH = os.path.expanduser(\"~/.opsdroid\")\nDEFAULT_LOG_FILENAME = os.path.join(DEFAULT_ROOT_PATH, 'output.log')\nDEFAULT_MODULES_PATH = os.path.join(DEFAULT_ROOT_PATH, \"modules\")\nDEFAULT_MODULE_DEPS_PATH = os.path.join(DEFAULT_ROOT_PATH, \"site-packages\")\nDEFAULT_CONFIG_PATH = os.path.join(DEFAULT_ROOT_PATH, \"configuration.yaml\")\nDEFAULT_MODULE_BRANCH = \"master\"\nEXAMPLE_CONFIG_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)),\n \"configuration/example_configuration.yaml\")\n", "path": "opsdroid/const.py"}, {"content": "\"\"\"Starts opsdroid.\"\"\"\n\nimport os\nimport sys\nimport logging\nimport argparse\n\nfrom opsdroid.core import OpsDroid\nfrom opsdroid.const import DEFAULT_LOG_FILENAME, EXAMPLE_CONFIG_FILE\nfrom opsdroid.web import Web\n\n\n_LOGGER = logging.getLogger(\"opsdroid\")\n\n\ndef configure_logging(config):\n \"\"\"Configure the root logger based on user config.\"\"\"\n rootlogger = logging.getLogger()\n while rootlogger.handlers:\n rootlogger.handlers.pop()\n\n try:\n if config[\"logging\"][\"path\"]:\n logfile_path = os.path.expanduser(config[\"logging\"][\"path\"])\n else:\n logfile_path = config[\"logging\"][\"path\"]\n except KeyError:\n logfile_path = DEFAULT_LOG_FILENAME\n\n try:\n log_level = get_logging_level(\n config[\"logging\"][\"level\"])\n except KeyError:\n log_level = logging.INFO\n\n rootlogger.setLevel(log_level)\n formatter = logging.Formatter('%(levelname)s %(name)s: %(message)s')\n\n console_handler = logging.StreamHandler()\n console_handler.setLevel(log_level)\n console_handler.setFormatter(formatter)\n rootlogger.addHandler(console_handler)\n\n try:\n if not config[\"logging\"][\"console\"]:\n console_handler.setLevel(logging.CRITICAL)\n except KeyError:\n pass\n\n if logfile_path:\n file_handler = logging.FileHandler(logfile_path)\n file_handler.setLevel(log_level)\n file_handler.setFormatter(formatter)\n rootlogger.addHandler(file_handler)\n\n _LOGGER.info(\"=\"*40)\n _LOGGER.info(\"Stated application\")\n\n\ndef get_logging_level(logging_level):\n \"\"\"Get the logger level based on the user configuration.\"\"\"\n if logging_level == 'critical':\n return logging.CRITICAL\n elif logging_level == 'error':\n return logging.ERROR\n elif logging_level == 'warning':\n return logging.WARNING\n elif logging_level == 'debug':\n return logging.DEBUG\n\n return logging.INFO\n\n\ndef parse_args(args):\n \"\"\"Parse command line arguments.\"\"\"\n parser = argparse.ArgumentParser(description='Run opsdroid.')\n parser.add_argument('--gen-config', action=\"store_true\",\n help='prints out an example configuration file')\n return parser.parse_args(args)\n\n\ndef check_dependencies():\n \"\"\"Check for system dependencies required by opsdroid.\"\"\"\n if sys.version_info[0] < 3 or sys.version_info[1] < 5:\n logging.critical(\"Whoops! opsdroid requires python 3.5 or above.\")\n sys.exit(1)\n\n\ndef main():\n \"\"\"Enter the application here.\"\"\"\n args = parse_args(sys.argv[1:])\n\n if args.gen_config:\n with open(EXAMPLE_CONFIG_FILE, 'r') as conf:\n print(conf.read())\n sys.exit(0)\n\n check_dependencies()\n\n restart = True\n\n while restart:\n with OpsDroid() as opsdroid:\n opsdroid.load()\n configure_logging(opsdroid.config)\n opsdroid.web_server = Web(opsdroid)\n opsdroid.start_loop()\n restart = opsdroid.should_restart\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "opsdroid/__main__.py"}]}
1,373
423
gh_patches_debug_25413
rasdani/github-patches
git_diff
getsentry__sentry-27105
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Upgrade from 21.6.1 to 21.6.2 migration error, relation already exists ### Environment self-hosted (`onpremise` deployment) ### Version 21.6.1 upgrade to 21.6.1 ### Steps to Reproduce 1. git fetch tags/21.6.2 2. ./install.sh ### Expected Result Migration to succeed. ### Actual Result ``` django.db.utils.ProgrammingError: ProgrammingError('relation "sentry_groupedmessage_project_id_id_515aaa7e_uniq" already exists\n',) SQL: ALTER TABLE "sentry_groupedmessage" ADD CONSTRAINT "sentry_groupedmessage_project_id_id_515aaa7e_uniq" UNIQUE ("project_id", "id") ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/sentry/migrations/0216_cdc_setup_replication_index.py` Content: ``` 1 # Generated by Django 1.11.29 on 2021-06-30 18:51 2 3 from django.db import migrations 4 5 6 class Migration(migrations.Migration): 7 # This flag is used to mark that a migration shouldn't be automatically run in 8 # production. We set this to True for operations that we think are risky and want 9 # someone from ops to run manually and monitor. 10 # General advice is that if in doubt, mark your migration as `is_dangerous`. 11 # Some things you should always mark as dangerous: 12 # - Large data migrations. Typically we want these to be run manually by ops so that 13 # they can be monitored. Since data migrations will now hold a transaction open 14 # this is even more important. 15 # - Adding columns to highly active tables, even ones that are NULL. 16 is_dangerous = True 17 18 # This flag is used to decide whether to run this migration in a transaction or not. 19 # By default we prefer to run in a transaction, but for migrations where you want 20 # to `CREATE INDEX CONCURRENTLY` this needs to be set to False. Typically you'll 21 # want to create an index concurrently when adding one to an existing table. 22 # You'll also usually want to set this to `False` if you're writing a data 23 # migration, since we don't want the entire migration to run in one long-running 24 # transaction. 25 atomic = False 26 27 dependencies = [ 28 ("sentry", "0215_fix_state"), 29 ] 30 31 operations = [ 32 migrations.AlterUniqueTogether( 33 name="group", 34 unique_together={("project", "id"), ("project", "short_id")}, 35 ), 36 migrations.RunSQL( 37 sql=""" 38 ALTER TABLE sentry_groupasignee REPLICA IDENTITY USING INDEX 39 sentry_groupasignee_project_id_group_id_fbf4364e_uniq 40 """, 41 reverse_sql=""" 42 ALTER TABLE sentry_groupasignee REPLICA IDENTITY DEFAULT 43 """, 44 hints={"tables": ["sentry_groupasignee"]}, 45 ), 46 migrations.RunSQL( 47 sql=""" 48 ALTER TABLE sentry_groupedmessage REPLICA IDENTITY USING INDEX 49 sentry_groupedmessage_project_id_id_515aaa7e_uniq 50 """, 51 reverse_sql=""" 52 ALTER TABLE sentry_groupedmessage REPLICA IDENTITY DEFAULT 53 """, 54 hints={"tables": ["sentry_groupedmessage"]}, 55 ), 56 ] 57 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/sentry/migrations/0216_cdc_setup_replication_index.py b/src/sentry/migrations/0216_cdc_setup_replication_index.py --- a/src/sentry/migrations/0216_cdc_setup_replication_index.py +++ b/src/sentry/migrations/0216_cdc_setup_replication_index.py @@ -29,9 +29,33 @@ ] operations = [ - migrations.AlterUniqueTogether( - name="group", - unique_together={("project", "id"), ("project", "short_id")}, + migrations.SeparateDatabaseAndState( + database_operations=[ + migrations.RunSQL( + """ + CREATE UNIQUE INDEX CONCURRENTLY IF NOT EXISTS "sentry_groupedmessage_project_id_id_515aaa7e_uniq" ON "sentry_groupedmessage" ("project_id", "id"); + """, + reverse_sql=""" + DROP INDEX CONCURRENTLY IF EXISTS sentry_groupedmessage_project_id_id_515aaa7e_uniq; + """, + hints={"tables": ["sentry_groupedmessage"]}, + ), + migrations.RunSQL( + """ + ALTER TABLE "sentry_groupedmessage" ADD CONSTRAINT "sentry_groupedmessage_project_id_id_515aaa7e_uniq" UNIQUE USING INDEX "sentry_groupedmessage_project_id_id_515aaa7e_uniq"; + """, + reverse_sql=""" + ALTER TABLE "sentry_groupedmessage" DROP CONSTRAINT IF EXISTS "sentry_groupedmessage_project_id_id_515aaa7e_uniq"; + """, + hints={"tables": ["sentry_groupedmessage"]}, + ), + ], + state_operations=[ + migrations.AlterUniqueTogether( + name="group", + unique_together={("project", "id"), ("project", "short_id")}, + ), + ], ), migrations.RunSQL( sql="""
{"golden_diff": "diff --git a/src/sentry/migrations/0216_cdc_setup_replication_index.py b/src/sentry/migrations/0216_cdc_setup_replication_index.py\n--- a/src/sentry/migrations/0216_cdc_setup_replication_index.py\n+++ b/src/sentry/migrations/0216_cdc_setup_replication_index.py\n@@ -29,9 +29,33 @@\n ]\n \n operations = [\n- migrations.AlterUniqueTogether(\n- name=\"group\",\n- unique_together={(\"project\", \"id\"), (\"project\", \"short_id\")},\n+ migrations.SeparateDatabaseAndState(\n+ database_operations=[\n+ migrations.RunSQL(\n+ \"\"\"\n+ CREATE UNIQUE INDEX CONCURRENTLY IF NOT EXISTS \"sentry_groupedmessage_project_id_id_515aaa7e_uniq\" ON \"sentry_groupedmessage\" (\"project_id\", \"id\");\n+ \"\"\",\n+ reverse_sql=\"\"\"\n+ DROP INDEX CONCURRENTLY IF EXISTS sentry_groupedmessage_project_id_id_515aaa7e_uniq;\n+ \"\"\",\n+ hints={\"tables\": [\"sentry_groupedmessage\"]},\n+ ),\n+ migrations.RunSQL(\n+ \"\"\"\n+ ALTER TABLE \"sentry_groupedmessage\" ADD CONSTRAINT \"sentry_groupedmessage_project_id_id_515aaa7e_uniq\" UNIQUE USING INDEX \"sentry_groupedmessage_project_id_id_515aaa7e_uniq\";\n+ \"\"\",\n+ reverse_sql=\"\"\"\n+ ALTER TABLE \"sentry_groupedmessage\" DROP CONSTRAINT IF EXISTS \"sentry_groupedmessage_project_id_id_515aaa7e_uniq\";\n+ \"\"\",\n+ hints={\"tables\": [\"sentry_groupedmessage\"]},\n+ ),\n+ ],\n+ state_operations=[\n+ migrations.AlterUniqueTogether(\n+ name=\"group\",\n+ unique_together={(\"project\", \"id\"), (\"project\", \"short_id\")},\n+ ),\n+ ],\n ),\n migrations.RunSQL(\n sql=\"\"\"\n", "issue": "Upgrade from 21.6.1 to 21.6.2 migration error, relation already exists\n### Environment\n\nself-hosted (`onpremise` deployment)\n\n### Version\n\n21.6.1 upgrade to 21.6.1\n\n### Steps to Reproduce\n\n1. git fetch tags/21.6.2\r\n2. ./install.sh\n\n### Expected Result\n\nMigration to succeed.\n\n### Actual Result\n\n```\r\ndjango.db.utils.ProgrammingError: ProgrammingError('relation \"sentry_groupedmessage_project_id_id_515aaa7e_uniq\" already exists\\n',)\r\nSQL: ALTER TABLE \"sentry_groupedmessage\" ADD CONSTRAINT \"sentry_groupedmessage_project_id_id_515aaa7e_uniq\" UNIQUE (\"project_id\", \"id\")\r\n```\n", "before_files": [{"content": "# Generated by Django 1.11.29 on 2021-06-30 18:51\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n # This flag is used to mark that a migration shouldn't be automatically run in\n # production. We set this to True for operations that we think are risky and want\n # someone from ops to run manually and monitor.\n # General advice is that if in doubt, mark your migration as `is_dangerous`.\n # Some things you should always mark as dangerous:\n # - Large data migrations. Typically we want these to be run manually by ops so that\n # they can be monitored. Since data migrations will now hold a transaction open\n # this is even more important.\n # - Adding columns to highly active tables, even ones that are NULL.\n is_dangerous = True\n\n # This flag is used to decide whether to run this migration in a transaction or not.\n # By default we prefer to run in a transaction, but for migrations where you want\n # to `CREATE INDEX CONCURRENTLY` this needs to be set to False. Typically you'll\n # want to create an index concurrently when adding one to an existing table.\n # You'll also usually want to set this to `False` if you're writing a data\n # migration, since we don't want the entire migration to run in one long-running\n # transaction.\n atomic = False\n\n dependencies = [\n (\"sentry\", \"0215_fix_state\"),\n ]\n\n operations = [\n migrations.AlterUniqueTogether(\n name=\"group\",\n unique_together={(\"project\", \"id\"), (\"project\", \"short_id\")},\n ),\n migrations.RunSQL(\n sql=\"\"\"\n ALTER TABLE sentry_groupasignee REPLICA IDENTITY USING INDEX\n sentry_groupasignee_project_id_group_id_fbf4364e_uniq\n \"\"\",\n reverse_sql=\"\"\"\n ALTER TABLE sentry_groupasignee REPLICA IDENTITY DEFAULT\n \"\"\",\n hints={\"tables\": [\"sentry_groupasignee\"]},\n ),\n migrations.RunSQL(\n sql=\"\"\"\n ALTER TABLE sentry_groupedmessage REPLICA IDENTITY USING INDEX\n sentry_groupedmessage_project_id_id_515aaa7e_uniq\n \"\"\",\n reverse_sql=\"\"\"\n ALTER TABLE sentry_groupedmessage REPLICA IDENTITY DEFAULT\n \"\"\",\n hints={\"tables\": [\"sentry_groupedmessage\"]},\n ),\n ]\n", "path": "src/sentry/migrations/0216_cdc_setup_replication_index.py"}], "after_files": [{"content": "# Generated by Django 1.11.29 on 2021-06-30 18:51\n\nfrom django.db import migrations\n\n\nclass Migration(migrations.Migration):\n # This flag is used to mark that a migration shouldn't be automatically run in\n # production. We set this to True for operations that we think are risky and want\n # someone from ops to run manually and monitor.\n # General advice is that if in doubt, mark your migration as `is_dangerous`.\n # Some things you should always mark as dangerous:\n # - Large data migrations. Typically we want these to be run manually by ops so that\n # they can be monitored. Since data migrations will now hold a transaction open\n # this is even more important.\n # - Adding columns to highly active tables, even ones that are NULL.\n is_dangerous = True\n\n # This flag is used to decide whether to run this migration in a transaction or not.\n # By default we prefer to run in a transaction, but for migrations where you want\n # to `CREATE INDEX CONCURRENTLY` this needs to be set to False. Typically you'll\n # want to create an index concurrently when adding one to an existing table.\n # You'll also usually want to set this to `False` if you're writing a data\n # migration, since we don't want the entire migration to run in one long-running\n # transaction.\n atomic = False\n\n dependencies = [\n (\"sentry\", \"0215_fix_state\"),\n ]\n\n operations = [\n migrations.SeparateDatabaseAndState(\n database_operations=[\n migrations.RunSQL(\n \"\"\"\n CREATE UNIQUE INDEX CONCURRENTLY IF NOT EXISTS \"sentry_groupedmessage_project_id_id_515aaa7e_uniq\" ON \"sentry_groupedmessage\" (\"project_id\", \"id\");\n \"\"\",\n reverse_sql=\"\"\"\n DROP INDEX CONCURRENTLY IF EXISTS sentry_groupedmessage_project_id_id_515aaa7e_uniq;\n \"\"\",\n hints={\"tables\": [\"sentry_groupedmessage\"]},\n ),\n migrations.RunSQL(\n \"\"\"\n ALTER TABLE \"sentry_groupedmessage\" ADD CONSTRAINT \"sentry_groupedmessage_project_id_id_515aaa7e_uniq\" UNIQUE USING INDEX \"sentry_groupedmessage_project_id_id_515aaa7e_uniq\";\n \"\"\",\n reverse_sql=\"\"\"\n ALTER TABLE \"sentry_groupedmessage\" DROP CONSTRAINT IF EXISTS \"sentry_groupedmessage_project_id_id_515aaa7e_uniq\";\n \"\"\",\n hints={\"tables\": [\"sentry_groupedmessage\"]},\n ),\n ],\n state_operations=[\n migrations.AlterUniqueTogether(\n name=\"group\",\n unique_together={(\"project\", \"id\"), (\"project\", \"short_id\")},\n ),\n ],\n ),\n migrations.RunSQL(\n sql=\"\"\"\n ALTER TABLE sentry_groupasignee REPLICA IDENTITY USING INDEX\n sentry_groupasignee_project_id_group_id_fbf4364e_uniq\n \"\"\",\n reverse_sql=\"\"\"\n ALTER TABLE sentry_groupasignee REPLICA IDENTITY DEFAULT\n \"\"\",\n hints={\"tables\": [\"sentry_groupasignee\"]},\n ),\n migrations.RunSQL(\n sql=\"\"\"\n ALTER TABLE sentry_groupedmessage REPLICA IDENTITY USING INDEX\n sentry_groupedmessage_project_id_id_515aaa7e_uniq\n \"\"\",\n reverse_sql=\"\"\"\n ALTER TABLE sentry_groupedmessage REPLICA IDENTITY DEFAULT\n \"\"\",\n hints={\"tables\": [\"sentry_groupedmessage\"]},\n ),\n ]\n", "path": "src/sentry/migrations/0216_cdc_setup_replication_index.py"}]}
1,100
444
gh_patches_debug_14886
rasdani/github-patches
git_diff
DDMAL__CantusDB-582
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- On Century Detail pages, sometimes unpublished sources are listed Visiting century/3863 while not logged in, there's a link to source/672452. When I click on it, I get a 403 Access Denied error. We need to ensure that links to sources that are inaccessible to a user are never displayed. This bug occurs on both staging and production. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `django/cantusdb_project/main_app/views/century.py` Content: ``` 1 from django.views.generic import DetailView 2 from main_app.models import Century 3 4 class CenturyDetailView(DetailView): 5 model = Century 6 context_object_name = "century" 7 template_name = "century_detail.html" 8 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/django/cantusdb_project/main_app/views/century.py b/django/cantusdb_project/main_app/views/century.py --- a/django/cantusdb_project/main_app/views/century.py +++ b/django/cantusdb_project/main_app/views/century.py @@ -1,7 +1,20 @@ from django.views.generic import DetailView -from main_app.models import Century +from main_app.models import Century, Source +from typing import Any class CenturyDetailView(DetailView): model = Century context_object_name = "century" template_name = "century_detail.html" + + def get_context_data(self, **kwargs: Any) -> dict[str, Any]: + context = super().get_context_data(**kwargs) + century = self.get_object() + user = self.request.user + display_unpublished = user.is_authenticated + sources = Source.objects.filter(century=century) + if not display_unpublished: + sources = sources.filter(published=True) + sources=sources.only("title", "id") + context["sources"] = sources + return context \ No newline at end of file
{"golden_diff": "diff --git a/django/cantusdb_project/main_app/views/century.py b/django/cantusdb_project/main_app/views/century.py\n--- a/django/cantusdb_project/main_app/views/century.py\n+++ b/django/cantusdb_project/main_app/views/century.py\n@@ -1,7 +1,20 @@\n from django.views.generic import DetailView\n-from main_app.models import Century\n+from main_app.models import Century, Source\n+from typing import Any\n \n class CenturyDetailView(DetailView):\n model = Century\n context_object_name = \"century\"\n template_name = \"century_detail.html\"\n+\n+ def get_context_data(self, **kwargs: Any) -> dict[str, Any]:\n+ context = super().get_context_data(**kwargs)\n+ century = self.get_object()\n+ user = self.request.user\n+ display_unpublished = user.is_authenticated\n+ sources = Source.objects.filter(century=century)\n+ if not display_unpublished:\n+ sources = sources.filter(published=True)\n+ sources=sources.only(\"title\", \"id\")\n+ context[\"sources\"] = sources\n+ return context\n\\ No newline at end of file\n", "issue": "On Century Detail pages, sometimes unpublished sources are listed\nVisiting century/3863 while not logged in, there's a link to source/672452. When I click on it, I get a 403 Access Denied error. We need to ensure that links to sources that are inaccessible to a user are never displayed.\r\n\r\nThis bug occurs on both staging and production.\n", "before_files": [{"content": "from django.views.generic import DetailView\nfrom main_app.models import Century\n\nclass CenturyDetailView(DetailView):\n model = Century\n context_object_name = \"century\"\n template_name = \"century_detail.html\"\n", "path": "django/cantusdb_project/main_app/views/century.py"}], "after_files": [{"content": "from django.views.generic import DetailView\nfrom main_app.models import Century, Source\nfrom typing import Any\n\nclass CenturyDetailView(DetailView):\n model = Century\n context_object_name = \"century\"\n template_name = \"century_detail.html\"\n\n def get_context_data(self, **kwargs: Any) -> dict[str, Any]:\n context = super().get_context_data(**kwargs)\n century = self.get_object()\n user = self.request.user\n display_unpublished = user.is_authenticated\n sources = Source.objects.filter(century=century)\n if not display_unpublished:\n sources = sources.filter(published=True)\n sources=sources.only(\"title\", \"id\")\n context[\"sources\"] = sources\n return context", "path": "django/cantusdb_project/main_app/views/century.py"}]}
404
265
gh_patches_debug_36629
rasdani/github-patches
git_diff
svthalia__concrexit-3382
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Facedetection admin resubmit action Sometimes the facedetection lambda can fail randomly. Photos are resubmitted nightly if that happens, but it may be nice to have an alternative for that to do it sooner, without SSHing into the server. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `website/facedetection/admin.py` Content: ``` 1 from django.contrib import admin 2 from django.db.models.query import Prefetch 3 from django.urls import reverse 4 from django.utils.html import format_html 5 from django.utils.safestring import mark_safe 6 7 from .models import ( 8 FaceDetectionPhoto, 9 PhotoFaceEncoding, 10 ReferenceFace, 11 ReferenceFaceEncoding, 12 ) 13 14 15 class ReferenceFaceEncodingInline(admin.TabularInline): 16 model = ReferenceFaceEncoding 17 readonly_fields = ["num_matches"] 18 fields = ["num_matches"] 19 can_delete = False 20 extra = 0 21 22 def has_add_permission(self, request, obj=None): 23 return False # Encodings should not be created manually. 24 25 def get_queryset(self, request): 26 return super().get_queryset(request).only("reference") 27 28 29 @admin.register(ReferenceFace) 30 class ReferenceFaceAdmin(admin.ModelAdmin): 31 list_display = [ 32 "user", 33 "status", 34 "created_at", 35 "marked_for_deletion_at", 36 ] 37 38 search_fields = [ 39 "user__username", 40 "user__first_name", 41 "user__last_name", 42 ] 43 44 list_filter = ["status", "marked_for_deletion_at"] 45 inlines = [ReferenceFaceEncodingInline] 46 47 def get_readonly_fields(self, request, obj=None): 48 if obj is None: 49 return ["created_at", "submitted_at", "status"] 50 return ["file", "user", "created_at", "submitted_at", "status"] 51 52 53 class PhotoFaceEncodingInline(admin.TabularInline): 54 model = PhotoFaceEncoding 55 readonly_fields = ["view_matches"] 56 fields = ["view_matches"] 57 can_delete = False 58 extra = 0 59 60 @admin.display(description="Matches") 61 def view_matches(self, obj): 62 reference_faces = [match.reference for match in obj.matches.all()] 63 if not reference_faces: 64 return "-" 65 66 links = [ 67 format_html( 68 '<a href="{url}">{text}</a>', 69 url=reverse( 70 "admin:facedetection_referenceface_change", 71 kwargs={"object_id": rf.pk}, 72 ), 73 text=str(rf), 74 ) 75 for rf in reference_faces 76 ] 77 return mark_safe(", ".join(links)) 78 79 def has_add_permission(self, request, obj=None): 80 return False # Encodings should not be created manually. 81 82 def get_queryset(self, request): 83 return ( 84 super() 85 .get_queryset(request) 86 .only("photo") # Don't select the 128 encoding fields. 87 .prefetch_related( 88 "photo__photo__album", 89 Prefetch( 90 "matches", 91 queryset=ReferenceFaceEncoding.objects.select_related( 92 "reference", "reference__user" 93 ).only("reference"), 94 ), 95 ) 96 ) 97 98 99 @admin.register(FaceDetectionPhoto) 100 class FaceDetectionPhotoAdmin(admin.ModelAdmin): 101 list_display = [ 102 "__str__", 103 "status", 104 "submitted_at", 105 "num_faces", 106 ] 107 108 readonly_fields = [ 109 "photo", 110 "submitted_at", 111 "status", 112 ] 113 114 search_fields = [ 115 "photo__album__title", 116 "photo__album__date", 117 "photo__file", 118 ] 119 120 list_filter = ["status", "submitted_at"] 121 inlines = [PhotoFaceEncodingInline] 122 123 def get_queryset(self, request): 124 return ( 125 super() 126 .get_queryset(request) 127 .select_related("photo") 128 .prefetch_related("photo__album") 129 .select_properties("num_faces") 130 ) 131 132 def has_add_permission(self, request): 133 return False 134 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/website/facedetection/admin.py b/website/facedetection/admin.py --- a/website/facedetection/admin.py +++ b/website/facedetection/admin.py @@ -1,9 +1,11 @@ -from django.contrib import admin +from django.contrib import admin, messages from django.db.models.query import Prefetch from django.urls import reverse from django.utils.html import format_html from django.utils.safestring import mark_safe +from facedetection.services import trigger_facedetection_lambda + from .models import ( FaceDetectionPhoto, PhotoFaceEncoding, @@ -44,11 +46,25 @@ list_filter = ["status", "marked_for_deletion_at"] inlines = [ReferenceFaceEncodingInline] + actions = ["resubmit_reference_faces"] + def get_readonly_fields(self, request, obj=None): if obj is None: return ["created_at", "submitted_at", "status"] return ["file", "user", "created_at", "submitted_at", "status"] + @admin.action(description="Resubmit reference faces for analysis.") + def resubmit_reference_faces(self, request, queryset) -> list[ReferenceFace]: + querylist = list( + queryset.filter( + status=FaceDetectionPhoto.Status.PROCESSING, + ) + ) + if querylist: + trigger_facedetection_lambda(querylist) + messages.success(request, "Resubmit successful.") + return querylist + class PhotoFaceEncodingInline(admin.TabularInline): model = PhotoFaceEncoding @@ -120,6 +136,8 @@ list_filter = ["status", "submitted_at"] inlines = [PhotoFaceEncodingInline] + actions = ["resubmit_face_detection_photos"] + def get_queryset(self, request): return ( super() @@ -131,3 +149,17 @@ def has_add_permission(self, request): return False + + @admin.action(description="Resubmits face detection photos for analysis.") + def resubmit_face_detection_photos( + self, request, queryset + ) -> list[FaceDetectionPhoto]: + querylist = list( + queryset.filter( + status=FaceDetectionPhoto.Status.PROCESSING, + ) + ) + if querylist: + trigger_facedetection_lambda(querylist) + messages.success(request, "Resubmit successful.") + return querylist
{"golden_diff": "diff --git a/website/facedetection/admin.py b/website/facedetection/admin.py\n--- a/website/facedetection/admin.py\n+++ b/website/facedetection/admin.py\n@@ -1,9 +1,11 @@\n-from django.contrib import admin\n+from django.contrib import admin, messages\n from django.db.models.query import Prefetch\n from django.urls import reverse\n from django.utils.html import format_html\n from django.utils.safestring import mark_safe\n \n+from facedetection.services import trigger_facedetection_lambda\n+\n from .models import (\n FaceDetectionPhoto,\n PhotoFaceEncoding,\n@@ -44,11 +46,25 @@\n list_filter = [\"status\", \"marked_for_deletion_at\"]\n inlines = [ReferenceFaceEncodingInline]\n \n+ actions = [\"resubmit_reference_faces\"]\n+\n def get_readonly_fields(self, request, obj=None):\n if obj is None:\n return [\"created_at\", \"submitted_at\", \"status\"]\n return [\"file\", \"user\", \"created_at\", \"submitted_at\", \"status\"]\n \n+ @admin.action(description=\"Resubmit reference faces for analysis.\")\n+ def resubmit_reference_faces(self, request, queryset) -> list[ReferenceFace]:\n+ querylist = list(\n+ queryset.filter(\n+ status=FaceDetectionPhoto.Status.PROCESSING,\n+ )\n+ )\n+ if querylist:\n+ trigger_facedetection_lambda(querylist)\n+ messages.success(request, \"Resubmit successful.\")\n+ return querylist\n+\n \n class PhotoFaceEncodingInline(admin.TabularInline):\n model = PhotoFaceEncoding\n@@ -120,6 +136,8 @@\n list_filter = [\"status\", \"submitted_at\"]\n inlines = [PhotoFaceEncodingInline]\n \n+ actions = [\"resubmit_face_detection_photos\"]\n+\n def get_queryset(self, request):\n return (\n super()\n@@ -131,3 +149,17 @@\n \n def has_add_permission(self, request):\n return False\n+\n+ @admin.action(description=\"Resubmits face detection photos for analysis.\")\n+ def resubmit_face_detection_photos(\n+ self, request, queryset\n+ ) -> list[FaceDetectionPhoto]:\n+ querylist = list(\n+ queryset.filter(\n+ status=FaceDetectionPhoto.Status.PROCESSING,\n+ )\n+ )\n+ if querylist:\n+ trigger_facedetection_lambda(querylist)\n+ messages.success(request, \"Resubmit successful.\")\n+ return querylist\n", "issue": "Facedetection admin resubmit action\nSometimes the facedetection lambda can fail randomly. Photos are resubmitted nightly if that happens, but it may be nice to have an alternative for that to do it sooner, without SSHing into the server.\r\n\n", "before_files": [{"content": "from django.contrib import admin\nfrom django.db.models.query import Prefetch\nfrom django.urls import reverse\nfrom django.utils.html import format_html\nfrom django.utils.safestring import mark_safe\n\nfrom .models import (\n FaceDetectionPhoto,\n PhotoFaceEncoding,\n ReferenceFace,\n ReferenceFaceEncoding,\n)\n\n\nclass ReferenceFaceEncodingInline(admin.TabularInline):\n model = ReferenceFaceEncoding\n readonly_fields = [\"num_matches\"]\n fields = [\"num_matches\"]\n can_delete = False\n extra = 0\n\n def has_add_permission(self, request, obj=None):\n return False # Encodings should not be created manually.\n\n def get_queryset(self, request):\n return super().get_queryset(request).only(\"reference\")\n\n\[email protected](ReferenceFace)\nclass ReferenceFaceAdmin(admin.ModelAdmin):\n list_display = [\n \"user\",\n \"status\",\n \"created_at\",\n \"marked_for_deletion_at\",\n ]\n\n search_fields = [\n \"user__username\",\n \"user__first_name\",\n \"user__last_name\",\n ]\n\n list_filter = [\"status\", \"marked_for_deletion_at\"]\n inlines = [ReferenceFaceEncodingInline]\n\n def get_readonly_fields(self, request, obj=None):\n if obj is None:\n return [\"created_at\", \"submitted_at\", \"status\"]\n return [\"file\", \"user\", \"created_at\", \"submitted_at\", \"status\"]\n\n\nclass PhotoFaceEncodingInline(admin.TabularInline):\n model = PhotoFaceEncoding\n readonly_fields = [\"view_matches\"]\n fields = [\"view_matches\"]\n can_delete = False\n extra = 0\n\n @admin.display(description=\"Matches\")\n def view_matches(self, obj):\n reference_faces = [match.reference for match in obj.matches.all()]\n if not reference_faces:\n return \"-\"\n\n links = [\n format_html(\n '<a href=\"{url}\">{text}</a>',\n url=reverse(\n \"admin:facedetection_referenceface_change\",\n kwargs={\"object_id\": rf.pk},\n ),\n text=str(rf),\n )\n for rf in reference_faces\n ]\n return mark_safe(\", \".join(links))\n\n def has_add_permission(self, request, obj=None):\n return False # Encodings should not be created manually.\n\n def get_queryset(self, request):\n return (\n super()\n .get_queryset(request)\n .only(\"photo\") # Don't select the 128 encoding fields.\n .prefetch_related(\n \"photo__photo__album\",\n Prefetch(\n \"matches\",\n queryset=ReferenceFaceEncoding.objects.select_related(\n \"reference\", \"reference__user\"\n ).only(\"reference\"),\n ),\n )\n )\n\n\[email protected](FaceDetectionPhoto)\nclass FaceDetectionPhotoAdmin(admin.ModelAdmin):\n list_display = [\n \"__str__\",\n \"status\",\n \"submitted_at\",\n \"num_faces\",\n ]\n\n readonly_fields = [\n \"photo\",\n \"submitted_at\",\n \"status\",\n ]\n\n search_fields = [\n \"photo__album__title\",\n \"photo__album__date\",\n \"photo__file\",\n ]\n\n list_filter = [\"status\", \"submitted_at\"]\n inlines = [PhotoFaceEncodingInline]\n\n def get_queryset(self, request):\n return (\n super()\n .get_queryset(request)\n .select_related(\"photo\")\n .prefetch_related(\"photo__album\")\n .select_properties(\"num_faces\")\n )\n\n def has_add_permission(self, request):\n return False\n", "path": "website/facedetection/admin.py"}], "after_files": [{"content": "from django.contrib import admin, messages\nfrom django.db.models.query import Prefetch\nfrom django.urls import reverse\nfrom django.utils.html import format_html\nfrom django.utils.safestring import mark_safe\n\nfrom facedetection.services import trigger_facedetection_lambda\n\nfrom .models import (\n FaceDetectionPhoto,\n PhotoFaceEncoding,\n ReferenceFace,\n ReferenceFaceEncoding,\n)\n\n\nclass ReferenceFaceEncodingInline(admin.TabularInline):\n model = ReferenceFaceEncoding\n readonly_fields = [\"num_matches\"]\n fields = [\"num_matches\"]\n can_delete = False\n extra = 0\n\n def has_add_permission(self, request, obj=None):\n return False # Encodings should not be created manually.\n\n def get_queryset(self, request):\n return super().get_queryset(request).only(\"reference\")\n\n\[email protected](ReferenceFace)\nclass ReferenceFaceAdmin(admin.ModelAdmin):\n list_display = [\n \"user\",\n \"status\",\n \"created_at\",\n \"marked_for_deletion_at\",\n ]\n\n search_fields = [\n \"user__username\",\n \"user__first_name\",\n \"user__last_name\",\n ]\n\n list_filter = [\"status\", \"marked_for_deletion_at\"]\n inlines = [ReferenceFaceEncodingInline]\n\n actions = [\"resubmit_reference_faces\"]\n\n def get_readonly_fields(self, request, obj=None):\n if obj is None:\n return [\"created_at\", \"submitted_at\", \"status\"]\n return [\"file\", \"user\", \"created_at\", \"submitted_at\", \"status\"]\n\n @admin.action(description=\"Resubmit reference faces for analysis.\")\n def resubmit_reference_faces(self, request, queryset) -> list[ReferenceFace]:\n querylist = list(\n queryset.filter(\n status=FaceDetectionPhoto.Status.PROCESSING,\n )\n )\n if querylist:\n trigger_facedetection_lambda(querylist)\n messages.success(request, \"Resubmit successful.\")\n return querylist\n\n\nclass PhotoFaceEncodingInline(admin.TabularInline):\n model = PhotoFaceEncoding\n readonly_fields = [\"view_matches\"]\n fields = [\"view_matches\"]\n can_delete = False\n extra = 0\n\n @admin.display(description=\"Matches\")\n def view_matches(self, obj):\n reference_faces = [match.reference for match in obj.matches.all()]\n if not reference_faces:\n return \"-\"\n\n links = [\n format_html(\n '<a href=\"{url}\">{text}</a>',\n url=reverse(\n \"admin:facedetection_referenceface_change\",\n kwargs={\"object_id\": rf.pk},\n ),\n text=str(rf),\n )\n for rf in reference_faces\n ]\n return mark_safe(\", \".join(links))\n\n def has_add_permission(self, request, obj=None):\n return False # Encodings should not be created manually.\n\n def get_queryset(self, request):\n return (\n super()\n .get_queryset(request)\n .only(\"photo\") # Don't select the 128 encoding fields.\n .prefetch_related(\n \"photo__photo__album\",\n Prefetch(\n \"matches\",\n queryset=ReferenceFaceEncoding.objects.select_related(\n \"reference\", \"reference__user\"\n ).only(\"reference\"),\n ),\n )\n )\n\n\[email protected](FaceDetectionPhoto)\nclass FaceDetectionPhotoAdmin(admin.ModelAdmin):\n list_display = [\n \"__str__\",\n \"status\",\n \"submitted_at\",\n \"num_faces\",\n ]\n\n readonly_fields = [\n \"photo\",\n \"submitted_at\",\n \"status\",\n ]\n\n search_fields = [\n \"photo__album__title\",\n \"photo__album__date\",\n \"photo__file\",\n ]\n\n list_filter = [\"status\", \"submitted_at\"]\n inlines = [PhotoFaceEncodingInline]\n\n actions = [\"resubmit_face_detection_photos\"]\n\n def get_queryset(self, request):\n return (\n super()\n .get_queryset(request)\n .select_related(\"photo\")\n .prefetch_related(\"photo__album\")\n .select_properties(\"num_faces\")\n )\n\n def has_add_permission(self, request):\n return False\n\n @admin.action(description=\"Resubmits face detection photos for analysis.\")\n def resubmit_face_detection_photos(\n self, request, queryset\n ) -> list[FaceDetectionPhoto]:\n querylist = list(\n queryset.filter(\n status=FaceDetectionPhoto.Status.PROCESSING,\n )\n )\n if querylist:\n trigger_facedetection_lambda(querylist)\n messages.success(request, \"Resubmit successful.\")\n return querylist\n", "path": "website/facedetection/admin.py"}]}
1,371
549
gh_patches_debug_8484
rasdani/github-patches
git_diff
pre-commit__pre-commit-1789
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- recursive submodule support for `language: golang` I added this hook in `pre-commit-hooks.yaml` in `https://github.com/google/go-jsonnet`, and then when I try to use this hook as: ``` repos: - repo: https://github.com/google/go-jsonnet rev: 4a3144a417b7eb9b1f7e56741a9e72f3155de3fa hooks: - id: jsonnet-format ``` then I see following error. ``` Traceback (most recent call last): File "/usr/local/Cellar/pre-commit/2.10.0/libexec/lib/python3.9/site-packages/pre_commit/error_handler.py", line 65, in error_handler yield File "/usr/local/Cellar/pre-commit/2.10.0/libexec/lib/python3.9/site-packages/pre_commit/main.py", line 378, in main return run(args.config, store, args) File "/usr/local/Cellar/pre-commit/2.10.0/libexec/lib/python3.9/site-packages/pre_commit/commands/run.py", line 403, in run install_hook_envs(hooks, store) File "/usr/local/Cellar/pre-commit/2.10.0/libexec/lib/python3.9/site-packages/pre_commit/repository.py", line 224, in install_hook_envs _hook_install(hook) File "/usr/local/Cellar/pre-commit/2.10.0/libexec/lib/python3.9/site-packages/pre_commit/repository.py", line 82, in _hook_install lang.install_environment( File "/usr/local/Cellar/pre-commit/2.10.0/libexec/lib/python3.9/site-packages/pre_commit/languages/golang.py", line 81, in install_environment cmd_output_b('go', 'get', './...', cwd=repo_src_dir, env=env) File "/usr/local/Cellar/pre-commit/2.10.0/libexec/lib/python3.9/site-packages/pre_commit/util.py", line 154, in cmd_output_b raise CalledProcessError(returncode, cmd, retcode, stdout_b, stderr_b) pre_commit.util.CalledProcessError: command: ('/usr/local/bin/go', 'get', './...') return code: 2 expected return code: 0 stdout: (none) stderr: go: downloading github.com/sergi/go-diff v1.1.0 go: downloading github.com/fatih/color v1.9.0 go: downloading github.com/mattn/go-colorable v0.1.4 go: downloading github.com/mattn/go-isatty v0.0.11 go: downloading golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 # github.com/google/go-jsonnet/c-bindings libjsonnet.cpp:5:14: fatal error: 'libjsonnet.h' file not found ``` Any idea? Thanks. _Originally posted by @gaurav517 in https://github.com/pre-commit/pre-commit/issues/1785#issuecomment-774486062_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pre_commit/languages/golang.py` Content: ``` 1 import contextlib 2 import os.path 3 import sys 4 from typing import Generator 5 from typing import Sequence 6 from typing import Tuple 7 8 import pre_commit.constants as C 9 from pre_commit import git 10 from pre_commit.envcontext import envcontext 11 from pre_commit.envcontext import PatchesT 12 from pre_commit.envcontext import Var 13 from pre_commit.hook import Hook 14 from pre_commit.languages import helpers 15 from pre_commit.prefix import Prefix 16 from pre_commit.util import clean_path_on_failure 17 from pre_commit.util import cmd_output 18 from pre_commit.util import cmd_output_b 19 from pre_commit.util import rmtree 20 21 ENVIRONMENT_DIR = 'golangenv' 22 get_default_version = helpers.basic_get_default_version 23 healthy = helpers.basic_healthy 24 25 26 def get_env_patch(venv: str) -> PatchesT: 27 return ( 28 ('PATH', (os.path.join(venv, 'bin'), os.pathsep, Var('PATH'))), 29 ) 30 31 32 @contextlib.contextmanager 33 def in_env(prefix: Prefix) -> Generator[None, None, None]: 34 envdir = prefix.path( 35 helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT), 36 ) 37 with envcontext(get_env_patch(envdir)): 38 yield 39 40 41 def guess_go_dir(remote_url: str) -> str: 42 if remote_url.endswith('.git'): 43 remote_url = remote_url[:-1 * len('.git')] 44 looks_like_url = ( 45 not remote_url.startswith('file://') and 46 ('//' in remote_url or '@' in remote_url) 47 ) 48 remote_url = remote_url.replace(':', '/') 49 if looks_like_url: 50 _, _, remote_url = remote_url.rpartition('//') 51 _, _, remote_url = remote_url.rpartition('@') 52 return remote_url 53 else: 54 return 'unknown_src_dir' 55 56 57 def install_environment( 58 prefix: Prefix, 59 version: str, 60 additional_dependencies: Sequence[str], 61 ) -> None: 62 helpers.assert_version_default('golang', version) 63 directory = prefix.path( 64 helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT), 65 ) 66 67 with clean_path_on_failure(directory): 68 remote = git.get_remote_url(prefix.prefix_dir) 69 repo_src_dir = os.path.join(directory, 'src', guess_go_dir(remote)) 70 71 # Clone into the goenv we'll create 72 helpers.run_setup_cmd(prefix, ('git', 'clone', '.', repo_src_dir)) 73 74 if sys.platform == 'cygwin': # pragma: no cover 75 _, gopath, _ = cmd_output('cygpath', '-w', directory) 76 gopath = gopath.strip() 77 else: 78 gopath = directory 79 env = dict(os.environ, GOPATH=gopath) 80 env.pop('GOBIN', None) 81 cmd_output_b('go', 'get', './...', cwd=repo_src_dir, env=env) 82 for dependency in additional_dependencies: 83 cmd_output_b('go', 'get', dependency, cwd=repo_src_dir, env=env) 84 # Same some disk space, we don't need these after installation 85 rmtree(prefix.path(directory, 'src')) 86 pkgdir = prefix.path(directory, 'pkg') 87 if os.path.exists(pkgdir): # pragma: no cover (go<1.10) 88 rmtree(pkgdir) 89 90 91 def run_hook( 92 hook: Hook, 93 file_args: Sequence[str], 94 color: bool, 95 ) -> Tuple[int, bytes]: 96 with in_env(hook.prefix): 97 return helpers.run_xargs(hook, hook.cmd, file_args, color=color) 98 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pre_commit/languages/golang.py b/pre_commit/languages/golang.py --- a/pre_commit/languages/golang.py +++ b/pre_commit/languages/golang.py @@ -69,7 +69,8 @@ repo_src_dir = os.path.join(directory, 'src', guess_go_dir(remote)) # Clone into the goenv we'll create - helpers.run_setup_cmd(prefix, ('git', 'clone', '.', repo_src_dir)) + cmd = ('git', 'clone', '--recursive', '.', repo_src_dir) + helpers.run_setup_cmd(prefix, cmd) if sys.platform == 'cygwin': # pragma: no cover _, gopath, _ = cmd_output('cygpath', '-w', directory)
{"golden_diff": "diff --git a/pre_commit/languages/golang.py b/pre_commit/languages/golang.py\n--- a/pre_commit/languages/golang.py\n+++ b/pre_commit/languages/golang.py\n@@ -69,7 +69,8 @@\n repo_src_dir = os.path.join(directory, 'src', guess_go_dir(remote))\n \n # Clone into the goenv we'll create\n- helpers.run_setup_cmd(prefix, ('git', 'clone', '.', repo_src_dir))\n+ cmd = ('git', 'clone', '--recursive', '.', repo_src_dir)\n+ helpers.run_setup_cmd(prefix, cmd)\n \n if sys.platform == 'cygwin': # pragma: no cover\n _, gopath, _ = cmd_output('cygpath', '-w', directory)\n", "issue": "recursive submodule support for `language: golang`\nI added this hook in `pre-commit-hooks.yaml` in `https://github.com/google/go-jsonnet`, and then when I try to use this hook as:\r\n```\r\nrepos:\r\n - repo: https://github.com/google/go-jsonnet\r\n rev: 4a3144a417b7eb9b1f7e56741a9e72f3155de3fa\r\n hooks:\r\n - id: jsonnet-format\r\n```\r\nthen I see following error.\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/usr/local/Cellar/pre-commit/2.10.0/libexec/lib/python3.9/site-packages/pre_commit/error_handler.py\", line 65, in error_handler\r\n yield\r\n File \"/usr/local/Cellar/pre-commit/2.10.0/libexec/lib/python3.9/site-packages/pre_commit/main.py\", line 378, in main\r\n return run(args.config, store, args)\r\n File \"/usr/local/Cellar/pre-commit/2.10.0/libexec/lib/python3.9/site-packages/pre_commit/commands/run.py\", line 403, in run\r\n install_hook_envs(hooks, store)\r\n File \"/usr/local/Cellar/pre-commit/2.10.0/libexec/lib/python3.9/site-packages/pre_commit/repository.py\", line 224, in install_hook_envs\r\n _hook_install(hook)\r\n File \"/usr/local/Cellar/pre-commit/2.10.0/libexec/lib/python3.9/site-packages/pre_commit/repository.py\", line 82, in _hook_install\r\n lang.install_environment(\r\n File \"/usr/local/Cellar/pre-commit/2.10.0/libexec/lib/python3.9/site-packages/pre_commit/languages/golang.py\", line 81, in install_environment\r\n cmd_output_b('go', 'get', './...', cwd=repo_src_dir, env=env)\r\n File \"/usr/local/Cellar/pre-commit/2.10.0/libexec/lib/python3.9/site-packages/pre_commit/util.py\", line 154, in cmd_output_b\r\n raise CalledProcessError(returncode, cmd, retcode, stdout_b, stderr_b)\r\npre_commit.util.CalledProcessError: command: ('/usr/local/bin/go', 'get', './...')\r\nreturn code: 2\r\nexpected return code: 0\r\nstdout: (none)\r\nstderr:\r\n go: downloading github.com/sergi/go-diff v1.1.0\r\n go: downloading github.com/fatih/color v1.9.0\r\n go: downloading github.com/mattn/go-colorable v0.1.4\r\n go: downloading github.com/mattn/go-isatty v0.0.11\r\n go: downloading golang.org/x/sys v0.0.0-20191026070338-33540a1f6037\r\n # github.com/google/go-jsonnet/c-bindings\r\n libjsonnet.cpp:5:14: fatal error: 'libjsonnet.h' file not found\r\n\r\n```\r\nAny idea? Thanks.\r\n\r\n_Originally posted by @gaurav517 in https://github.com/pre-commit/pre-commit/issues/1785#issuecomment-774486062_\n", "before_files": [{"content": "import contextlib\nimport os.path\nimport sys\nfrom typing import Generator\nfrom typing import Sequence\nfrom typing import Tuple\n\nimport pre_commit.constants as C\nfrom pre_commit import git\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import PatchesT\nfrom pre_commit.envcontext import Var\nfrom pre_commit.hook import Hook\nfrom pre_commit.languages import helpers\nfrom pre_commit.prefix import Prefix\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import cmd_output_b\nfrom pre_commit.util import rmtree\n\nENVIRONMENT_DIR = 'golangenv'\nget_default_version = helpers.basic_get_default_version\nhealthy = helpers.basic_healthy\n\n\ndef get_env_patch(venv: str) -> PatchesT:\n return (\n ('PATH', (os.path.join(venv, 'bin'), os.pathsep, Var('PATH'))),\n )\n\n\[email protected]\ndef in_env(prefix: Prefix) -> Generator[None, None, None]:\n envdir = prefix.path(\n helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT),\n )\n with envcontext(get_env_patch(envdir)):\n yield\n\n\ndef guess_go_dir(remote_url: str) -> str:\n if remote_url.endswith('.git'):\n remote_url = remote_url[:-1 * len('.git')]\n looks_like_url = (\n not remote_url.startswith('file://') and\n ('//' in remote_url or '@' in remote_url)\n )\n remote_url = remote_url.replace(':', '/')\n if looks_like_url:\n _, _, remote_url = remote_url.rpartition('//')\n _, _, remote_url = remote_url.rpartition('@')\n return remote_url\n else:\n return 'unknown_src_dir'\n\n\ndef install_environment(\n prefix: Prefix,\n version: str,\n additional_dependencies: Sequence[str],\n) -> None:\n helpers.assert_version_default('golang', version)\n directory = prefix.path(\n helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT),\n )\n\n with clean_path_on_failure(directory):\n remote = git.get_remote_url(prefix.prefix_dir)\n repo_src_dir = os.path.join(directory, 'src', guess_go_dir(remote))\n\n # Clone into the goenv we'll create\n helpers.run_setup_cmd(prefix, ('git', 'clone', '.', repo_src_dir))\n\n if sys.platform == 'cygwin': # pragma: no cover\n _, gopath, _ = cmd_output('cygpath', '-w', directory)\n gopath = gopath.strip()\n else:\n gopath = directory\n env = dict(os.environ, GOPATH=gopath)\n env.pop('GOBIN', None)\n cmd_output_b('go', 'get', './...', cwd=repo_src_dir, env=env)\n for dependency in additional_dependencies:\n cmd_output_b('go', 'get', dependency, cwd=repo_src_dir, env=env)\n # Same some disk space, we don't need these after installation\n rmtree(prefix.path(directory, 'src'))\n pkgdir = prefix.path(directory, 'pkg')\n if os.path.exists(pkgdir): # pragma: no cover (go<1.10)\n rmtree(pkgdir)\n\n\ndef run_hook(\n hook: Hook,\n file_args: Sequence[str],\n color: bool,\n) -> Tuple[int, bytes]:\n with in_env(hook.prefix):\n return helpers.run_xargs(hook, hook.cmd, file_args, color=color)\n", "path": "pre_commit/languages/golang.py"}], "after_files": [{"content": "import contextlib\nimport os.path\nimport sys\nfrom typing import Generator\nfrom typing import Sequence\nfrom typing import Tuple\n\nimport pre_commit.constants as C\nfrom pre_commit import git\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import PatchesT\nfrom pre_commit.envcontext import Var\nfrom pre_commit.hook import Hook\nfrom pre_commit.languages import helpers\nfrom pre_commit.prefix import Prefix\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import cmd_output_b\nfrom pre_commit.util import rmtree\n\nENVIRONMENT_DIR = 'golangenv'\nget_default_version = helpers.basic_get_default_version\nhealthy = helpers.basic_healthy\n\n\ndef get_env_patch(venv: str) -> PatchesT:\n return (\n ('PATH', (os.path.join(venv, 'bin'), os.pathsep, Var('PATH'))),\n )\n\n\[email protected]\ndef in_env(prefix: Prefix) -> Generator[None, None, None]:\n envdir = prefix.path(\n helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT),\n )\n with envcontext(get_env_patch(envdir)):\n yield\n\n\ndef guess_go_dir(remote_url: str) -> str:\n if remote_url.endswith('.git'):\n remote_url = remote_url[:-1 * len('.git')]\n looks_like_url = (\n not remote_url.startswith('file://') and\n ('//' in remote_url or '@' in remote_url)\n )\n remote_url = remote_url.replace(':', '/')\n if looks_like_url:\n _, _, remote_url = remote_url.rpartition('//')\n _, _, remote_url = remote_url.rpartition('@')\n return remote_url\n else:\n return 'unknown_src_dir'\n\n\ndef install_environment(\n prefix: Prefix,\n version: str,\n additional_dependencies: Sequence[str],\n) -> None:\n helpers.assert_version_default('golang', version)\n directory = prefix.path(\n helpers.environment_dir(ENVIRONMENT_DIR, C.DEFAULT),\n )\n\n with clean_path_on_failure(directory):\n remote = git.get_remote_url(prefix.prefix_dir)\n repo_src_dir = os.path.join(directory, 'src', guess_go_dir(remote))\n\n # Clone into the goenv we'll create\n cmd = ('git', 'clone', '--recursive', '.', repo_src_dir)\n helpers.run_setup_cmd(prefix, cmd)\n\n if sys.platform == 'cygwin': # pragma: no cover\n _, gopath, _ = cmd_output('cygpath', '-w', directory)\n gopath = gopath.strip()\n else:\n gopath = directory\n env = dict(os.environ, GOPATH=gopath)\n env.pop('GOBIN', None)\n cmd_output_b('go', 'get', './...', cwd=repo_src_dir, env=env)\n for dependency in additional_dependencies:\n cmd_output_b('go', 'get', dependency, cwd=repo_src_dir, env=env)\n # Same some disk space, we don't need these after installation\n rmtree(prefix.path(directory, 'src'))\n pkgdir = prefix.path(directory, 'pkg')\n if os.path.exists(pkgdir): # pragma: no cover (go<1.10)\n rmtree(pkgdir)\n\n\ndef run_hook(\n hook: Hook,\n file_args: Sequence[str],\n color: bool,\n) -> Tuple[int, bytes]:\n with in_env(hook.prefix):\n return helpers.run_xargs(hook, hook.cmd, file_args, color=color)\n", "path": "pre_commit/languages/golang.py"}]}
1,938
166
gh_patches_debug_23810
rasdani/github-patches
git_diff
translate__pootle-5915
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Dates not getting localised in browse tables seems like the dates are not getting localised as they should --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pootle/apps/pootle_app/panels.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # 3 # Copyright (C) Pootle contributors. 4 # 5 # This file is a part of the Pootle project. It is distributed under the GPL3 6 # or later license. See the LICENSE file for a copy of the license and the 7 # AUTHORS file for copyright and authorship information. 8 9 import re 10 11 from django.utils.safestring import mark_safe 12 13 from pootle.core.browser import get_table_headings 14 from pootle.core.decorators import persistent_property 15 from pootle.core.views.panels import TablePanel 16 17 from pootle.i18n.dates import timesince 18 19 20 class ChildrenPanel(TablePanel): 21 panel_name = "children" 22 _table_fields = ( 23 'name', 'progress', 'activity', 24 'total', 'need-translation', 25 'suggestions', 'critical') 26 27 @property 28 def table_fields(self): 29 fields = ( 30 ("name", "total") 31 if self.view.is_templates_context 32 else self._table_fields) 33 if self.view.has_admin_access: 34 fields += ('last-updated', ) 35 return fields 36 37 @property 38 def children(self): 39 return self.view.object_children 40 41 @property 42 def table(self): 43 if self.view.object_children: 44 return { 45 'id': self.view.view_name, 46 'fields': self.table_fields, 47 'headings': get_table_headings(self.table_fields), 48 'rows': self.view.object_children} 49 50 @persistent_property 51 def _content(self): 52 return self.render() 53 54 @property 55 def child_update_times(self): 56 _times = {} 57 for child in self.children: 58 if not child.get("stats"): 59 continue 60 last_created_unit = ( 61 timesince(child["stats"]["last_created_unit"]["creation_time"]) 62 if child["stats"].get("last_created_unit") 63 else None) 64 last_submission = ( 65 timesince(child["stats"]["last_submission"]["mtime"]) 66 if child["stats"].get("last_submission") 67 else None) 68 _times[child["code"]] = (last_submission, last_created_unit) 69 return _times 70 71 @property 72 def content(self): 73 return self.update_times(self._content) 74 75 def get_context_data(self): 76 return dict( 77 table=self.table, 78 can_translate=self.view.can_translate) 79 80 def update_times(self, content): 81 times = {} 82 update_times = self.child_update_times.items() 83 for name, (last_submission, last_created_unit) in update_times: 84 if last_submission: 85 times[ 86 "_XXX_LAST_SUBMISSION_%s_LAST_SUBMISSION_XXX_" 87 % name] = last_submission 88 if last_created_unit: 89 times[ 90 "_XXX_LAST_CREATED_%s_LAST_CREATED_XXX_" 91 % name] = last_created_unit 92 if times: 93 regex = re.compile("(%s)" % "|".join(map(re.escape, times.keys()))) 94 return mark_safe( 95 regex.sub( 96 lambda match: times[match.string[match.start():match.end()]], 97 content)) 98 return content 99 ``` Path: `pootle/i18n/dates.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # 3 # Copyright (C) Pootle contributors. 4 # 5 # This file is a part of the Pootle project. It is distributed under the GPL3 6 # or later license. See the LICENSE file for a copy of the license and the 7 # AUTHORS file for copyright and authorship information. 8 9 import locale as system_locale 10 import os 11 from datetime import datetime 12 13 from babel.dates import format_timedelta 14 15 from django.utils import translation 16 17 18 class LocalDate(object): 19 20 def __init__(self): 21 if not self.locale_code and not os.name == "nt": 22 self.set_locale() 23 24 @property 25 def default_locale(self): 26 return translation.to_locale(translation.get_language()) 27 28 def set_locale(self): 29 system_locale.setlocale( 30 system_locale.LC_ALL, 31 (self.default_locale, 'UTF-8')) 32 33 @property 34 def locale_code(self): 35 return system_locale.getlocale()[0] 36 37 def format_timesince(self, timestamp, locale=None): 38 return format_timedelta( 39 datetime.now() 40 - datetime.fromtimestamp( 41 timestamp), 42 locale=( 43 locale 44 or self.locale_code 45 or self.default_locale)) 46 47 48 localdate = LocalDate() 49 50 51 def timesince(timestamp, locale=None): 52 return localdate.format_timesince(timestamp, locale=locale) 53 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pootle/apps/pootle_app/panels.py b/pootle/apps/pootle_app/panels.py --- a/pootle/apps/pootle_app/panels.py +++ b/pootle/apps/pootle_app/panels.py @@ -58,11 +58,15 @@ if not child.get("stats"): continue last_created_unit = ( - timesince(child["stats"]["last_created_unit"]["creation_time"]) + timesince( + child["stats"]["last_created_unit"]["creation_time"], + locale=self.view.request_lang) if child["stats"].get("last_created_unit") else None) last_submission = ( - timesince(child["stats"]["last_submission"]["mtime"]) + timesince( + child["stats"]["last_submission"]["mtime"], + locale=self.view.request_lang) if child["stats"].get("last_submission") else None) _times[child["code"]] = (last_submission, last_created_unit) diff --git a/pootle/i18n/dates.py b/pootle/i18n/dates.py --- a/pootle/i18n/dates.py +++ b/pootle/i18n/dates.py @@ -49,4 +49,6 @@ def timesince(timestamp, locale=None): + if locale: + locale = translation.to_locale(locale) return localdate.format_timesince(timestamp, locale=locale)
{"golden_diff": "diff --git a/pootle/apps/pootle_app/panels.py b/pootle/apps/pootle_app/panels.py\n--- a/pootle/apps/pootle_app/panels.py\n+++ b/pootle/apps/pootle_app/panels.py\n@@ -58,11 +58,15 @@\n if not child.get(\"stats\"):\n continue\n last_created_unit = (\n- timesince(child[\"stats\"][\"last_created_unit\"][\"creation_time\"])\n+ timesince(\n+ child[\"stats\"][\"last_created_unit\"][\"creation_time\"],\n+ locale=self.view.request_lang)\n if child[\"stats\"].get(\"last_created_unit\")\n else None)\n last_submission = (\n- timesince(child[\"stats\"][\"last_submission\"][\"mtime\"])\n+ timesince(\n+ child[\"stats\"][\"last_submission\"][\"mtime\"],\n+ locale=self.view.request_lang)\n if child[\"stats\"].get(\"last_submission\")\n else None)\n _times[child[\"code\"]] = (last_submission, last_created_unit)\ndiff --git a/pootle/i18n/dates.py b/pootle/i18n/dates.py\n--- a/pootle/i18n/dates.py\n+++ b/pootle/i18n/dates.py\n@@ -49,4 +49,6 @@\n \n \n def timesince(timestamp, locale=None):\n+ if locale:\n+ locale = translation.to_locale(locale)\n return localdate.format_timesince(timestamp, locale=locale)\n", "issue": "Dates not getting localised in browse tables\nseems like the dates are not getting localised as they should\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport re\n\nfrom django.utils.safestring import mark_safe\n\nfrom pootle.core.browser import get_table_headings\nfrom pootle.core.decorators import persistent_property\nfrom pootle.core.views.panels import TablePanel\n\nfrom pootle.i18n.dates import timesince\n\n\nclass ChildrenPanel(TablePanel):\n panel_name = \"children\"\n _table_fields = (\n 'name', 'progress', 'activity',\n 'total', 'need-translation',\n 'suggestions', 'critical')\n\n @property\n def table_fields(self):\n fields = (\n (\"name\", \"total\")\n if self.view.is_templates_context\n else self._table_fields)\n if self.view.has_admin_access:\n fields += ('last-updated', )\n return fields\n\n @property\n def children(self):\n return self.view.object_children\n\n @property\n def table(self):\n if self.view.object_children:\n return {\n 'id': self.view.view_name,\n 'fields': self.table_fields,\n 'headings': get_table_headings(self.table_fields),\n 'rows': self.view.object_children}\n\n @persistent_property\n def _content(self):\n return self.render()\n\n @property\n def child_update_times(self):\n _times = {}\n for child in self.children:\n if not child.get(\"stats\"):\n continue\n last_created_unit = (\n timesince(child[\"stats\"][\"last_created_unit\"][\"creation_time\"])\n if child[\"stats\"].get(\"last_created_unit\")\n else None)\n last_submission = (\n timesince(child[\"stats\"][\"last_submission\"][\"mtime\"])\n if child[\"stats\"].get(\"last_submission\")\n else None)\n _times[child[\"code\"]] = (last_submission, last_created_unit)\n return _times\n\n @property\n def content(self):\n return self.update_times(self._content)\n\n def get_context_data(self):\n return dict(\n table=self.table,\n can_translate=self.view.can_translate)\n\n def update_times(self, content):\n times = {}\n update_times = self.child_update_times.items()\n for name, (last_submission, last_created_unit) in update_times:\n if last_submission:\n times[\n \"_XXX_LAST_SUBMISSION_%s_LAST_SUBMISSION_XXX_\"\n % name] = last_submission\n if last_created_unit:\n times[\n \"_XXX_LAST_CREATED_%s_LAST_CREATED_XXX_\"\n % name] = last_created_unit\n if times:\n regex = re.compile(\"(%s)\" % \"|\".join(map(re.escape, times.keys())))\n return mark_safe(\n regex.sub(\n lambda match: times[match.string[match.start():match.end()]],\n content))\n return content\n", "path": "pootle/apps/pootle_app/panels.py"}, {"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport locale as system_locale\nimport os\nfrom datetime import datetime\n\nfrom babel.dates import format_timedelta\n\nfrom django.utils import translation\n\n\nclass LocalDate(object):\n\n def __init__(self):\n if not self.locale_code and not os.name == \"nt\":\n self.set_locale()\n\n @property\n def default_locale(self):\n return translation.to_locale(translation.get_language())\n\n def set_locale(self):\n system_locale.setlocale(\n system_locale.LC_ALL,\n (self.default_locale, 'UTF-8'))\n\n @property\n def locale_code(self):\n return system_locale.getlocale()[0]\n\n def format_timesince(self, timestamp, locale=None):\n return format_timedelta(\n datetime.now()\n - datetime.fromtimestamp(\n timestamp),\n locale=(\n locale\n or self.locale_code\n or self.default_locale))\n\n\nlocaldate = LocalDate()\n\n\ndef timesince(timestamp, locale=None):\n return localdate.format_timesince(timestamp, locale=locale)\n", "path": "pootle/i18n/dates.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport re\n\nfrom django.utils.safestring import mark_safe\n\nfrom pootle.core.browser import get_table_headings\nfrom pootle.core.decorators import persistent_property\nfrom pootle.core.views.panels import TablePanel\n\nfrom pootle.i18n.dates import timesince\n\n\nclass ChildrenPanel(TablePanel):\n panel_name = \"children\"\n _table_fields = (\n 'name', 'progress', 'activity',\n 'total', 'need-translation',\n 'suggestions', 'critical')\n\n @property\n def table_fields(self):\n fields = (\n (\"name\", \"total\")\n if self.view.is_templates_context\n else self._table_fields)\n if self.view.has_admin_access:\n fields += ('last-updated', )\n return fields\n\n @property\n def children(self):\n return self.view.object_children\n\n @property\n def table(self):\n if self.view.object_children:\n return {\n 'id': self.view.view_name,\n 'fields': self.table_fields,\n 'headings': get_table_headings(self.table_fields),\n 'rows': self.view.object_children}\n\n @persistent_property\n def _content(self):\n return self.render()\n\n @property\n def child_update_times(self):\n _times = {}\n for child in self.children:\n if not child.get(\"stats\"):\n continue\n last_created_unit = (\n timesince(\n child[\"stats\"][\"last_created_unit\"][\"creation_time\"],\n locale=self.view.request_lang)\n if child[\"stats\"].get(\"last_created_unit\")\n else None)\n last_submission = (\n timesince(\n child[\"stats\"][\"last_submission\"][\"mtime\"],\n locale=self.view.request_lang)\n if child[\"stats\"].get(\"last_submission\")\n else None)\n _times[child[\"code\"]] = (last_submission, last_created_unit)\n return _times\n\n @property\n def content(self):\n return self.update_times(self._content)\n\n def get_context_data(self):\n return dict(\n table=self.table,\n can_translate=self.view.can_translate)\n\n def update_times(self, content):\n times = {}\n update_times = self.child_update_times.items()\n for name, (last_submission, last_created_unit) in update_times:\n if last_submission:\n times[\n \"_XXX_LAST_SUBMISSION_%s_LAST_SUBMISSION_XXX_\"\n % name] = last_submission\n if last_created_unit:\n times[\n \"_XXX_LAST_CREATED_%s_LAST_CREATED_XXX_\"\n % name] = last_created_unit\n if times:\n regex = re.compile(\"(%s)\" % \"|\".join(map(re.escape, times.keys())))\n return mark_safe(\n regex.sub(\n lambda match: times[match.string[match.start():match.end()]],\n content))\n return content\n", "path": "pootle/apps/pootle_app/panels.py"}, {"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport locale as system_locale\nimport os\nfrom datetime import datetime\n\nfrom babel.dates import format_timedelta\n\nfrom django.utils import translation\n\n\nclass LocalDate(object):\n\n def __init__(self):\n if not self.locale_code and not os.name == \"nt\":\n self.set_locale()\n\n @property\n def default_locale(self):\n return translation.to_locale(translation.get_language())\n\n def set_locale(self):\n system_locale.setlocale(\n system_locale.LC_ALL,\n (self.default_locale, 'UTF-8'))\n\n @property\n def locale_code(self):\n return system_locale.getlocale()[0]\n\n def format_timesince(self, timestamp, locale=None):\n return format_timedelta(\n datetime.now()\n - datetime.fromtimestamp(\n timestamp),\n locale=(\n locale\n or self.locale_code\n or self.default_locale))\n\n\nlocaldate = LocalDate()\n\n\ndef timesince(timestamp, locale=None):\n if locale:\n locale = translation.to_locale(locale)\n return localdate.format_timesince(timestamp, locale=locale)\n", "path": "pootle/i18n/dates.py"}]}
1,538
317
gh_patches_debug_3513
rasdani/github-patches
git_diff
cookiecutter__cookiecutter-578
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Cookiecutter needs to always use utf-8 for writing files I get this on Windows (pitty me): ``` File "c:\program files\python 3.5\lib\site-packages\cookiecutter\generate.py", line 318, in generate_files run_hook('post_gen_project', project_dir, context) File "c:\program files\python 3.5\lib\site-packages\cookiecutter\hooks.py", line 107, in run_hook return run_script_with_context(script, project_dir, context) File "c:\program files\python 3.5\lib\site-packages\cookiecutter\hooks.py", line 90, in run_script_with_context temp.write(Template(contents).render(**context)) File "c:\program files\python 3.5\lib\tempfile.py", line 482, in func_wrapper return func(*args, **kwargs) File "c:\program files\python 3.5\lib\encodings\cp1252.py", line 19, in encode return codecs.charmap_encode(input,self.errors,encoding_table)[0] UnicodeEncodeError: 'charmap' codec can't encode character '\u0103' in position 1626: character maps to <undefined> ``` cookiecutter should pass `encoding='utf-8'` to `NamedTemporaryFile` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `cookiecutter/hooks.py` Content: ``` 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 4 """ 5 cookiecutter.hooks 6 ------------------ 7 8 Functions for discovering and executing various cookiecutter hooks. 9 """ 10 11 import io 12 import logging 13 import os 14 import subprocess 15 import sys 16 import tempfile 17 18 from jinja2 import Template 19 20 from cookiecutter import utils 21 from .exceptions import FailedHookException 22 23 24 _HOOKS = [ 25 'pre_gen_project', 26 'post_gen_project', 27 # TODO: other hooks should be listed here 28 ] 29 EXIT_SUCCESS = 0 30 31 32 def find_hooks(): 33 """ 34 Must be called with the project template as the current working directory. 35 Returns a dict of all hook scripts provided. 36 Dict's key will be the hook/script's name, without extension, while 37 values will be the absolute path to the script. 38 Missing scripts will not be included in the returned dict. 39 """ 40 hooks_dir = 'hooks' 41 r = {} 42 logging.debug('hooks_dir is {0}'.format(hooks_dir)) 43 if not os.path.isdir(hooks_dir): 44 logging.debug('No hooks/ dir in template_dir') 45 return r 46 for f in os.listdir(hooks_dir): 47 basename = os.path.splitext(os.path.basename(f))[0] 48 if basename in _HOOKS: 49 r[basename] = os.path.abspath(os.path.join(hooks_dir, f)) 50 return r 51 52 53 def run_script(script_path, cwd='.'): 54 """ 55 Executes a script from a working directory. 56 57 :param script_path: Absolute path to the script to run. 58 :param cwd: The directory to run the script from. 59 """ 60 run_thru_shell = sys.platform.startswith('win') 61 if script_path.endswith('.py'): 62 script_command = [sys.executable, script_path] 63 else: 64 script_command = [script_path] 65 66 utils.make_executable(script_path) 67 68 proc = subprocess.Popen( 69 script_command, 70 shell=run_thru_shell, 71 cwd=cwd 72 ) 73 exit_status = proc.wait() 74 if exit_status != EXIT_SUCCESS: 75 raise FailedHookException( 76 "Hook script failed (exit status: %d)" % exit_status) 77 78 79 def run_script_with_context(script_path, cwd, context): 80 """ 81 Executes a script after rendering with it Jinja. 82 83 :param script_path: Absolute path to the script to run. 84 :param cwd: The directory to run the script from. 85 :param context: Cookiecutter project template context. 86 """ 87 _, extension = os.path.splitext(script_path) 88 89 contents = io.open(script_path, 'r', encoding='utf-8').read() 90 91 with tempfile.NamedTemporaryFile( 92 delete=False, 93 mode='w', 94 suffix=extension 95 ) as temp: 96 temp.write(Template(contents).render(**context)) 97 98 run_script(temp.name, cwd) 99 100 101 def run_hook(hook_name, project_dir, context): 102 """ 103 Try to find and execute a hook from the specified project directory. 104 105 :param hook_name: The hook to execute. 106 :param project_dir: The directory to execute the script from. 107 :param context: Cookiecutter project context. 108 """ 109 script = find_hooks().get(hook_name) 110 if script is None: 111 logging.debug('No hooks found') 112 return 113 run_script_with_context(script, project_dir, context) 114 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/cookiecutter/hooks.py b/cookiecutter/hooks.py --- a/cookiecutter/hooks.py +++ b/cookiecutter/hooks.py @@ -90,10 +90,11 @@ with tempfile.NamedTemporaryFile( delete=False, - mode='w', + mode='wb', suffix=extension ) as temp: - temp.write(Template(contents).render(**context)) + output = Template(contents).render(**context) + temp.write(output.encode('utf-8')) run_script(temp.name, cwd)
{"golden_diff": "diff --git a/cookiecutter/hooks.py b/cookiecutter/hooks.py\n--- a/cookiecutter/hooks.py\n+++ b/cookiecutter/hooks.py\n@@ -90,10 +90,11 @@\n \n with tempfile.NamedTemporaryFile(\n delete=False,\n- mode='w',\n+ mode='wb',\n suffix=extension\n ) as temp:\n- temp.write(Template(contents).render(**context))\n+ output = Template(contents).render(**context)\n+ temp.write(output.encode('utf-8'))\n \n run_script(temp.name, cwd)\n", "issue": "Cookiecutter needs to always use utf-8 for writing files\nI get this on Windows (pitty me):\n\n```\n File \"c:\\program files\\python 3.5\\lib\\site-packages\\cookiecutter\\generate.py\", line 318, in generate_files\n run_hook('post_gen_project', project_dir, context)\n File \"c:\\program files\\python 3.5\\lib\\site-packages\\cookiecutter\\hooks.py\", line 107, in run_hook\n return run_script_with_context(script, project_dir, context)\n File \"c:\\program files\\python 3.5\\lib\\site-packages\\cookiecutter\\hooks.py\", line 90, in run_script_with_context\n temp.write(Template(contents).render(**context))\n File \"c:\\program files\\python 3.5\\lib\\tempfile.py\", line 482, in func_wrapper\n return func(*args, **kwargs)\n File \"c:\\program files\\python 3.5\\lib\\encodings\\cp1252.py\", line 19, in encode\n return codecs.charmap_encode(input,self.errors,encoding_table)[0]\nUnicodeEncodeError: 'charmap' codec can't encode character '\\u0103' in position 1626: character maps to <undefined>\n```\n\ncookiecutter should pass `encoding='utf-8'` to `NamedTemporaryFile`\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.hooks\n------------------\n\nFunctions for discovering and executing various cookiecutter hooks.\n\"\"\"\n\nimport io\nimport logging\nimport os\nimport subprocess\nimport sys\nimport tempfile\n\nfrom jinja2 import Template\n\nfrom cookiecutter import utils\nfrom .exceptions import FailedHookException\n\n\n_HOOKS = [\n 'pre_gen_project',\n 'post_gen_project',\n # TODO: other hooks should be listed here\n]\nEXIT_SUCCESS = 0\n\n\ndef find_hooks():\n \"\"\"\n Must be called with the project template as the current working directory.\n Returns a dict of all hook scripts provided.\n Dict's key will be the hook/script's name, without extension, while\n values will be the absolute path to the script.\n Missing scripts will not be included in the returned dict.\n \"\"\"\n hooks_dir = 'hooks'\n r = {}\n logging.debug('hooks_dir is {0}'.format(hooks_dir))\n if not os.path.isdir(hooks_dir):\n logging.debug('No hooks/ dir in template_dir')\n return r\n for f in os.listdir(hooks_dir):\n basename = os.path.splitext(os.path.basename(f))[0]\n if basename in _HOOKS:\n r[basename] = os.path.abspath(os.path.join(hooks_dir, f))\n return r\n\n\ndef run_script(script_path, cwd='.'):\n \"\"\"\n Executes a script from a working directory.\n\n :param script_path: Absolute path to the script to run.\n :param cwd: The directory to run the script from.\n \"\"\"\n run_thru_shell = sys.platform.startswith('win')\n if script_path.endswith('.py'):\n script_command = [sys.executable, script_path]\n else:\n script_command = [script_path]\n\n utils.make_executable(script_path)\n\n proc = subprocess.Popen(\n script_command,\n shell=run_thru_shell,\n cwd=cwd\n )\n exit_status = proc.wait()\n if exit_status != EXIT_SUCCESS:\n raise FailedHookException(\n \"Hook script failed (exit status: %d)\" % exit_status)\n\n\ndef run_script_with_context(script_path, cwd, context):\n \"\"\"\n Executes a script after rendering with it Jinja.\n\n :param script_path: Absolute path to the script to run.\n :param cwd: The directory to run the script from.\n :param context: Cookiecutter project template context.\n \"\"\"\n _, extension = os.path.splitext(script_path)\n\n contents = io.open(script_path, 'r', encoding='utf-8').read()\n\n with tempfile.NamedTemporaryFile(\n delete=False,\n mode='w',\n suffix=extension\n ) as temp:\n temp.write(Template(contents).render(**context))\n\n run_script(temp.name, cwd)\n\n\ndef run_hook(hook_name, project_dir, context):\n \"\"\"\n Try to find and execute a hook from the specified project directory.\n\n :param hook_name: The hook to execute.\n :param project_dir: The directory to execute the script from.\n :param context: Cookiecutter project context.\n \"\"\"\n script = find_hooks().get(hook_name)\n if script is None:\n logging.debug('No hooks found')\n return\n run_script_with_context(script, project_dir, context)\n", "path": "cookiecutter/hooks.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\ncookiecutter.hooks\n------------------\n\nFunctions for discovering and executing various cookiecutter hooks.\n\"\"\"\n\nimport io\nimport logging\nimport os\nimport subprocess\nimport sys\nimport tempfile\n\nfrom jinja2 import Template\n\nfrom cookiecutter import utils\nfrom .exceptions import FailedHookException\n\n\n_HOOKS = [\n 'pre_gen_project',\n 'post_gen_project',\n # TODO: other hooks should be listed here\n]\nEXIT_SUCCESS = 0\n\n\ndef find_hooks():\n \"\"\"\n Must be called with the project template as the current working directory.\n Returns a dict of all hook scripts provided.\n Dict's key will be the hook/script's name, without extension, while\n values will be the absolute path to the script.\n Missing scripts will not be included in the returned dict.\n \"\"\"\n hooks_dir = 'hooks'\n r = {}\n logging.debug('hooks_dir is {0}'.format(hooks_dir))\n if not os.path.isdir(hooks_dir):\n logging.debug('No hooks/ dir in template_dir')\n return r\n for f in os.listdir(hooks_dir):\n basename = os.path.splitext(os.path.basename(f))[0]\n if basename in _HOOKS:\n r[basename] = os.path.abspath(os.path.join(hooks_dir, f))\n return r\n\n\ndef run_script(script_path, cwd='.'):\n \"\"\"\n Executes a script from a working directory.\n\n :param script_path: Absolute path to the script to run.\n :param cwd: The directory to run the script from.\n \"\"\"\n run_thru_shell = sys.platform.startswith('win')\n if script_path.endswith('.py'):\n script_command = [sys.executable, script_path]\n else:\n script_command = [script_path]\n\n utils.make_executable(script_path)\n\n proc = subprocess.Popen(\n script_command,\n shell=run_thru_shell,\n cwd=cwd\n )\n exit_status = proc.wait()\n if exit_status != EXIT_SUCCESS:\n raise FailedHookException(\n \"Hook script failed (exit status: %d)\" % exit_status)\n\n\ndef run_script_with_context(script_path, cwd, context):\n \"\"\"\n Executes a script after rendering with it Jinja.\n\n :param script_path: Absolute path to the script to run.\n :param cwd: The directory to run the script from.\n :param context: Cookiecutter project template context.\n \"\"\"\n _, extension = os.path.splitext(script_path)\n\n contents = io.open(script_path, 'r', encoding='utf-8').read()\n\n with tempfile.NamedTemporaryFile(\n delete=False,\n mode='wb',\n suffix=extension\n ) as temp:\n output = Template(contents).render(**context)\n temp.write(output.encode('utf-8'))\n\n run_script(temp.name, cwd)\n\n\ndef run_hook(hook_name, project_dir, context):\n \"\"\"\n Try to find and execute a hook from the specified project directory.\n\n :param hook_name: The hook to execute.\n :param project_dir: The directory to execute the script from.\n :param context: Cookiecutter project context.\n \"\"\"\n script = find_hooks().get(hook_name)\n if script is None:\n logging.debug('No hooks found')\n return\n run_script_with_context(script, project_dir, context)\n", "path": "cookiecutter/hooks.py"}]}
1,528
127
gh_patches_debug_2185
rasdani/github-patches
git_diff
googleapis__google-auth-library-python-913
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Setuptools as dependency is problematic w/ pip-tools https://github.com/googleapis/google-auth-library-python/commit/908da752d01fef728bd5cb3eb5b13f2b5c335e51 (#322) added `setuptools` as a dependency in this package. However, the [pip-tools](https://github.com/jazzband/pip-tools) package that's commonly used for pinning dependencies considers `setuptools` an unsafe dependency to have in a project at all (as discussed in #492), and as such doesn't save it in the pinned requirements file at all. Since `google-auth` depends on Setuptools but a version couldn't have been pinned in the requirements, we're seeing ``` Collecting setuptools>=40.3.0 (from google-auth==1.19.1->our-proprietary-package==0.31.1) Downloading https://files.pythonhosted.org/packages/b0/8b/379494d7dbd3854aa7b85b216cb0af54edcb7fce7d086ba3e35522a713cf/setuptools-50.0.0-py3-none-any.whl (783kB) ``` which wreaks havoc on Ubuntu 16.04 + Python 3.5 machines due to https://github.com/pypa/setuptools/issues/2352 / https://github.com/pypa/setuptools/issues/2350 / https://github.com/pypa/setuptools/issues/2356 ... The workaround is to add `--allow-unsafe` or manually pin `setuptools`, but is the requirement _actually_ necessary in this package? No other package in the 48-line `requirements.txt` for this particular project would have required a version of `setuptools`. #### Environment details - OS: Ubuntu 16.04 - Python version: 3.5 - pip version: irrelevant - `google-auth` version: 1.19.1 #### Steps to reproduce 1. Install `google-auth` on an Ubuntu 16.04 machine 2. It installs `setuptools==50.0.0` 3. https://github.com/pypa/setuptools/issues/2352 and friends --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 # Copyright 2014 Google Inc. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import io 16 import os 17 18 from setuptools import find_packages 19 from setuptools import setup 20 21 22 DEPENDENCIES = ( 23 "cachetools>=2.0.0,<5.0", 24 "pyasn1-modules>=0.2.1", 25 # rsa==4.5 is the last version to support 2.7 26 # https://github.com/sybrenstuvel/python-rsa/issues/152#issuecomment-643470233 27 'rsa<4.6; python_version < "3.6"', 28 'rsa>=3.1.4,<5; python_version >= "3.6"', 29 # install enum34 to support 2.7. enum34 only works up to python version 3.3. 30 'enum34>=1.1.10; python_version < "3.4"', 31 "setuptools>=40.3.0", 32 "six>=1.9.0", 33 ) 34 35 extras = { 36 "aiohttp": [ 37 "aiohttp >= 3.6.2, < 4.0.0dev; python_version>='3.6'", 38 "requests >= 2.20.0, < 3.0.0dev", 39 ], 40 "pyopenssl": "pyopenssl>=20.0.0", 41 "reauth": "pyu2f>=0.1.5", 42 } 43 44 with io.open("README.rst", "r") as fh: 45 long_description = fh.read() 46 47 package_root = os.path.abspath(os.path.dirname(__file__)) 48 49 version = {} 50 with open(os.path.join(package_root, "google/auth/version.py")) as fp: 51 exec(fp.read(), version) 52 version = version["__version__"] 53 54 setup( 55 name="google-auth", 56 version=version, 57 author="Google Cloud Platform", 58 author_email="[email protected]", 59 description="Google Authentication Library", 60 long_description=long_description, 61 url="https://github.com/googleapis/google-auth-library-python", 62 packages=find_packages(exclude=("tests*", "system_tests*")), 63 namespace_packages=("google",), 64 install_requires=DEPENDENCIES, 65 extras_require=extras, 66 python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*", 67 license="Apache 2.0", 68 keywords="google auth oauth client", 69 classifiers=[ 70 "Programming Language :: Python :: 3", 71 "Programming Language :: Python :: 3.6", 72 "Programming Language :: Python :: 3.7", 73 "Programming Language :: Python :: 3.8", 74 "Programming Language :: Python :: 3.9", 75 "Programming Language :: Python :: 3.10", 76 "Development Status :: 5 - Production/Stable", 77 "Intended Audience :: Developers", 78 "License :: OSI Approved :: Apache Software License", 79 "Operating System :: POSIX", 80 "Operating System :: Microsoft :: Windows", 81 "Operating System :: MacOS :: MacOS X", 82 "Operating System :: OS Independent", 83 "Topic :: Internet :: WWW/HTTP", 84 ], 85 ) 86 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -28,7 +28,6 @@ 'rsa>=3.1.4,<5; python_version >= "3.6"', # install enum34 to support 2.7. enum34 only works up to python version 3.3. 'enum34>=1.1.10; python_version < "3.4"', - "setuptools>=40.3.0", "six>=1.9.0", )
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -28,7 +28,6 @@\n 'rsa>=3.1.4,<5; python_version >= \"3.6\"',\n # install enum34 to support 2.7. enum34 only works up to python version 3.3.\n 'enum34>=1.1.10; python_version < \"3.4\"',\n- \"setuptools>=40.3.0\",\n \"six>=1.9.0\",\n )\n", "issue": "Setuptools as dependency is problematic w/ pip-tools\nhttps://github.com/googleapis/google-auth-library-python/commit/908da752d01fef728bd5cb3eb5b13f2b5c335e51 (#322) added `setuptools` as a dependency in this package. However, the [pip-tools](https://github.com/jazzband/pip-tools) package that's commonly used for pinning dependencies considers `setuptools` an unsafe dependency to have in a project at all (as discussed in #492), and as such doesn't save it in the pinned requirements file at all.\r\n\r\nSince `google-auth` depends on Setuptools but a version couldn't have been pinned in the requirements, we're seeing\r\n\r\n```\r\nCollecting setuptools>=40.3.0 (from google-auth==1.19.1->our-proprietary-package==0.31.1)\r\n Downloading https://files.pythonhosted.org/packages/b0/8b/379494d7dbd3854aa7b85b216cb0af54edcb7fce7d086ba3e35522a713cf/setuptools-50.0.0-py3-none-any.whl (783kB)\r\n```\r\n\r\nwhich wreaks havoc on Ubuntu 16.04 + Python 3.5 machines due to https://github.com/pypa/setuptools/issues/2352 / https://github.com/pypa/setuptools/issues/2350 / https://github.com/pypa/setuptools/issues/2356 ...\r\n\r\nThe workaround is to add `--allow-unsafe` or manually pin `setuptools`, but is the requirement _actually_ necessary in this package? No other package in the 48-line `requirements.txt` for this particular project would have required a version of `setuptools`.\r\n\r\n#### Environment details\r\n\r\n - OS: Ubuntu 16.04\r\n - Python version: 3.5\r\n - pip version: irrelevant\r\n - `google-auth` version: 1.19.1\r\n\r\n#### Steps to reproduce\r\n\r\n 1. Install `google-auth` on an Ubuntu 16.04 machine\r\n 2. It installs `setuptools==50.0.0`\r\n 3. https://github.com/pypa/setuptools/issues/2352 and friends\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright 2014 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n\nDEPENDENCIES = (\n \"cachetools>=2.0.0,<5.0\",\n \"pyasn1-modules>=0.2.1\",\n # rsa==4.5 is the last version to support 2.7\n # https://github.com/sybrenstuvel/python-rsa/issues/152#issuecomment-643470233\n 'rsa<4.6; python_version < \"3.6\"',\n 'rsa>=3.1.4,<5; python_version >= \"3.6\"',\n # install enum34 to support 2.7. enum34 only works up to python version 3.3.\n 'enum34>=1.1.10; python_version < \"3.4\"',\n \"setuptools>=40.3.0\",\n \"six>=1.9.0\",\n)\n\nextras = {\n \"aiohttp\": [\n \"aiohttp >= 3.6.2, < 4.0.0dev; python_version>='3.6'\",\n \"requests >= 2.20.0, < 3.0.0dev\",\n ],\n \"pyopenssl\": \"pyopenssl>=20.0.0\",\n \"reauth\": \"pyu2f>=0.1.5\",\n}\n\nwith io.open(\"README.rst\", \"r\") as fh:\n long_description = fh.read()\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nversion = {}\nwith open(os.path.join(package_root, \"google/auth/version.py\")) as fp:\n exec(fp.read(), version)\nversion = version[\"__version__\"]\n\nsetup(\n name=\"google-auth\",\n version=version,\n author=\"Google Cloud Platform\",\n author_email=\"[email protected]\",\n description=\"Google Authentication Library\",\n long_description=long_description,\n url=\"https://github.com/googleapis/google-auth-library-python\",\n packages=find_packages(exclude=(\"tests*\", \"system_tests*\")),\n namespace_packages=(\"google\",),\n install_requires=DEPENDENCIES,\n extras_require=extras,\n python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*\",\n license=\"Apache 2.0\",\n keywords=\"google auth oauth client\",\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright 2014 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n\nDEPENDENCIES = (\n \"cachetools>=2.0.0,<5.0\",\n \"pyasn1-modules>=0.2.1\",\n # rsa==4.5 is the last version to support 2.7\n # https://github.com/sybrenstuvel/python-rsa/issues/152#issuecomment-643470233\n 'rsa<4.6; python_version < \"3.6\"',\n 'rsa>=3.1.4,<5; python_version >= \"3.6\"',\n # install enum34 to support 2.7. enum34 only works up to python version 3.3.\n 'enum34>=1.1.10; python_version < \"3.4\"',\n \"six>=1.9.0\",\n)\n\nextras = {\n \"aiohttp\": [\n \"aiohttp >= 3.6.2, < 4.0.0dev; python_version>='3.6'\",\n \"requests >= 2.20.0, < 3.0.0dev\",\n ],\n \"pyopenssl\": \"pyopenssl>=20.0.0\",\n \"reauth\": \"pyu2f>=0.1.5\",\n}\n\nwith io.open(\"README.rst\", \"r\") as fh:\n long_description = fh.read()\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nversion = {}\nwith open(os.path.join(package_root, \"google/auth/version.py\")) as fp:\n exec(fp.read(), version)\nversion = version[\"__version__\"]\n\nsetup(\n name=\"google-auth\",\n version=version,\n author=\"Google Cloud Platform\",\n author_email=\"[email protected]\",\n description=\"Google Authentication Library\",\n long_description=long_description,\n url=\"https://github.com/googleapis/google-auth-library-python\",\n packages=find_packages(exclude=(\"tests*\", \"system_tests*\")),\n namespace_packages=(\"google\",),\n install_requires=DEPENDENCIES,\n extras_require=extras,\n python_requires=\">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*\",\n license=\"Apache 2.0\",\n keywords=\"google auth oauth client\",\n classifiers=[\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet :: WWW/HTTP\",\n ],\n)\n", "path": "setup.py"}]}
1,772
124
gh_patches_debug_15572
rasdani/github-patches
git_diff
scrapy__scrapy-4042
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Error 302 redirection with headers location starts with 3 slash ### Description when the 302 response return a headers's location startswith 3 slash, the scrapy redirect to a url different from what the browser do. ### Steps to Reproduce 1. scrapy shell https://www.hjenglish.com/new/p1285798/ **Expected behavior:** redirect to `https://fr.hujiang.com/new/p1285798/` as browser `Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36` do. **Actual behavior:** redirct to `https://www.hjenglish.com/fr.hujiang.com/new/p1285798` **Reproduces how often:** everytime ### Versions Scrapy : 1.7.3 lxml : 4.3.2.0 libxml2 : 2.9.9 cssselect : 1.1.0 parsel : 1.5.2 w3lib : 1.20.0 Twisted : 19.7.0 Python : 3.7.3 (default, Mar 27 2019, 17:13:21) [MSC v.1915 64 bit (AMD64)] pyOpenSSL : 19.0.0 (OpenSSL 1.1.1c 28 May 2019) cryptography : 2.6.1 Platform : Windows-10-10.0.17134-SP0 ### Additional context I check the defination of [Location in rfc](https://tools.ietf.org/html/rfc7231#section-7.1.2) and end with [reference resolution](https://tools.ietf.org/html/rfc3986#section-5.3). But I fail to findout how to resolve the Location startswith `///`. So I don't know why Chrome did so. The behavior of scrapy is determined by [redirect.py#L73](https://github.com/scrapy/scrapy/blob/master/scrapy/downloadermiddlewares/redirect.py#L73), which will truncate `///` to `/`。 I'm wandering the differents betweent scarpy and browser... --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `scrapy/downloadermiddlewares/redirect.py` Content: ``` 1 import logging 2 from six.moves.urllib.parse import urljoin 3 4 from w3lib.url import safe_url_string 5 6 from scrapy.http import HtmlResponse 7 from scrapy.utils.response import get_meta_refresh 8 from scrapy.exceptions import IgnoreRequest, NotConfigured 9 10 logger = logging.getLogger(__name__) 11 12 13 class BaseRedirectMiddleware(object): 14 15 enabled_setting = 'REDIRECT_ENABLED' 16 17 def __init__(self, settings): 18 if not settings.getbool(self.enabled_setting): 19 raise NotConfigured 20 21 self.max_redirect_times = settings.getint('REDIRECT_MAX_TIMES') 22 self.priority_adjust = settings.getint('REDIRECT_PRIORITY_ADJUST') 23 24 @classmethod 25 def from_crawler(cls, crawler): 26 return cls(crawler.settings) 27 28 def _redirect(self, redirected, request, spider, reason): 29 ttl = request.meta.setdefault('redirect_ttl', self.max_redirect_times) 30 redirects = request.meta.get('redirect_times', 0) + 1 31 32 if ttl and redirects <= self.max_redirect_times: 33 redirected.meta['redirect_times'] = redirects 34 redirected.meta['redirect_ttl'] = ttl - 1 35 redirected.meta['redirect_urls'] = request.meta.get('redirect_urls', []) + \ 36 [request.url] 37 redirected.meta['redirect_reasons'] = request.meta.get('redirect_reasons', []) + \ 38 [reason] 39 redirected.dont_filter = request.dont_filter 40 redirected.priority = request.priority + self.priority_adjust 41 logger.debug("Redirecting (%(reason)s) to %(redirected)s from %(request)s", 42 {'reason': reason, 'redirected': redirected, 'request': request}, 43 extra={'spider': spider}) 44 return redirected 45 else: 46 logger.debug("Discarding %(request)s: max redirections reached", 47 {'request': request}, extra={'spider': spider}) 48 raise IgnoreRequest("max redirections reached") 49 50 def _redirect_request_using_get(self, request, redirect_url): 51 redirected = request.replace(url=redirect_url, method='GET', body='') 52 redirected.headers.pop('Content-Type', None) 53 redirected.headers.pop('Content-Length', None) 54 return redirected 55 56 57 class RedirectMiddleware(BaseRedirectMiddleware): 58 """ 59 Handle redirection of requests based on response status 60 and meta-refresh html tag. 61 """ 62 def process_response(self, request, response, spider): 63 if (request.meta.get('dont_redirect', False) or 64 response.status in getattr(spider, 'handle_httpstatus_list', []) or 65 response.status in request.meta.get('handle_httpstatus_list', []) or 66 request.meta.get('handle_httpstatus_all', False)): 67 return response 68 69 allowed_status = (301, 302, 303, 307, 308) 70 if 'Location' not in response.headers or response.status not in allowed_status: 71 return response 72 73 location = safe_url_string(response.headers['location']) 74 75 redirected_url = urljoin(request.url, location) 76 77 if response.status in (301, 307, 308) or request.method == 'HEAD': 78 redirected = request.replace(url=redirected_url) 79 return self._redirect(redirected, request, spider, response.status) 80 81 redirected = self._redirect_request_using_get(request, redirected_url) 82 return self._redirect(redirected, request, spider, response.status) 83 84 85 class MetaRefreshMiddleware(BaseRedirectMiddleware): 86 87 enabled_setting = 'METAREFRESH_ENABLED' 88 89 def __init__(self, settings): 90 super(MetaRefreshMiddleware, self).__init__(settings) 91 self._ignore_tags = settings.getlist('METAREFRESH_IGNORE_TAGS') 92 self._maxdelay = settings.getint('REDIRECT_MAX_METAREFRESH_DELAY', 93 settings.getint('METAREFRESH_MAXDELAY')) 94 95 def process_response(self, request, response, spider): 96 if request.meta.get('dont_redirect', False) or request.method == 'HEAD' or \ 97 not isinstance(response, HtmlResponse): 98 return response 99 100 interval, url = get_meta_refresh(response, 101 ignore_tags=self._ignore_tags) 102 if url and interval < self._maxdelay: 103 redirected = self._redirect_request_using_get(request, url) 104 return self._redirect(redirected, request, spider, 'meta refresh') 105 106 return response 107 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scrapy/downloadermiddlewares/redirect.py b/scrapy/downloadermiddlewares/redirect.py --- a/scrapy/downloadermiddlewares/redirect.py +++ b/scrapy/downloadermiddlewares/redirect.py @@ -1,5 +1,5 @@ import logging -from six.moves.urllib.parse import urljoin +from six.moves.urllib.parse import urljoin, urlparse from w3lib.url import safe_url_string @@ -70,7 +70,10 @@ if 'Location' not in response.headers or response.status not in allowed_status: return response - location = safe_url_string(response.headers['location']) + location = safe_url_string(response.headers['Location']) + if response.headers['Location'].startswith(b'//'): + request_scheme = urlparse(request.url).scheme + location = request_scheme + '://' + location.lstrip('/') redirected_url = urljoin(request.url, location)
{"golden_diff": "diff --git a/scrapy/downloadermiddlewares/redirect.py b/scrapy/downloadermiddlewares/redirect.py\n--- a/scrapy/downloadermiddlewares/redirect.py\n+++ b/scrapy/downloadermiddlewares/redirect.py\n@@ -1,5 +1,5 @@\n import logging\n-from six.moves.urllib.parse import urljoin\n+from six.moves.urllib.parse import urljoin, urlparse\n \n from w3lib.url import safe_url_string\n \n@@ -70,7 +70,10 @@\n if 'Location' not in response.headers or response.status not in allowed_status:\n return response\n \n- location = safe_url_string(response.headers['location'])\n+ location = safe_url_string(response.headers['Location'])\n+ if response.headers['Location'].startswith(b'//'):\n+ request_scheme = urlparse(request.url).scheme\n+ location = request_scheme + '://' + location.lstrip('/')\n \n redirected_url = urljoin(request.url, location)\n", "issue": "Error 302 redirection with headers location starts with 3 slash\n\r\n### Description\r\n\r\nwhen the 302 response return a headers's location startswith 3 slash, the scrapy redirect to a url different from what the browser do.\r\n\r\n### Steps to Reproduce\r\n\r\n1. scrapy shell https://www.hjenglish.com/new/p1285798/\r\n\r\n**Expected behavior:** \r\nredirect to `https://fr.hujiang.com/new/p1285798/` as browser `Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36` do.\r\n\r\n\r\n**Actual behavior:** \r\nredirct to `https://www.hjenglish.com/fr.hujiang.com/new/p1285798`\r\n\r\n**Reproduces how often:** \r\n\r\neverytime\r\n\r\n### Versions\r\nScrapy : 1.7.3\r\nlxml : 4.3.2.0\r\nlibxml2 : 2.9.9\r\ncssselect : 1.1.0\r\nparsel : 1.5.2\r\nw3lib : 1.20.0\r\nTwisted : 19.7.0\r\nPython : 3.7.3 (default, Mar 27 2019, 17:13:21) [MSC v.1915 64 bit (AMD64)]\r\npyOpenSSL : 19.0.0 (OpenSSL 1.1.1c 28 May 2019)\r\ncryptography : 2.6.1\r\nPlatform : Windows-10-10.0.17134-SP0\r\n\r\n\r\n### Additional context\r\n\r\nI check the defination of [Location in rfc](https://tools.ietf.org/html/rfc7231#section-7.1.2) and end with [reference resolution](https://tools.ietf.org/html/rfc3986#section-5.3). But I fail to findout how to resolve the Location startswith `///`. So I don't know why Chrome did so.\r\n\r\nThe behavior of scrapy is determined by [redirect.py#L73](https://github.com/scrapy/scrapy/blob/master/scrapy/downloadermiddlewares/redirect.py#L73), which will truncate `///` to `/`\u3002\r\n\r\nI'm wandering the differents betweent scarpy and browser...\r\n\n", "before_files": [{"content": "import logging\nfrom six.moves.urllib.parse import urljoin\n\nfrom w3lib.url import safe_url_string\n\nfrom scrapy.http import HtmlResponse\nfrom scrapy.utils.response import get_meta_refresh\nfrom scrapy.exceptions import IgnoreRequest, NotConfigured\n\nlogger = logging.getLogger(__name__)\n\n\nclass BaseRedirectMiddleware(object):\n\n enabled_setting = 'REDIRECT_ENABLED'\n\n def __init__(self, settings):\n if not settings.getbool(self.enabled_setting):\n raise NotConfigured\n\n self.max_redirect_times = settings.getint('REDIRECT_MAX_TIMES')\n self.priority_adjust = settings.getint('REDIRECT_PRIORITY_ADJUST')\n\n @classmethod\n def from_crawler(cls, crawler):\n return cls(crawler.settings)\n\n def _redirect(self, redirected, request, spider, reason):\n ttl = request.meta.setdefault('redirect_ttl', self.max_redirect_times)\n redirects = request.meta.get('redirect_times', 0) + 1\n\n if ttl and redirects <= self.max_redirect_times:\n redirected.meta['redirect_times'] = redirects\n redirected.meta['redirect_ttl'] = ttl - 1\n redirected.meta['redirect_urls'] = request.meta.get('redirect_urls', []) + \\\n [request.url]\n redirected.meta['redirect_reasons'] = request.meta.get('redirect_reasons', []) + \\\n [reason]\n redirected.dont_filter = request.dont_filter\n redirected.priority = request.priority + self.priority_adjust\n logger.debug(\"Redirecting (%(reason)s) to %(redirected)s from %(request)s\",\n {'reason': reason, 'redirected': redirected, 'request': request},\n extra={'spider': spider})\n return redirected\n else:\n logger.debug(\"Discarding %(request)s: max redirections reached\",\n {'request': request}, extra={'spider': spider})\n raise IgnoreRequest(\"max redirections reached\")\n\n def _redirect_request_using_get(self, request, redirect_url):\n redirected = request.replace(url=redirect_url, method='GET', body='')\n redirected.headers.pop('Content-Type', None)\n redirected.headers.pop('Content-Length', None)\n return redirected\n\n\nclass RedirectMiddleware(BaseRedirectMiddleware):\n \"\"\"\n Handle redirection of requests based on response status\n and meta-refresh html tag.\n \"\"\"\n def process_response(self, request, response, spider):\n if (request.meta.get('dont_redirect', False) or\n response.status in getattr(spider, 'handle_httpstatus_list', []) or\n response.status in request.meta.get('handle_httpstatus_list', []) or\n request.meta.get('handle_httpstatus_all', False)):\n return response\n\n allowed_status = (301, 302, 303, 307, 308)\n if 'Location' not in response.headers or response.status not in allowed_status:\n return response\n\n location = safe_url_string(response.headers['location'])\n\n redirected_url = urljoin(request.url, location)\n\n if response.status in (301, 307, 308) or request.method == 'HEAD':\n redirected = request.replace(url=redirected_url)\n return self._redirect(redirected, request, spider, response.status)\n\n redirected = self._redirect_request_using_get(request, redirected_url)\n return self._redirect(redirected, request, spider, response.status)\n\n\nclass MetaRefreshMiddleware(BaseRedirectMiddleware):\n\n enabled_setting = 'METAREFRESH_ENABLED'\n\n def __init__(self, settings):\n super(MetaRefreshMiddleware, self).__init__(settings)\n self._ignore_tags = settings.getlist('METAREFRESH_IGNORE_TAGS')\n self._maxdelay = settings.getint('REDIRECT_MAX_METAREFRESH_DELAY',\n settings.getint('METAREFRESH_MAXDELAY'))\n\n def process_response(self, request, response, spider):\n if request.meta.get('dont_redirect', False) or request.method == 'HEAD' or \\\n not isinstance(response, HtmlResponse):\n return response\n\n interval, url = get_meta_refresh(response,\n ignore_tags=self._ignore_tags)\n if url and interval < self._maxdelay:\n redirected = self._redirect_request_using_get(request, url)\n return self._redirect(redirected, request, spider, 'meta refresh')\n\n return response\n", "path": "scrapy/downloadermiddlewares/redirect.py"}], "after_files": [{"content": "import logging\nfrom six.moves.urllib.parse import urljoin, urlparse\n\nfrom w3lib.url import safe_url_string\n\nfrom scrapy.http import HtmlResponse\nfrom scrapy.utils.response import get_meta_refresh\nfrom scrapy.exceptions import IgnoreRequest, NotConfigured\n\nlogger = logging.getLogger(__name__)\n\n\nclass BaseRedirectMiddleware(object):\n\n enabled_setting = 'REDIRECT_ENABLED'\n\n def __init__(self, settings):\n if not settings.getbool(self.enabled_setting):\n raise NotConfigured\n\n self.max_redirect_times = settings.getint('REDIRECT_MAX_TIMES')\n self.priority_adjust = settings.getint('REDIRECT_PRIORITY_ADJUST')\n\n @classmethod\n def from_crawler(cls, crawler):\n return cls(crawler.settings)\n\n def _redirect(self, redirected, request, spider, reason):\n ttl = request.meta.setdefault('redirect_ttl', self.max_redirect_times)\n redirects = request.meta.get('redirect_times', 0) + 1\n\n if ttl and redirects <= self.max_redirect_times:\n redirected.meta['redirect_times'] = redirects\n redirected.meta['redirect_ttl'] = ttl - 1\n redirected.meta['redirect_urls'] = request.meta.get('redirect_urls', []) + \\\n [request.url]\n redirected.meta['redirect_reasons'] = request.meta.get('redirect_reasons', []) + \\\n [reason]\n redirected.dont_filter = request.dont_filter\n redirected.priority = request.priority + self.priority_adjust\n logger.debug(\"Redirecting (%(reason)s) to %(redirected)s from %(request)s\",\n {'reason': reason, 'redirected': redirected, 'request': request},\n extra={'spider': spider})\n return redirected\n else:\n logger.debug(\"Discarding %(request)s: max redirections reached\",\n {'request': request}, extra={'spider': spider})\n raise IgnoreRequest(\"max redirections reached\")\n\n def _redirect_request_using_get(self, request, redirect_url):\n redirected = request.replace(url=redirect_url, method='GET', body='')\n redirected.headers.pop('Content-Type', None)\n redirected.headers.pop('Content-Length', None)\n return redirected\n\n\nclass RedirectMiddleware(BaseRedirectMiddleware):\n \"\"\"\n Handle redirection of requests based on response status\n and meta-refresh html tag.\n \"\"\"\n def process_response(self, request, response, spider):\n if (request.meta.get('dont_redirect', False) or\n response.status in getattr(spider, 'handle_httpstatus_list', []) or\n response.status in request.meta.get('handle_httpstatus_list', []) or\n request.meta.get('handle_httpstatus_all', False)):\n return response\n\n allowed_status = (301, 302, 303, 307, 308)\n if 'Location' not in response.headers or response.status not in allowed_status:\n return response\n\n location = safe_url_string(response.headers['Location'])\n if response.headers['Location'].startswith(b'//'):\n request_scheme = urlparse(request.url).scheme\n location = request_scheme + '://' + location.lstrip('/')\n\n redirected_url = urljoin(request.url, location)\n\n if response.status in (301, 307, 308) or request.method == 'HEAD':\n redirected = request.replace(url=redirected_url)\n return self._redirect(redirected, request, spider, response.status)\n\n redirected = self._redirect_request_using_get(request, redirected_url)\n return self._redirect(redirected, request, spider, response.status)\n\n\nclass MetaRefreshMiddleware(BaseRedirectMiddleware):\n\n enabled_setting = 'METAREFRESH_ENABLED'\n\n def __init__(self, settings):\n super(MetaRefreshMiddleware, self).__init__(settings)\n self._ignore_tags = settings.getlist('METAREFRESH_IGNORE_TAGS')\n self._maxdelay = settings.getint('REDIRECT_MAX_METAREFRESH_DELAY',\n settings.getint('METAREFRESH_MAXDELAY'))\n\n def process_response(self, request, response, spider):\n if request.meta.get('dont_redirect', False) or request.method == 'HEAD' or \\\n not isinstance(response, HtmlResponse):\n return response\n\n interval, url = get_meta_refresh(response,\n ignore_tags=self._ignore_tags)\n if url and interval < self._maxdelay:\n redirected = self._redirect_request_using_get(request, url)\n return self._redirect(redirected, request, spider, 'meta refresh')\n\n return response\n", "path": "scrapy/downloadermiddlewares/redirect.py"}]}
1,964
198
gh_patches_debug_50089
rasdani/github-patches
git_diff
python-telegram-bot__python-telegram-bot-1228
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Can't change filename when send document after upgrading to v11.1.0 ### Steps to reproduce 1. Generate a pickle file "test" (I didn't test other common files yet) 2. Send this file to user `bot.send_document(chat_id=user_chat_id, document=open('./test', 'rb'), filename="test")` or `bot.send_document(chat_id=user_chat_id, document=open('./test', 'rb'))` ### Expected behaviour User will receive a file named **test** ### Actual behaviour User received a file named **application.octet-stream** ### Configuration **Operating System:** Debian (Server, where I first found this issue) Ubuntu(Local, **I test on v10.1.0, everything is fine**, so I upgrade to v11.1.0, then I have the same issue as Debian Server) **Version of Python, python-telegram-bot & dependencies:** ``$ python -m telegram`` *My Local Ubuntu After Upgrade:* python-telegram-bot 11.1.0 certifi 2018.08.24 future 0.16.0 Python 3.6.6 (default, Sep 12 2018, 18:26:19) [GCC 8.0.1 20180414 (experimental) [trunk revision 259383]] The pictures shows results of python-telegram-bot v10.1.0 (the first one) and v11.1.0 (the second one) : ![screenshot from 2018-09-29 14-16-06](https://user-images.githubusercontent.com/16657782/46246951-2382a080-c3f4-11e8-9f01-b4dd90960ccd.png) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `telegram/files/inputfile.py` Content: ``` 1 #!/usr/bin/env python 2 # pylint: disable=W0622,E0611 3 # 4 # A library that provides a Python interface to the Telegram Bot API 5 # Copyright (C) 2015-2018 6 # Leandro Toledo de Souza <[email protected]> 7 # 8 # This program is free software: you can redistribute it and/or modify 9 # it under the terms of the GNU Lesser Public License as published by 10 # the Free Software Foundation, either version 3 of the License, or 11 # (at your option) any later version. 12 # 13 # This program is distributed in the hope that it will be useful, 14 # but WITHOUT ANY WARRANTY; without even the implied warranty of 15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 # GNU Lesser Public License for more details. 17 # 18 # You should have received a copy of the GNU Lesser Public License 19 # along with this program. If not, see [http://www.gnu.org/licenses/]. 20 """This module contains an object that represents a Telegram InputFile.""" 21 22 import imghdr 23 import mimetypes 24 import os 25 from uuid import uuid4 26 27 from telegram import TelegramError 28 29 DEFAULT_MIME_TYPE = 'application/octet-stream' 30 31 32 class InputFile(object): 33 """This object represents a Telegram InputFile. 34 35 Attributes: 36 input_file_content (:obj:`bytes`): The binaray content of the file to send. 37 filename (:obj:`str`): Optional, Filename for the file to be sent. 38 attach (:obj:`str`): Optional, attach id for sending multiple files. 39 40 Args: 41 obj (:obj:`File handler`): An open file descriptor. 42 filename (:obj:`str`, optional): Filename for this InputFile. 43 attach (:obj:`bool`, optional): Whether this should be send as one file or is part of a 44 collection of files. 45 46 Raises: 47 TelegramError 48 49 """ 50 51 def __init__(self, obj, filename=None, attach=None): 52 self.filename = None 53 self.input_file_content = obj.read() 54 self.attach = 'attached' + uuid4().hex if attach else None 55 56 if filename: 57 self.filename = filename 58 elif (hasattr(obj, 'name') and 59 not isinstance(obj.name, int) and # py3 60 obj.name != '<fdopen>'): # py2 61 # on py2.7, pylint fails to understand this properly 62 # pylint: disable=E1101 63 self.filename = os.path.basename(obj.name) 64 65 try: 66 self.mimetype = self.is_image(self.input_file_content) 67 except TelegramError: 68 if self.filename: 69 self.mimetype = mimetypes.guess_type( 70 self.filename)[0] or DEFAULT_MIME_TYPE 71 else: 72 self.mimetype = DEFAULT_MIME_TYPE 73 if not self.filename or '.' not in self.filename: 74 self.filename = self.mimetype.replace('/', '.') 75 76 @property 77 def field_tuple(self): 78 return self.filename, self.input_file_content, self.mimetype 79 80 @staticmethod 81 def is_image(stream): 82 """Check if the content file is an image by analyzing its headers. 83 84 Args: 85 stream (:obj:`str`): A str representing the content of a file. 86 87 Returns: 88 :obj:`str`: The str mime-type of an image. 89 90 """ 91 image = imghdr.what(None, stream) 92 if image: 93 return 'image/%s' % image 94 95 raise TelegramError('Could not parse file content') 96 97 @staticmethod 98 def is_file(obj): 99 return hasattr(obj, 'read') 100 101 def to_dict(self): 102 if self.attach: 103 return 'attach://' + self.attach 104 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/telegram/files/inputfile.py b/telegram/files/inputfile.py --- a/telegram/files/inputfile.py +++ b/telegram/files/inputfile.py @@ -70,7 +70,7 @@ self.filename)[0] or DEFAULT_MIME_TYPE else: self.mimetype = DEFAULT_MIME_TYPE - if not self.filename or '.' not in self.filename: + if not self.filename: self.filename = self.mimetype.replace('/', '.') @property
{"golden_diff": "diff --git a/telegram/files/inputfile.py b/telegram/files/inputfile.py\n--- a/telegram/files/inputfile.py\n+++ b/telegram/files/inputfile.py\n@@ -70,7 +70,7 @@\n self.filename)[0] or DEFAULT_MIME_TYPE\n else:\n self.mimetype = DEFAULT_MIME_TYPE\n- if not self.filename or '.' not in self.filename:\n+ if not self.filename:\n self.filename = self.mimetype.replace('/', '.')\n \n @property\n", "issue": "Can't change filename when send document after upgrading to v11.1.0\n### Steps to reproduce\r\n1. Generate a pickle file \"test\" (I didn't test other common files yet)\r\n\r\n2. Send this file to user\r\n\r\n`bot.send_document(chat_id=user_chat_id, document=open('./test', 'rb'), filename=\"test\")`\r\n\r\nor\r\n\r\n`bot.send_document(chat_id=user_chat_id, document=open('./test', 'rb'))`\r\n\r\n### Expected behaviour\r\nUser will receive a file named **test**\r\n\r\n### Actual behaviour\r\nUser received a file named **application.octet-stream**\r\n\r\n### Configuration\r\n**Operating System:** \r\n\r\nDebian (Server, where I first found this issue)\r\n\r\nUbuntu(Local, **I test on v10.1.0, everything is fine**, so I upgrade to v11.1.0, then I have the same issue as Debian Server)\r\n\r\n**Version of Python, python-telegram-bot & dependencies:**\r\n\r\n``$ python -m telegram``\r\n\r\n*My Local Ubuntu After Upgrade:*\r\npython-telegram-bot 11.1.0\r\ncertifi 2018.08.24\r\nfuture 0.16.0\r\nPython 3.6.6 (default, Sep 12 2018, 18:26:19) [GCC 8.0.1 20180414 (experimental) [trunk revision 259383]]\r\n\r\nThe pictures shows results of python-telegram-bot v10.1.0 (the first one) and v11.1.0 (the second one) :\r\n\r\n![screenshot from 2018-09-29 14-16-06](https://user-images.githubusercontent.com/16657782/46246951-2382a080-c3f4-11e8-9f01-b4dd90960ccd.png)\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# pylint: disable=W0622,E0611\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2018\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains an object that represents a Telegram InputFile.\"\"\"\n\nimport imghdr\nimport mimetypes\nimport os\nfrom uuid import uuid4\n\nfrom telegram import TelegramError\n\nDEFAULT_MIME_TYPE = 'application/octet-stream'\n\n\nclass InputFile(object):\n \"\"\"This object represents a Telegram InputFile.\n\n Attributes:\n input_file_content (:obj:`bytes`): The binaray content of the file to send.\n filename (:obj:`str`): Optional, Filename for the file to be sent.\n attach (:obj:`str`): Optional, attach id for sending multiple files.\n\n Args:\n obj (:obj:`File handler`): An open file descriptor.\n filename (:obj:`str`, optional): Filename for this InputFile.\n attach (:obj:`bool`, optional): Whether this should be send as one file or is part of a\n collection of files.\n\n Raises:\n TelegramError\n\n \"\"\"\n\n def __init__(self, obj, filename=None, attach=None):\n self.filename = None\n self.input_file_content = obj.read()\n self.attach = 'attached' + uuid4().hex if attach else None\n\n if filename:\n self.filename = filename\n elif (hasattr(obj, 'name') and\n not isinstance(obj.name, int) and # py3\n obj.name != '<fdopen>'): # py2\n # on py2.7, pylint fails to understand this properly\n # pylint: disable=E1101\n self.filename = os.path.basename(obj.name)\n\n try:\n self.mimetype = self.is_image(self.input_file_content)\n except TelegramError:\n if self.filename:\n self.mimetype = mimetypes.guess_type(\n self.filename)[0] or DEFAULT_MIME_TYPE\n else:\n self.mimetype = DEFAULT_MIME_TYPE\n if not self.filename or '.' not in self.filename:\n self.filename = self.mimetype.replace('/', '.')\n\n @property\n def field_tuple(self):\n return self.filename, self.input_file_content, self.mimetype\n\n @staticmethod\n def is_image(stream):\n \"\"\"Check if the content file is an image by analyzing its headers.\n\n Args:\n stream (:obj:`str`): A str representing the content of a file.\n\n Returns:\n :obj:`str`: The str mime-type of an image.\n\n \"\"\"\n image = imghdr.what(None, stream)\n if image:\n return 'image/%s' % image\n\n raise TelegramError('Could not parse file content')\n\n @staticmethod\n def is_file(obj):\n return hasattr(obj, 'read')\n\n def to_dict(self):\n if self.attach:\n return 'attach://' + self.attach\n", "path": "telegram/files/inputfile.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# pylint: disable=W0622,E0611\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2018\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains an object that represents a Telegram InputFile.\"\"\"\n\nimport imghdr\nimport mimetypes\nimport os\nfrom uuid import uuid4\n\nfrom telegram import TelegramError\n\nDEFAULT_MIME_TYPE = 'application/octet-stream'\n\n\nclass InputFile(object):\n \"\"\"This object represents a Telegram InputFile.\n\n Attributes:\n input_file_content (:obj:`bytes`): The binaray content of the file to send.\n filename (:obj:`str`): Optional, Filename for the file to be sent.\n attach (:obj:`str`): Optional, attach id for sending multiple files.\n\n Args:\n obj (:obj:`File handler`): An open file descriptor.\n filename (:obj:`str`, optional): Filename for this InputFile.\n attach (:obj:`bool`, optional): Whether this should be send as one file or is part of a\n collection of files.\n\n Raises:\n TelegramError\n\n \"\"\"\n\n def __init__(self, obj, filename=None, attach=None):\n self.filename = None\n self.input_file_content = obj.read()\n self.attach = 'attached' + uuid4().hex if attach else None\n\n if filename:\n self.filename = filename\n elif (hasattr(obj, 'name') and\n not isinstance(obj.name, int) and # py3\n obj.name != '<fdopen>'): # py2\n # on py2.7, pylint fails to understand this properly\n # pylint: disable=E1101\n self.filename = os.path.basename(obj.name)\n\n try:\n self.mimetype = self.is_image(self.input_file_content)\n except TelegramError:\n if self.filename:\n self.mimetype = mimetypes.guess_type(\n self.filename)[0] or DEFAULT_MIME_TYPE\n else:\n self.mimetype = DEFAULT_MIME_TYPE\n if not self.filename:\n self.filename = self.mimetype.replace('/', '.')\n\n @property\n def field_tuple(self):\n return self.filename, self.input_file_content, self.mimetype\n\n @staticmethod\n def is_image(stream):\n \"\"\"Check if the content file is an image by analyzing its headers.\n\n Args:\n stream (:obj:`str`): A str representing the content of a file.\n\n Returns:\n :obj:`str`: The str mime-type of an image.\n\n \"\"\"\n image = imghdr.what(None, stream)\n if image:\n return 'image/%s' % image\n\n raise TelegramError('Could not parse file content')\n\n @staticmethod\n def is_file(obj):\n return hasattr(obj, 'read')\n\n def to_dict(self):\n if self.attach:\n return 'attach://' + self.attach\n", "path": "telegram/files/inputfile.py"}]}
1,702
109
gh_patches_debug_26
rasdani/github-patches
git_diff
nautobot__nautobot-3317
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Remove legacy `manage.py` <!-- NOTE: This template is for use by maintainers only. Please do not submit an issue using this template unless you have been specifically asked to do so. --> ### Proposed Changes Simply remove `manage.py` from the project root. <!-- Provide justification for the proposed change(s). --> ### Justification This was left there initially in v1.0.0 as a fallback, however it is absolutely no longer needed. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `manage.py` Content: ``` 1 #!/usr/bin/env python3 2 3 import sys 4 5 from nautobot.core.cli import main 6 7 8 if __name__ == "__main__": 9 main() 10 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/manage.py b/manage.py deleted file mode 100755 --- a/manage.py +++ /dev/null @@ -1,9 +0,0 @@ -#!/usr/bin/env python3 - -import sys - -from nautobot.core.cli import main - - -if __name__ == "__main__": - main()
{"golden_diff": "diff --git a/manage.py b/manage.py\ndeleted file mode 100755\n--- a/manage.py\n+++ /dev/null\n@@ -1,9 +0,0 @@\n-#!/usr/bin/env python3\n-\n-import sys\n-\n-from nautobot.core.cli import main\n-\n-\n-if __name__ == \"__main__\":\n- main()\n", "issue": "Remove legacy `manage.py` \n<!--\r\n NOTE: This template is for use by maintainers only. Please do not submit\r\n an issue using this template unless you have been specifically asked to\r\n do so.\r\n-->\r\n### Proposed Changes\r\n\r\nSimply remove `manage.py` from the project root.\r\n\r\n<!-- Provide justification for the proposed change(s). -->\r\n### Justification\r\n\r\nThis was left there initially in v1.0.0 as a fallback, however it is absolutely no longer needed.\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport sys\n\nfrom nautobot.core.cli import main\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "manage.py"}], "after_files": [{"content": null, "path": "manage.py"}]}
401
77
gh_patches_debug_7304
rasdani/github-patches
git_diff
uccser__cs-unplugged-225
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Complete folder structure for test suite Each file should have a docstring explaining it's intended purpose. Add a code coverage tool --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `csunplugged/utils/BaseLoader.py` Content: ``` 1 """Base loader used to create custom loaders for content.""" 2 3 import yaml 4 import mdx_math 5 import abc 6 import sys 7 import re 8 import os.path 9 from os import listdir 10 from verto import Verto 11 12 from .check_required_files import check_converter_required_files 13 14 from utils.errors.CouldNotFindMarkdownFileError import CouldNotFindMarkdownFileError 15 from utils.errors.EmptyMarkdownFileError import EmptyMarkdownFileError 16 from utils.errors.EmptyConfigFileError import EmptyConfigFileError 17 from utils.errors.InvalidConfigFileError import InvalidConfigFileError 18 from utils.errors.NoHeadingFoundInMarkdownFileError import NoHeadingFoundInMarkdownFileError 19 from utils.errors.CouldNotFindConfigFileError import CouldNotFindConfigFileError 20 21 22 class BaseLoader(): 23 """Base loader class for individual loaders.""" 24 25 def __init__(self, BASE_PATH="", load_log=[]): 26 """Create a BaseLoader object. 27 28 Args: 29 BASE_PATH: string of base path. 30 load_log: list of log messages. 31 """ 32 if load_log: 33 self.load_log = load_log 34 else: 35 self.load_log = list(load_log) 36 self.BASE_PATH = BASE_PATH 37 self.setup_md_to_html_converter() 38 39 def setup_md_to_html_converter(self): 40 """Create Markdown converter. 41 42 The converter is created with custom processors, html templates, 43 and extensions. 44 """ 45 templates = self.load_template_files() 46 extensions = [ 47 "markdown.extensions.fenced_code", 48 "markdown.extensions.codehilite", 49 "markdown.extensions.sane_lists", 50 "markdown.extensions.tables", 51 mdx_math.MathExtension(enable_dollar_delimiter=True) 52 ] 53 self.converter = Verto(html_templates=templates, extensions=extensions) 54 custom_processors = self.converter.processor_defaults() 55 custom_processors.add("remove-title") 56 self.converter.update_processors(custom_processors) 57 58 def convert_md_file(self, md_file_path, config_file_path, heading_required=True): 59 """Return the Verto object for a given Markdown file. 60 61 Args: 62 md_file_path: location of Markdown file to convert 63 64 Returns: 65 VertoResult object 66 67 Raises: 68 CouldNotFindMarkdownFileError: when a given Markdown file cannot be found. 69 NoHeadingFoundInMarkdownFileError: when no heading can be found in a given 70 Markdown file. 71 EmptyMarkdownFileError: when no content can be found in a given Markdown 72 file. 73 """ 74 try: 75 # check file exists 76 content = open(md_file_path, encoding="UTF-8").read() 77 except: 78 raise CouldNotFindMarkdownFileError(md_file_path, config_file_path) 79 80 result = self.converter.convert(content) 81 82 if heading_required: 83 if result.title is None: 84 raise NoHeadingFoundInMarkdownFileError(md_file_path) 85 86 if len(result.html_string) == 0: 87 raise EmptyMarkdownFileError(md_file_path) 88 89 check_converter_required_files(result.required_files, md_file_path) 90 return result 91 92 def log(self, log_message, indent_amount=0): 93 """Add the log message to the load log with the specified indent.""" 94 self.load_log.append((log_message, indent_amount)) 95 96 def print_load_log(self): 97 """Output log messages from loader to console.""" 98 for (log, indent_amount) in self.load_log: 99 indent = " " * indent_amount 100 sys.stdout.write("{indent}{text}\n".format(indent=indent, text=log)) 101 sys.stdout.write("\n") 102 self.load_log = [] 103 104 def load_yaml_file(self, yaml_file_path): 105 """Load and read given YAML file. 106 107 Args: 108 file_path: location of yaml file to read 109 110 Returns: 111 Either list or string, depending on structure of given yaml file 112 113 Raises: 114 CouldNotFindConfigFileError: when a given config file cannot be found. 115 InvalidConfigFileError: when a given config file is incorrectly formatted. 116 EmptyConfigFileError: when a give config file is empty. 117 """ 118 try: 119 yaml_file = open(yaml_file_path, encoding="UTF-8").read() 120 except: 121 raise CouldNotFindConfigFileError(yaml_file_path) 122 123 try: 124 yaml_contents = yaml.load(yaml_file) 125 except: 126 raise InvalidConfigFileError(yaml_file_path) 127 128 if yaml_contents is None: 129 raise EmptyConfigFileError(yaml_file_path) 130 131 if isinstance(yaml_contents, dict) is False: 132 raise InvalidConfigFileError(yaml_file_path) 133 134 return yaml_contents 135 136 def load_template_files(self): 137 """Load custom HTML templates for converter. 138 139 Returns: 140 templates: dictionary of html templates 141 """ 142 templates = dict() 143 template_path = os.path.join( 144 os.path.dirname(__file__), 145 "custom_converter_templates/" 146 ) 147 for file in listdir(template_path): 148 template_file = re.search(r"(.*?).html$", file) 149 if template_file: 150 template_name = template_file.groups()[0] 151 templates[template_name] = open(template_path + file).read() 152 return templates 153 154 @abc.abstractmethod 155 def load(self): 156 """Abstract method to be implemented by subclasses. 157 158 Raise: 159 NotImplementedError: when a user attempts to run the load() method of the 160 BaseLoader class. 161 """ 162 raise NotImplementedError("Subclass does not implement this method") 163 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/csunplugged/utils/BaseLoader.py b/csunplugged/utils/BaseLoader.py --- a/csunplugged/utils/BaseLoader.py +++ b/csunplugged/utils/BaseLoader.py @@ -10,7 +10,6 @@ from verto import Verto from .check_required_files import check_converter_required_files - from utils.errors.CouldNotFindMarkdownFileError import CouldNotFindMarkdownFileError from utils.errors.EmptyMarkdownFileError import EmptyMarkdownFileError from utils.errors.EmptyConfigFileError import EmptyConfigFileError
{"golden_diff": "diff --git a/csunplugged/utils/BaseLoader.py b/csunplugged/utils/BaseLoader.py\n--- a/csunplugged/utils/BaseLoader.py\n+++ b/csunplugged/utils/BaseLoader.py\n@@ -10,7 +10,6 @@\n from verto import Verto\n \n from .check_required_files import check_converter_required_files\n-\n from utils.errors.CouldNotFindMarkdownFileError import CouldNotFindMarkdownFileError\n from utils.errors.EmptyMarkdownFileError import EmptyMarkdownFileError\n from utils.errors.EmptyConfigFileError import EmptyConfigFileError\n", "issue": "Complete folder structure for test suite\nEach file should have a docstring explaining it's intended purpose.\nAdd a code coverage tool\n\n", "before_files": [{"content": "\"\"\"Base loader used to create custom loaders for content.\"\"\"\n\nimport yaml\nimport mdx_math\nimport abc\nimport sys\nimport re\nimport os.path\nfrom os import listdir\nfrom verto import Verto\n\nfrom .check_required_files import check_converter_required_files\n\nfrom utils.errors.CouldNotFindMarkdownFileError import CouldNotFindMarkdownFileError\nfrom utils.errors.EmptyMarkdownFileError import EmptyMarkdownFileError\nfrom utils.errors.EmptyConfigFileError import EmptyConfigFileError\nfrom utils.errors.InvalidConfigFileError import InvalidConfigFileError\nfrom utils.errors.NoHeadingFoundInMarkdownFileError import NoHeadingFoundInMarkdownFileError\nfrom utils.errors.CouldNotFindConfigFileError import CouldNotFindConfigFileError\n\n\nclass BaseLoader():\n \"\"\"Base loader class for individual loaders.\"\"\"\n\n def __init__(self, BASE_PATH=\"\", load_log=[]):\n \"\"\"Create a BaseLoader object.\n\n Args:\n BASE_PATH: string of base path.\n load_log: list of log messages.\n \"\"\"\n if load_log:\n self.load_log = load_log\n else:\n self.load_log = list(load_log)\n self.BASE_PATH = BASE_PATH\n self.setup_md_to_html_converter()\n\n def setup_md_to_html_converter(self):\n \"\"\"Create Markdown converter.\n\n The converter is created with custom processors, html templates,\n and extensions.\n \"\"\"\n templates = self.load_template_files()\n extensions = [\n \"markdown.extensions.fenced_code\",\n \"markdown.extensions.codehilite\",\n \"markdown.extensions.sane_lists\",\n \"markdown.extensions.tables\",\n mdx_math.MathExtension(enable_dollar_delimiter=True)\n ]\n self.converter = Verto(html_templates=templates, extensions=extensions)\n custom_processors = self.converter.processor_defaults()\n custom_processors.add(\"remove-title\")\n self.converter.update_processors(custom_processors)\n\n def convert_md_file(self, md_file_path, config_file_path, heading_required=True):\n \"\"\"Return the Verto object for a given Markdown file.\n\n Args:\n md_file_path: location of Markdown file to convert\n\n Returns:\n VertoResult object\n\n Raises:\n CouldNotFindMarkdownFileError: when a given Markdown file cannot be found.\n NoHeadingFoundInMarkdownFileError: when no heading can be found in a given\n Markdown file.\n EmptyMarkdownFileError: when no content can be found in a given Markdown\n file.\n \"\"\"\n try:\n # check file exists\n content = open(md_file_path, encoding=\"UTF-8\").read()\n except:\n raise CouldNotFindMarkdownFileError(md_file_path, config_file_path)\n\n result = self.converter.convert(content)\n\n if heading_required:\n if result.title is None:\n raise NoHeadingFoundInMarkdownFileError(md_file_path)\n\n if len(result.html_string) == 0:\n raise EmptyMarkdownFileError(md_file_path)\n\n check_converter_required_files(result.required_files, md_file_path)\n return result\n\n def log(self, log_message, indent_amount=0):\n \"\"\"Add the log message to the load log with the specified indent.\"\"\"\n self.load_log.append((log_message, indent_amount))\n\n def print_load_log(self):\n \"\"\"Output log messages from loader to console.\"\"\"\n for (log, indent_amount) in self.load_log:\n indent = \" \" * indent_amount\n sys.stdout.write(\"{indent}{text}\\n\".format(indent=indent, text=log))\n sys.stdout.write(\"\\n\")\n self.load_log = []\n\n def load_yaml_file(self, yaml_file_path):\n \"\"\"Load and read given YAML file.\n\n Args:\n file_path: location of yaml file to read\n\n Returns:\n Either list or string, depending on structure of given yaml file\n\n Raises:\n CouldNotFindConfigFileError: when a given config file cannot be found.\n InvalidConfigFileError: when a given config file is incorrectly formatted.\n EmptyConfigFileError: when a give config file is empty.\n \"\"\"\n try:\n yaml_file = open(yaml_file_path, encoding=\"UTF-8\").read()\n except:\n raise CouldNotFindConfigFileError(yaml_file_path)\n\n try:\n yaml_contents = yaml.load(yaml_file)\n except:\n raise InvalidConfigFileError(yaml_file_path)\n\n if yaml_contents is None:\n raise EmptyConfigFileError(yaml_file_path)\n\n if isinstance(yaml_contents, dict) is False:\n raise InvalidConfigFileError(yaml_file_path)\n\n return yaml_contents\n\n def load_template_files(self):\n \"\"\"Load custom HTML templates for converter.\n\n Returns:\n templates: dictionary of html templates\n \"\"\"\n templates = dict()\n template_path = os.path.join(\n os.path.dirname(__file__),\n \"custom_converter_templates/\"\n )\n for file in listdir(template_path):\n template_file = re.search(r\"(.*?).html$\", file)\n if template_file:\n template_name = template_file.groups()[0]\n templates[template_name] = open(template_path + file).read()\n return templates\n\n @abc.abstractmethod\n def load(self):\n \"\"\"Abstract method to be implemented by subclasses.\n\n Raise:\n NotImplementedError: when a user attempts to run the load() method of the\n BaseLoader class.\n \"\"\"\n raise NotImplementedError(\"Subclass does not implement this method\")\n", "path": "csunplugged/utils/BaseLoader.py"}], "after_files": [{"content": "\"\"\"Base loader used to create custom loaders for content.\"\"\"\n\nimport yaml\nimport mdx_math\nimport abc\nimport sys\nimport re\nimport os.path\nfrom os import listdir\nfrom verto import Verto\n\nfrom .check_required_files import check_converter_required_files\nfrom utils.errors.CouldNotFindMarkdownFileError import CouldNotFindMarkdownFileError\nfrom utils.errors.EmptyMarkdownFileError import EmptyMarkdownFileError\nfrom utils.errors.EmptyConfigFileError import EmptyConfigFileError\nfrom utils.errors.InvalidConfigFileError import InvalidConfigFileError\nfrom utils.errors.NoHeadingFoundInMarkdownFileError import NoHeadingFoundInMarkdownFileError\nfrom utils.errors.CouldNotFindConfigFileError import CouldNotFindConfigFileError\n\n\nclass BaseLoader():\n \"\"\"Base loader class for individual loaders.\"\"\"\n\n def __init__(self, BASE_PATH=\"\", load_log=[]):\n \"\"\"Create a BaseLoader object.\n\n Args:\n BASE_PATH: string of base path.\n load_log: list of log messages.\n \"\"\"\n if load_log:\n self.load_log = load_log\n else:\n self.load_log = list(load_log)\n self.BASE_PATH = BASE_PATH\n self.setup_md_to_html_converter()\n\n def setup_md_to_html_converter(self):\n \"\"\"Create Markdown converter.\n\n The converter is created with custom processors, html templates,\n and extensions.\n \"\"\"\n templates = self.load_template_files()\n extensions = [\n \"markdown.extensions.fenced_code\",\n \"markdown.extensions.codehilite\",\n \"markdown.extensions.sane_lists\",\n \"markdown.extensions.tables\",\n mdx_math.MathExtension(enable_dollar_delimiter=True)\n ]\n self.converter = Verto(html_templates=templates, extensions=extensions)\n custom_processors = self.converter.processor_defaults()\n custom_processors.add(\"remove-title\")\n self.converter.update_processors(custom_processors)\n\n def convert_md_file(self, md_file_path, config_file_path, heading_required=True):\n \"\"\"Return the Verto object for a given Markdown file.\n\n Args:\n md_file_path: location of Markdown file to convert\n\n Returns:\n VertoResult object\n\n Raises:\n CouldNotFindMarkdownFileError: when a given Markdown file cannot be found.\n NoHeadingFoundInMarkdownFileError: when no heading can be found in a given\n Markdown file.\n EmptyMarkdownFileError: when no content can be found in a given Markdown\n file.\n \"\"\"\n try:\n # check file exists\n content = open(md_file_path, encoding=\"UTF-8\").read()\n except:\n raise CouldNotFindMarkdownFileError(md_file_path, config_file_path)\n\n result = self.converter.convert(content)\n\n if heading_required:\n if result.title is None:\n raise NoHeadingFoundInMarkdownFileError(md_file_path)\n\n if len(result.html_string) == 0:\n raise EmptyMarkdownFileError(md_file_path)\n\n check_converter_required_files(result.required_files, md_file_path)\n return result\n\n def log(self, log_message, indent_amount=0):\n \"\"\"Add the log message to the load log with the specified indent.\"\"\"\n self.load_log.append((log_message, indent_amount))\n\n def print_load_log(self):\n \"\"\"Output log messages from loader to console.\"\"\"\n for (log, indent_amount) in self.load_log:\n indent = \" \" * indent_amount\n sys.stdout.write(\"{indent}{text}\\n\".format(indent=indent, text=log))\n sys.stdout.write(\"\\n\")\n self.load_log = []\n\n def load_yaml_file(self, yaml_file_path):\n \"\"\"Load and read given YAML file.\n\n Args:\n file_path: location of yaml file to read\n\n Returns:\n Either list or string, depending on structure of given yaml file\n\n Raises:\n CouldNotFindConfigFileError: when a given config file cannot be found.\n InvalidConfigFileError: when a given config file is incorrectly formatted.\n EmptyConfigFileError: when a give config file is empty.\n \"\"\"\n try:\n yaml_file = open(yaml_file_path, encoding=\"UTF-8\").read()\n except:\n raise CouldNotFindConfigFileError(yaml_file_path)\n\n try:\n yaml_contents = yaml.load(yaml_file)\n except:\n raise InvalidConfigFileError(yaml_file_path)\n\n if yaml_contents is None:\n raise EmptyConfigFileError(yaml_file_path)\n\n if isinstance(yaml_contents, dict) is False:\n raise InvalidConfigFileError(yaml_file_path)\n\n return yaml_contents\n\n def load_template_files(self):\n \"\"\"Load custom HTML templates for converter.\n\n Returns:\n templates: dictionary of html templates\n \"\"\"\n templates = dict()\n template_path = os.path.join(\n os.path.dirname(__file__),\n \"custom_converter_templates/\"\n )\n for file in listdir(template_path):\n template_file = re.search(r\"(.*?).html$\", file)\n if template_file:\n template_name = template_file.groups()[0]\n templates[template_name] = open(template_path + file).read()\n return templates\n\n @abc.abstractmethod\n def load(self):\n \"\"\"Abstract method to be implemented by subclasses.\n\n Raise:\n NotImplementedError: when a user attempts to run the load() method of the\n BaseLoader class.\n \"\"\"\n raise NotImplementedError(\"Subclass does not implement this method\")\n", "path": "csunplugged/utils/BaseLoader.py"}]}
1,813
119
gh_patches_debug_22951
rasdani/github-patches
git_diff
ydataai__ydata-profiling-829
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Phi K correlation variable order For me all correlation plots show variables in the (domain-specific sensible) order of the columns in my data frame. Only Phi K shows them in some other order. Is this a bug or a feature? Is there a setting to get the "good" order? This is with pandas 1.3 and pandas-profiling 3.0.0 <img width="879" alt="Screenshot 2021-09-05 at 21 43 55" src="https://user-images.githubusercontent.com/852409/132139566-ba92033b-98fb-4b3d-a869-6c096ed294a1.png"> <img width="907" alt="Screenshot 2021-09-05 at 21 43 45" src="https://user-images.githubusercontent.com/852409/132139567-22e2d9ce-cdc8-4b95-93b2-7445a78ed397.png"> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/pandas_profiling/model/pandas/correlations_pandas.py` Content: ``` 1 """Correlations between variables.""" 2 import itertools 3 import warnings 4 from typing import Optional 5 6 import numpy as np 7 import pandas as pd 8 from scipy import stats 9 10 from pandas_profiling.config import Settings 11 from pandas_profiling.model.correlations import ( 12 Cramers, 13 Kendall, 14 Pearson, 15 PhiK, 16 Spearman, 17 ) 18 19 20 @Spearman.compute.register(Settings, pd.DataFrame, dict) 21 def pandas_spearman_compute( 22 config: Settings, df: pd.DataFrame, summary: dict 23 ) -> Optional[pd.DataFrame]: 24 return df.corr(method="spearman") 25 26 27 @Pearson.compute.register(Settings, pd.DataFrame, dict) 28 def pandas_pearson_compute( 29 config: Settings, df: pd.DataFrame, summary: dict 30 ) -> Optional[pd.DataFrame]: 31 return df.corr(method="pearson") 32 33 34 @Kendall.compute.register(Settings, pd.DataFrame, dict) 35 def pandas_kendall_compute( 36 config: Settings, df: pd.DataFrame, summary: dict 37 ) -> Optional[pd.DataFrame]: 38 return df.corr(method="kendall") 39 40 41 def _cramers_corrected_stat(confusion_matrix: pd.DataFrame, correction: bool) -> float: 42 """Calculate the Cramer's V corrected stat for two variables. 43 44 Args: 45 confusion_matrix: Crosstab between two variables. 46 correction: Should the correction be applied? 47 48 Returns: 49 The Cramer's V corrected stat for the two variables. 50 """ 51 chi2 = stats.chi2_contingency(confusion_matrix, correction=correction)[0] 52 n = confusion_matrix.sum().sum() 53 phi2 = chi2 / n 54 r = confusion_matrix.shape[0] 55 k = confusion_matrix.shape[1] if len(confusion_matrix.shape) > 1 else 1 56 57 # Deal with NaNs later on 58 with np.errstate(divide="ignore", invalid="ignore"): 59 phi2corr = max(0.0, phi2 - ((k - 1.0) * (r - 1.0)) / (n - 1.0)) 60 rcorr = r - ((r - 1.0) ** 2.0) / (n - 1.0) 61 kcorr = k - ((k - 1.0) ** 2.0) / (n - 1.0) 62 rkcorr = min((kcorr - 1.0), (rcorr - 1.0)) 63 if rkcorr == 0.0: 64 corr = 1.0 65 else: 66 corr = np.sqrt(phi2corr / rkcorr) 67 return corr 68 69 70 @Cramers.compute.register(Settings, pd.DataFrame, dict) 71 def pandas_cramers_compute( 72 config: Settings, df: pd.DataFrame, summary: dict 73 ) -> Optional[pd.DataFrame]: 74 threshold = config.categorical_maximum_correlation_distinct 75 76 categoricals = { 77 key 78 for key, value in summary.items() 79 if value["type"] in {"Categorical", "Boolean"} 80 and value["n_distinct"] <= threshold 81 } 82 83 if len(categoricals) <= 1: 84 return None 85 86 matrix = np.zeros((len(categoricals), len(categoricals))) 87 np.fill_diagonal(matrix, 1.0) 88 correlation_matrix = pd.DataFrame( 89 matrix, 90 index=categoricals, 91 columns=categoricals, 92 ) 93 94 for name1, name2 in itertools.combinations(categoricals, 2): 95 confusion_matrix = pd.crosstab(df[name1], df[name2]) 96 correlation_matrix.loc[name2, name1] = _cramers_corrected_stat( 97 confusion_matrix, correction=True 98 ) 99 correlation_matrix.loc[name1, name2] = correlation_matrix.loc[name2, name1] 100 return correlation_matrix 101 102 103 @PhiK.compute.register(Settings, pd.DataFrame, dict) 104 def pandas_phik_compute( 105 config: Settings, df: pd.DataFrame, summary: dict 106 ) -> Optional[pd.DataFrame]: 107 intcols = { 108 key 109 for key, value in summary.items() 110 # DateTime currently excluded 111 # In some use cases, it makes sense to convert it to interval 112 # See https://github.com/KaveIO/PhiK/issues/7 113 if value["type"] == "Numeric" and 1 < value["n_distinct"] 114 } 115 116 selcols = { 117 key 118 for key, value in summary.items() 119 if value["type"] != "Unsupported" 120 and 1 < value["n_distinct"] <= config.categorical_maximum_correlation_distinct 121 } 122 selcols = selcols.union(intcols) 123 124 if len(selcols) <= 1: 125 return None 126 127 with warnings.catch_warnings(): 128 warnings.simplefilter("ignore") 129 from phik import phik_matrix 130 131 correlation = phik_matrix(df[selcols], interval_cols=list(intcols)) 132 133 return correlation 134 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/pandas_profiling/model/pandas/correlations_pandas.py b/src/pandas_profiling/model/pandas/correlations_pandas.py --- a/src/pandas_profiling/model/pandas/correlations_pandas.py +++ b/src/pandas_profiling/model/pandas/correlations_pandas.py @@ -104,6 +104,8 @@ def pandas_phik_compute( config: Settings, df: pd.DataFrame, summary: dict ) -> Optional[pd.DataFrame]: + df_cols_dict = {i: list(df.columns).index(i) for i in df.columns} + intcols = { key for key, value in summary.items() @@ -120,14 +122,15 @@ and 1 < value["n_distinct"] <= config.categorical_maximum_correlation_distinct } selcols = selcols.union(intcols) + selected_cols = sorted(selcols, key=lambda i: df_cols_dict[i]) - if len(selcols) <= 1: + if len(selected_cols) <= 1: return None with warnings.catch_warnings(): warnings.simplefilter("ignore") from phik import phik_matrix - correlation = phik_matrix(df[selcols], interval_cols=list(intcols)) + correlation = phik_matrix(df[selected_cols], interval_cols=list(intcols)) return correlation
{"golden_diff": "diff --git a/src/pandas_profiling/model/pandas/correlations_pandas.py b/src/pandas_profiling/model/pandas/correlations_pandas.py\n--- a/src/pandas_profiling/model/pandas/correlations_pandas.py\n+++ b/src/pandas_profiling/model/pandas/correlations_pandas.py\n@@ -104,6 +104,8 @@\n def pandas_phik_compute(\n config: Settings, df: pd.DataFrame, summary: dict\n ) -> Optional[pd.DataFrame]:\n+ df_cols_dict = {i: list(df.columns).index(i) for i in df.columns}\n+\n intcols = {\n key\n for key, value in summary.items()\n@@ -120,14 +122,15 @@\n and 1 < value[\"n_distinct\"] <= config.categorical_maximum_correlation_distinct\n }\n selcols = selcols.union(intcols)\n+ selected_cols = sorted(selcols, key=lambda i: df_cols_dict[i])\n \n- if len(selcols) <= 1:\n+ if len(selected_cols) <= 1:\n return None\n \n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n from phik import phik_matrix\n \n- correlation = phik_matrix(df[selcols], interval_cols=list(intcols))\n+ correlation = phik_matrix(df[selected_cols], interval_cols=list(intcols))\n \n return correlation\n", "issue": "Phi K correlation variable order\nFor me all correlation plots show variables in the (domain-specific sensible) order of the columns in my data frame.\r\n\r\nOnly Phi K shows them in some other order.\r\n\r\nIs this a bug or a feature?\r\n\r\nIs there a setting to get the \"good\" order?\r\n\r\nThis is with pandas 1.3 and pandas-profiling 3.0.0\r\n\r\n<img width=\"879\" alt=\"Screenshot 2021-09-05 at 21 43 55\" src=\"https://user-images.githubusercontent.com/852409/132139566-ba92033b-98fb-4b3d-a869-6c096ed294a1.png\">\r\n<img width=\"907\" alt=\"Screenshot 2021-09-05 at 21 43 45\" src=\"https://user-images.githubusercontent.com/852409/132139567-22e2d9ce-cdc8-4b95-93b2-7445a78ed397.png\">\r\n\n", "before_files": [{"content": "\"\"\"Correlations between variables.\"\"\"\nimport itertools\nimport warnings\nfrom typing import Optional\n\nimport numpy as np\nimport pandas as pd\nfrom scipy import stats\n\nfrom pandas_profiling.config import Settings\nfrom pandas_profiling.model.correlations import (\n Cramers,\n Kendall,\n Pearson,\n PhiK,\n Spearman,\n)\n\n\[email protected](Settings, pd.DataFrame, dict)\ndef pandas_spearman_compute(\n config: Settings, df: pd.DataFrame, summary: dict\n) -> Optional[pd.DataFrame]:\n return df.corr(method=\"spearman\")\n\n\[email protected](Settings, pd.DataFrame, dict)\ndef pandas_pearson_compute(\n config: Settings, df: pd.DataFrame, summary: dict\n) -> Optional[pd.DataFrame]:\n return df.corr(method=\"pearson\")\n\n\[email protected](Settings, pd.DataFrame, dict)\ndef pandas_kendall_compute(\n config: Settings, df: pd.DataFrame, summary: dict\n) -> Optional[pd.DataFrame]:\n return df.corr(method=\"kendall\")\n\n\ndef _cramers_corrected_stat(confusion_matrix: pd.DataFrame, correction: bool) -> float:\n \"\"\"Calculate the Cramer's V corrected stat for two variables.\n\n Args:\n confusion_matrix: Crosstab between two variables.\n correction: Should the correction be applied?\n\n Returns:\n The Cramer's V corrected stat for the two variables.\n \"\"\"\n chi2 = stats.chi2_contingency(confusion_matrix, correction=correction)[0]\n n = confusion_matrix.sum().sum()\n phi2 = chi2 / n\n r = confusion_matrix.shape[0]\n k = confusion_matrix.shape[1] if len(confusion_matrix.shape) > 1 else 1\n\n # Deal with NaNs later on\n with np.errstate(divide=\"ignore\", invalid=\"ignore\"):\n phi2corr = max(0.0, phi2 - ((k - 1.0) * (r - 1.0)) / (n - 1.0))\n rcorr = r - ((r - 1.0) ** 2.0) / (n - 1.0)\n kcorr = k - ((k - 1.0) ** 2.0) / (n - 1.0)\n rkcorr = min((kcorr - 1.0), (rcorr - 1.0))\n if rkcorr == 0.0:\n corr = 1.0\n else:\n corr = np.sqrt(phi2corr / rkcorr)\n return corr\n\n\[email protected](Settings, pd.DataFrame, dict)\ndef pandas_cramers_compute(\n config: Settings, df: pd.DataFrame, summary: dict\n) -> Optional[pd.DataFrame]:\n threshold = config.categorical_maximum_correlation_distinct\n\n categoricals = {\n key\n for key, value in summary.items()\n if value[\"type\"] in {\"Categorical\", \"Boolean\"}\n and value[\"n_distinct\"] <= threshold\n }\n\n if len(categoricals) <= 1:\n return None\n\n matrix = np.zeros((len(categoricals), len(categoricals)))\n np.fill_diagonal(matrix, 1.0)\n correlation_matrix = pd.DataFrame(\n matrix,\n index=categoricals,\n columns=categoricals,\n )\n\n for name1, name2 in itertools.combinations(categoricals, 2):\n confusion_matrix = pd.crosstab(df[name1], df[name2])\n correlation_matrix.loc[name2, name1] = _cramers_corrected_stat(\n confusion_matrix, correction=True\n )\n correlation_matrix.loc[name1, name2] = correlation_matrix.loc[name2, name1]\n return correlation_matrix\n\n\[email protected](Settings, pd.DataFrame, dict)\ndef pandas_phik_compute(\n config: Settings, df: pd.DataFrame, summary: dict\n) -> Optional[pd.DataFrame]:\n intcols = {\n key\n for key, value in summary.items()\n # DateTime currently excluded\n # In some use cases, it makes sense to convert it to interval\n # See https://github.com/KaveIO/PhiK/issues/7\n if value[\"type\"] == \"Numeric\" and 1 < value[\"n_distinct\"]\n }\n\n selcols = {\n key\n for key, value in summary.items()\n if value[\"type\"] != \"Unsupported\"\n and 1 < value[\"n_distinct\"] <= config.categorical_maximum_correlation_distinct\n }\n selcols = selcols.union(intcols)\n\n if len(selcols) <= 1:\n return None\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n from phik import phik_matrix\n\n correlation = phik_matrix(df[selcols], interval_cols=list(intcols))\n\n return correlation\n", "path": "src/pandas_profiling/model/pandas/correlations_pandas.py"}], "after_files": [{"content": "\"\"\"Correlations between variables.\"\"\"\nimport itertools\nimport warnings\nfrom typing import Optional\n\nimport numpy as np\nimport pandas as pd\nfrom scipy import stats\n\nfrom pandas_profiling.config import Settings\nfrom pandas_profiling.model.correlations import (\n Cramers,\n Kendall,\n Pearson,\n PhiK,\n Spearman,\n)\n\n\[email protected](Settings, pd.DataFrame, dict)\ndef pandas_spearman_compute(\n config: Settings, df: pd.DataFrame, summary: dict\n) -> Optional[pd.DataFrame]:\n return df.corr(method=\"spearman\")\n\n\[email protected](Settings, pd.DataFrame, dict)\ndef pandas_pearson_compute(\n config: Settings, df: pd.DataFrame, summary: dict\n) -> Optional[pd.DataFrame]:\n return df.corr(method=\"pearson\")\n\n\[email protected](Settings, pd.DataFrame, dict)\ndef pandas_kendall_compute(\n config: Settings, df: pd.DataFrame, summary: dict\n) -> Optional[pd.DataFrame]:\n return df.corr(method=\"kendall\")\n\n\ndef _cramers_corrected_stat(confusion_matrix: pd.DataFrame, correction: bool) -> float:\n \"\"\"Calculate the Cramer's V corrected stat for two variables.\n\n Args:\n confusion_matrix: Crosstab between two variables.\n correction: Should the correction be applied?\n\n Returns:\n The Cramer's V corrected stat for the two variables.\n \"\"\"\n chi2 = stats.chi2_contingency(confusion_matrix, correction=correction)[0]\n n = confusion_matrix.sum().sum()\n phi2 = chi2 / n\n r = confusion_matrix.shape[0]\n k = confusion_matrix.shape[1] if len(confusion_matrix.shape) > 1 else 1\n\n # Deal with NaNs later on\n with np.errstate(divide=\"ignore\", invalid=\"ignore\"):\n phi2corr = max(0.0, phi2 - ((k - 1.0) * (r - 1.0)) / (n - 1.0))\n rcorr = r - ((r - 1.0) ** 2.0) / (n - 1.0)\n kcorr = k - ((k - 1.0) ** 2.0) / (n - 1.0)\n rkcorr = min((kcorr - 1.0), (rcorr - 1.0))\n if rkcorr == 0.0:\n corr = 1.0\n else:\n corr = np.sqrt(phi2corr / rkcorr)\n return corr\n\n\[email protected](Settings, pd.DataFrame, dict)\ndef pandas_cramers_compute(\n config: Settings, df: pd.DataFrame, summary: dict\n) -> Optional[pd.DataFrame]:\n threshold = config.categorical_maximum_correlation_distinct\n\n categoricals = {\n key\n for key, value in summary.items()\n if value[\"type\"] in {\"Categorical\", \"Boolean\"}\n and value[\"n_distinct\"] <= threshold\n }\n\n if len(categoricals) <= 1:\n return None\n\n matrix = np.zeros((len(categoricals), len(categoricals)))\n np.fill_diagonal(matrix, 1.0)\n correlation_matrix = pd.DataFrame(\n matrix,\n index=categoricals,\n columns=categoricals,\n )\n\n for name1, name2 in itertools.combinations(categoricals, 2):\n confusion_matrix = pd.crosstab(df[name1], df[name2])\n correlation_matrix.loc[name2, name1] = _cramers_corrected_stat(\n confusion_matrix, correction=True\n )\n correlation_matrix.loc[name1, name2] = correlation_matrix.loc[name2, name1]\n return correlation_matrix\n\n\[email protected](Settings, pd.DataFrame, dict)\ndef pandas_phik_compute(\n config: Settings, df: pd.DataFrame, summary: dict\n) -> Optional[pd.DataFrame]:\n df_cols_dict = {i: list(df.columns).index(i) for i in df.columns}\n\n intcols = {\n key\n for key, value in summary.items()\n # DateTime currently excluded\n # In some use cases, it makes sense to convert it to interval\n # See https://github.com/KaveIO/PhiK/issues/7\n if value[\"type\"] == \"Numeric\" and 1 < value[\"n_distinct\"]\n }\n\n selcols = {\n key\n for key, value in summary.items()\n if value[\"type\"] != \"Unsupported\"\n and 1 < value[\"n_distinct\"] <= config.categorical_maximum_correlation_distinct\n }\n selcols = selcols.union(intcols)\n selected_cols = sorted(selcols, key=lambda i: df_cols_dict[i])\n\n if len(selected_cols) <= 1:\n return None\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n from phik import phik_matrix\n\n correlation = phik_matrix(df[selected_cols], interval_cols=list(intcols))\n\n return correlation\n", "path": "src/pandas_profiling/model/pandas/correlations_pandas.py"}]}
1,891
305
gh_patches_debug_439
rasdani/github-patches
git_diff
localstack__localstack-1075
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Update code climate and badge https://codeclimate.com/github/atlassian/localstack is the old repo, is there a new code climate check for the new repo? The README is pointing to this old code climate project. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `localstack/constants.py` Content: ``` 1 import os 2 import localstack_client.config 3 4 # LocalStack version 5 VERSION = '0.8.9' 6 7 # default AWS region 8 if 'DEFAULT_REGION' not in os.environ: 9 os.environ['DEFAULT_REGION'] = 'us-east-1' 10 DEFAULT_REGION = os.environ['DEFAULT_REGION'] 11 12 # constant to represent the "local" region, i.e., local machine 13 REGION_LOCAL = 'local' 14 15 # dev environment 16 ENV_DEV = 'dev' 17 18 # backend service ports, for services that are behind a proxy (counting down from 4566) 19 DEFAULT_PORT_APIGATEWAY_BACKEND = 4566 20 DEFAULT_PORT_KINESIS_BACKEND = 4565 21 DEFAULT_PORT_DYNAMODB_BACKEND = 4564 22 DEFAULT_PORT_S3_BACKEND = 4563 23 DEFAULT_PORT_SNS_BACKEND = 4562 24 DEFAULT_PORT_SQS_BACKEND = 4561 25 DEFAULT_PORT_ELASTICSEARCH_BACKEND = 4560 26 DEFAULT_PORT_CLOUDFORMATION_BACKEND = 4559 27 28 DEFAULT_PORT_WEB_UI = 8080 29 30 LOCALHOST = 'localhost' 31 32 # version of the Maven dependency with Java utility code 33 LOCALSTACK_MAVEN_VERSION = '0.1.15' 34 35 # map of default service APIs and ports to be spun up (fetch map from localstack_client) 36 DEFAULT_SERVICE_PORTS = localstack_client.config.get_service_ports() 37 38 # host to bind to when starting the services 39 BIND_HOST = '0.0.0.0' 40 41 # AWS user account ID used for tests 42 TEST_AWS_ACCOUNT_ID = '000000000000' 43 os.environ['TEST_AWS_ACCOUNT_ID'] = TEST_AWS_ACCOUNT_ID 44 45 # root code folder 46 LOCALSTACK_ROOT_FOLDER = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')) 47 48 # virtualenv folder 49 LOCALSTACK_VENV_FOLDER = os.path.join(LOCALSTACK_ROOT_FOLDER, '.venv') 50 if not os.path.isdir(LOCALSTACK_VENV_FOLDER): 51 # assuming this package lives here: <python>/lib/pythonX.X/site-packages/localstack/ 52 LOCALSTACK_VENV_FOLDER = os.path.realpath(os.path.join(LOCALSTACK_ROOT_FOLDER, '..', '..', '..')) 53 54 # API Gateway path to indicate a user request sent to the gateway 55 PATH_USER_REQUEST = '_user_request_' 56 57 # name of LocalStack Docker image 58 DOCKER_IMAGE_NAME = 'localstack/localstack' 59 60 # environment variable name to tag local test runs 61 ENV_INTERNAL_TEST_RUN = 'LOCALSTACK_INTERNAL_TEST_RUN' 62 63 # content types 64 APPLICATION_AMZ_JSON_1_0 = 'application/x-amz-json-1.0' 65 APPLICATION_AMZ_JSON_1_1 = 'application/x-amz-json-1.1' 66 APPLICATION_JSON = 'application/json' 67 68 # Lambda defaults 69 LAMBDA_TEST_ROLE = 'arn:aws:iam::%s:role/lambda-test-role' % TEST_AWS_ACCOUNT_ID 70 71 # installation constants 72 ELASTICSEARCH_JAR_URL = 'https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-6.2.0.zip' 73 DYNAMODB_JAR_URL = 'https://s3-us-west-2.amazonaws.com/dynamodb-local/dynamodb_local_latest.zip' 74 ELASTICMQ_JAR_URL = 'https://s3-eu-west-1.amazonaws.com/softwaremill-public/elasticmq-server-0.14.2.jar' 75 STS_JAR_URL = 'http://central.maven.org/maven2/com/amazonaws/aws-java-sdk-sts/1.11.14/aws-java-sdk-sts-1.11.14.jar' 76 77 # API endpoint for analytics events 78 API_ENDPOINT = 'https://api.localstack.cloud/v1' 79 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/localstack/constants.py b/localstack/constants.py --- a/localstack/constants.py +++ b/localstack/constants.py @@ -2,7 +2,7 @@ import localstack_client.config # LocalStack version -VERSION = '0.8.9' +VERSION = '0.8.10' # default AWS region if 'DEFAULT_REGION' not in os.environ:
{"golden_diff": "diff --git a/localstack/constants.py b/localstack/constants.py\n--- a/localstack/constants.py\n+++ b/localstack/constants.py\n@@ -2,7 +2,7 @@\n import localstack_client.config\n \n # LocalStack version\n-VERSION = '0.8.9'\n+VERSION = '0.8.10'\n \n # default AWS region\n if 'DEFAULT_REGION' not in os.environ:\n", "issue": "Update code climate and badge\nhttps://codeclimate.com/github/atlassian/localstack is the old repo, is there a new code climate check for the new repo? The README is pointing to this old code climate project.\n", "before_files": [{"content": "import os\nimport localstack_client.config\n\n# LocalStack version\nVERSION = '0.8.9'\n\n# default AWS region\nif 'DEFAULT_REGION' not in os.environ:\n os.environ['DEFAULT_REGION'] = 'us-east-1'\nDEFAULT_REGION = os.environ['DEFAULT_REGION']\n\n# constant to represent the \"local\" region, i.e., local machine\nREGION_LOCAL = 'local'\n\n# dev environment\nENV_DEV = 'dev'\n\n# backend service ports, for services that are behind a proxy (counting down from 4566)\nDEFAULT_PORT_APIGATEWAY_BACKEND = 4566\nDEFAULT_PORT_KINESIS_BACKEND = 4565\nDEFAULT_PORT_DYNAMODB_BACKEND = 4564\nDEFAULT_PORT_S3_BACKEND = 4563\nDEFAULT_PORT_SNS_BACKEND = 4562\nDEFAULT_PORT_SQS_BACKEND = 4561\nDEFAULT_PORT_ELASTICSEARCH_BACKEND = 4560\nDEFAULT_PORT_CLOUDFORMATION_BACKEND = 4559\n\nDEFAULT_PORT_WEB_UI = 8080\n\nLOCALHOST = 'localhost'\n\n# version of the Maven dependency with Java utility code\nLOCALSTACK_MAVEN_VERSION = '0.1.15'\n\n# map of default service APIs and ports to be spun up (fetch map from localstack_client)\nDEFAULT_SERVICE_PORTS = localstack_client.config.get_service_ports()\n\n# host to bind to when starting the services\nBIND_HOST = '0.0.0.0'\n\n# AWS user account ID used for tests\nTEST_AWS_ACCOUNT_ID = '000000000000'\nos.environ['TEST_AWS_ACCOUNT_ID'] = TEST_AWS_ACCOUNT_ID\n\n# root code folder\nLOCALSTACK_ROOT_FOLDER = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))\n\n# virtualenv folder\nLOCALSTACK_VENV_FOLDER = os.path.join(LOCALSTACK_ROOT_FOLDER, '.venv')\nif not os.path.isdir(LOCALSTACK_VENV_FOLDER):\n # assuming this package lives here: <python>/lib/pythonX.X/site-packages/localstack/\n LOCALSTACK_VENV_FOLDER = os.path.realpath(os.path.join(LOCALSTACK_ROOT_FOLDER, '..', '..', '..'))\n\n# API Gateway path to indicate a user request sent to the gateway\nPATH_USER_REQUEST = '_user_request_'\n\n# name of LocalStack Docker image\nDOCKER_IMAGE_NAME = 'localstack/localstack'\n\n# environment variable name to tag local test runs\nENV_INTERNAL_TEST_RUN = 'LOCALSTACK_INTERNAL_TEST_RUN'\n\n# content types\nAPPLICATION_AMZ_JSON_1_0 = 'application/x-amz-json-1.0'\nAPPLICATION_AMZ_JSON_1_1 = 'application/x-amz-json-1.1'\nAPPLICATION_JSON = 'application/json'\n\n# Lambda defaults\nLAMBDA_TEST_ROLE = 'arn:aws:iam::%s:role/lambda-test-role' % TEST_AWS_ACCOUNT_ID\n\n# installation constants\nELASTICSEARCH_JAR_URL = 'https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-6.2.0.zip'\nDYNAMODB_JAR_URL = 'https://s3-us-west-2.amazonaws.com/dynamodb-local/dynamodb_local_latest.zip'\nELASTICMQ_JAR_URL = 'https://s3-eu-west-1.amazonaws.com/softwaremill-public/elasticmq-server-0.14.2.jar'\nSTS_JAR_URL = 'http://central.maven.org/maven2/com/amazonaws/aws-java-sdk-sts/1.11.14/aws-java-sdk-sts-1.11.14.jar'\n\n# API endpoint for analytics events\nAPI_ENDPOINT = 'https://api.localstack.cloud/v1'\n", "path": "localstack/constants.py"}], "after_files": [{"content": "import os\nimport localstack_client.config\n\n# LocalStack version\nVERSION = '0.8.10'\n\n# default AWS region\nif 'DEFAULT_REGION' not in os.environ:\n os.environ['DEFAULT_REGION'] = 'us-east-1'\nDEFAULT_REGION = os.environ['DEFAULT_REGION']\n\n# constant to represent the \"local\" region, i.e., local machine\nREGION_LOCAL = 'local'\n\n# dev environment\nENV_DEV = 'dev'\n\n# backend service ports, for services that are behind a proxy (counting down from 4566)\nDEFAULT_PORT_APIGATEWAY_BACKEND = 4566\nDEFAULT_PORT_KINESIS_BACKEND = 4565\nDEFAULT_PORT_DYNAMODB_BACKEND = 4564\nDEFAULT_PORT_S3_BACKEND = 4563\nDEFAULT_PORT_SNS_BACKEND = 4562\nDEFAULT_PORT_SQS_BACKEND = 4561\nDEFAULT_PORT_ELASTICSEARCH_BACKEND = 4560\nDEFAULT_PORT_CLOUDFORMATION_BACKEND = 4559\n\nDEFAULT_PORT_WEB_UI = 8080\n\nLOCALHOST = 'localhost'\n\n# version of the Maven dependency with Java utility code\nLOCALSTACK_MAVEN_VERSION = '0.1.15'\n\n# map of default service APIs and ports to be spun up (fetch map from localstack_client)\nDEFAULT_SERVICE_PORTS = localstack_client.config.get_service_ports()\n\n# host to bind to when starting the services\nBIND_HOST = '0.0.0.0'\n\n# AWS user account ID used for tests\nTEST_AWS_ACCOUNT_ID = '000000000000'\nos.environ['TEST_AWS_ACCOUNT_ID'] = TEST_AWS_ACCOUNT_ID\n\n# root code folder\nLOCALSTACK_ROOT_FOLDER = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))\n\n# virtualenv folder\nLOCALSTACK_VENV_FOLDER = os.path.join(LOCALSTACK_ROOT_FOLDER, '.venv')\nif not os.path.isdir(LOCALSTACK_VENV_FOLDER):\n # assuming this package lives here: <python>/lib/pythonX.X/site-packages/localstack/\n LOCALSTACK_VENV_FOLDER = os.path.realpath(os.path.join(LOCALSTACK_ROOT_FOLDER, '..', '..', '..'))\n\n# API Gateway path to indicate a user request sent to the gateway\nPATH_USER_REQUEST = '_user_request_'\n\n# name of LocalStack Docker image\nDOCKER_IMAGE_NAME = 'localstack/localstack'\n\n# environment variable name to tag local test runs\nENV_INTERNAL_TEST_RUN = 'LOCALSTACK_INTERNAL_TEST_RUN'\n\n# content types\nAPPLICATION_AMZ_JSON_1_0 = 'application/x-amz-json-1.0'\nAPPLICATION_AMZ_JSON_1_1 = 'application/x-amz-json-1.1'\nAPPLICATION_JSON = 'application/json'\n\n# Lambda defaults\nLAMBDA_TEST_ROLE = 'arn:aws:iam::%s:role/lambda-test-role' % TEST_AWS_ACCOUNT_ID\n\n# installation constants\nELASTICSEARCH_JAR_URL = 'https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-6.2.0.zip'\nDYNAMODB_JAR_URL = 'https://s3-us-west-2.amazonaws.com/dynamodb-local/dynamodb_local_latest.zip'\nELASTICMQ_JAR_URL = 'https://s3-eu-west-1.amazonaws.com/softwaremill-public/elasticmq-server-0.14.2.jar'\nSTS_JAR_URL = 'http://central.maven.org/maven2/com/amazonaws/aws-java-sdk-sts/1.11.14/aws-java-sdk-sts-1.11.14.jar'\n\n# API endpoint for analytics events\nAPI_ENDPOINT = 'https://api.localstack.cloud/v1'\n", "path": "localstack/constants.py"}]}
1,251
86
gh_patches_debug_13138
rasdani/github-patches
git_diff
ivy-llc__ivy-15454
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- cosh --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ivy/functional/frontends/paddle/tensor/math.py` Content: ``` 1 # global 2 import ivy 3 from ivy.func_wrapper import with_unsupported_dtypes 4 from ivy.functional.frontends.paddle.func_wrapper import ( 5 to_ivy_arrays_and_back, 6 ) 7 8 9 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle") 10 @to_ivy_arrays_and_back 11 def sin(x, name=None): 12 return ivy.sin(x) 13 14 15 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle") 16 @to_ivy_arrays_and_back 17 def cos(x, name=None): 18 return ivy.cos(x) 19 20 21 22 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle") 23 @to_ivy_arrays_and_back 24 def acos(x, name=None): 25 return ivy.acos(x) 26 27 28 29 @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle") 30 @to_ivy_arrays_and_back 31 def tanh(x, name=None): 32 return ivy.tanh(x) 33 34 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ivy/functional/frontends/paddle/tensor/math.py b/ivy/functional/frontends/paddle/tensor/math.py --- a/ivy/functional/frontends/paddle/tensor/math.py +++ b/ivy/functional/frontends/paddle/tensor/math.py @@ -18,16 +18,19 @@ return ivy.cos(x) - @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle") @to_ivy_arrays_and_back def acos(x, name=None): return ivy.acos(x) +@with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle") +@to_ivy_arrays_and_back +def cosh(x, name=None): + return ivy.cosh(x) + @with_unsupported_dtypes({"2.4.2 and below": ("float16", "bfloat16")}, "paddle") @to_ivy_arrays_and_back def tanh(x, name=None): return ivy.tanh(x) -
{"golden_diff": "diff --git a/ivy/functional/frontends/paddle/tensor/math.py b/ivy/functional/frontends/paddle/tensor/math.py\n--- a/ivy/functional/frontends/paddle/tensor/math.py\n+++ b/ivy/functional/frontends/paddle/tensor/math.py\n@@ -18,16 +18,19 @@\n return ivy.cos(x)\n \n \n-\n @with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n @to_ivy_arrays_and_back\n def acos(x, name=None):\n return ivy.acos(x)\n \n \n+@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n+@to_ivy_arrays_and_back\n+def cosh(x, name=None):\n+ return ivy.cosh(x)\n+\n \n @with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n @to_ivy_arrays_and_back\n def tanh(x, name=None):\n return ivy.tanh(x)\n-\n", "issue": "cosh\n\n", "before_files": [{"content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_unsupported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sin(x, name=None):\n return ivy.sin(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cos(x, name=None):\n return ivy.cos(x)\n\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef acos(x, name=None):\n return ivy.acos(x)\n\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef tanh(x, name=None):\n return ivy.tanh(x)\n\n", "path": "ivy/functional/frontends/paddle/tensor/math.py"}], "after_files": [{"content": "# global\nimport ivy\nfrom ivy.func_wrapper import with_unsupported_dtypes\nfrom ivy.functional.frontends.paddle.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef sin(x, name=None):\n return ivy.sin(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cos(x, name=None):\n return ivy.cos(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef acos(x, name=None):\n return ivy.acos(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef cosh(x, name=None):\n return ivy.cosh(x)\n\n\n@with_unsupported_dtypes({\"2.4.2 and below\": (\"float16\", \"bfloat16\")}, \"paddle\")\n@to_ivy_arrays_and_back\ndef tanh(x, name=None):\n return ivy.tanh(x)\n", "path": "ivy/functional/frontends/paddle/tensor/math.py"}]}
593
255
gh_patches_debug_569
rasdani/github-patches
git_diff
pex-tool__pex-945
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Release 2.1.8 On the docket: + [x] Cache pip.pex. #937 + [x] Ensure the interpreter path is a file #938 + [x] Support an unzip toggle for PEXes. #939 + [x] Better support unzip mode PEXes. #941 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pex/version.py` Content: ``` 1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 4 __version__ = '2.1.7' 5 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pex/version.py b/pex/version.py --- a/pex/version.py +++ b/pex/version.py @@ -1,4 +1,4 @@ # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). -__version__ = '2.1.7' +__version__ = '2.1.8'
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = '2.1.7'\n+__version__ = '2.1.8'\n", "issue": "Release 2.1.8\nOn the docket:\r\n+ [x] Cache pip.pex. #937\r\n+ [x] Ensure the interpreter path is a file #938\r\n+ [x] Support an unzip toggle for PEXes. #939\r\n+ [x] Better support unzip mode PEXes. #941\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '2.1.7'\n", "path": "pex/version.py"}], "after_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = '2.1.8'\n", "path": "pex/version.py"}]}
384
94
gh_patches_debug_8594
rasdani/github-patches
git_diff
mozilla__bugbug-1094
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Support multiclass classifiers in bug_classifier script The bug_classifier script at scripts/bug_classifier.py currently assumes the model is a binary model. We need to make it work for multiclass models too (e.g. defectenhancementtask). In particular, https://github.com/mozilla/bugbug/blob/65bf1b4604ca55a67490d27adc99c6441bad38c8/scripts/bug_classifier.py#L75-L78 needs to be changed. To test your changes, simply run `python3 -m scripts.bug_classifier defectenhancementtask` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `scripts/bug_classifier.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 import argparse 4 import os 5 from logging import INFO, basicConfig, getLogger 6 7 import numpy as np 8 import requests 9 10 from bugbug import bugzilla 11 from bugbug.models import get_model_class 12 from bugbug.utils import download_check_etag, zstd_decompress 13 14 MODELS_WITH_TYPE = ("component",) 15 16 basicConfig(level=INFO) 17 logger = getLogger(__name__) 18 19 20 def classify_bugs(model_name, classifier, bug_id): 21 if classifier != "default": 22 assert ( 23 model_name in MODELS_WITH_TYPE 24 ), f"{classifier} is not a valid classifier type for {model_name}" 25 26 model_file_name = f"{model_name}{classifier}model" 27 model_name = f"{model_name}_{classifier}" 28 else: 29 model_file_name = f"{model_name}model" 30 31 if not os.path.exists(model_file_name): 32 logger.info(f"{model_file_name} does not exist. Downloading the model....") 33 try: 34 download_check_etag( 35 f"https://community-tc.services.mozilla.com/api/index/v1/task/project.relman.bugbug.train_{model_name}.latest/artifacts/public/{model_file_name}.zst", 36 f"{model_file_name}.zst", 37 ) 38 except requests.HTTPError: 39 logger.error( 40 f"A pre-trained model is not available, you will need to train it yourself using the trainer script" 41 ) 42 raise SystemExit(1) 43 44 zstd_decompress(model_file_name) 45 assert os.path.exists(model_file_name), "Decompressed file doesn't exist" 46 47 model_class = get_model_class(model_name) 48 model = model_class.load(model_file_name) 49 50 if bug_id: 51 bugs = bugzilla.get(bug_id).values() 52 assert bugs, f"A bug with a bug id of {bug_id} was not found" 53 else: 54 bugs = bugzilla.get_bugs() 55 56 for bug in bugs: 57 print( 58 f'https://bugzilla.mozilla.org/show_bug.cgi?id={bug["id"]} - {bug["summary"]} ' 59 ) 60 61 if model.calculate_importance: 62 probas, importance = model.classify( 63 bug, probabilities=True, importances=True 64 ) 65 66 model.print_feature_importances( 67 importance["importances"], class_probabilities=probas 68 ) 69 else: 70 probas = model.classify(bug, probabilities=True, importances=False) 71 72 if np.argmax(probas) == 1: 73 print(f"Positive! {probas}") 74 else: 75 print(f"Negative! {probas}") 76 input() 77 78 79 def main(): 80 description = "Perform evaluation on bugs using the specified model" 81 parser = argparse.ArgumentParser(description=description) 82 83 parser.add_argument("model", help="Which model to use for evaluation") 84 parser.add_argument( 85 "--classifier", 86 help="Type of the classifier. Only used for component classification.", 87 choices=["default", "nn"], 88 default="default", 89 ) 90 parser.add_argument("--bug-id", help="Classify the given bug id") 91 92 args = parser.parse_args() 93 94 classify_bugs(args.model, args.classifier, args.bug_id) 95 96 97 if __name__ == "__main__": 98 main() 99 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scripts/bug_classifier.py b/scripts/bug_classifier.py --- a/scripts/bug_classifier.py +++ b/scripts/bug_classifier.py @@ -69,10 +69,13 @@ else: probas = model.classify(bug, probabilities=True, importances=False) - if np.argmax(probas) == 1: - print(f"Positive! {probas}") + probability = probas[0] + pred_index = np.argmax(probability) + if len(probability) > 2: + pred_class = model.le.inverse_transform([pred_index])[0] else: - print(f"Negative! {probas}") + pred_class = "Positive" if pred_index == 1 else "Negative" + print(f"{pred_class} {probability}") input()
{"golden_diff": "diff --git a/scripts/bug_classifier.py b/scripts/bug_classifier.py\n--- a/scripts/bug_classifier.py\n+++ b/scripts/bug_classifier.py\n@@ -69,10 +69,13 @@\n else:\n probas = model.classify(bug, probabilities=True, importances=False)\n \n- if np.argmax(probas) == 1:\n- print(f\"Positive! {probas}\")\n+ probability = probas[0]\n+ pred_index = np.argmax(probability)\n+ if len(probability) > 2:\n+ pred_class = model.le.inverse_transform([pred_index])[0]\n else:\n- print(f\"Negative! {probas}\")\n+ pred_class = \"Positive\" if pred_index == 1 else \"Negative\"\n+ print(f\"{pred_class} {probability}\")\n input()\n", "issue": "Support multiclass classifiers in bug_classifier script\nThe bug_classifier script at scripts/bug_classifier.py currently assumes the model is a binary model. We need to make it work for multiclass models too (e.g. defectenhancementtask).\r\n\r\nIn particular, https://github.com/mozilla/bugbug/blob/65bf1b4604ca55a67490d27adc99c6441bad38c8/scripts/bug_classifier.py#L75-L78 needs to be changed.\r\n\r\nTo test your changes, simply run `python3 -m scripts.bug_classifier defectenhancementtask`\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport argparse\nimport os\nfrom logging import INFO, basicConfig, getLogger\n\nimport numpy as np\nimport requests\n\nfrom bugbug import bugzilla\nfrom bugbug.models import get_model_class\nfrom bugbug.utils import download_check_etag, zstd_decompress\n\nMODELS_WITH_TYPE = (\"component\",)\n\nbasicConfig(level=INFO)\nlogger = getLogger(__name__)\n\n\ndef classify_bugs(model_name, classifier, bug_id):\n if classifier != \"default\":\n assert (\n model_name in MODELS_WITH_TYPE\n ), f\"{classifier} is not a valid classifier type for {model_name}\"\n\n model_file_name = f\"{model_name}{classifier}model\"\n model_name = f\"{model_name}_{classifier}\"\n else:\n model_file_name = f\"{model_name}model\"\n\n if not os.path.exists(model_file_name):\n logger.info(f\"{model_file_name} does not exist. Downloading the model....\")\n try:\n download_check_etag(\n f\"https://community-tc.services.mozilla.com/api/index/v1/task/project.relman.bugbug.train_{model_name}.latest/artifacts/public/{model_file_name}.zst\",\n f\"{model_file_name}.zst\",\n )\n except requests.HTTPError:\n logger.error(\n f\"A pre-trained model is not available, you will need to train it yourself using the trainer script\"\n )\n raise SystemExit(1)\n\n zstd_decompress(model_file_name)\n assert os.path.exists(model_file_name), \"Decompressed file doesn't exist\"\n\n model_class = get_model_class(model_name)\n model = model_class.load(model_file_name)\n\n if bug_id:\n bugs = bugzilla.get(bug_id).values()\n assert bugs, f\"A bug with a bug id of {bug_id} was not found\"\n else:\n bugs = bugzilla.get_bugs()\n\n for bug in bugs:\n print(\n f'https://bugzilla.mozilla.org/show_bug.cgi?id={bug[\"id\"]} - {bug[\"summary\"]} '\n )\n\n if model.calculate_importance:\n probas, importance = model.classify(\n bug, probabilities=True, importances=True\n )\n\n model.print_feature_importances(\n importance[\"importances\"], class_probabilities=probas\n )\n else:\n probas = model.classify(bug, probabilities=True, importances=False)\n\n if np.argmax(probas) == 1:\n print(f\"Positive! {probas}\")\n else:\n print(f\"Negative! {probas}\")\n input()\n\n\ndef main():\n description = \"Perform evaluation on bugs using the specified model\"\n parser = argparse.ArgumentParser(description=description)\n\n parser.add_argument(\"model\", help=\"Which model to use for evaluation\")\n parser.add_argument(\n \"--classifier\",\n help=\"Type of the classifier. Only used for component classification.\",\n choices=[\"default\", \"nn\"],\n default=\"default\",\n )\n parser.add_argument(\"--bug-id\", help=\"Classify the given bug id\")\n\n args = parser.parse_args()\n\n classify_bugs(args.model, args.classifier, args.bug_id)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "scripts/bug_classifier.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport argparse\nimport os\nfrom logging import INFO, basicConfig, getLogger\n\nimport numpy as np\nimport requests\n\nfrom bugbug import bugzilla\nfrom bugbug.models import get_model_class\nfrom bugbug.utils import download_check_etag, zstd_decompress\n\nMODELS_WITH_TYPE = (\"component\",)\n\nbasicConfig(level=INFO)\nlogger = getLogger(__name__)\n\n\ndef classify_bugs(model_name, classifier, bug_id):\n if classifier != \"default\":\n assert (\n model_name in MODELS_WITH_TYPE\n ), f\"{classifier} is not a valid classifier type for {model_name}\"\n\n model_file_name = f\"{model_name}{classifier}model\"\n model_name = f\"{model_name}_{classifier}\"\n else:\n model_file_name = f\"{model_name}model\"\n\n if not os.path.exists(model_file_name):\n logger.info(f\"{model_file_name} does not exist. Downloading the model....\")\n try:\n download_check_etag(\n f\"https://community-tc.services.mozilla.com/api/index/v1/task/project.relman.bugbug.train_{model_name}.latest/artifacts/public/{model_file_name}.zst\",\n f\"{model_file_name}.zst\",\n )\n except requests.HTTPError:\n logger.error(\n f\"A pre-trained model is not available, you will need to train it yourself using the trainer script\"\n )\n raise SystemExit(1)\n\n zstd_decompress(model_file_name)\n assert os.path.exists(model_file_name), \"Decompressed file doesn't exist\"\n\n model_class = get_model_class(model_name)\n model = model_class.load(model_file_name)\n\n if bug_id:\n bugs = bugzilla.get(bug_id).values()\n assert bugs, f\"A bug with a bug id of {bug_id} was not found\"\n else:\n bugs = bugzilla.get_bugs()\n\n for bug in bugs:\n print(\n f'https://bugzilla.mozilla.org/show_bug.cgi?id={bug[\"id\"]} - {bug[\"summary\"]} '\n )\n\n if model.calculate_importance:\n probas, importance = model.classify(\n bug, probabilities=True, importances=True\n )\n\n model.print_feature_importances(\n importance[\"importances\"], class_probabilities=probas\n )\n else:\n probas = model.classify(bug, probabilities=True, importances=False)\n\n probability = probas[0]\n pred_index = np.argmax(probability)\n if len(probability) > 2:\n pred_class = model.le.inverse_transform([pred_index])[0]\n else:\n pred_class = \"Positive\" if pred_index == 1 else \"Negative\"\n print(f\"{pred_class} {probability}\")\n input()\n\n\ndef main():\n description = \"Perform evaluation on bugs using the specified model\"\n parser = argparse.ArgumentParser(description=description)\n\n parser.add_argument(\"model\", help=\"Which model to use for evaluation\")\n parser.add_argument(\n \"--classifier\",\n help=\"Type of the classifier. Only used for component classification.\",\n choices=[\"default\", \"nn\"],\n default=\"default\",\n )\n parser.add_argument(\"--bug-id\", help=\"Classify the given bug id\")\n\n args = parser.parse_args()\n\n classify_bugs(args.model, args.classifier, args.bug_id)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "scripts/bug_classifier.py"}]}
1,276
181
gh_patches_debug_19749
rasdani/github-patches
git_diff
openstates__openstates-scrapers-2162
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- OH: `get_session_list` cannot "see" current session Ohio's `get_session_list` appears to have the current session _manually appended_. Not to mention, it hasn't been updated in a year. This should be fixed, to automatically "guess" the current session instead. https://github.com/openstates/openstates/blob/master/openstates/oh/__init__.py#L91-L92 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `openstates/oh/__init__.py` Content: ``` 1 from pupa.scrape import Jurisdiction, Organization 2 from openstates.utils import url_xpath 3 4 from .people import OHLegislatorScraper 5 # from .events import OHEventScraper 6 from .bills import OHBillScraper 7 8 9 class Ohio(Jurisdiction): 10 division_id = "ocd-division/country:us/state:oh" 11 classification = "government" 12 name = "Ohio" 13 url = "http://www.legislature.state.oh.us/" 14 scrapers = { 15 'people': OHLegislatorScraper, 16 # 'events': OHEventScraper, 17 'bills': OHBillScraper, 18 } 19 legislative_sessions = [ 20 { 21 "_scraped_name": "128", 22 "identifier": "128", 23 "name": "128th Legislature (2009-2010)" 24 }, 25 { 26 "_scraped_name": "129", 27 "identifier": "129", 28 "name": "129th Legislature (2011-2012)", 29 "start_date": "2011-01-03" 30 }, 31 { 32 "_scraped_name": "130", 33 "identifier": "130", 34 "name": "130th Legislature (2013-2014)" 35 }, 36 { 37 "_scraped_name": "131", 38 "identifier": "131", 39 "name": "131st Legislature (2015-2016)" 40 }, 41 { 42 "_scraped_name": "132", 43 "identifier": "132", 44 "name": "132st Legislature (2017-2018)", 45 "start_date": "2017-01-02", 46 "end_date": "2017-12-31" 47 } 48 ] 49 ignored_scraped_sessions = [ 50 "127", 51 "126", 52 "125", 53 "124", 54 "123", 55 "122" 56 ] 57 58 def get_organizations(self): 59 legislature_name = "Ohio General Assembly" 60 lower_chamber_name = "House" 61 lower_seats = 99 62 lower_title = "Representative" 63 upper_chamber_name = "Senate" 64 upper_seats = 33 65 upper_title = "Senator" 66 67 legislature = Organization(name=legislature_name, 68 classification="legislature") 69 upper = Organization(upper_chamber_name, classification='upper', 70 parent_id=legislature._id) 71 lower = Organization(lower_chamber_name, classification='lower', 72 parent_id=legislature._id) 73 74 for n in range(1, upper_seats+1): 75 upper.add_post( 76 label=str(n), role=upper_title, 77 division_id='{}/sldu:{}'.format(self.division_id, n)) 78 for n in range(1, lower_seats+1): 79 lower.add_post( 80 label=str(n), role=lower_title, 81 division_id='{}/sldl:{}'.format(self.division_id, n)) 82 83 yield legislature 84 yield upper 85 yield lower 86 87 def get_session_list(self): 88 sessions = url_xpath('http://archives.legislature.state.oh.us', 89 '//form[@action="bill_search.cfm"]//input[@type="radio"' 90 ' and @name="SESSION"]/@value') 91 # Archive does not include current session 92 sessions.append('131') 93 return sessions 94 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/openstates/oh/__init__.py b/openstates/oh/__init__.py --- a/openstates/oh/__init__.py +++ b/openstates/oh/__init__.py @@ -46,14 +46,7 @@ "end_date": "2017-12-31" } ] - ignored_scraped_sessions = [ - "127", - "126", - "125", - "124", - "123", - "122" - ] + ignored_scraped_sessions = [] def get_organizations(self): legislature_name = "Ohio General Assembly" @@ -85,9 +78,7 @@ yield lower def get_session_list(self): - sessions = url_xpath('http://archives.legislature.state.oh.us', - '//form[@action="bill_search.cfm"]//input[@type="radio"' - ' and @name="SESSION"]/@value') + sessions = url_xpath('https://www.legislature.ohio.gov/legislation/search-legislation', + '//div[@class="selectedValues"]/ul/span/li/text()') # Archive does not include current session - sessions.append('131') return sessions
{"golden_diff": "diff --git a/openstates/oh/__init__.py b/openstates/oh/__init__.py\n--- a/openstates/oh/__init__.py\n+++ b/openstates/oh/__init__.py\n@@ -46,14 +46,7 @@\n \"end_date\": \"2017-12-31\"\n }\n ]\n- ignored_scraped_sessions = [\n- \"127\",\n- \"126\",\n- \"125\",\n- \"124\",\n- \"123\",\n- \"122\"\n- ]\n+ ignored_scraped_sessions = []\n \n def get_organizations(self):\n legislature_name = \"Ohio General Assembly\"\n@@ -85,9 +78,7 @@\n yield lower\n \n def get_session_list(self):\n- sessions = url_xpath('http://archives.legislature.state.oh.us',\n- '//form[@action=\"bill_search.cfm\"]//input[@type=\"radio\"'\n- ' and @name=\"SESSION\"]/@value')\n+ sessions = url_xpath('https://www.legislature.ohio.gov/legislation/search-legislation',\n+ '//div[@class=\"selectedValues\"]/ul/span/li/text()')\n # Archive does not include current session\n- sessions.append('131')\n return sessions\n", "issue": "OH: `get_session_list` cannot \"see\" current session\nOhio's `get_session_list` appears to have the current session _manually appended_. Not to mention, it hasn't been updated in a year. This should be fixed, to automatically \"guess\" the current session instead.\r\n\r\nhttps://github.com/openstates/openstates/blob/master/openstates/oh/__init__.py#L91-L92\n", "before_files": [{"content": "from pupa.scrape import Jurisdiction, Organization\nfrom openstates.utils import url_xpath\n\nfrom .people import OHLegislatorScraper\n# from .events import OHEventScraper\nfrom .bills import OHBillScraper\n\n\nclass Ohio(Jurisdiction):\n division_id = \"ocd-division/country:us/state:oh\"\n classification = \"government\"\n name = \"Ohio\"\n url = \"http://www.legislature.state.oh.us/\"\n scrapers = {\n 'people': OHLegislatorScraper,\n # 'events': OHEventScraper,\n 'bills': OHBillScraper,\n }\n legislative_sessions = [\n {\n \"_scraped_name\": \"128\",\n \"identifier\": \"128\",\n \"name\": \"128th Legislature (2009-2010)\"\n },\n {\n \"_scraped_name\": \"129\",\n \"identifier\": \"129\",\n \"name\": \"129th Legislature (2011-2012)\",\n \"start_date\": \"2011-01-03\"\n },\n {\n \"_scraped_name\": \"130\",\n \"identifier\": \"130\",\n \"name\": \"130th Legislature (2013-2014)\"\n },\n {\n \"_scraped_name\": \"131\",\n \"identifier\": \"131\",\n \"name\": \"131st Legislature (2015-2016)\"\n },\n {\n \"_scraped_name\": \"132\",\n \"identifier\": \"132\",\n \"name\": \"132st Legislature (2017-2018)\",\n \"start_date\": \"2017-01-02\",\n \"end_date\": \"2017-12-31\"\n }\n ]\n ignored_scraped_sessions = [\n \"127\",\n \"126\",\n \"125\",\n \"124\",\n \"123\",\n \"122\"\n ]\n\n def get_organizations(self):\n legislature_name = \"Ohio General Assembly\"\n lower_chamber_name = \"House\"\n lower_seats = 99\n lower_title = \"Representative\"\n upper_chamber_name = \"Senate\"\n upper_seats = 33\n upper_title = \"Senator\"\n\n legislature = Organization(name=legislature_name,\n classification=\"legislature\")\n upper = Organization(upper_chamber_name, classification='upper',\n parent_id=legislature._id)\n lower = Organization(lower_chamber_name, classification='lower',\n parent_id=legislature._id)\n\n for n in range(1, upper_seats+1):\n upper.add_post(\n label=str(n), role=upper_title,\n division_id='{}/sldu:{}'.format(self.division_id, n))\n for n in range(1, lower_seats+1):\n lower.add_post(\n label=str(n), role=lower_title,\n division_id='{}/sldl:{}'.format(self.division_id, n))\n\n yield legislature\n yield upper\n yield lower\n\n def get_session_list(self):\n sessions = url_xpath('http://archives.legislature.state.oh.us',\n '//form[@action=\"bill_search.cfm\"]//input[@type=\"radio\"'\n ' and @name=\"SESSION\"]/@value')\n # Archive does not include current session\n sessions.append('131')\n return sessions\n", "path": "openstates/oh/__init__.py"}], "after_files": [{"content": "from pupa.scrape import Jurisdiction, Organization\nfrom openstates.utils import url_xpath\n\nfrom .people import OHLegislatorScraper\n# from .events import OHEventScraper\nfrom .bills import OHBillScraper\n\n\nclass Ohio(Jurisdiction):\n division_id = \"ocd-division/country:us/state:oh\"\n classification = \"government\"\n name = \"Ohio\"\n url = \"http://www.legislature.state.oh.us/\"\n scrapers = {\n 'people': OHLegislatorScraper,\n # 'events': OHEventScraper,\n 'bills': OHBillScraper,\n }\n legislative_sessions = [\n {\n \"_scraped_name\": \"128\",\n \"identifier\": \"128\",\n \"name\": \"128th Legislature (2009-2010)\"\n },\n {\n \"_scraped_name\": \"129\",\n \"identifier\": \"129\",\n \"name\": \"129th Legislature (2011-2012)\",\n \"start_date\": \"2011-01-03\"\n },\n {\n \"_scraped_name\": \"130\",\n \"identifier\": \"130\",\n \"name\": \"130th Legislature (2013-2014)\"\n },\n {\n \"_scraped_name\": \"131\",\n \"identifier\": \"131\",\n \"name\": \"131st Legislature (2015-2016)\"\n },\n {\n \"_scraped_name\": \"132\",\n \"identifier\": \"132\",\n \"name\": \"132st Legislature (2017-2018)\",\n \"start_date\": \"2017-01-02\",\n \"end_date\": \"2017-12-31\"\n }\n ]\n ignored_scraped_sessions = []\n\n def get_organizations(self):\n legislature_name = \"Ohio General Assembly\"\n lower_chamber_name = \"House\"\n lower_seats = 99\n lower_title = \"Representative\"\n upper_chamber_name = \"Senate\"\n upper_seats = 33\n upper_title = \"Senator\"\n\n legislature = Organization(name=legislature_name,\n classification=\"legislature\")\n upper = Organization(upper_chamber_name, classification='upper',\n parent_id=legislature._id)\n lower = Organization(lower_chamber_name, classification='lower',\n parent_id=legislature._id)\n\n for n in range(1, upper_seats+1):\n upper.add_post(\n label=str(n), role=upper_title,\n division_id='{}/sldu:{}'.format(self.division_id, n))\n for n in range(1, lower_seats+1):\n lower.add_post(\n label=str(n), role=lower_title,\n division_id='{}/sldl:{}'.format(self.division_id, n))\n\n yield legislature\n yield upper\n yield lower\n\n def get_session_list(self):\n sessions = url_xpath('https://www.legislature.ohio.gov/legislation/search-legislation',\n '//div[@class=\"selectedValues\"]/ul/span/li/text()')\n # Archive does not include current session\n return sessions\n", "path": "openstates/oh/__init__.py"}]}
1,321
294
gh_patches_debug_13150
rasdani/github-patches
git_diff
comic__grand-challenge.org-1631
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Show domain name of the email address that was used to verify the account Google Scholar shows on profiles instead of the entire email address only the domain name. Something like "Verified email address at radboudumc.nl". Would be a good feature for grand challenge as well, this would make it possible to check if users used an email address from their institution to verify their account. A similar text to what Google Scholar shows could be displayed when hovering over the check mark icon, for example. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `app/grandchallenge/profiles/templatetags/profiles.py` Content: ``` 1 from typing import Union 2 3 from django import template 4 from django.contrib.auth import get_user_model 5 from django.contrib.auth.models import AbstractUser 6 from django.core.exceptions import ObjectDoesNotExist 7 from django.utils.html import format_html 8 from django.utils.safestring import mark_safe 9 10 from grandchallenge.subdomains.utils import reverse 11 12 register = template.Library() 13 14 15 @register.filter 16 def user_profile_link(user: Union[AbstractUser, None]) -> str: 17 verified = "" 18 19 if user: 20 username = user.username 21 profile_url = reverse( 22 "userena_profile_detail", kwargs={"username": user.username} 23 ) 24 mugshot = format_html( 25 ( 26 '<img class="mugshot" loading="lazy" src="{0}" ' 27 'alt="User Mugshot" ' 28 # Match the "fa-lg" class style 29 'style="height: 1.33em; vertical-align: -25%;"/>' 30 ), 31 user.user_profile.get_mugshot_url(), 32 ) 33 34 try: 35 if user.verification.is_verified: 36 verified = mark_safe( 37 '<i class="fas fa-user-check text-success" ' 38 'title="Verified User"></i>' 39 ) 40 except ObjectDoesNotExist: 41 # No verification request 42 pass 43 else: 44 username = "Unknown" 45 profile_url = "#" 46 mugshot = mark_safe('<i class="fas fa-user fa-lg"></i>') 47 48 return format_html( 49 '<span class="text-nowrap"><a href="{0}">{1}</a>&nbsp;<a href="{0}">{2}</a>&nbsp;{3}</span>', 50 profile_url, 51 mugshot, 52 username, 53 verified, 54 ) 55 56 57 @register.filter 58 def user_profile_link_username(username: str) -> str: 59 User = get_user_model() # noqa: N806 60 return user_profile_link(User.objects.get(username=username)) 61 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/app/grandchallenge/profiles/templatetags/profiles.py b/app/grandchallenge/profiles/templatetags/profiles.py --- a/app/grandchallenge/profiles/templatetags/profiles.py +++ b/app/grandchallenge/profiles/templatetags/profiles.py @@ -33,9 +33,15 @@ try: if user.verification.is_verified: - verified = mark_safe( + email = ( + user.verification.email + if user.verification.email + else user.email + ) + verified = format_html( '<i class="fas fa-user-check text-success" ' - 'title="Verified User"></i>' + 'title="Verified email address at {}"></i>', + email.split("@")[1], ) except ObjectDoesNotExist: # No verification request
{"golden_diff": "diff --git a/app/grandchallenge/profiles/templatetags/profiles.py b/app/grandchallenge/profiles/templatetags/profiles.py\n--- a/app/grandchallenge/profiles/templatetags/profiles.py\n+++ b/app/grandchallenge/profiles/templatetags/profiles.py\n@@ -33,9 +33,15 @@\n \n try:\n if user.verification.is_verified:\n- verified = mark_safe(\n+ email = (\n+ user.verification.email\n+ if user.verification.email\n+ else user.email\n+ )\n+ verified = format_html(\n '<i class=\"fas fa-user-check text-success\" '\n- 'title=\"Verified User\"></i>'\n+ 'title=\"Verified email address at {}\"></i>',\n+ email.split(\"@\")[1],\n )\n except ObjectDoesNotExist:\n # No verification request\n", "issue": "Show domain name of the email address that was used to verify the account\nGoogle Scholar shows on profiles instead of the entire email address only the domain name. Something like \"Verified email address at radboudumc.nl\". Would be a good feature for grand challenge as well, this would make it possible to check if users used an email address from their institution to verify their account. A similar text to what Google Scholar shows could be displayed when hovering over the check mark icon, for example.\n", "before_files": [{"content": "from typing import Union\n\nfrom django import template\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import AbstractUser\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.utils.html import format_html\nfrom django.utils.safestring import mark_safe\n\nfrom grandchallenge.subdomains.utils import reverse\n\nregister = template.Library()\n\n\[email protected]\ndef user_profile_link(user: Union[AbstractUser, None]) -> str:\n verified = \"\"\n\n if user:\n username = user.username\n profile_url = reverse(\n \"userena_profile_detail\", kwargs={\"username\": user.username}\n )\n mugshot = format_html(\n (\n '<img class=\"mugshot\" loading=\"lazy\" src=\"{0}\" '\n 'alt=\"User Mugshot\" '\n # Match the \"fa-lg\" class style\n 'style=\"height: 1.33em; vertical-align: -25%;\"/>'\n ),\n user.user_profile.get_mugshot_url(),\n )\n\n try:\n if user.verification.is_verified:\n verified = mark_safe(\n '<i class=\"fas fa-user-check text-success\" '\n 'title=\"Verified User\"></i>'\n )\n except ObjectDoesNotExist:\n # No verification request\n pass\n else:\n username = \"Unknown\"\n profile_url = \"#\"\n mugshot = mark_safe('<i class=\"fas fa-user fa-lg\"></i>')\n\n return format_html(\n '<span class=\"text-nowrap\"><a href=\"{0}\">{1}</a>&nbsp;<a href=\"{0}\">{2}</a>&nbsp;{3}</span>',\n profile_url,\n mugshot,\n username,\n verified,\n )\n\n\[email protected]\ndef user_profile_link_username(username: str) -> str:\n User = get_user_model() # noqa: N806\n return user_profile_link(User.objects.get(username=username))\n", "path": "app/grandchallenge/profiles/templatetags/profiles.py"}], "after_files": [{"content": "from typing import Union\n\nfrom django import template\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.auth.models import AbstractUser\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.utils.html import format_html\nfrom django.utils.safestring import mark_safe\n\nfrom grandchallenge.subdomains.utils import reverse\n\nregister = template.Library()\n\n\[email protected]\ndef user_profile_link(user: Union[AbstractUser, None]) -> str:\n verified = \"\"\n\n if user:\n username = user.username\n profile_url = reverse(\n \"userena_profile_detail\", kwargs={\"username\": user.username}\n )\n mugshot = format_html(\n (\n '<img class=\"mugshot\" loading=\"lazy\" src=\"{0}\" '\n 'alt=\"User Mugshot\" '\n # Match the \"fa-lg\" class style\n 'style=\"height: 1.33em; vertical-align: -25%;\"/>'\n ),\n user.user_profile.get_mugshot_url(),\n )\n\n try:\n if user.verification.is_verified:\n email = (\n user.verification.email\n if user.verification.email\n else user.email\n )\n verified = format_html(\n '<i class=\"fas fa-user-check text-success\" '\n 'title=\"Verified email address at {}\"></i>',\n email.split(\"@\")[1],\n )\n except ObjectDoesNotExist:\n # No verification request\n pass\n else:\n username = \"Unknown\"\n profile_url = \"#\"\n mugshot = mark_safe('<i class=\"fas fa-user fa-lg\"></i>')\n\n return format_html(\n '<span class=\"text-nowrap\"><a href=\"{0}\">{1}</a>&nbsp;<a href=\"{0}\">{2}</a>&nbsp;{3}</span>',\n profile_url,\n mugshot,\n username,\n verified,\n )\n\n\[email protected]\ndef user_profile_link_username(username: str) -> str:\n User = get_user_model() # noqa: N806\n return user_profile_link(User.objects.get(username=username))\n", "path": "app/grandchallenge/profiles/templatetags/profiles.py"}]}
889
194
gh_patches_debug_7871
rasdani/github-patches
git_diff
explosion__spaCy-1905
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Dummy command argument wasn't removed from auto-link after download Auto-linking of downloaded languages was broken in https://github.com/explosion/spaCy/commit/7f0ab145e95036a55af4802184a4b1c496557d0a. The dummy argument wasn't removed from the `link` call at https://github.com/explosion/spaCy/blob/master/spacy/cli/download.py#L44. I can make a PR for the fix unless it's easier for a maintainer to just fix this quickly. ## Your Environment * Operating System: Docker python:3.6 * Python Version Used: 3.6.2 * spaCy Version Used: Changes made since 2.0.5 * Environment Information: Docker python:3.6 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `spacy/cli/download.py` Content: ``` 1 # coding: utf8 2 from __future__ import unicode_literals 3 4 import plac 5 import requests 6 import os 7 import subprocess 8 import sys 9 10 from .link import link 11 from ..util import prints, get_package_path 12 from .. import about 13 14 15 @plac.annotations( 16 model=("model to download, shortcut or name)", "positional", None, str), 17 direct=("force direct download. Needs model name with version and won't " 18 "perform compatibility check", "flag", "d", bool)) 19 def download(model, direct=False): 20 """ 21 Download compatible model from default download path using pip. Model 22 can be shortcut, model name or, if --direct flag is set, full model name 23 with version. 24 """ 25 if direct: 26 dl = download_model('{m}/{m}.tar.gz'.format(m=model)) 27 else: 28 shortcuts = get_json(about.__shortcuts__, "available shortcuts") 29 model_name = shortcuts.get(model, model) 30 compatibility = get_compatibility() 31 version = get_version(model_name, compatibility) 32 dl = download_model('{m}-{v}/{m}-{v}.tar.gz'.format(m=model_name, 33 v=version)) 34 if dl != 0: 35 # if download subprocess doesn't return 0, exit with the respective 36 # exit code before doing anything else 37 sys.exit(dl) 38 try: 39 # Get package path here because link uses 40 # pip.get_installed_distributions() to check if model is a 41 # package, which fails if model was just installed via 42 # subprocess 43 package_path = get_package_path(model_name) 44 link(None, model_name, model, force=True, 45 model_path=package_path) 46 except: 47 # Dirty, but since spacy.download and the auto-linking is 48 # mostly a convenience wrapper, it's best to show a success 49 # message and loading instructions, even if linking fails. 50 prints( 51 "Creating a shortcut link for 'en' didn't work (maybe " 52 "you don't have admin permissions?), but you can still " 53 "load the model via its full package name:", 54 "nlp = spacy.load('%s')" % model_name, 55 title="Download successful but linking failed") 56 57 58 def get_json(url, desc): 59 r = requests.get(url) 60 if r.status_code != 200: 61 msg = ("Couldn't fetch %s. Please find a model for your spaCy " 62 "installation (v%s), and download it manually.") 63 prints(msg % (desc, about.__version__), about.__docs_models__, 64 title="Server error (%d)" % r.status_code, exits=1) 65 return r.json() 66 67 68 def get_compatibility(): 69 version = about.__version__ 70 version = version.rsplit('.dev', 1)[0] 71 comp_table = get_json(about.__compatibility__, "compatibility table") 72 comp = comp_table['spacy'] 73 if version not in comp: 74 prints("No compatible models found for v%s of spaCy." % version, 75 title="Compatibility error", exits=1) 76 return comp[version] 77 78 79 def get_version(model, comp): 80 model = model.rsplit('.dev', 1)[0] 81 if model not in comp: 82 version = about.__version__ 83 msg = "No compatible model found for '%s' (spaCy v%s)." 84 prints(msg % (model, version), title="Compatibility error", exits=1) 85 return comp[model][0] 86 87 88 def download_model(filename): 89 download_url = about.__download_url__ + '/' + filename 90 return subprocess.call( 91 [sys.executable, '-m', 'pip', 'install', '--no-cache-dir', '--no-deps', 92 download_url], env=os.environ.copy()) 93 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/spacy/cli/download.py b/spacy/cli/download.py --- a/spacy/cli/download.py +++ b/spacy/cli/download.py @@ -41,7 +41,7 @@ # package, which fails if model was just installed via # subprocess package_path = get_package_path(model_name) - link(None, model_name, model, force=True, + link(model_name, model, force=True, model_path=package_path) except: # Dirty, but since spacy.download and the auto-linking is
{"golden_diff": "diff --git a/spacy/cli/download.py b/spacy/cli/download.py\n--- a/spacy/cli/download.py\n+++ b/spacy/cli/download.py\n@@ -41,7 +41,7 @@\n # package, which fails if model was just installed via\n # subprocess\n package_path = get_package_path(model_name)\n- link(None, model_name, model, force=True,\n+ link(model_name, model, force=True,\n model_path=package_path)\n except:\n # Dirty, but since spacy.download and the auto-linking is\n", "issue": "Dummy command argument wasn't removed from auto-link after download\nAuto-linking of downloaded languages was broken in https://github.com/explosion/spaCy/commit/7f0ab145e95036a55af4802184a4b1c496557d0a. The dummy argument wasn't removed from the `link` call at https://github.com/explosion/spaCy/blob/master/spacy/cli/download.py#L44. I can make a PR for the fix unless it's easier for a maintainer to just fix this quickly.\r\n\r\n## Your Environment\r\n* Operating System: Docker python:3.6\r\n* Python Version Used: 3.6.2\r\n* spaCy Version Used: Changes made since 2.0.5\r\n* Environment Information: Docker python:3.6\r\n\n", "before_files": [{"content": "# coding: utf8\nfrom __future__ import unicode_literals\n\nimport plac\nimport requests\nimport os\nimport subprocess\nimport sys\n\nfrom .link import link\nfrom ..util import prints, get_package_path\nfrom .. import about\n\n\[email protected](\n model=(\"model to download, shortcut or name)\", \"positional\", None, str),\n direct=(\"force direct download. Needs model name with version and won't \"\n \"perform compatibility check\", \"flag\", \"d\", bool))\ndef download(model, direct=False):\n \"\"\"\n Download compatible model from default download path using pip. Model\n can be shortcut, model name or, if --direct flag is set, full model name\n with version.\n \"\"\"\n if direct:\n dl = download_model('{m}/{m}.tar.gz'.format(m=model))\n else:\n shortcuts = get_json(about.__shortcuts__, \"available shortcuts\")\n model_name = shortcuts.get(model, model)\n compatibility = get_compatibility()\n version = get_version(model_name, compatibility)\n dl = download_model('{m}-{v}/{m}-{v}.tar.gz'.format(m=model_name,\n v=version))\n if dl != 0:\n # if download subprocess doesn't return 0, exit with the respective\n # exit code before doing anything else\n sys.exit(dl)\n try:\n # Get package path here because link uses\n # pip.get_installed_distributions() to check if model is a\n # package, which fails if model was just installed via\n # subprocess\n package_path = get_package_path(model_name)\n link(None, model_name, model, force=True,\n model_path=package_path)\n except:\n # Dirty, but since spacy.download and the auto-linking is\n # mostly a convenience wrapper, it's best to show a success\n # message and loading instructions, even if linking fails.\n prints(\n \"Creating a shortcut link for 'en' didn't work (maybe \"\n \"you don't have admin permissions?), but you can still \"\n \"load the model via its full package name:\",\n \"nlp = spacy.load('%s')\" % model_name,\n title=\"Download successful but linking failed\")\n\n\ndef get_json(url, desc):\n r = requests.get(url)\n if r.status_code != 200:\n msg = (\"Couldn't fetch %s. Please find a model for your spaCy \"\n \"installation (v%s), and download it manually.\")\n prints(msg % (desc, about.__version__), about.__docs_models__,\n title=\"Server error (%d)\" % r.status_code, exits=1)\n return r.json()\n\n\ndef get_compatibility():\n version = about.__version__\n version = version.rsplit('.dev', 1)[0]\n comp_table = get_json(about.__compatibility__, \"compatibility table\")\n comp = comp_table['spacy']\n if version not in comp:\n prints(\"No compatible models found for v%s of spaCy.\" % version,\n title=\"Compatibility error\", exits=1)\n return comp[version]\n\n\ndef get_version(model, comp):\n model = model.rsplit('.dev', 1)[0]\n if model not in comp:\n version = about.__version__\n msg = \"No compatible model found for '%s' (spaCy v%s).\"\n prints(msg % (model, version), title=\"Compatibility error\", exits=1)\n return comp[model][0]\n\n\ndef download_model(filename):\n download_url = about.__download_url__ + '/' + filename\n return subprocess.call(\n [sys.executable, '-m', 'pip', 'install', '--no-cache-dir', '--no-deps',\n download_url], env=os.environ.copy())\n", "path": "spacy/cli/download.py"}], "after_files": [{"content": "# coding: utf8\nfrom __future__ import unicode_literals\n\nimport plac\nimport requests\nimport os\nimport subprocess\nimport sys\n\nfrom .link import link\nfrom ..util import prints, get_package_path\nfrom .. import about\n\n\[email protected](\n model=(\"model to download, shortcut or name)\", \"positional\", None, str),\n direct=(\"force direct download. Needs model name with version and won't \"\n \"perform compatibility check\", \"flag\", \"d\", bool))\ndef download(model, direct=False):\n \"\"\"\n Download compatible model from default download path using pip. Model\n can be shortcut, model name or, if --direct flag is set, full model name\n with version.\n \"\"\"\n if direct:\n dl = download_model('{m}/{m}.tar.gz'.format(m=model))\n else:\n shortcuts = get_json(about.__shortcuts__, \"available shortcuts\")\n model_name = shortcuts.get(model, model)\n compatibility = get_compatibility()\n version = get_version(model_name, compatibility)\n dl = download_model('{m}-{v}/{m}-{v}.tar.gz'.format(m=model_name,\n v=version))\n if dl != 0:\n # if download subprocess doesn't return 0, exit with the respective\n # exit code before doing anything else\n sys.exit(dl)\n try:\n # Get package path here because link uses\n # pip.get_installed_distributions() to check if model is a\n # package, which fails if model was just installed via\n # subprocess\n package_path = get_package_path(model_name)\n link(model_name, model, force=True,\n model_path=package_path)\n except:\n # Dirty, but since spacy.download and the auto-linking is\n # mostly a convenience wrapper, it's best to show a success\n # message and loading instructions, even if linking fails.\n prints(\n \"Creating a shortcut link for 'en' didn't work (maybe \"\n \"you don't have admin permissions?), but you can still \"\n \"load the model via its full package name:\",\n \"nlp = spacy.load('%s')\" % model_name,\n title=\"Download successful but linking failed\")\n\n\ndef get_json(url, desc):\n r = requests.get(url)\n if r.status_code != 200:\n msg = (\"Couldn't fetch %s. Please find a model for your spaCy \"\n \"installation (v%s), and download it manually.\")\n prints(msg % (desc, about.__version__), about.__docs_models__,\n title=\"Server error (%d)\" % r.status_code, exits=1)\n return r.json()\n\n\ndef get_compatibility():\n version = about.__version__\n version = version.rsplit('.dev', 1)[0]\n comp_table = get_json(about.__compatibility__, \"compatibility table\")\n comp = comp_table['spacy']\n if version not in comp:\n prints(\"No compatible models found for v%s of spaCy.\" % version,\n title=\"Compatibility error\", exits=1)\n return comp[version]\n\n\ndef get_version(model, comp):\n model = model.rsplit('.dev', 1)[0]\n if model not in comp:\n version = about.__version__\n msg = \"No compatible model found for '%s' (spaCy v%s).\"\n prints(msg % (model, version), title=\"Compatibility error\", exits=1)\n return comp[model][0]\n\n\ndef download_model(filename):\n download_url = about.__download_url__ + '/' + filename\n return subprocess.call(\n [sys.executable, '-m', 'pip', 'install', '--no-cache-dir', '--no-deps',\n download_url], env=os.environ.copy())\n", "path": "spacy/cli/download.py"}]}
1,424
121
gh_patches_debug_30536
rasdani/github-patches
git_diff
mdn__kuma-7776
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Search results presents the locale in the wrong case **Summary** E.g. https://developer.mozilla.org/en-US/search?q=mdn+contribute+ See screenshot: <img width="932" alt="Screen Shot 2021-02-04 at 10 59 44 AM" src="https://user-images.githubusercontent.com/26739/106919753-6cd80e80-66d8-11eb-97a1-d409dfc2e36b.png"> **Additional context** The Yari site-search is coming. But this might be easy to fix. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `kuma/search/views.py` Content: ``` 1 from urllib.parse import parse_qs, urlencode 2 3 from django.conf import settings 4 from django.shortcuts import render 5 from django.urls import reverse_lazy 6 from django.views.decorators.cache import never_cache 7 from django.views.decorators.http import require_GET 8 from django.views.generic import RedirectView 9 from ratelimit.decorators import ratelimit 10 11 from kuma.api.v1.search import search as search_api 12 from kuma.core.decorators import shared_cache_control 13 14 15 # Since the search endpoint accepts user input (via query parameters) and its 16 # response is compressed, use rate limiting to mitigate the BREACH attack 17 # (see http://breachattack.com/). It still needs to allow a user to click 18 # the filter switches (bug 1426968). 19 # Alternate: forbid gzip by setting Content-Encoding: identity 20 @never_cache 21 @require_GET 22 @ratelimit(key="user_or_ip", rate="25/m", block=True) 23 def search(request, *args, **kwargs): 24 """ 25 The search view. 26 27 --2021-- THIS VIEW IS A HACK! --2021-- 28 This Django view exists to server-side render the search results page. 29 But we're moving the search result page to Yari and that one will use a XHR 30 request (to /api/v1/search) from a skeleton page (aka. SPA). 31 But as a way to get to that, we need to transition from the old to the new. 32 So, this page uses the Django view in kuma.api.v1.search.search, which 33 returns a special `JsonResponse` instance whose data we can pluck out 34 to our needs for this old view. 35 Once we've fully moved to the Yari (static + XHR to v1 API) site-search, 36 we can comfortably delete this view. 37 """ 38 # The underlying v1 API supports searching without a 'q' but the web 39 # UI doesn't. For example, the search input field requires a value. 40 # So we match that here too. 41 if not request.GET.get("q", "").strip(): 42 status = 400 43 context = {"results": {}} 44 else: 45 # TODO consider, if the current locale is *not* en-US, that we force 46 # it to do a search in both locales. 47 # This might come in handy for people searching in a locale where 48 # there's very little results but they'd be happy to get the en-US ones. 49 response = search_api(request, locale=request.LANGUAGE_CODE, *args, **kwargs) 50 results = response.data 51 52 error = None 53 status = response.status_code 54 55 # Determine if there were validation errors 56 if status == 400: 57 error = "" 58 for key, messages in results["errors"].items(): 59 for message in messages: 60 error += f"{key}: {message['message']}\n" 61 else: 62 # Have to rearrange the 'results' in a way the old search expects it. 63 # ...which is as follows: 64 # - `count`: integer number of matched documents 65 # - `previous`: a URL or empty string 66 # - `next`: a URL or empty string 67 # - `query`: string 68 # - `start`: pagination number 69 # - `end`: pagination number 70 # - `documents`: 71 # - `title` 72 # - `locale` 73 # - `slug` 74 # - `excerpt`: string of safe HTML 75 next_url = "" 76 previous_url = "" 77 page = results["metadata"]["page"] 78 size = results["metadata"]["size"] 79 count = results["metadata"]["total"]["value"] 80 query_string = request.META.get("QUERY_STRING") 81 query_string_parsed = parse_qs(query_string) 82 if (page + 1) * size < count: 83 query_string_parsed["page"] = f"{page + 1}" 84 next_url = f"?{urlencode(query_string_parsed, True)}" 85 if page > 1: 86 if page == 2: 87 del query_string_parsed["page"] 88 else: 89 query_string_parsed["page"] = f"{page - 1}" 90 previous_url = f"?{urlencode(query_string_parsed, True)}" 91 92 results = { 93 "count": count, 94 "next": next_url, 95 "previous": previous_url, 96 "query": request.GET.get("q"), 97 "start": (page - 1) * size + 1, 98 "end": page * size, 99 "documents": [ 100 { 101 "title": x["title"], 102 "slug": x["slug"], 103 "locale": x["locale"], 104 "summary": x["summary"], 105 "excerpt": "<br>".join(x["highlight"].get("body", [])), 106 } 107 for x in results["documents"] 108 ], 109 } 110 111 context = {"results": {"results": None if error else results, "error": error}} 112 return render(request, "search/react.html", context, status=status) 113 114 115 class SearchRedirectView(RedirectView): 116 permanent = True 117 118 def get_redirect_url(self, *args, **kwargs): 119 query_string = self.request.META.get("QUERY_STRING") 120 url = reverse_lazy("api.v1.search") 121 qs = parse_qs(query_string) 122 # If you used `/en-Us/search.json` you can skip the `?locale=` 123 # because the default locale in `/api/v1/search` is `en-US`. 124 if self.request.LANGUAGE_CODE.lower() != settings.LANGUAGE_CODE.lower(): 125 qs["locale"] = self.request.LANGUAGE_CODE 126 if qs: 127 url += "?" + urlencode(qs, True) 128 return url 129 130 131 @shared_cache_control(s_maxage=60 * 60 * 24 * 7) 132 def plugin(request): 133 """Render an OpenSearch Plugin.""" 134 return render( 135 request, 136 "search/plugin.html", 137 {"locale": request.LANGUAGE_CODE}, 138 content_type="application/opensearchdescription+xml", 139 ) 140 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/kuma/search/views.py b/kuma/search/views.py --- a/kuma/search/views.py +++ b/kuma/search/views.py @@ -89,6 +89,21 @@ query_string_parsed["page"] = f"{page - 1}" previous_url = f"?{urlencode(query_string_parsed, True)}" + def package_document(document): + # The `results['documents']` will have the `locale` in lowercase. + # That's good for searching but now what we want to display. + # Here in Kuma we can't use the `mdn_url` so to get that right + # we have to manually correct that. + locale, slug = document["mdn_url"][1:].split("/docs/") + data = { + "title": document["title"], + "slug": slug, + "locale": locale, + "summary": document["summary"], + "excerpt": "<br>".join(document["highlight"].get("body", [])), + } + return data + results = { "count": count, "next": next_url, @@ -96,16 +111,7 @@ "query": request.GET.get("q"), "start": (page - 1) * size + 1, "end": page * size, - "documents": [ - { - "title": x["title"], - "slug": x["slug"], - "locale": x["locale"], - "summary": x["summary"], - "excerpt": "<br>".join(x["highlight"].get("body", [])), - } - for x in results["documents"] - ], + "documents": [package_document(x) for x in results["documents"]], } context = {"results": {"results": None if error else results, "error": error}}
{"golden_diff": "diff --git a/kuma/search/views.py b/kuma/search/views.py\n--- a/kuma/search/views.py\n+++ b/kuma/search/views.py\n@@ -89,6 +89,21 @@\n query_string_parsed[\"page\"] = f\"{page - 1}\"\n previous_url = f\"?{urlencode(query_string_parsed, True)}\"\n \n+ def package_document(document):\n+ # The `results['documents']` will have the `locale` in lowercase.\n+ # That's good for searching but now what we want to display.\n+ # Here in Kuma we can't use the `mdn_url` so to get that right\n+ # we have to manually correct that.\n+ locale, slug = document[\"mdn_url\"][1:].split(\"/docs/\")\n+ data = {\n+ \"title\": document[\"title\"],\n+ \"slug\": slug,\n+ \"locale\": locale,\n+ \"summary\": document[\"summary\"],\n+ \"excerpt\": \"<br>\".join(document[\"highlight\"].get(\"body\", [])),\n+ }\n+ return data\n+\n results = {\n \"count\": count,\n \"next\": next_url,\n@@ -96,16 +111,7 @@\n \"query\": request.GET.get(\"q\"),\n \"start\": (page - 1) * size + 1,\n \"end\": page * size,\n- \"documents\": [\n- {\n- \"title\": x[\"title\"],\n- \"slug\": x[\"slug\"],\n- \"locale\": x[\"locale\"],\n- \"summary\": x[\"summary\"],\n- \"excerpt\": \"<br>\".join(x[\"highlight\"].get(\"body\", [])),\n- }\n- for x in results[\"documents\"]\n- ],\n+ \"documents\": [package_document(x) for x in results[\"documents\"]],\n }\n \n context = {\"results\": {\"results\": None if error else results, \"error\": error}}\n", "issue": "Search results presents the locale in the wrong case\n**Summary**\r\nE.g. https://developer.mozilla.org/en-US/search?q=mdn+contribute+\r\nSee screenshot:\r\n<img width=\"932\" alt=\"Screen Shot 2021-02-04 at 10 59 44 AM\" src=\"https://user-images.githubusercontent.com/26739/106919753-6cd80e80-66d8-11eb-97a1-d409dfc2e36b.png\">\r\n\r\n\r\n**Additional context**\r\n\r\nThe Yari site-search is coming. But this might be easy to fix. \r\n\n", "before_files": [{"content": "from urllib.parse import parse_qs, urlencode\n\nfrom django.conf import settings\nfrom django.shortcuts import render\nfrom django.urls import reverse_lazy\nfrom django.views.decorators.cache import never_cache\nfrom django.views.decorators.http import require_GET\nfrom django.views.generic import RedirectView\nfrom ratelimit.decorators import ratelimit\n\nfrom kuma.api.v1.search import search as search_api\nfrom kuma.core.decorators import shared_cache_control\n\n\n# Since the search endpoint accepts user input (via query parameters) and its\n# response is compressed, use rate limiting to mitigate the BREACH attack\n# (see http://breachattack.com/). It still needs to allow a user to click\n# the filter switches (bug 1426968).\n# Alternate: forbid gzip by setting Content-Encoding: identity\n@never_cache\n@require_GET\n@ratelimit(key=\"user_or_ip\", rate=\"25/m\", block=True)\ndef search(request, *args, **kwargs):\n \"\"\"\n The search view.\n\n --2021-- THIS VIEW IS A HACK! --2021--\n This Django view exists to server-side render the search results page.\n But we're moving the search result page to Yari and that one will use a XHR\n request (to /api/v1/search) from a skeleton page (aka. SPA).\n But as a way to get to that, we need to transition from the old to the new.\n So, this page uses the Django view in kuma.api.v1.search.search, which\n returns a special `JsonResponse` instance whose data we can pluck out\n to our needs for this old view.\n Once we've fully moved to the Yari (static + XHR to v1 API) site-search,\n we can comfortably delete this view.\n \"\"\"\n # The underlying v1 API supports searching without a 'q' but the web\n # UI doesn't. For example, the search input field requires a value.\n # So we match that here too.\n if not request.GET.get(\"q\", \"\").strip():\n status = 400\n context = {\"results\": {}}\n else:\n # TODO consider, if the current locale is *not* en-US, that we force\n # it to do a search in both locales.\n # This might come in handy for people searching in a locale where\n # there's very little results but they'd be happy to get the en-US ones.\n response = search_api(request, locale=request.LANGUAGE_CODE, *args, **kwargs)\n results = response.data\n\n error = None\n status = response.status_code\n\n # Determine if there were validation errors\n if status == 400:\n error = \"\"\n for key, messages in results[\"errors\"].items():\n for message in messages:\n error += f\"{key}: {message['message']}\\n\"\n else:\n # Have to rearrange the 'results' in a way the old search expects it.\n # ...which is as follows:\n # - `count`: integer number of matched documents\n # - `previous`: a URL or empty string\n # - `next`: a URL or empty string\n # - `query`: string\n # - `start`: pagination number\n # - `end`: pagination number\n # - `documents`:\n # - `title`\n # - `locale`\n # - `slug`\n # - `excerpt`: string of safe HTML\n next_url = \"\"\n previous_url = \"\"\n page = results[\"metadata\"][\"page\"]\n size = results[\"metadata\"][\"size\"]\n count = results[\"metadata\"][\"total\"][\"value\"]\n query_string = request.META.get(\"QUERY_STRING\")\n query_string_parsed = parse_qs(query_string)\n if (page + 1) * size < count:\n query_string_parsed[\"page\"] = f\"{page + 1}\"\n next_url = f\"?{urlencode(query_string_parsed, True)}\"\n if page > 1:\n if page == 2:\n del query_string_parsed[\"page\"]\n else:\n query_string_parsed[\"page\"] = f\"{page - 1}\"\n previous_url = f\"?{urlencode(query_string_parsed, True)}\"\n\n results = {\n \"count\": count,\n \"next\": next_url,\n \"previous\": previous_url,\n \"query\": request.GET.get(\"q\"),\n \"start\": (page - 1) * size + 1,\n \"end\": page * size,\n \"documents\": [\n {\n \"title\": x[\"title\"],\n \"slug\": x[\"slug\"],\n \"locale\": x[\"locale\"],\n \"summary\": x[\"summary\"],\n \"excerpt\": \"<br>\".join(x[\"highlight\"].get(\"body\", [])),\n }\n for x in results[\"documents\"]\n ],\n }\n\n context = {\"results\": {\"results\": None if error else results, \"error\": error}}\n return render(request, \"search/react.html\", context, status=status)\n\n\nclass SearchRedirectView(RedirectView):\n permanent = True\n\n def get_redirect_url(self, *args, **kwargs):\n query_string = self.request.META.get(\"QUERY_STRING\")\n url = reverse_lazy(\"api.v1.search\")\n qs = parse_qs(query_string)\n # If you used `/en-Us/search.json` you can skip the `?locale=`\n # because the default locale in `/api/v1/search` is `en-US`.\n if self.request.LANGUAGE_CODE.lower() != settings.LANGUAGE_CODE.lower():\n qs[\"locale\"] = self.request.LANGUAGE_CODE\n if qs:\n url += \"?\" + urlencode(qs, True)\n return url\n\n\n@shared_cache_control(s_maxage=60 * 60 * 24 * 7)\ndef plugin(request):\n \"\"\"Render an OpenSearch Plugin.\"\"\"\n return render(\n request,\n \"search/plugin.html\",\n {\"locale\": request.LANGUAGE_CODE},\n content_type=\"application/opensearchdescription+xml\",\n )\n", "path": "kuma/search/views.py"}], "after_files": [{"content": "from urllib.parse import parse_qs, urlencode\n\nfrom django.conf import settings\nfrom django.shortcuts import render\nfrom django.urls import reverse_lazy\nfrom django.views.decorators.cache import never_cache\nfrom django.views.decorators.http import require_GET\nfrom django.views.generic import RedirectView\nfrom ratelimit.decorators import ratelimit\n\nfrom kuma.api.v1.search import search as search_api\nfrom kuma.core.decorators import shared_cache_control\n\n\n# Since the search endpoint accepts user input (via query parameters) and its\n# response is compressed, use rate limiting to mitigate the BREACH attack\n# (see http://breachattack.com/). It still needs to allow a user to click\n# the filter switches (bug 1426968).\n# Alternate: forbid gzip by setting Content-Encoding: identity\n@never_cache\n@require_GET\n@ratelimit(key=\"user_or_ip\", rate=\"25/m\", block=True)\ndef search(request, *args, **kwargs):\n \"\"\"\n The search view.\n\n --2021-- THIS VIEW IS A HACK! --2021--\n This Django view exists to server-side render the search results page.\n But we're moving the search result page to Yari and that one will use a XHR\n request (to /api/v1/search) from a skeleton page (aka. SPA).\n But as a way to get to that, we need to transition from the old to the new.\n So, this page uses the Django view in kuma.api.v1.search.search, which\n returns a special `JsonResponse` instance whose data we can pluck out\n to our needs for this old view.\n Once we've fully moved to the Yari (static + XHR to v1 API) site-search,\n we can comfortably delete this view.\n \"\"\"\n # The underlying v1 API supports searching without a 'q' but the web\n # UI doesn't. For example, the search input field requires a value.\n # So we match that here too.\n if not request.GET.get(\"q\", \"\").strip():\n status = 400\n context = {\"results\": {}}\n else:\n # TODO consider, if the current locale is *not* en-US, that we force\n # it to do a search in both locales.\n # This might come in handy for people searching in a locale where\n # there's very little results but they'd be happy to get the en-US ones.\n response = search_api(request, locale=request.LANGUAGE_CODE, *args, **kwargs)\n results = response.data\n\n error = None\n status = response.status_code\n\n # Determine if there were validation errors\n if status == 400:\n error = \"\"\n for key, messages in results[\"errors\"].items():\n for message in messages:\n error += f\"{key}: {message['message']}\\n\"\n else:\n # Have to rearrange the 'results' in a way the old search expects it.\n # ...which is as follows:\n # - `count`: integer number of matched documents\n # - `previous`: a URL or empty string\n # - `next`: a URL or empty string\n # - `query`: string\n # - `start`: pagination number\n # - `end`: pagination number\n # - `documents`:\n # - `title`\n # - `locale`\n # - `slug`\n # - `excerpt`: string of safe HTML\n next_url = \"\"\n previous_url = \"\"\n page = results[\"metadata\"][\"page\"]\n size = results[\"metadata\"][\"size\"]\n count = results[\"metadata\"][\"total\"][\"value\"]\n query_string = request.META.get(\"QUERY_STRING\")\n query_string_parsed = parse_qs(query_string)\n if (page + 1) * size < count:\n query_string_parsed[\"page\"] = f\"{page + 1}\"\n next_url = f\"?{urlencode(query_string_parsed, True)}\"\n if page > 1:\n if page == 2:\n del query_string_parsed[\"page\"]\n else:\n query_string_parsed[\"page\"] = f\"{page - 1}\"\n previous_url = f\"?{urlencode(query_string_parsed, True)}\"\n\n def package_document(document):\n # The `results['documents']` will have the `locale` in lowercase.\n # That's good for searching but now what we want to display.\n # Here in Kuma we can't use the `mdn_url` so to get that right\n # we have to manually correct that.\n locale, slug = document[\"mdn_url\"][1:].split(\"/docs/\")\n data = {\n \"title\": document[\"title\"],\n \"slug\": slug,\n \"locale\": locale,\n \"summary\": document[\"summary\"],\n \"excerpt\": \"<br>\".join(document[\"highlight\"].get(\"body\", [])),\n }\n return data\n\n results = {\n \"count\": count,\n \"next\": next_url,\n \"previous\": previous_url,\n \"query\": request.GET.get(\"q\"),\n \"start\": (page - 1) * size + 1,\n \"end\": page * size,\n \"documents\": [package_document(x) for x in results[\"documents\"]],\n }\n\n context = {\"results\": {\"results\": None if error else results, \"error\": error}}\n return render(request, \"search/react.html\", context, status=status)\n\n\nclass SearchRedirectView(RedirectView):\n permanent = True\n\n def get_redirect_url(self, *args, **kwargs):\n query_string = self.request.META.get(\"QUERY_STRING\")\n url = reverse_lazy(\"api.v1.search\")\n qs = parse_qs(query_string)\n # If you used `/en-Us/search.json` you can skip the `?locale=`\n # because the default locale in `/api/v1/search` is `en-US`.\n if self.request.LANGUAGE_CODE.lower() != settings.LANGUAGE_CODE.lower():\n qs[\"locale\"] = self.request.LANGUAGE_CODE\n if qs:\n url += \"?\" + urlencode(qs, True)\n return url\n\n\n@shared_cache_control(s_maxage=60 * 60 * 24 * 7)\ndef plugin(request):\n \"\"\"Render an OpenSearch Plugin.\"\"\"\n return render(\n request,\n \"search/plugin.html\",\n {\"locale\": request.LANGUAGE_CODE},\n content_type=\"application/opensearchdescription+xml\",\n )\n", "path": "kuma/search/views.py"}]}
2,028
417
gh_patches_debug_13436
rasdani/github-patches
git_diff
pyca__cryptography-3584
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Deprecate Whirlpool and RIPEMD --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/cryptography/hazmat/primitives/hashes.py` Content: ``` 1 # This file is dual licensed under the terms of the Apache License, Version 2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository 3 # for complete details. 4 5 from __future__ import absolute_import, division, print_function 6 7 import abc 8 9 import six 10 11 from cryptography import utils 12 from cryptography.exceptions import ( 13 AlreadyFinalized, UnsupportedAlgorithm, _Reasons 14 ) 15 from cryptography.hazmat.backends.interfaces import HashBackend 16 17 18 @six.add_metaclass(abc.ABCMeta) 19 class HashAlgorithm(object): 20 @abc.abstractproperty 21 def name(self): 22 """ 23 A string naming this algorithm (e.g. "sha256", "md5"). 24 """ 25 26 @abc.abstractproperty 27 def digest_size(self): 28 """ 29 The size of the resulting digest in bytes. 30 """ 31 32 @abc.abstractproperty 33 def block_size(self): 34 """ 35 The internal block size of the hash algorithm in bytes. 36 """ 37 38 39 @six.add_metaclass(abc.ABCMeta) 40 class HashContext(object): 41 @abc.abstractproperty 42 def algorithm(self): 43 """ 44 A HashAlgorithm that will be used by this context. 45 """ 46 47 @abc.abstractmethod 48 def update(self, data): 49 """ 50 Processes the provided bytes through the hash. 51 """ 52 53 @abc.abstractmethod 54 def finalize(self): 55 """ 56 Finalizes the hash context and returns the hash digest as bytes. 57 """ 58 59 @abc.abstractmethod 60 def copy(self): 61 """ 62 Return a HashContext that is a copy of the current context. 63 """ 64 65 66 @utils.register_interface(HashContext) 67 class Hash(object): 68 def __init__(self, algorithm, backend, ctx=None): 69 if not isinstance(backend, HashBackend): 70 raise UnsupportedAlgorithm( 71 "Backend object does not implement HashBackend.", 72 _Reasons.BACKEND_MISSING_INTERFACE 73 ) 74 75 if not isinstance(algorithm, HashAlgorithm): 76 raise TypeError("Expected instance of hashes.HashAlgorithm.") 77 self._algorithm = algorithm 78 79 self._backend = backend 80 81 if ctx is None: 82 self._ctx = self._backend.create_hash_ctx(self.algorithm) 83 else: 84 self._ctx = ctx 85 86 algorithm = utils.read_only_property("_algorithm") 87 88 def update(self, data): 89 if self._ctx is None: 90 raise AlreadyFinalized("Context was already finalized.") 91 if not isinstance(data, bytes): 92 raise TypeError("data must be bytes.") 93 self._ctx.update(data) 94 95 def copy(self): 96 if self._ctx is None: 97 raise AlreadyFinalized("Context was already finalized.") 98 return Hash( 99 self.algorithm, backend=self._backend, ctx=self._ctx.copy() 100 ) 101 102 def finalize(self): 103 if self._ctx is None: 104 raise AlreadyFinalized("Context was already finalized.") 105 digest = self._ctx.finalize() 106 self._ctx = None 107 return digest 108 109 110 @utils.register_interface(HashAlgorithm) 111 class SHA1(object): 112 name = "sha1" 113 digest_size = 20 114 block_size = 64 115 116 117 @utils.register_interface(HashAlgorithm) 118 class SHA224(object): 119 name = "sha224" 120 digest_size = 28 121 block_size = 64 122 123 124 @utils.register_interface(HashAlgorithm) 125 class SHA256(object): 126 name = "sha256" 127 digest_size = 32 128 block_size = 64 129 130 131 @utils.register_interface(HashAlgorithm) 132 class SHA384(object): 133 name = "sha384" 134 digest_size = 48 135 block_size = 128 136 137 138 @utils.register_interface(HashAlgorithm) 139 class SHA512(object): 140 name = "sha512" 141 digest_size = 64 142 block_size = 128 143 144 145 @utils.register_interface(HashAlgorithm) 146 class RIPEMD160(object): 147 name = "ripemd160" 148 digest_size = 20 149 block_size = 64 150 151 152 @utils.register_interface(HashAlgorithm) 153 class Whirlpool(object): 154 name = "whirlpool" 155 digest_size = 64 156 block_size = 64 157 158 159 @utils.register_interface(HashAlgorithm) 160 class MD5(object): 161 name = "md5" 162 digest_size = 16 163 block_size = 64 164 165 166 @utils.register_interface(HashAlgorithm) 167 class BLAKE2b(object): 168 name = "blake2b" 169 _max_digest_size = 64 170 _min_digest_size = 1 171 block_size = 128 172 173 def __init__(self, digest_size): 174 if ( 175 digest_size > self._max_digest_size or 176 digest_size < self._min_digest_size 177 ): 178 raise ValueError("Digest size must be {0}-{1}".format( 179 self._min_digest_size, self._max_digest_size) 180 ) 181 182 self._digest_size = digest_size 183 184 digest_size = utils.read_only_property("_digest_size") 185 186 187 @utils.register_interface(HashAlgorithm) 188 class BLAKE2s(object): 189 name = "blake2s" 190 block_size = 64 191 _max_digest_size = 32 192 _min_digest_size = 1 193 194 def __init__(self, digest_size): 195 if ( 196 digest_size > self._max_digest_size or 197 digest_size < self._min_digest_size 198 ): 199 raise ValueError("Digest size must be {0}-{1}".format( 200 self._min_digest_size, self._max_digest_size) 201 ) 202 203 self._digest_size = digest_size 204 205 digest_size = utils.read_only_property("_digest_size") 206 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/cryptography/hazmat/primitives/hashes.py b/src/cryptography/hazmat/primitives/hashes.py --- a/src/cryptography/hazmat/primitives/hashes.py +++ b/src/cryptography/hazmat/primitives/hashes.py @@ -149,6 +149,14 @@ block_size = 64 +RIPEMD160 = utils.deprecated( + RIPEMD160, + __name__, + "The RIPEMD160 hash was deprecated in version 1.9.", + utils.DeprecatedIn19 +) + + @utils.register_interface(HashAlgorithm) class Whirlpool(object): name = "whirlpool" @@ -156,6 +164,14 @@ block_size = 64 +Whirlpool = utils.deprecated( + Whirlpool, + __name__, + "The Whirlpool hash was deprecated in version 1.9.", + utils.DeprecatedIn19 +) + + @utils.register_interface(HashAlgorithm) class MD5(object): name = "md5"
{"golden_diff": "diff --git a/src/cryptography/hazmat/primitives/hashes.py b/src/cryptography/hazmat/primitives/hashes.py\n--- a/src/cryptography/hazmat/primitives/hashes.py\n+++ b/src/cryptography/hazmat/primitives/hashes.py\n@@ -149,6 +149,14 @@\n block_size = 64\n \n \n+RIPEMD160 = utils.deprecated(\n+ RIPEMD160,\n+ __name__,\n+ \"The RIPEMD160 hash was deprecated in version 1.9.\",\n+ utils.DeprecatedIn19\n+)\n+\n+\n @utils.register_interface(HashAlgorithm)\n class Whirlpool(object):\n name = \"whirlpool\"\n@@ -156,6 +164,14 @@\n block_size = 64\n \n \n+Whirlpool = utils.deprecated(\n+ Whirlpool,\n+ __name__,\n+ \"The Whirlpool hash was deprecated in version 1.9.\",\n+ utils.DeprecatedIn19\n+)\n+\n+\n @utils.register_interface(HashAlgorithm)\n class MD5(object):\n name = \"md5\"\n", "issue": "Deprecate Whirlpool and RIPEMD\n\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport abc\n\nimport six\n\nfrom cryptography import utils\nfrom cryptography.exceptions import (\n AlreadyFinalized, UnsupportedAlgorithm, _Reasons\n)\nfrom cryptography.hazmat.backends.interfaces import HashBackend\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass HashAlgorithm(object):\n @abc.abstractproperty\n def name(self):\n \"\"\"\n A string naming this algorithm (e.g. \"sha256\", \"md5\").\n \"\"\"\n\n @abc.abstractproperty\n def digest_size(self):\n \"\"\"\n The size of the resulting digest in bytes.\n \"\"\"\n\n @abc.abstractproperty\n def block_size(self):\n \"\"\"\n The internal block size of the hash algorithm in bytes.\n \"\"\"\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass HashContext(object):\n @abc.abstractproperty\n def algorithm(self):\n \"\"\"\n A HashAlgorithm that will be used by this context.\n \"\"\"\n\n @abc.abstractmethod\n def update(self, data):\n \"\"\"\n Processes the provided bytes through the hash.\n \"\"\"\n\n @abc.abstractmethod\n def finalize(self):\n \"\"\"\n Finalizes the hash context and returns the hash digest as bytes.\n \"\"\"\n\n @abc.abstractmethod\n def copy(self):\n \"\"\"\n Return a HashContext that is a copy of the current context.\n \"\"\"\n\n\[email protected]_interface(HashContext)\nclass Hash(object):\n def __init__(self, algorithm, backend, ctx=None):\n if not isinstance(backend, HashBackend):\n raise UnsupportedAlgorithm(\n \"Backend object does not implement HashBackend.\",\n _Reasons.BACKEND_MISSING_INTERFACE\n )\n\n if not isinstance(algorithm, HashAlgorithm):\n raise TypeError(\"Expected instance of hashes.HashAlgorithm.\")\n self._algorithm = algorithm\n\n self._backend = backend\n\n if ctx is None:\n self._ctx = self._backend.create_hash_ctx(self.algorithm)\n else:\n self._ctx = ctx\n\n algorithm = utils.read_only_property(\"_algorithm\")\n\n def update(self, data):\n if self._ctx is None:\n raise AlreadyFinalized(\"Context was already finalized.\")\n if not isinstance(data, bytes):\n raise TypeError(\"data must be bytes.\")\n self._ctx.update(data)\n\n def copy(self):\n if self._ctx is None:\n raise AlreadyFinalized(\"Context was already finalized.\")\n return Hash(\n self.algorithm, backend=self._backend, ctx=self._ctx.copy()\n )\n\n def finalize(self):\n if self._ctx is None:\n raise AlreadyFinalized(\"Context was already finalized.\")\n digest = self._ctx.finalize()\n self._ctx = None\n return digest\n\n\[email protected]_interface(HashAlgorithm)\nclass SHA1(object):\n name = \"sha1\"\n digest_size = 20\n block_size = 64\n\n\[email protected]_interface(HashAlgorithm)\nclass SHA224(object):\n name = \"sha224\"\n digest_size = 28\n block_size = 64\n\n\[email protected]_interface(HashAlgorithm)\nclass SHA256(object):\n name = \"sha256\"\n digest_size = 32\n block_size = 64\n\n\[email protected]_interface(HashAlgorithm)\nclass SHA384(object):\n name = \"sha384\"\n digest_size = 48\n block_size = 128\n\n\[email protected]_interface(HashAlgorithm)\nclass SHA512(object):\n name = \"sha512\"\n digest_size = 64\n block_size = 128\n\n\[email protected]_interface(HashAlgorithm)\nclass RIPEMD160(object):\n name = \"ripemd160\"\n digest_size = 20\n block_size = 64\n\n\[email protected]_interface(HashAlgorithm)\nclass Whirlpool(object):\n name = \"whirlpool\"\n digest_size = 64\n block_size = 64\n\n\[email protected]_interface(HashAlgorithm)\nclass MD5(object):\n name = \"md5\"\n digest_size = 16\n block_size = 64\n\n\[email protected]_interface(HashAlgorithm)\nclass BLAKE2b(object):\n name = \"blake2b\"\n _max_digest_size = 64\n _min_digest_size = 1\n block_size = 128\n\n def __init__(self, digest_size):\n if (\n digest_size > self._max_digest_size or\n digest_size < self._min_digest_size\n ):\n raise ValueError(\"Digest size must be {0}-{1}\".format(\n self._min_digest_size, self._max_digest_size)\n )\n\n self._digest_size = digest_size\n\n digest_size = utils.read_only_property(\"_digest_size\")\n\n\[email protected]_interface(HashAlgorithm)\nclass BLAKE2s(object):\n name = \"blake2s\"\n block_size = 64\n _max_digest_size = 32\n _min_digest_size = 1\n\n def __init__(self, digest_size):\n if (\n digest_size > self._max_digest_size or\n digest_size < self._min_digest_size\n ):\n raise ValueError(\"Digest size must be {0}-{1}\".format(\n self._min_digest_size, self._max_digest_size)\n )\n\n self._digest_size = digest_size\n\n digest_size = utils.read_only_property(\"_digest_size\")\n", "path": "src/cryptography/hazmat/primitives/hashes.py"}], "after_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport abc\n\nimport six\n\nfrom cryptography import utils\nfrom cryptography.exceptions import (\n AlreadyFinalized, UnsupportedAlgorithm, _Reasons\n)\nfrom cryptography.hazmat.backends.interfaces import HashBackend\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass HashAlgorithm(object):\n @abc.abstractproperty\n def name(self):\n \"\"\"\n A string naming this algorithm (e.g. \"sha256\", \"md5\").\n \"\"\"\n\n @abc.abstractproperty\n def digest_size(self):\n \"\"\"\n The size of the resulting digest in bytes.\n \"\"\"\n\n @abc.abstractproperty\n def block_size(self):\n \"\"\"\n The internal block size of the hash algorithm in bytes.\n \"\"\"\n\n\[email protected]_metaclass(abc.ABCMeta)\nclass HashContext(object):\n @abc.abstractproperty\n def algorithm(self):\n \"\"\"\n A HashAlgorithm that will be used by this context.\n \"\"\"\n\n @abc.abstractmethod\n def update(self, data):\n \"\"\"\n Processes the provided bytes through the hash.\n \"\"\"\n\n @abc.abstractmethod\n def finalize(self):\n \"\"\"\n Finalizes the hash context and returns the hash digest as bytes.\n \"\"\"\n\n @abc.abstractmethod\n def copy(self):\n \"\"\"\n Return a HashContext that is a copy of the current context.\n \"\"\"\n\n\[email protected]_interface(HashContext)\nclass Hash(object):\n def __init__(self, algorithm, backend, ctx=None):\n if not isinstance(backend, HashBackend):\n raise UnsupportedAlgorithm(\n \"Backend object does not implement HashBackend.\",\n _Reasons.BACKEND_MISSING_INTERFACE\n )\n\n if not isinstance(algorithm, HashAlgorithm):\n raise TypeError(\"Expected instance of hashes.HashAlgorithm.\")\n self._algorithm = algorithm\n\n self._backend = backend\n\n if ctx is None:\n self._ctx = self._backend.create_hash_ctx(self.algorithm)\n else:\n self._ctx = ctx\n\n algorithm = utils.read_only_property(\"_algorithm\")\n\n def update(self, data):\n if self._ctx is None:\n raise AlreadyFinalized(\"Context was already finalized.\")\n if not isinstance(data, bytes):\n raise TypeError(\"data must be bytes.\")\n self._ctx.update(data)\n\n def copy(self):\n if self._ctx is None:\n raise AlreadyFinalized(\"Context was already finalized.\")\n return Hash(\n self.algorithm, backend=self._backend, ctx=self._ctx.copy()\n )\n\n def finalize(self):\n if self._ctx is None:\n raise AlreadyFinalized(\"Context was already finalized.\")\n digest = self._ctx.finalize()\n self._ctx = None\n return digest\n\n\[email protected]_interface(HashAlgorithm)\nclass SHA1(object):\n name = \"sha1\"\n digest_size = 20\n block_size = 64\n\n\[email protected]_interface(HashAlgorithm)\nclass SHA224(object):\n name = \"sha224\"\n digest_size = 28\n block_size = 64\n\n\[email protected]_interface(HashAlgorithm)\nclass SHA256(object):\n name = \"sha256\"\n digest_size = 32\n block_size = 64\n\n\[email protected]_interface(HashAlgorithm)\nclass SHA384(object):\n name = \"sha384\"\n digest_size = 48\n block_size = 128\n\n\[email protected]_interface(HashAlgorithm)\nclass SHA512(object):\n name = \"sha512\"\n digest_size = 64\n block_size = 128\n\n\[email protected]_interface(HashAlgorithm)\nclass RIPEMD160(object):\n name = \"ripemd160\"\n digest_size = 20\n block_size = 64\n\n\nRIPEMD160 = utils.deprecated(\n RIPEMD160,\n __name__,\n \"The RIPEMD160 hash was deprecated in version 1.9.\",\n utils.DeprecatedIn19\n)\n\n\[email protected]_interface(HashAlgorithm)\nclass Whirlpool(object):\n name = \"whirlpool\"\n digest_size = 64\n block_size = 64\n\n\nWhirlpool = utils.deprecated(\n Whirlpool,\n __name__,\n \"The Whirlpool hash was deprecated in version 1.9.\",\n utils.DeprecatedIn19\n)\n\n\[email protected]_interface(HashAlgorithm)\nclass MD5(object):\n name = \"md5\"\n digest_size = 16\n block_size = 64\n\n\[email protected]_interface(HashAlgorithm)\nclass BLAKE2b(object):\n name = \"blake2b\"\n _max_digest_size = 64\n _min_digest_size = 1\n block_size = 128\n\n def __init__(self, digest_size):\n if (\n digest_size > self._max_digest_size or\n digest_size < self._min_digest_size\n ):\n raise ValueError(\"Digest size must be {0}-{1}\".format(\n self._min_digest_size, self._max_digest_size)\n )\n\n self._digest_size = digest_size\n\n digest_size = utils.read_only_property(\"_digest_size\")\n\n\[email protected]_interface(HashAlgorithm)\nclass BLAKE2s(object):\n name = \"blake2s\"\n block_size = 64\n _max_digest_size = 32\n _min_digest_size = 1\n\n def __init__(self, digest_size):\n if (\n digest_size > self._max_digest_size or\n digest_size < self._min_digest_size\n ):\n raise ValueError(\"Digest size must be {0}-{1}\".format(\n self._min_digest_size, self._max_digest_size)\n )\n\n self._digest_size = digest_size\n\n digest_size = utils.read_only_property(\"_digest_size\")\n", "path": "src/cryptography/hazmat/primitives/hashes.py"}]}
2,030
255
gh_patches_debug_7920
rasdani/github-patches
git_diff
mitmproxy__mitmproxy-2888
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Verbose mode not working in v3.0.0 RC2 ##### Steps to reproduce the problem: 1. Start mitmdump with -v or --verbose flag 2. No DEBUG level logs prints on standard output ##### Any other comments? What have you tried so far? In old stable version (2.0.2) the same steps produce desired output. ##### System information Mitmproxy: 3.0.0.dev1136 (commit 15f525e) Python: 3.6.3 OpenSSL: OpenSSL 1.1.0g 2 Nov 2017 Platform: Linux-3.16.0-5-amd64-x86_64-with-debian-8.9 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mitmproxy/tools/main.py` Content: ``` 1 from __future__ import print_function # this is here for the version check to work on Python 2. 2 3 import sys 4 5 if sys.version_info < (3, 5): 6 # This must be before any mitmproxy imports, as they already break! 7 # Keep all other imports below with the 'noqa' magic comment. 8 print("#" * 49, file=sys.stderr) 9 print("# mitmproxy only supports Python 3.5 and above! #", file=sys.stderr) 10 print("#" * 49, file=sys.stderr) 11 12 import argparse # noqa 13 import os # noqa 14 import signal # noqa 15 import typing # noqa 16 17 from mitmproxy.tools import cmdline # noqa 18 from mitmproxy import exceptions, master # noqa 19 from mitmproxy import options # noqa 20 from mitmproxy import optmanager # noqa 21 from mitmproxy import proxy # noqa 22 from mitmproxy import log # noqa 23 from mitmproxy.utils import debug, arg_check # noqa 24 25 26 def assert_utf8_env(): 27 spec = "" 28 for i in ["LANG", "LC_CTYPE", "LC_ALL"]: 29 spec += os.environ.get(i, "").lower() 30 if "utf" not in spec: 31 print( 32 "Error: mitmproxy requires a UTF console environment.", 33 file=sys.stderr 34 ) 35 print( 36 "Set your LANG environment variable to something like en_US.UTF-8", 37 file=sys.stderr 38 ) 39 sys.exit(1) 40 41 42 def process_options(parser, opts, args): 43 if args.version: 44 print(debug.dump_system_info()) 45 sys.exit(0) 46 if args.quiet or args.options or args.commands: 47 args.verbosity = 'error' 48 args.flow_detail = 0 49 50 adict = {} 51 for n in dir(args): 52 if n in opts: 53 adict[n] = getattr(args, n) 54 opts.merge(adict) 55 56 return proxy.config.ProxyConfig(opts) 57 58 59 def run( 60 master_cls: typing.Type[master.Master], 61 make_parser: typing.Callable[[options.Options], argparse.ArgumentParser], 62 arguments: typing.Sequence[str], 63 extra: typing.Callable[[typing.Any], dict] = None 64 ): # pragma: no cover 65 """ 66 extra: Extra argument processing callable which returns a dict of 67 options. 68 """ 69 debug.register_info_dumpers() 70 71 opts = options.Options() 72 master = master_cls(opts) 73 74 parser = make_parser(opts) 75 76 # To make migration from 2.x to 3.0 bearable. 77 if "-R" in sys.argv and sys.argv[sys.argv.index("-R") + 1].startswith("http"): 78 print("-R is used for specifying replacements.\n" 79 "To use mitmproxy in reverse mode please use --mode reverse:SPEC instead") 80 81 try: 82 args = parser.parse_args(arguments) 83 except SystemExit: 84 arg_check.check() 85 sys.exit(1) 86 try: 87 unknown = optmanager.load_paths(opts, args.conf) 88 pconf = process_options(parser, opts, args) 89 server = None # type: typing.Any 90 if pconf.options.server: 91 try: 92 server = proxy.server.ProxyServer(pconf) 93 except exceptions.ServerException as v: 94 print(str(v), file=sys.stderr) 95 sys.exit(1) 96 else: 97 server = proxy.server.DummyServer(pconf) 98 99 master.server = server 100 master.addons.trigger("configure", opts.keys()) 101 master.addons.trigger("tick") 102 remaining = opts.update_known(**unknown) 103 if remaining and log.log_tier(opts.verbosity) > 1: 104 print("Ignored options: %s" % remaining) 105 if args.options: 106 print(optmanager.dump_defaults(opts)) 107 sys.exit(0) 108 if args.commands: 109 master.commands.dump() 110 sys.exit(0) 111 opts.set(*args.setoptions) 112 if extra: 113 opts.update(**extra(args)) 114 115 def cleankill(*args, **kwargs): 116 master.shutdown() 117 118 signal.signal(signal.SIGTERM, cleankill) 119 master.run() 120 except exceptions.OptionsError as e: 121 print("%s: %s" % (sys.argv[0], e), file=sys.stderr) 122 sys.exit(1) 123 except (KeyboardInterrupt, RuntimeError) as e: 124 pass 125 return master 126 127 128 def mitmproxy(args=None): # pragma: no cover 129 if os.name == "nt": 130 print("Error: mitmproxy's console interface is not supported on Windows. " 131 "You can run mitmdump or mitmweb instead.", file=sys.stderr) 132 sys.exit(1) 133 134 assert_utf8_env() 135 136 from mitmproxy.tools import console 137 run(console.master.ConsoleMaster, cmdline.mitmproxy, args) 138 139 140 def mitmdump(args=None): # pragma: no cover 141 from mitmproxy.tools import dump 142 143 def extra(args): 144 if args.filter_args: 145 v = " ".join(args.filter_args) 146 return dict( 147 view_filter=v, 148 save_stream_filter=v, 149 ) 150 return {} 151 152 m = run(dump.DumpMaster, cmdline.mitmdump, args, extra) 153 if m and m.errorcheck.has_errored: 154 sys.exit(1) 155 156 157 def mitmweb(args=None): # pragma: no cover 158 from mitmproxy.tools import web 159 run(web.master.WebMaster, cmdline.mitmweb, args) 160 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mitmproxy/tools/main.py b/mitmproxy/tools/main.py --- a/mitmproxy/tools/main.py +++ b/mitmproxy/tools/main.py @@ -44,8 +44,13 @@ print(debug.dump_system_info()) sys.exit(0) if args.quiet or args.options or args.commands: + # also reduce log verbosity if --options or --commands is passed, + # we don't want log messages from regular startup then. args.verbosity = 'error' args.flow_detail = 0 + if args.verbose: + args.verbosity = 'debug' + args.flow_detail = 2 adict = {} for n in dir(args):
{"golden_diff": "diff --git a/mitmproxy/tools/main.py b/mitmproxy/tools/main.py\n--- a/mitmproxy/tools/main.py\n+++ b/mitmproxy/tools/main.py\n@@ -44,8 +44,13 @@\n print(debug.dump_system_info())\n sys.exit(0)\n if args.quiet or args.options or args.commands:\n+ # also reduce log verbosity if --options or --commands is passed,\n+ # we don't want log messages from regular startup then.\n args.verbosity = 'error'\n args.flow_detail = 0\n+ if args.verbose:\n+ args.verbosity = 'debug'\n+ args.flow_detail = 2\n \n adict = {}\n for n in dir(args):\n", "issue": "Verbose mode not working in v3.0.0 RC2\n##### Steps to reproduce the problem:\r\n\r\n1. Start mitmdump with -v or --verbose flag\r\n2. No DEBUG level logs prints on standard output\r\n\r\n##### Any other comments? What have you tried so far?\r\n\r\nIn old stable version (2.0.2) the same steps produce desired output.\r\n\r\n##### System information\r\n\r\nMitmproxy: 3.0.0.dev1136 (commit 15f525e)\r\nPython: 3.6.3\r\nOpenSSL: OpenSSL 1.1.0g 2 Nov 2017\r\nPlatform: Linux-3.16.0-5-amd64-x86_64-with-debian-8.9\n", "before_files": [{"content": "from __future__ import print_function # this is here for the version check to work on Python 2.\n\nimport sys\n\nif sys.version_info < (3, 5):\n # This must be before any mitmproxy imports, as they already break!\n # Keep all other imports below with the 'noqa' magic comment.\n print(\"#\" * 49, file=sys.stderr)\n print(\"# mitmproxy only supports Python 3.5 and above! #\", file=sys.stderr)\n print(\"#\" * 49, file=sys.stderr)\n\nimport argparse # noqa\nimport os # noqa\nimport signal # noqa\nimport typing # noqa\n\nfrom mitmproxy.tools import cmdline # noqa\nfrom mitmproxy import exceptions, master # noqa\nfrom mitmproxy import options # noqa\nfrom mitmproxy import optmanager # noqa\nfrom mitmproxy import proxy # noqa\nfrom mitmproxy import log # noqa\nfrom mitmproxy.utils import debug, arg_check # noqa\n\n\ndef assert_utf8_env():\n spec = \"\"\n for i in [\"LANG\", \"LC_CTYPE\", \"LC_ALL\"]:\n spec += os.environ.get(i, \"\").lower()\n if \"utf\" not in spec:\n print(\n \"Error: mitmproxy requires a UTF console environment.\",\n file=sys.stderr\n )\n print(\n \"Set your LANG environment variable to something like en_US.UTF-8\",\n file=sys.stderr\n )\n sys.exit(1)\n\n\ndef process_options(parser, opts, args):\n if args.version:\n print(debug.dump_system_info())\n sys.exit(0)\n if args.quiet or args.options or args.commands:\n args.verbosity = 'error'\n args.flow_detail = 0\n\n adict = {}\n for n in dir(args):\n if n in opts:\n adict[n] = getattr(args, n)\n opts.merge(adict)\n\n return proxy.config.ProxyConfig(opts)\n\n\ndef run(\n master_cls: typing.Type[master.Master],\n make_parser: typing.Callable[[options.Options], argparse.ArgumentParser],\n arguments: typing.Sequence[str],\n extra: typing.Callable[[typing.Any], dict] = None\n): # pragma: no cover\n \"\"\"\n extra: Extra argument processing callable which returns a dict of\n options.\n \"\"\"\n debug.register_info_dumpers()\n\n opts = options.Options()\n master = master_cls(opts)\n\n parser = make_parser(opts)\n\n # To make migration from 2.x to 3.0 bearable.\n if \"-R\" in sys.argv and sys.argv[sys.argv.index(\"-R\") + 1].startswith(\"http\"):\n print(\"-R is used for specifying replacements.\\n\"\n \"To use mitmproxy in reverse mode please use --mode reverse:SPEC instead\")\n\n try:\n args = parser.parse_args(arguments)\n except SystemExit:\n arg_check.check()\n sys.exit(1)\n try:\n unknown = optmanager.load_paths(opts, args.conf)\n pconf = process_options(parser, opts, args)\n server = None # type: typing.Any\n if pconf.options.server:\n try:\n server = proxy.server.ProxyServer(pconf)\n except exceptions.ServerException as v:\n print(str(v), file=sys.stderr)\n sys.exit(1)\n else:\n server = proxy.server.DummyServer(pconf)\n\n master.server = server\n master.addons.trigger(\"configure\", opts.keys())\n master.addons.trigger(\"tick\")\n remaining = opts.update_known(**unknown)\n if remaining and log.log_tier(opts.verbosity) > 1:\n print(\"Ignored options: %s\" % remaining)\n if args.options:\n print(optmanager.dump_defaults(opts))\n sys.exit(0)\n if args.commands:\n master.commands.dump()\n sys.exit(0)\n opts.set(*args.setoptions)\n if extra:\n opts.update(**extra(args))\n\n def cleankill(*args, **kwargs):\n master.shutdown()\n\n signal.signal(signal.SIGTERM, cleankill)\n master.run()\n except exceptions.OptionsError as e:\n print(\"%s: %s\" % (sys.argv[0], e), file=sys.stderr)\n sys.exit(1)\n except (KeyboardInterrupt, RuntimeError) as e:\n pass\n return master\n\n\ndef mitmproxy(args=None): # pragma: no cover\n if os.name == \"nt\":\n print(\"Error: mitmproxy's console interface is not supported on Windows. \"\n \"You can run mitmdump or mitmweb instead.\", file=sys.stderr)\n sys.exit(1)\n\n assert_utf8_env()\n\n from mitmproxy.tools import console\n run(console.master.ConsoleMaster, cmdline.mitmproxy, args)\n\n\ndef mitmdump(args=None): # pragma: no cover\n from mitmproxy.tools import dump\n\n def extra(args):\n if args.filter_args:\n v = \" \".join(args.filter_args)\n return dict(\n view_filter=v,\n save_stream_filter=v,\n )\n return {}\n\n m = run(dump.DumpMaster, cmdline.mitmdump, args, extra)\n if m and m.errorcheck.has_errored:\n sys.exit(1)\n\n\ndef mitmweb(args=None): # pragma: no cover\n from mitmproxy.tools import web\n run(web.master.WebMaster, cmdline.mitmweb, args)\n", "path": "mitmproxy/tools/main.py"}], "after_files": [{"content": "from __future__ import print_function # this is here for the version check to work on Python 2.\n\nimport sys\n\nif sys.version_info < (3, 5):\n # This must be before any mitmproxy imports, as they already break!\n # Keep all other imports below with the 'noqa' magic comment.\n print(\"#\" * 49, file=sys.stderr)\n print(\"# mitmproxy only supports Python 3.5 and above! #\", file=sys.stderr)\n print(\"#\" * 49, file=sys.stderr)\n\nimport argparse # noqa\nimport os # noqa\nimport signal # noqa\nimport typing # noqa\n\nfrom mitmproxy.tools import cmdline # noqa\nfrom mitmproxy import exceptions, master # noqa\nfrom mitmproxy import options # noqa\nfrom mitmproxy import optmanager # noqa\nfrom mitmproxy import proxy # noqa\nfrom mitmproxy import log # noqa\nfrom mitmproxy.utils import debug, arg_check # noqa\n\n\ndef assert_utf8_env():\n spec = \"\"\n for i in [\"LANG\", \"LC_CTYPE\", \"LC_ALL\"]:\n spec += os.environ.get(i, \"\").lower()\n if \"utf\" not in spec:\n print(\n \"Error: mitmproxy requires a UTF console environment.\",\n file=sys.stderr\n )\n print(\n \"Set your LANG environment variable to something like en_US.UTF-8\",\n file=sys.stderr\n )\n sys.exit(1)\n\n\ndef process_options(parser, opts, args):\n if args.version:\n print(debug.dump_system_info())\n sys.exit(0)\n if args.quiet or args.options or args.commands:\n # also reduce log verbosity if --options or --commands is passed,\n # we don't want log messages from regular startup then.\n args.verbosity = 'error'\n args.flow_detail = 0\n if args.verbose:\n args.verbosity = 'debug'\n args.flow_detail = 2\n\n adict = {}\n for n in dir(args):\n if n in opts:\n adict[n] = getattr(args, n)\n opts.merge(adict)\n\n return proxy.config.ProxyConfig(opts)\n\n\ndef run(\n master_cls: typing.Type[master.Master],\n make_parser: typing.Callable[[options.Options], argparse.ArgumentParser],\n arguments: typing.Sequence[str],\n extra: typing.Callable[[typing.Any], dict] = None\n): # pragma: no cover\n \"\"\"\n extra: Extra argument processing callable which returns a dict of\n options.\n \"\"\"\n debug.register_info_dumpers()\n\n opts = options.Options()\n master = master_cls(opts)\n\n parser = make_parser(opts)\n\n # To make migration from 2.x to 3.0 bearable.\n if \"-R\" in sys.argv and sys.argv[sys.argv.index(\"-R\") + 1].startswith(\"http\"):\n print(\"-R is used for specifying replacements.\\n\"\n \"To use mitmproxy in reverse mode please use --mode reverse:SPEC instead\")\n\n try:\n args = parser.parse_args(arguments)\n except SystemExit:\n arg_check.check()\n sys.exit(1)\n try:\n unknown = optmanager.load_paths(opts, args.conf)\n pconf = process_options(parser, opts, args)\n server = None # type: typing.Any\n if pconf.options.server:\n try:\n server = proxy.server.ProxyServer(pconf)\n except exceptions.ServerException as v:\n print(str(v), file=sys.stderr)\n sys.exit(1)\n else:\n server = proxy.server.DummyServer(pconf)\n\n master.server = server\n master.addons.trigger(\"configure\", opts.keys())\n master.addons.trigger(\"tick\")\n remaining = opts.update_known(**unknown)\n if remaining and log.log_tier(opts.verbosity) > 1:\n print(\"Ignored options: %s\" % remaining)\n if args.options:\n print(optmanager.dump_defaults(opts))\n sys.exit(0)\n if args.commands:\n master.commands.dump()\n sys.exit(0)\n opts.set(*args.setoptions)\n if extra:\n opts.update(**extra(args))\n\n def cleankill(*args, **kwargs):\n master.shutdown()\n\n signal.signal(signal.SIGTERM, cleankill)\n master.run()\n except exceptions.OptionsError as e:\n print(\"%s: %s\" % (sys.argv[0], e), file=sys.stderr)\n sys.exit(1)\n except (KeyboardInterrupt, RuntimeError) as e:\n pass\n return master\n\n\ndef mitmproxy(args=None): # pragma: no cover\n if os.name == \"nt\":\n print(\"Error: mitmproxy's console interface is not supported on Windows. \"\n \"You can run mitmdump or mitmweb instead.\", file=sys.stderr)\n sys.exit(1)\n\n assert_utf8_env()\n\n from mitmproxy.tools import console\n run(console.master.ConsoleMaster, cmdline.mitmproxy, args)\n\n\ndef mitmdump(args=None): # pragma: no cover\n from mitmproxy.tools import dump\n\n def extra(args):\n if args.filter_args:\n v = \" \".join(args.filter_args)\n return dict(\n view_filter=v,\n save_stream_filter=v,\n )\n return {}\n\n m = run(dump.DumpMaster, cmdline.mitmdump, args, extra)\n if m and m.errorcheck.has_errored:\n sys.exit(1)\n\n\ndef mitmweb(args=None): # pragma: no cover\n from mitmproxy.tools import web\n run(web.master.WebMaster, cmdline.mitmweb, args)\n", "path": "mitmproxy/tools/main.py"}]}
1,981
156
gh_patches_debug_33436
rasdani/github-patches
git_diff
DataDog__dd-trace-py-281
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- dd-trace-py messes with the root logger ``` has anyone else run into the issue of ddtrace-py turning on all default loggers for everything when running `patch_all()`? Weirdly, I can only replicate it within Docker, but it's definitely the `patch_all()` command that's causing it [8:50 PM] same thing happens if i run a single `patch()` on any library, it seems [8:52 PM] thinking it might be caused by this line: https://github.com/DataDog/dd-trace-py/blob/a50b5f5422716fae1c54b589cd448dc295b32757/ddtrace/monkey.py#L77 [8:53 PM] any reason that's `logging.info(...)` on the `logging` module instead of getting a logger and calling `.info()` on that? ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ddtrace/contrib/mysql/__init__.py` Content: ``` 1 """Instrumeent mysql to report MySQL queries. 2 3 ``patch_all`` will automatically patch your mysql connection to make it work. 4 :: 5 6 from ddtrace import Pin, patch 7 from mysql.connector import connect 8 9 # If not patched yet, you can patch mysql specifically 10 patch(mysql=True) 11 12 # This will report a span with the default settings 13 conn = connect(user="alice", password="b0b", host="localhost", port=3306, database="test") 14 cursor = conn.cursor() 15 cursor.execute("SELECT 6*7 AS the_answer;") 16 17 # Use a pin to specify metadata related to this connection 18 Pin.override(conn, service='mysql-users') 19 20 This package works for mysql.connector version 2.1.x. 21 Only the default full-Python integration works. The binary C connector, 22 provided by _mysql_connector, is not supported yet. 23 24 Help on mysql.connector can be found on: 25 https://dev.mysql.com/doc/connector-python/en/ 26 """ 27 import logging 28 29 from ..util import require_modules 30 31 # check `MySQL-python` availability 32 required_modules = ['_mysql'] 33 34 with require_modules(required_modules) as missing_modules: 35 if not missing_modules: 36 # MySQL-python package is not supported at the moment 37 logging.debug('failed to patch mysql-python: integration not available') 38 39 # check `mysql-connector` availability 40 required_modules = ['mysql.connector'] 41 42 with require_modules(required_modules) as missing_modules: 43 if not missing_modules: 44 from .patch import patch 45 from .tracers import get_traced_mysql_connection 46 47 __all__ = ['get_traced_mysql_connection', 'patch'] 48 ``` Path: `ddtrace/monkey.py` Content: ``` 1 """Patch librairies to be automatically instrumented. 2 3 It can monkey patch supported standard libraries and third party modules. 4 A patched module will automatically report spans with its default configuration. 5 6 A library instrumentation can be configured (for instance, to report as another service) 7 using Pin. For that, check its documentation. 8 """ 9 import logging 10 import importlib 11 import threading 12 13 14 # Default set of modules to automatically patch or not 15 PATCH_MODULES = { 16 'boto': False, 17 'botocore': False, 18 'bottle': False, 19 'cassandra': True, 20 'celery': True, 21 'elasticsearch': True, 22 'mongoengine': True, 23 'mysql': True, 24 'psycopg': True, 25 'pylibmc': True, 26 'pymongo': True, 27 'redis': True, 28 'requests': False, # Not ready yet 29 'sqlalchemy': False, # Prefer DB client instrumentation 30 'sqlite3': True, 31 'aiohttp': True, # requires asyncio (Python 3.4+) 32 33 # Ignore some web framework integrations that might be configured explicitly in code 34 "django": False, 35 "flask": False, 36 "falcon": False, 37 "pylons": False, 38 "pyramid": False, 39 } 40 41 _LOCK = threading.Lock() 42 _PATCHED_MODULES = set() 43 44 45 class PatchException(Exception): 46 """Wraps regular `Exception` class when patching modules""" 47 pass 48 49 50 def patch_all(**patch_modules): 51 """Automatically patches all available modules. 52 53 :param dict \**patch_modules: Override whether particular modules are patched or not. 54 55 >>> patch_all({'redis': False, 'cassandra': False}) 56 """ 57 modules = PATCH_MODULES.copy() 58 modules.update(patch_modules) 59 60 patch(raise_errors=False, **modules) 61 62 def patch(raise_errors=True, **patch_modules): 63 """Patch only a set of given modules. 64 65 :param bool raise_errors: Raise error if one patch fail. 66 :param dict \**patch_modules: List of modules to patch. 67 68 >>> patch({'psycopg': True, 'elasticsearch': True}) 69 """ 70 modules = [m for (m, should_patch) in patch_modules.items() if should_patch] 71 count = 0 72 for module in modules: 73 patched = patch_module(module, raise_errors=raise_errors) 74 if patched: 75 count += 1 76 77 logging.info("patched %s/%s modules (%s)", 78 count, 79 len(modules), 80 ",".join(get_patched_modules())) 81 82 83 def patch_module(module, raise_errors=True): 84 """Patch a single module 85 86 Returns if the module got properly patched. 87 """ 88 try: 89 return _patch_module(module) 90 except Exception as exc: 91 if raise_errors: 92 raise 93 logging.debug("failed to patch %s: %s", module, exc) 94 return False 95 96 def get_patched_modules(): 97 """Get the list of patched modules""" 98 with _LOCK: 99 return sorted(_PATCHED_MODULES) 100 101 def _patch_module(module): 102 """_patch_module will attempt to monkey patch the module. 103 104 Returns if the module got patched. 105 Can also raise errors if it fails. 106 """ 107 path = 'ddtrace.contrib.%s' % module 108 with _LOCK: 109 if module in _PATCHED_MODULES: 110 logging.debug("already patched: %s", path) 111 return False 112 113 try: 114 imported_module = importlib.import_module(path) 115 imported_module.patch() 116 except ImportError: 117 # if the import fails, the integration is not available 118 raise PatchException('integration not available') 119 except AttributeError: 120 # if patch() is not available in the module, it means 121 # that the library is not installed in the environment 122 raise PatchException('module not installed') 123 124 _PATCHED_MODULES.add(module) 125 return True 126 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ddtrace/contrib/mysql/__init__.py b/ddtrace/contrib/mysql/__init__.py --- a/ddtrace/contrib/mysql/__init__.py +++ b/ddtrace/contrib/mysql/__init__.py @@ -28,13 +28,16 @@ from ..util import require_modules + +log = logging.getLogger(__name__) + # check `MySQL-python` availability required_modules = ['_mysql'] with require_modules(required_modules) as missing_modules: if not missing_modules: # MySQL-python package is not supported at the moment - logging.debug('failed to patch mysql-python: integration not available') + log.debug('failed to patch mysql-python: integration not available') # check `mysql-connector` availability required_modules = ['mysql.connector'] diff --git a/ddtrace/monkey.py b/ddtrace/monkey.py --- a/ddtrace/monkey.py +++ b/ddtrace/monkey.py @@ -11,6 +11,8 @@ import threading +log = logging.getLogger(__name__) + # Default set of modules to automatically patch or not PATCH_MODULES = { 'boto': False, @@ -74,7 +76,7 @@ if patched: count += 1 - logging.info("patched %s/%s modules (%s)", + log.info("patched %s/%s modules (%s)", count, len(modules), ",".join(get_patched_modules())) @@ -90,7 +92,7 @@ except Exception as exc: if raise_errors: raise - logging.debug("failed to patch %s: %s", module, exc) + log.debug("failed to patch %s: %s", module, exc) return False def get_patched_modules(): @@ -107,7 +109,7 @@ path = 'ddtrace.contrib.%s' % module with _LOCK: if module in _PATCHED_MODULES: - logging.debug("already patched: %s", path) + log.debug("already patched: %s", path) return False try:
{"golden_diff": "diff --git a/ddtrace/contrib/mysql/__init__.py b/ddtrace/contrib/mysql/__init__.py\n--- a/ddtrace/contrib/mysql/__init__.py\n+++ b/ddtrace/contrib/mysql/__init__.py\n@@ -28,13 +28,16 @@\n \n from ..util import require_modules\n \n+\n+log = logging.getLogger(__name__)\n+\n # check `MySQL-python` availability\n required_modules = ['_mysql']\n \n with require_modules(required_modules) as missing_modules:\n if not missing_modules:\n # MySQL-python package is not supported at the moment\n- logging.debug('failed to patch mysql-python: integration not available')\n+ log.debug('failed to patch mysql-python: integration not available')\n \n # check `mysql-connector` availability\n required_modules = ['mysql.connector']\ndiff --git a/ddtrace/monkey.py b/ddtrace/monkey.py\n--- a/ddtrace/monkey.py\n+++ b/ddtrace/monkey.py\n@@ -11,6 +11,8 @@\n import threading\n \n \n+log = logging.getLogger(__name__)\n+\n # Default set of modules to automatically patch or not\n PATCH_MODULES = {\n 'boto': False,\n@@ -74,7 +76,7 @@\n if patched:\n count += 1\n \n- logging.info(\"patched %s/%s modules (%s)\",\n+ log.info(\"patched %s/%s modules (%s)\",\n count,\n len(modules),\n \",\".join(get_patched_modules()))\n@@ -90,7 +92,7 @@\n except Exception as exc:\n if raise_errors:\n raise\n- logging.debug(\"failed to patch %s: %s\", module, exc)\n+ log.debug(\"failed to patch %s: %s\", module, exc)\n return False\n \n def get_patched_modules():\n@@ -107,7 +109,7 @@\n path = 'ddtrace.contrib.%s' % module\n with _LOCK:\n if module in _PATCHED_MODULES:\n- logging.debug(\"already patched: %s\", path)\n+ log.debug(\"already patched: %s\", path)\n return False\n \n try:\n", "issue": "dd-trace-py messes with the root logger\n```\r\nhas anyone else run into the issue of ddtrace-py turning on all default loggers for everything when running `patch_all()`? Weirdly, I can only replicate it within Docker, but it's definitely the `patch_all()` command that's causing it\r\n\r\n[8:50 PM] \r\nsame thing happens if i run a single `patch()` on any library, it seems\r\n\r\n[8:52 PM] \r\nthinking it might be caused by this line: https://github.com/DataDog/dd-trace-py/blob/a50b5f5422716fae1c54b589cd448dc295b32757/ddtrace/monkey.py#L77\r\n\r\n[8:53 PM] \r\nany reason that's `logging.info(...)` on the `logging` module instead of getting a logger and calling `.info()` on that?\r\n```\n", "before_files": [{"content": "\"\"\"Instrumeent mysql to report MySQL queries.\n\n``patch_all`` will automatically patch your mysql connection to make it work.\n::\n\n from ddtrace import Pin, patch\n from mysql.connector import connect\n\n # If not patched yet, you can patch mysql specifically\n patch(mysql=True)\n\n # This will report a span with the default settings\n conn = connect(user=\"alice\", password=\"b0b\", host=\"localhost\", port=3306, database=\"test\")\n cursor = conn.cursor()\n cursor.execute(\"SELECT 6*7 AS the_answer;\")\n\n # Use a pin to specify metadata related to this connection\n Pin.override(conn, service='mysql-users')\n\nThis package works for mysql.connector version 2.1.x.\nOnly the default full-Python integration works. The binary C connector,\nprovided by _mysql_connector, is not supported yet.\n\nHelp on mysql.connector can be found on:\nhttps://dev.mysql.com/doc/connector-python/en/\n\"\"\"\nimport logging\n\nfrom ..util import require_modules\n\n# check `MySQL-python` availability\nrequired_modules = ['_mysql']\n\nwith require_modules(required_modules) as missing_modules:\n if not missing_modules:\n # MySQL-python package is not supported at the moment\n logging.debug('failed to patch mysql-python: integration not available')\n\n# check `mysql-connector` availability\nrequired_modules = ['mysql.connector']\n\nwith require_modules(required_modules) as missing_modules:\n if not missing_modules:\n from .patch import patch\n from .tracers import get_traced_mysql_connection\n\n __all__ = ['get_traced_mysql_connection', 'patch']\n", "path": "ddtrace/contrib/mysql/__init__.py"}, {"content": "\"\"\"Patch librairies to be automatically instrumented.\n\nIt can monkey patch supported standard libraries and third party modules.\nA patched module will automatically report spans with its default configuration.\n\nA library instrumentation can be configured (for instance, to report as another service)\nusing Pin. For that, check its documentation.\n\"\"\"\nimport logging\nimport importlib\nimport threading\n\n\n# Default set of modules to automatically patch or not\nPATCH_MODULES = {\n 'boto': False,\n 'botocore': False,\n 'bottle': False,\n 'cassandra': True,\n 'celery': True,\n 'elasticsearch': True,\n 'mongoengine': True,\n 'mysql': True,\n 'psycopg': True,\n 'pylibmc': True,\n 'pymongo': True,\n 'redis': True,\n 'requests': False, # Not ready yet\n 'sqlalchemy': False, # Prefer DB client instrumentation\n 'sqlite3': True,\n 'aiohttp': True, # requires asyncio (Python 3.4+)\n\n # Ignore some web framework integrations that might be configured explicitly in code\n \"django\": False,\n \"flask\": False,\n \"falcon\": False,\n \"pylons\": False,\n \"pyramid\": False,\n}\n\n_LOCK = threading.Lock()\n_PATCHED_MODULES = set()\n\n\nclass PatchException(Exception):\n \"\"\"Wraps regular `Exception` class when patching modules\"\"\"\n pass\n\n\ndef patch_all(**patch_modules):\n \"\"\"Automatically patches all available modules.\n\n :param dict \\**patch_modules: Override whether particular modules are patched or not.\n\n >>> patch_all({'redis': False, 'cassandra': False})\n \"\"\"\n modules = PATCH_MODULES.copy()\n modules.update(patch_modules)\n\n patch(raise_errors=False, **modules)\n\ndef patch(raise_errors=True, **patch_modules):\n \"\"\"Patch only a set of given modules.\n\n :param bool raise_errors: Raise error if one patch fail.\n :param dict \\**patch_modules: List of modules to patch.\n\n >>> patch({'psycopg': True, 'elasticsearch': True})\n \"\"\"\n modules = [m for (m, should_patch) in patch_modules.items() if should_patch]\n count = 0\n for module in modules:\n patched = patch_module(module, raise_errors=raise_errors)\n if patched:\n count += 1\n\n logging.info(\"patched %s/%s modules (%s)\",\n count,\n len(modules),\n \",\".join(get_patched_modules()))\n\n\ndef patch_module(module, raise_errors=True):\n \"\"\"Patch a single module\n\n Returns if the module got properly patched.\n \"\"\"\n try:\n return _patch_module(module)\n except Exception as exc:\n if raise_errors:\n raise\n logging.debug(\"failed to patch %s: %s\", module, exc)\n return False\n\ndef get_patched_modules():\n \"\"\"Get the list of patched modules\"\"\"\n with _LOCK:\n return sorted(_PATCHED_MODULES)\n\ndef _patch_module(module):\n \"\"\"_patch_module will attempt to monkey patch the module.\n\n Returns if the module got patched.\n Can also raise errors if it fails.\n \"\"\"\n path = 'ddtrace.contrib.%s' % module\n with _LOCK:\n if module in _PATCHED_MODULES:\n logging.debug(\"already patched: %s\", path)\n return False\n\n try:\n imported_module = importlib.import_module(path)\n imported_module.patch()\n except ImportError:\n # if the import fails, the integration is not available\n raise PatchException('integration not available')\n except AttributeError:\n # if patch() is not available in the module, it means\n # that the library is not installed in the environment\n raise PatchException('module not installed')\n\n _PATCHED_MODULES.add(module)\n return True\n", "path": "ddtrace/monkey.py"}], "after_files": [{"content": "\"\"\"Instrumeent mysql to report MySQL queries.\n\n``patch_all`` will automatically patch your mysql connection to make it work.\n::\n\n from ddtrace import Pin, patch\n from mysql.connector import connect\n\n # If not patched yet, you can patch mysql specifically\n patch(mysql=True)\n\n # This will report a span with the default settings\n conn = connect(user=\"alice\", password=\"b0b\", host=\"localhost\", port=3306, database=\"test\")\n cursor = conn.cursor()\n cursor.execute(\"SELECT 6*7 AS the_answer;\")\n\n # Use a pin to specify metadata related to this connection\n Pin.override(conn, service='mysql-users')\n\nThis package works for mysql.connector version 2.1.x.\nOnly the default full-Python integration works. The binary C connector,\nprovided by _mysql_connector, is not supported yet.\n\nHelp on mysql.connector can be found on:\nhttps://dev.mysql.com/doc/connector-python/en/\n\"\"\"\nimport logging\n\nfrom ..util import require_modules\n\n\nlog = logging.getLogger(__name__)\n\n# check `MySQL-python` availability\nrequired_modules = ['_mysql']\n\nwith require_modules(required_modules) as missing_modules:\n if not missing_modules:\n # MySQL-python package is not supported at the moment\n log.debug('failed to patch mysql-python: integration not available')\n\n# check `mysql-connector` availability\nrequired_modules = ['mysql.connector']\n\nwith require_modules(required_modules) as missing_modules:\n if not missing_modules:\n from .patch import patch\n from .tracers import get_traced_mysql_connection\n\n __all__ = ['get_traced_mysql_connection', 'patch']\n", "path": "ddtrace/contrib/mysql/__init__.py"}, {"content": "\"\"\"Patch librairies to be automatically instrumented.\n\nIt can monkey patch supported standard libraries and third party modules.\nA patched module will automatically report spans with its default configuration.\n\nA library instrumentation can be configured (for instance, to report as another service)\nusing Pin. For that, check its documentation.\n\"\"\"\nimport logging\nimport importlib\nimport threading\n\n\nlog = logging.getLogger(__name__)\n\n# Default set of modules to automatically patch or not\nPATCH_MODULES = {\n 'boto': False,\n 'botocore': False,\n 'bottle': False,\n 'cassandra': True,\n 'celery': True,\n 'elasticsearch': True,\n 'mongoengine': True,\n 'mysql': True,\n 'psycopg': True,\n 'pylibmc': True,\n 'pymongo': True,\n 'redis': True,\n 'requests': False, # Not ready yet\n 'sqlalchemy': False, # Prefer DB client instrumentation\n 'sqlite3': True,\n 'aiohttp': True, # requires asyncio (Python 3.4+)\n\n # Ignore some web framework integrations that might be configured explicitly in code\n \"django\": False,\n \"flask\": False,\n \"falcon\": False,\n \"pylons\": False,\n \"pyramid\": False,\n}\n\n_LOCK = threading.Lock()\n_PATCHED_MODULES = set()\n\n\nclass PatchException(Exception):\n \"\"\"Wraps regular `Exception` class when patching modules\"\"\"\n pass\n\n\ndef patch_all(**patch_modules):\n \"\"\"Automatically patches all available modules.\n\n :param dict \\**patch_modules: Override whether particular modules are patched or not.\n\n >>> patch_all({'redis': False, 'cassandra': False})\n \"\"\"\n modules = PATCH_MODULES.copy()\n modules.update(patch_modules)\n\n patch(raise_errors=False, **modules)\n\ndef patch(raise_errors=True, **patch_modules):\n \"\"\"Patch only a set of given modules.\n\n :param bool raise_errors: Raise error if one patch fail.\n :param dict \\**patch_modules: List of modules to patch.\n\n >>> patch({'psycopg': True, 'elasticsearch': True})\n \"\"\"\n modules = [m for (m, should_patch) in patch_modules.items() if should_patch]\n count = 0\n for module in modules:\n patched = patch_module(module, raise_errors=raise_errors)\n if patched:\n count += 1\n\n log.info(\"patched %s/%s modules (%s)\",\n count,\n len(modules),\n \",\".join(get_patched_modules()))\n\n\ndef patch_module(module, raise_errors=True):\n \"\"\"Patch a single module\n\n Returns if the module got properly patched.\n \"\"\"\n try:\n return _patch_module(module)\n except Exception as exc:\n if raise_errors:\n raise\n log.debug(\"failed to patch %s: %s\", module, exc)\n return False\n\ndef get_patched_modules():\n \"\"\"Get the list of patched modules\"\"\"\n with _LOCK:\n return sorted(_PATCHED_MODULES)\n\ndef _patch_module(module):\n \"\"\"_patch_module will attempt to monkey patch the module.\n\n Returns if the module got patched.\n Can also raise errors if it fails.\n \"\"\"\n path = 'ddtrace.contrib.%s' % module\n with _LOCK:\n if module in _PATCHED_MODULES:\n log.debug(\"already patched: %s\", path)\n return False\n\n try:\n imported_module = importlib.import_module(path)\n imported_module.patch()\n except ImportError:\n # if the import fails, the integration is not available\n raise PatchException('integration not available')\n except AttributeError:\n # if patch() is not available in the module, it means\n # that the library is not installed in the environment\n raise PatchException('module not installed')\n\n _PATCHED_MODULES.add(module)\n return True\n", "path": "ddtrace/monkey.py"}]}
2,034
465
gh_patches_debug_433
rasdani/github-patches
git_diff
kornia__kornia-2476
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Next release? ## 🚀 Feature Hi, when will the next kornia release on conda or pypi be? ## Motivation Last conda release was in April, and new features have landed since then, but are unavailable in wheels. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `kornia/__init__.py` Content: ``` 1 # NOTE: kornia filters and geometry must go first since are the core of the library 2 # and by changing the import order you might get into a circular dependencies issue. 3 from . import filters 4 from . import geometry 5 from . import grad_estimator 6 7 # import the other modules for convenience 8 from . import augmentation, color, contrib, core, enhance, feature, io, losses, metrics, morphology, tracking, utils, x 9 10 # NOTE: we are going to expose to top level very few things 11 from kornia.constants import pi 12 from kornia.testing import xla_is_available 13 from kornia.utils import eye_like, vec_like, create_meshgrid, image_to_tensor, tensor_to_image 14 15 # Version variable 16 __version__ = "0.6.13-dev" 17 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/kornia/__init__.py b/kornia/__init__.py --- a/kornia/__init__.py +++ b/kornia/__init__.py @@ -13,4 +13,4 @@ from kornia.utils import eye_like, vec_like, create_meshgrid, image_to_tensor, tensor_to_image # Version variable -__version__ = "0.6.13-dev" +__version__ = "0.7.0"
{"golden_diff": "diff --git a/kornia/__init__.py b/kornia/__init__.py\n--- a/kornia/__init__.py\n+++ b/kornia/__init__.py\n@@ -13,4 +13,4 @@\n from kornia.utils import eye_like, vec_like, create_meshgrid, image_to_tensor, tensor_to_image\n \n # Version variable\n-__version__ = \"0.6.13-dev\"\n+__version__ = \"0.7.0\"\n", "issue": "Next release?\n## \ud83d\ude80 Feature\r\nHi, when will the next kornia release on conda or pypi be?\r\n\r\n## Motivation\r\n\r\nLast conda release was in April, and new features have landed since then, but are unavailable in wheels.\n", "before_files": [{"content": "# NOTE: kornia filters and geometry must go first since are the core of the library\n# and by changing the import order you might get into a circular dependencies issue.\nfrom . import filters\nfrom . import geometry\nfrom . import grad_estimator\n\n# import the other modules for convenience\nfrom . import augmentation, color, contrib, core, enhance, feature, io, losses, metrics, morphology, tracking, utils, x\n\n# NOTE: we are going to expose to top level very few things\nfrom kornia.constants import pi\nfrom kornia.testing import xla_is_available\nfrom kornia.utils import eye_like, vec_like, create_meshgrid, image_to_tensor, tensor_to_image\n\n# Version variable\n__version__ = \"0.6.13-dev\"\n", "path": "kornia/__init__.py"}], "after_files": [{"content": "# NOTE: kornia filters and geometry must go first since are the core of the library\n# and by changing the import order you might get into a circular dependencies issue.\nfrom . import filters\nfrom . import geometry\nfrom . import grad_estimator\n\n# import the other modules for convenience\nfrom . import augmentation, color, contrib, core, enhance, feature, io, losses, metrics, morphology, tracking, utils, x\n\n# NOTE: we are going to expose to top level very few things\nfrom kornia.constants import pi\nfrom kornia.testing import xla_is_available\nfrom kornia.utils import eye_like, vec_like, create_meshgrid, image_to_tensor, tensor_to_image\n\n# Version variable\n__version__ = \"0.7.0\"\n", "path": "kornia/__init__.py"}]}
507
106
gh_patches_debug_22898
rasdani/github-patches
git_diff
gratipay__gratipay.com-3485
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- revenue model is '' for everyone cf. #3479 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `gratipay/models/team.py` Content: ``` 1 """Teams on Gratipay are plural participants with members. 2 """ 3 from postgres.orm import Model 4 5 6 class Team(Model): 7 """Represent a Gratipay team. 8 """ 9 10 typname = 'teams' 11 12 def __eq__(self, other): 13 if not isinstance(other, Team): 14 return False 15 return self.id == other.id 16 17 def __ne__(self, other): 18 if not isinstance(other, Team): 19 return True 20 return self.id != other.id 21 22 23 # Constructors 24 # ============ 25 26 @classmethod 27 def from_id(cls, id): 28 """Return an existing team based on id. 29 """ 30 return cls._from_thing("id", id) 31 32 @classmethod 33 def from_slug(cls, slug): 34 """Return an existing team based on slug. 35 """ 36 return cls._from_thing("slug_lower", slug.lower()) 37 38 @classmethod 39 def _from_thing(cls, thing, value): 40 assert thing in ("id", "slug_lower") 41 return cls.db.one(""" 42 43 SELECT teams.*::teams 44 FROM teams 45 WHERE {}=%s 46 47 """.format(thing), (value,)) 48 49 @classmethod 50 def create_new(cls, owner, fields): 51 return cls.db.one(""" 52 53 INSERT INTO teams 54 (slug, slug_lower, name, homepage, product_or_service, 55 getting_involved, getting_paid, owner) 56 VALUES (%s, %s, %s, %s, %s, %s, %s, %s) 57 RETURNING teams.*::teams 58 59 """, (fields['slug'], fields['slug'].lower(), fields['name'], fields['homepage'], 60 fields['product_or_service'], fields['getting_involved'], fields['getting_paid'], 61 owner.username)) 62 63 def get_og_title(self): 64 out = self.name 65 receiving = self.receiving 66 if receiving > 0: 67 out += " receives $%.2f/wk" % receiving 68 else: 69 out += " is" 70 return out + " on Gratipay" 71 72 73 def update_receiving(self, cursor=None): 74 # Stubbed out for now. Migrate this over from Participant. 75 pass 76 77 78 @property 79 def status(self): 80 return { None: 'unreviewed' 81 , False: 'rejected' 82 , True: 'approved' 83 }[self.is_approved] 84 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/gratipay/models/team.py b/gratipay/models/team.py --- a/gratipay/models/team.py +++ b/gratipay/models/team.py @@ -47,18 +47,22 @@ """.format(thing), (value,)) @classmethod - def create_new(cls, owner, fields): + def insert(cls, owner, **fields): + fields['slug_lower'] = fields['slug'].lower() + fields['owner'] = owner.username return cls.db.one(""" INSERT INTO teams - (slug, slug_lower, name, homepage, product_or_service, - getting_involved, getting_paid, owner) - VALUES (%s, %s, %s, %s, %s, %s, %s, %s) + (slug, slug_lower, name, homepage, + product_or_service, revenue_model, getting_involved, getting_paid, + owner) + VALUES (%(slug)s, %(slug_lower)s, %(name)s, %(homepage)s, + %(product_or_service)s, %(revenue_model)s, %(getting_involved)s, + %(getting_paid)s, + %(owner)s) RETURNING teams.*::teams - """, (fields['slug'], fields['slug'].lower(), fields['name'], fields['homepage'], - fields['product_or_service'], fields['getting_involved'], fields['getting_paid'], - owner.username)) + """, fields) def get_og_title(self): out = self.name
{"golden_diff": "diff --git a/gratipay/models/team.py b/gratipay/models/team.py\n--- a/gratipay/models/team.py\n+++ b/gratipay/models/team.py\n@@ -47,18 +47,22 @@\n \"\"\".format(thing), (value,))\n \n @classmethod\n- def create_new(cls, owner, fields):\n+ def insert(cls, owner, **fields):\n+ fields['slug_lower'] = fields['slug'].lower()\n+ fields['owner'] = owner.username\n return cls.db.one(\"\"\"\n \n INSERT INTO teams\n- (slug, slug_lower, name, homepage, product_or_service,\n- getting_involved, getting_paid, owner)\n- VALUES (%s, %s, %s, %s, %s, %s, %s, %s)\n+ (slug, slug_lower, name, homepage,\n+ product_or_service, revenue_model, getting_involved, getting_paid,\n+ owner)\n+ VALUES (%(slug)s, %(slug_lower)s, %(name)s, %(homepage)s,\n+ %(product_or_service)s, %(revenue_model)s, %(getting_involved)s,\n+ %(getting_paid)s,\n+ %(owner)s)\n RETURNING teams.*::teams\n \n- \"\"\", (fields['slug'], fields['slug'].lower(), fields['name'], fields['homepage'],\n- fields['product_or_service'], fields['getting_involved'], fields['getting_paid'],\n- owner.username))\n+ \"\"\", fields)\n \n def get_og_title(self):\n out = self.name\n", "issue": "revenue model is '' for everyone\ncf. #3479\n\n", "before_files": [{"content": "\"\"\"Teams on Gratipay are plural participants with members.\n\"\"\"\nfrom postgres.orm import Model\n\n\nclass Team(Model):\n \"\"\"Represent a Gratipay team.\n \"\"\"\n\n typname = 'teams'\n\n def __eq__(self, other):\n if not isinstance(other, Team):\n return False\n return self.id == other.id\n\n def __ne__(self, other):\n if not isinstance(other, Team):\n return True\n return self.id != other.id\n\n\n # Constructors\n # ============\n\n @classmethod\n def from_id(cls, id):\n \"\"\"Return an existing team based on id.\n \"\"\"\n return cls._from_thing(\"id\", id)\n\n @classmethod\n def from_slug(cls, slug):\n \"\"\"Return an existing team based on slug.\n \"\"\"\n return cls._from_thing(\"slug_lower\", slug.lower())\n\n @classmethod\n def _from_thing(cls, thing, value):\n assert thing in (\"id\", \"slug_lower\")\n return cls.db.one(\"\"\"\n\n SELECT teams.*::teams\n FROM teams\n WHERE {}=%s\n\n \"\"\".format(thing), (value,))\n\n @classmethod\n def create_new(cls, owner, fields):\n return cls.db.one(\"\"\"\n\n INSERT INTO teams\n (slug, slug_lower, name, homepage, product_or_service,\n getting_involved, getting_paid, owner)\n VALUES (%s, %s, %s, %s, %s, %s, %s, %s)\n RETURNING teams.*::teams\n\n \"\"\", (fields['slug'], fields['slug'].lower(), fields['name'], fields['homepage'],\n fields['product_or_service'], fields['getting_involved'], fields['getting_paid'],\n owner.username))\n\n def get_og_title(self):\n out = self.name\n receiving = self.receiving\n if receiving > 0:\n out += \" receives $%.2f/wk\" % receiving\n else:\n out += \" is\"\n return out + \" on Gratipay\"\n\n\n def update_receiving(self, cursor=None):\n # Stubbed out for now. Migrate this over from Participant.\n pass\n\n\n @property\n def status(self):\n return { None: 'unreviewed'\n , False: 'rejected'\n , True: 'approved'\n }[self.is_approved]\n", "path": "gratipay/models/team.py"}], "after_files": [{"content": "\"\"\"Teams on Gratipay are plural participants with members.\n\"\"\"\nfrom postgres.orm import Model\n\n\nclass Team(Model):\n \"\"\"Represent a Gratipay team.\n \"\"\"\n\n typname = 'teams'\n\n def __eq__(self, other):\n if not isinstance(other, Team):\n return False\n return self.id == other.id\n\n def __ne__(self, other):\n if not isinstance(other, Team):\n return True\n return self.id != other.id\n\n\n # Constructors\n # ============\n\n @classmethod\n def from_id(cls, id):\n \"\"\"Return an existing team based on id.\n \"\"\"\n return cls._from_thing(\"id\", id)\n\n @classmethod\n def from_slug(cls, slug):\n \"\"\"Return an existing team based on slug.\n \"\"\"\n return cls._from_thing(\"slug_lower\", slug.lower())\n\n @classmethod\n def _from_thing(cls, thing, value):\n assert thing in (\"id\", \"slug_lower\")\n return cls.db.one(\"\"\"\n\n SELECT teams.*::teams\n FROM teams\n WHERE {}=%s\n\n \"\"\".format(thing), (value,))\n\n @classmethod\n def insert(cls, owner, **fields):\n fields['slug_lower'] = fields['slug'].lower()\n fields['owner'] = owner.username\n return cls.db.one(\"\"\"\n\n INSERT INTO teams\n (slug, slug_lower, name, homepage,\n product_or_service, revenue_model, getting_involved, getting_paid,\n owner)\n VALUES (%(slug)s, %(slug_lower)s, %(name)s, %(homepage)s,\n %(product_or_service)s, %(revenue_model)s, %(getting_involved)s,\n %(getting_paid)s,\n %(owner)s)\n RETURNING teams.*::teams\n\n \"\"\", fields)\n\n def get_og_title(self):\n out = self.name\n receiving = self.receiving\n if receiving > 0:\n out += \" receives $%.2f/wk\" % receiving\n else:\n out += \" is\"\n return out + \" on Gratipay\"\n\n\n def update_receiving(self, cursor=None):\n # Stubbed out for now. Migrate this over from Participant.\n pass\n\n\n @property\n def status(self):\n return { None: 'unreviewed'\n , False: 'rejected'\n , True: 'approved'\n }[self.is_approved]\n", "path": "gratipay/models/team.py"}]}
949
337
gh_patches_debug_22373
rasdani/github-patches
git_diff
zulip__zulip-10098
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- slash commands: Add /dark and /light commands. We have /night and /day, and people are starting to use them. We should add the aliases /dark and /light. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `zerver/lib/zcommand.py` Content: ``` 1 from typing import Any, Dict 2 from django.utils.translation import ugettext as _ 3 4 from zerver.models import UserProfile 5 from zerver.lib.actions import do_set_user_display_setting 6 from zerver.lib.exceptions import JsonableError 7 8 def process_zcommands(content: str, user_profile: UserProfile) -> Dict[str, Any]: 9 if not content.startswith('/'): 10 raise JsonableError(_('There should be a leading slash in the zcommand.')) 11 command = content[1:] 12 13 if command == 'ping': 14 ret = dict() # type: Dict[str, Any] 15 return ret 16 17 if command == 'night': 18 if user_profile.night_mode: 19 msg = 'You are still in night mode.' 20 else: 21 msg = 'Changed to night mode! To revert night mode, type `/day`.' 22 do_set_user_display_setting(user_profile, 'night_mode', True) 23 ret = dict(msg=msg) 24 return ret 25 26 if command == 'day': 27 if user_profile.night_mode: 28 msg = 'Changed to day mode! To revert day mode, type `/night`.' 29 do_set_user_display_setting(user_profile, 'night_mode', False) 30 else: 31 msg = 'You are still in day mode.' 32 ret = dict(msg=msg) 33 return ret 34 35 raise JsonableError(_('No such command: %s') % (command,)) 36 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/zerver/lib/zcommand.py b/zerver/lib/zcommand.py --- a/zerver/lib/zcommand.py +++ b/zerver/lib/zcommand.py @@ -14,18 +14,21 @@ ret = dict() # type: Dict[str, Any] return ret - if command == 'night': + night_commands = ['night', 'dark'] + day_commands = ['day', 'light'] + + if command in night_commands: if user_profile.night_mode: msg = 'You are still in night mode.' else: - msg = 'Changed to night mode! To revert night mode, type `/day`.' + msg = 'Changed to night mode! To revert night mode, type `%s`.' % (content,) do_set_user_display_setting(user_profile, 'night_mode', True) ret = dict(msg=msg) return ret - if command == 'day': + if command in day_commands: if user_profile.night_mode: - msg = 'Changed to day mode! To revert day mode, type `/night`.' + msg = 'Changed to day mode! To revert day mode, type `%s`.' % (content,) do_set_user_display_setting(user_profile, 'night_mode', False) else: msg = 'You are still in day mode.'
{"golden_diff": "diff --git a/zerver/lib/zcommand.py b/zerver/lib/zcommand.py\n--- a/zerver/lib/zcommand.py\n+++ b/zerver/lib/zcommand.py\n@@ -14,18 +14,21 @@\n ret = dict() # type: Dict[str, Any]\n return ret\n \n- if command == 'night':\n+ night_commands = ['night', 'dark']\n+ day_commands = ['day', 'light']\n+\n+ if command in night_commands:\n if user_profile.night_mode:\n msg = 'You are still in night mode.'\n else:\n- msg = 'Changed to night mode! To revert night mode, type `/day`.'\n+ msg = 'Changed to night mode! To revert night mode, type `%s`.' % (content,)\n do_set_user_display_setting(user_profile, 'night_mode', True)\n ret = dict(msg=msg)\n return ret\n \n- if command == 'day':\n+ if command in day_commands:\n if user_profile.night_mode:\n- msg = 'Changed to day mode! To revert day mode, type `/night`.'\n+ msg = 'Changed to day mode! To revert day mode, type `%s`.' % (content,)\n do_set_user_display_setting(user_profile, 'night_mode', False)\n else:\n msg = 'You are still in day mode.'\n", "issue": "slash commands: Add /dark and /light commands.\nWe have /night and /day, and people are starting to use them. We should add the aliases /dark and /light.\n", "before_files": [{"content": "from typing import Any, Dict\nfrom django.utils.translation import ugettext as _\n\nfrom zerver.models import UserProfile\nfrom zerver.lib.actions import do_set_user_display_setting\nfrom zerver.lib.exceptions import JsonableError\n\ndef process_zcommands(content: str, user_profile: UserProfile) -> Dict[str, Any]:\n if not content.startswith('/'):\n raise JsonableError(_('There should be a leading slash in the zcommand.'))\n command = content[1:]\n\n if command == 'ping':\n ret = dict() # type: Dict[str, Any]\n return ret\n\n if command == 'night':\n if user_profile.night_mode:\n msg = 'You are still in night mode.'\n else:\n msg = 'Changed to night mode! To revert night mode, type `/day`.'\n do_set_user_display_setting(user_profile, 'night_mode', True)\n ret = dict(msg=msg)\n return ret\n\n if command == 'day':\n if user_profile.night_mode:\n msg = 'Changed to day mode! To revert day mode, type `/night`.'\n do_set_user_display_setting(user_profile, 'night_mode', False)\n else:\n msg = 'You are still in day mode.'\n ret = dict(msg=msg)\n return ret\n\n raise JsonableError(_('No such command: %s') % (command,))\n", "path": "zerver/lib/zcommand.py"}], "after_files": [{"content": "from typing import Any, Dict\nfrom django.utils.translation import ugettext as _\n\nfrom zerver.models import UserProfile\nfrom zerver.lib.actions import do_set_user_display_setting\nfrom zerver.lib.exceptions import JsonableError\n\ndef process_zcommands(content: str, user_profile: UserProfile) -> Dict[str, Any]:\n if not content.startswith('/'):\n raise JsonableError(_('There should be a leading slash in the zcommand.'))\n command = content[1:]\n\n if command == 'ping':\n ret = dict() # type: Dict[str, Any]\n return ret\n\n night_commands = ['night', 'dark']\n day_commands = ['day', 'light']\n\n if command in night_commands:\n if user_profile.night_mode:\n msg = 'You are still in night mode.'\n else:\n msg = 'Changed to night mode! To revert night mode, type `%s`.' % (content,)\n do_set_user_display_setting(user_profile, 'night_mode', True)\n ret = dict(msg=msg)\n return ret\n\n if command in day_commands:\n if user_profile.night_mode:\n msg = 'Changed to day mode! To revert day mode, type `%s`.' % (content,)\n do_set_user_display_setting(user_profile, 'night_mode', False)\n else:\n msg = 'You are still in day mode.'\n ret = dict(msg=msg)\n return ret\n\n raise JsonableError(_('No such command: %s') % (command,))\n", "path": "zerver/lib/zcommand.py"}]}
654
296
gh_patches_debug_39460
rasdani/github-patches
git_diff
mathesar-foundation__mathesar-151
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Get filtered set of records from table **Problem** <!-- Please provide a clear and concise description of the problem that this feature request is designed to solve.--> At the moment, when we request records from a table, it's _all_ of the records or none. We should be able to filter a way that supports retrieving "groups" of records, where a group is defined as a set of records where some subset of the columns match a set of values, equivalent to the SQL: ```sql WHERE col1 = val1 AND col2 = val2 AND ... AND coln = valn ``` **Proposed solution** <!-- A clear and concise description of your proposed solution or feature. --> We should create a function that lets us filter string columns using the pattern above, and another function that retrieves the distinct tuples for a set of columns (i.e., the groups in this context). **Additional context** <!-- Add any other context or screenshots about the feature request here.--> The interesting bit will be figuring out how to paginate the results, but without having to reperform the (costly) filtering query each time. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `db/records.py` Content: ``` 1 from sqlalchemy import delete, select 2 from sqlalchemy.inspection import inspect 3 4 5 def _get_primary_key_column(table): 6 primary_key_list = list(inspect(table).primary_key) 7 # We do not support getting by composite primary keys 8 assert len(primary_key_list) == 1 9 return primary_key_list[0] 10 11 12 def get_record(table, engine, id_value): 13 primary_key_column = _get_primary_key_column(table) 14 query = select(table).where(primary_key_column == id_value) 15 with engine.begin() as conn: 16 result = conn.execute(query).fetchall() 17 assert len(result) <= 1 18 return result[0] if result else None 19 20 21 def get_records(table, engine, limit=None, offset=None, order_by=[]): 22 """ 23 Returns records from a table. 24 25 Args: 26 table: SQLAlchemy table object 27 engine: SQLAlchemy engine object 28 limit: int, gives number of rows to return 29 offset: int, gives number of rows to skip 30 order_by: list of SQLAlchemy ColumnElements to order by. Should 31 usually be either a list of string column names, or a 32 list of columns from the given table. 33 """ 34 query = select(table).order_by(*order_by).limit(limit).offset(offset) 35 with engine.begin() as conn: 36 return conn.execute(query).fetchall() 37 38 39 def create_record_or_records(table, engine, record_data): 40 """ 41 record_data can be a dictionary, tuple, or list of dictionaries or tuples. 42 if record_data is a list, it creates multiple records. 43 """ 44 id_value = None 45 with engine.begin() as connection: 46 result = connection.execute(table.insert(), record_data) 47 # If there was only a single record created, return the record. 48 if result.rowcount == 1: 49 # We need to manually commit insertion so that we can retrieve the record. 50 connection.commit() 51 id_value = result.inserted_primary_key[0] 52 if id_value is not None: 53 return get_record(table, engine, id_value) 54 # Do not return any records if multiple rows were added. 55 return None 56 57 58 def create_records_from_csv(table, engine, csv_filename, column_names): 59 with open(csv_filename, 'rb') as csv_file: 60 with engine.begin() as conn: 61 cursor = conn.connection.cursor() 62 relation = '.'.join('"{}"'.format(part) for part in (table.schema, table.name)) 63 formatted_columns = '({})'.format(','.join([f'"{column_name}"' for column_name in column_names])) 64 copy_sql = f'COPY {relation} {formatted_columns} FROM STDIN CSV HEADER' 65 cursor.copy_expert(copy_sql, csv_file) 66 67 68 def update_record(table, engine, id_value, record_data): 69 primary_key_column = _get_primary_key_column(table) 70 with engine.begin() as connection: 71 connection.execute( 72 table.update().where(primary_key_column == id_value).values(record_data) 73 ) 74 return get_record(table, engine, id_value) 75 76 77 def delete_record(table, engine, id_value): 78 primary_key_column = _get_primary_key_column(table) 79 query = delete(table).where(primary_key_column == id_value) 80 with engine.begin() as conn: 81 return conn.execute(query) 82 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/db/records.py b/db/records.py --- a/db/records.py +++ b/db/records.py @@ -1,6 +1,9 @@ -from sqlalchemy import delete, select +import logging +from sqlalchemy import delete, select, and_, Column from sqlalchemy.inspection import inspect +logger = logging.getLogger(__name__) + def _get_primary_key_column(table): primary_key_list = list(inspect(table).primary_key) @@ -18,7 +21,9 @@ return result[0] if result else None -def get_records(table, engine, limit=None, offset=None, order_by=[]): +def get_records( + table, engine, limit=None, offset=None, order_by=[], filters=[] +): """ Returns records from a table. @@ -30,12 +35,72 @@ order_by: list of SQLAlchemy ColumnElements to order by. Should usually be either a list of string column names, or a list of columns from the given table. + filters: list of tuples of type (ColumnElement, value), where + ColumnElement is an SQLAlchemy ColumnElement, and value + is a valid value for the associated column (i.e., the + type must be correct) """ - query = select(table).order_by(*order_by).limit(limit).offset(offset) + query = ( + select(table) + .order_by(*order_by) + .limit(limit) + .offset(offset) + .where(_build_filter_conjunction(table, filters)) + ) with engine.begin() as conn: return conn.execute(query).fetchall() +def _build_filter_conjunction(table, filters): + refined_filters = [ + (table.columns[col] if type(col) == str else col, value) + for col, value in filters + ] + # We need a default of True (rather than empty), since invoking and_ + # without arguments is deprecated. + return and_(True, *[col == value for col, value in refined_filters]) + + +def get_distinct_tuple_values( + column_list, engine, table=None, limit=None, offset=None, +): + """ + Returns distinct tuples from a given list of columns. + + Args: + column_list: list of column names or SQLAlchemy column objects + engine: SQLAlchemy engine object + table: SQLAlchemy table object + limit: int, gives number of rows to return + offset: int, gives number of rows to skip + + If no table is given, the column_list must consist entirely of + SQLAlchemy column objects associated with a table. + """ + if table is not None: + column_objects = [ + table.columns[col] if type(col) == str else col + for col in column_list + ] + else: + column_objects = column_list + try: + assert all([type(col) == Column for col in column_objects]) + except AssertionError as e: + logger.error("All columns must be str or sqlalchemy.Column type") + raise e + + query = ( + select(*column_objects) + .distinct() + .limit(limit) + .offset(offset) + ) + with engine.begin() as conn: + res = conn.execute(query).fetchall() + return [tuple(zip(column_objects, row)) for row in res] + + def create_record_or_records(table, engine, record_data): """ record_data can be a dictionary, tuple, or list of dictionaries or tuples.
{"golden_diff": "diff --git a/db/records.py b/db/records.py\n--- a/db/records.py\n+++ b/db/records.py\n@@ -1,6 +1,9 @@\n-from sqlalchemy import delete, select\n+import logging\n+from sqlalchemy import delete, select, and_, Column\n from sqlalchemy.inspection import inspect\n \n+logger = logging.getLogger(__name__)\n+\n \n def _get_primary_key_column(table):\n primary_key_list = list(inspect(table).primary_key)\n@@ -18,7 +21,9 @@\n return result[0] if result else None\n \n \n-def get_records(table, engine, limit=None, offset=None, order_by=[]):\n+def get_records(\n+ table, engine, limit=None, offset=None, order_by=[], filters=[]\n+):\n \"\"\"\n Returns records from a table.\n \n@@ -30,12 +35,72 @@\n order_by: list of SQLAlchemy ColumnElements to order by. Should\n usually be either a list of string column names, or a\n list of columns from the given table.\n+ filters: list of tuples of type (ColumnElement, value), where\n+ ColumnElement is an SQLAlchemy ColumnElement, and value\n+ is a valid value for the associated column (i.e., the\n+ type must be correct)\n \"\"\"\n- query = select(table).order_by(*order_by).limit(limit).offset(offset)\n+ query = (\n+ select(table)\n+ .order_by(*order_by)\n+ .limit(limit)\n+ .offset(offset)\n+ .where(_build_filter_conjunction(table, filters))\n+ )\n with engine.begin() as conn:\n return conn.execute(query).fetchall()\n \n \n+def _build_filter_conjunction(table, filters):\n+ refined_filters = [\n+ (table.columns[col] if type(col) == str else col, value)\n+ for col, value in filters\n+ ]\n+ # We need a default of True (rather than empty), since invoking and_\n+ # without arguments is deprecated.\n+ return and_(True, *[col == value for col, value in refined_filters])\n+\n+\n+def get_distinct_tuple_values(\n+ column_list, engine, table=None, limit=None, offset=None,\n+):\n+ \"\"\"\n+ Returns distinct tuples from a given list of columns.\n+\n+ Args:\n+ column_list: list of column names or SQLAlchemy column objects\n+ engine: SQLAlchemy engine object\n+ table: SQLAlchemy table object\n+ limit: int, gives number of rows to return\n+ offset: int, gives number of rows to skip\n+\n+ If no table is given, the column_list must consist entirely of\n+ SQLAlchemy column objects associated with a table.\n+ \"\"\"\n+ if table is not None:\n+ column_objects = [\n+ table.columns[col] if type(col) == str else col\n+ for col in column_list\n+ ]\n+ else:\n+ column_objects = column_list\n+ try:\n+ assert all([type(col) == Column for col in column_objects])\n+ except AssertionError as e:\n+ logger.error(\"All columns must be str or sqlalchemy.Column type\")\n+ raise e\n+\n+ query = (\n+ select(*column_objects)\n+ .distinct()\n+ .limit(limit)\n+ .offset(offset)\n+ )\n+ with engine.begin() as conn:\n+ res = conn.execute(query).fetchall()\n+ return [tuple(zip(column_objects, row)) for row in res]\n+\n+\n def create_record_or_records(table, engine, record_data):\n \"\"\"\n record_data can be a dictionary, tuple, or list of dictionaries or tuples.\n", "issue": "Get filtered set of records from table\n**Problem**\r\n<!-- Please provide a clear and concise description of the problem that this feature request is designed to solve.-->\r\n\r\nAt the moment, when we request records from a table, it's _all_ of the records or none. We should be able to filter a way that supports retrieving \"groups\" of records, where a group is defined as a set of records where some subset of the columns match a set of values, equivalent to the SQL:\r\n\r\n```sql\r\nWHERE col1 = val1 AND col2 = val2 AND ... AND coln = valn\r\n```\r\n\r\n**Proposed solution**\r\n<!-- A clear and concise description of your proposed solution or feature. -->\r\n\r\nWe should create a function that lets us filter string columns using the pattern above, and another function that retrieves the distinct tuples for a set of columns (i.e., the groups in this context).\r\n\r\n**Additional context**\r\n<!-- Add any other context or screenshots about the feature request here.-->\r\n\r\nThe interesting bit will be figuring out how to paginate the results, but without having to reperform the (costly) filtering query each time.\n", "before_files": [{"content": "from sqlalchemy import delete, select\nfrom sqlalchemy.inspection import inspect\n\n\ndef _get_primary_key_column(table):\n primary_key_list = list(inspect(table).primary_key)\n # We do not support getting by composite primary keys\n assert len(primary_key_list) == 1\n return primary_key_list[0]\n\n\ndef get_record(table, engine, id_value):\n primary_key_column = _get_primary_key_column(table)\n query = select(table).where(primary_key_column == id_value)\n with engine.begin() as conn:\n result = conn.execute(query).fetchall()\n assert len(result) <= 1\n return result[0] if result else None\n\n\ndef get_records(table, engine, limit=None, offset=None, order_by=[]):\n \"\"\"\n Returns records from a table.\n\n Args:\n table: SQLAlchemy table object\n engine: SQLAlchemy engine object\n limit: int, gives number of rows to return\n offset: int, gives number of rows to skip\n order_by: list of SQLAlchemy ColumnElements to order by. Should\n usually be either a list of string column names, or a\n list of columns from the given table.\n \"\"\"\n query = select(table).order_by(*order_by).limit(limit).offset(offset)\n with engine.begin() as conn:\n return conn.execute(query).fetchall()\n\n\ndef create_record_or_records(table, engine, record_data):\n \"\"\"\n record_data can be a dictionary, tuple, or list of dictionaries or tuples.\n if record_data is a list, it creates multiple records.\n \"\"\"\n id_value = None\n with engine.begin() as connection:\n result = connection.execute(table.insert(), record_data)\n # If there was only a single record created, return the record.\n if result.rowcount == 1:\n # We need to manually commit insertion so that we can retrieve the record.\n connection.commit()\n id_value = result.inserted_primary_key[0]\n if id_value is not None:\n return get_record(table, engine, id_value)\n # Do not return any records if multiple rows were added.\n return None\n\n\ndef create_records_from_csv(table, engine, csv_filename, column_names):\n with open(csv_filename, 'rb') as csv_file:\n with engine.begin() as conn:\n cursor = conn.connection.cursor()\n relation = '.'.join('\"{}\"'.format(part) for part in (table.schema, table.name))\n formatted_columns = '({})'.format(','.join([f'\"{column_name}\"' for column_name in column_names]))\n copy_sql = f'COPY {relation} {formatted_columns} FROM STDIN CSV HEADER'\n cursor.copy_expert(copy_sql, csv_file)\n\n\ndef update_record(table, engine, id_value, record_data):\n primary_key_column = _get_primary_key_column(table)\n with engine.begin() as connection:\n connection.execute(\n table.update().where(primary_key_column == id_value).values(record_data)\n )\n return get_record(table, engine, id_value)\n\n\ndef delete_record(table, engine, id_value):\n primary_key_column = _get_primary_key_column(table)\n query = delete(table).where(primary_key_column == id_value)\n with engine.begin() as conn:\n return conn.execute(query)\n", "path": "db/records.py"}], "after_files": [{"content": "import logging\nfrom sqlalchemy import delete, select, and_, Column\nfrom sqlalchemy.inspection import inspect\n\nlogger = logging.getLogger(__name__)\n\n\ndef _get_primary_key_column(table):\n primary_key_list = list(inspect(table).primary_key)\n # We do not support getting by composite primary keys\n assert len(primary_key_list) == 1\n return primary_key_list[0]\n\n\ndef get_record(table, engine, id_value):\n primary_key_column = _get_primary_key_column(table)\n query = select(table).where(primary_key_column == id_value)\n with engine.begin() as conn:\n result = conn.execute(query).fetchall()\n assert len(result) <= 1\n return result[0] if result else None\n\n\ndef get_records(\n table, engine, limit=None, offset=None, order_by=[], filters=[]\n):\n \"\"\"\n Returns records from a table.\n\n Args:\n table: SQLAlchemy table object\n engine: SQLAlchemy engine object\n limit: int, gives number of rows to return\n offset: int, gives number of rows to skip\n order_by: list of SQLAlchemy ColumnElements to order by. Should\n usually be either a list of string column names, or a\n list of columns from the given table.\n filters: list of tuples of type (ColumnElement, value), where\n ColumnElement is an SQLAlchemy ColumnElement, and value\n is a valid value for the associated column (i.e., the\n type must be correct)\n \"\"\"\n query = (\n select(table)\n .order_by(*order_by)\n .limit(limit)\n .offset(offset)\n .where(_build_filter_conjunction(table, filters))\n )\n with engine.begin() as conn:\n return conn.execute(query).fetchall()\n\n\ndef _build_filter_conjunction(table, filters):\n refined_filters = [\n (table.columns[col] if type(col) == str else col, value)\n for col, value in filters\n ]\n # We need a default of True (rather than empty), since invoking and_\n # without arguments is deprecated.\n return and_(True, *[col == value for col, value in refined_filters])\n\n\ndef get_distinct_tuple_values(\n column_list, engine, table=None, limit=None, offset=None,\n):\n \"\"\"\n Returns distinct tuples from a given list of columns.\n\n Args:\n column_list: list of column names or SQLAlchemy column objects\n engine: SQLAlchemy engine object\n table: SQLAlchemy table object\n limit: int, gives number of rows to return\n offset: int, gives number of rows to skip\n\n If no table is given, the column_list must consist entirely of\n SQLAlchemy column objects associated with a table.\n \"\"\"\n if table is not None:\n column_objects = [\n table.columns[col] if type(col) == str else col\n for col in column_list\n ]\n else:\n column_objects = column_list\n try:\n assert all([type(col) == Column for col in column_objects])\n except AssertionError as e:\n logger.error(\"All columns must be str or sqlalchemy.Column type\")\n raise e\n\n query = (\n select(*column_objects)\n .distinct()\n .limit(limit)\n .offset(offset)\n )\n with engine.begin() as conn:\n res = conn.execute(query).fetchall()\n return [tuple(zip(column_objects, row)) for row in res]\n\n\ndef create_record_or_records(table, engine, record_data):\n \"\"\"\n record_data can be a dictionary, tuple, or list of dictionaries or tuples.\n if record_data is a list, it creates multiple records.\n \"\"\"\n id_value = None\n with engine.begin() as connection:\n result = connection.execute(table.insert(), record_data)\n # If there was only a single record created, return the record.\n if result.rowcount == 1:\n # We need to manually commit insertion so that we can retrieve the record.\n connection.commit()\n id_value = result.inserted_primary_key[0]\n if id_value is not None:\n return get_record(table, engine, id_value)\n # Do not return any records if multiple rows were added.\n return None\n\n\ndef create_records_from_csv(table, engine, csv_filename, column_names):\n with open(csv_filename, 'rb') as csv_file:\n with engine.begin() as conn:\n cursor = conn.connection.cursor()\n relation = '.'.join('\"{}\"'.format(part) for part in (table.schema, table.name))\n formatted_columns = '({})'.format(','.join([f'\"{column_name}\"' for column_name in column_names]))\n copy_sql = f'COPY {relation} {formatted_columns} FROM STDIN CSV HEADER'\n cursor.copy_expert(copy_sql, csv_file)\n\n\ndef update_record(table, engine, id_value, record_data):\n primary_key_column = _get_primary_key_column(table)\n with engine.begin() as connection:\n connection.execute(\n table.update().where(primary_key_column == id_value).values(record_data)\n )\n return get_record(table, engine, id_value)\n\n\ndef delete_record(table, engine, id_value):\n primary_key_column = _get_primary_key_column(table)\n query = delete(table).where(primary_key_column == id_value)\n with engine.begin() as conn:\n return conn.execute(query)\n", "path": "db/records.py"}]}
1,350
798
gh_patches_debug_2891
rasdani/github-patches
git_diff
getsentry__sentry-5094
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Webhook data does not have event id Webhook data contains issue id only. It would be nice to have event id as well. Discussed with @mattrobenolt on IRC. Documenting it here with this issue. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/sentry/plugins/sentry_webhooks/plugin.py` Content: ``` 1 from __future__ import absolute_import 2 3 import logging 4 import six 5 import sentry 6 7 from django import forms 8 from django.conf import settings 9 from django.utils.translation import ugettext_lazy as _ 10 11 from sentry.exceptions import PluginError 12 from sentry.plugins.bases import notify 13 from sentry.http import is_valid_url, safe_urlopen 14 from sentry.utils.safe import safe_execute 15 16 17 def validate_urls(value, **kwargs): 18 output = [] 19 for url in value.split('\n'): 20 url = url.strip() 21 if not url: 22 continue 23 if not url.startswith(('http://', 'https://')): 24 raise PluginError('Not a valid URL.') 25 if not is_valid_url(url): 26 raise PluginError('Not a valid URL.') 27 output.append(url) 28 return '\n'.join(output) 29 30 31 class WebHooksOptionsForm(notify.NotificationConfigurationForm): 32 urls = forms.CharField( 33 label=_('Callback URLs'), 34 widget=forms.Textarea(attrs={ 35 'class': 'span6', 'placeholder': 'https://sentry.io/callback/url'}), 36 help_text=_('Enter callback URLs to POST new events to (one per line).')) 37 38 def clean_url(self): 39 value = self.cleaned_data.get('url') 40 return validate_urls(value) 41 42 43 class WebHooksPlugin(notify.NotificationPlugin): 44 author = 'Sentry Team' 45 author_url = 'https://github.com/getsentry/sentry' 46 version = sentry.VERSION 47 description = "Integrates web hooks." 48 resource_links = [ 49 ('Bug Tracker', 'https://github.com/getsentry/sentry/issues'), 50 ('Source', 'https://github.com/getsentry/sentry'), 51 ] 52 53 slug = 'webhooks' 54 title = 'WebHooks' 55 conf_title = title 56 conf_key = 'webhooks' 57 # TODO(dcramer): remove when this is migrated to React 58 project_conf_form = WebHooksOptionsForm 59 timeout = getattr(settings, 'SENTRY_WEBHOOK_TIMEOUT', 3) 60 logger = logging.getLogger('sentry.plugins.webhooks') 61 user_agent = 'sentry-webhooks/%s' % version 62 63 def is_configured(self, project, **kwargs): 64 return bool(self.get_option('urls', project)) 65 66 def get_config(self, project, **kwargs): 67 return [{ 68 'name': 'urls', 69 'label': 'Callback URLs', 70 'type': 'textarea', 71 'help': 'Enter callback URLs to POST new events to (one per line).', 72 'placeholder': 'https://sentry.io/callback/url', 73 'validators': [validate_urls], 74 'required': False 75 }] 76 77 def get_group_data(self, group, event): 78 data = { 79 'id': six.text_type(group.id), 80 'project': group.project.slug, 81 'project_name': group.project.name, 82 'logger': event.get_tag('logger'), 83 'level': event.get_tag('level'), 84 'culprit': group.culprit, 85 'message': event.get_legacy_message(), 86 'url': group.get_absolute_url(), 87 } 88 data['event'] = dict(event.data or {}) 89 data['event']['tags'] = event.get_tags() 90 return data 91 92 def get_webhook_urls(self, project): 93 urls = self.get_option('urls', project) 94 if not urls: 95 return () 96 return filter(bool, urls.strip().splitlines()) 97 98 def send_webhook(self, url, payload): 99 return safe_urlopen( 100 url=url, 101 json=payload, 102 timeout=self.timeout, 103 verify_ssl=False, 104 ) 105 106 def notify_users(self, group, event, fail_silently=False): 107 payload = self.get_group_data(group, event) 108 for url in self.get_webhook_urls(group.project): 109 safe_execute(self.send_webhook, url, payload, _with_transaction=False) 110 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/sentry/plugins/sentry_webhooks/plugin.py b/src/sentry/plugins/sentry_webhooks/plugin.py --- a/src/sentry/plugins/sentry_webhooks/plugin.py +++ b/src/sentry/plugins/sentry_webhooks/plugin.py @@ -87,6 +87,8 @@ } data['event'] = dict(event.data or {}) data['event']['tags'] = event.get_tags() + data['event']['event_id'] = event.event_id + data['event']['id'] = event.id return data def get_webhook_urls(self, project):
{"golden_diff": "diff --git a/src/sentry/plugins/sentry_webhooks/plugin.py b/src/sentry/plugins/sentry_webhooks/plugin.py\n--- a/src/sentry/plugins/sentry_webhooks/plugin.py\n+++ b/src/sentry/plugins/sentry_webhooks/plugin.py\n@@ -87,6 +87,8 @@\n }\n data['event'] = dict(event.data or {})\n data['event']['tags'] = event.get_tags()\n+ data['event']['event_id'] = event.event_id\n+ data['event']['id'] = event.id\n return data\n \n def get_webhook_urls(self, project):\n", "issue": "Webhook data does not have event id\nWebhook data contains issue id only. It would be nice to have event id as well.\r\n\r\nDiscussed with @mattrobenolt on IRC. Documenting it here with this issue.\n", "before_files": [{"content": "from __future__ import absolute_import\n\nimport logging\nimport six\nimport sentry\n\nfrom django import forms\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom sentry.exceptions import PluginError\nfrom sentry.plugins.bases import notify\nfrom sentry.http import is_valid_url, safe_urlopen\nfrom sentry.utils.safe import safe_execute\n\n\ndef validate_urls(value, **kwargs):\n output = []\n for url in value.split('\\n'):\n url = url.strip()\n if not url:\n continue\n if not url.startswith(('http://', 'https://')):\n raise PluginError('Not a valid URL.')\n if not is_valid_url(url):\n raise PluginError('Not a valid URL.')\n output.append(url)\n return '\\n'.join(output)\n\n\nclass WebHooksOptionsForm(notify.NotificationConfigurationForm):\n urls = forms.CharField(\n label=_('Callback URLs'),\n widget=forms.Textarea(attrs={\n 'class': 'span6', 'placeholder': 'https://sentry.io/callback/url'}),\n help_text=_('Enter callback URLs to POST new events to (one per line).'))\n\n def clean_url(self):\n value = self.cleaned_data.get('url')\n return validate_urls(value)\n\n\nclass WebHooksPlugin(notify.NotificationPlugin):\n author = 'Sentry Team'\n author_url = 'https://github.com/getsentry/sentry'\n version = sentry.VERSION\n description = \"Integrates web hooks.\"\n resource_links = [\n ('Bug Tracker', 'https://github.com/getsentry/sentry/issues'),\n ('Source', 'https://github.com/getsentry/sentry'),\n ]\n\n slug = 'webhooks'\n title = 'WebHooks'\n conf_title = title\n conf_key = 'webhooks'\n # TODO(dcramer): remove when this is migrated to React\n project_conf_form = WebHooksOptionsForm\n timeout = getattr(settings, 'SENTRY_WEBHOOK_TIMEOUT', 3)\n logger = logging.getLogger('sentry.plugins.webhooks')\n user_agent = 'sentry-webhooks/%s' % version\n\n def is_configured(self, project, **kwargs):\n return bool(self.get_option('urls', project))\n\n def get_config(self, project, **kwargs):\n return [{\n 'name': 'urls',\n 'label': 'Callback URLs',\n 'type': 'textarea',\n 'help': 'Enter callback URLs to POST new events to (one per line).',\n 'placeholder': 'https://sentry.io/callback/url',\n 'validators': [validate_urls],\n 'required': False\n }]\n\n def get_group_data(self, group, event):\n data = {\n 'id': six.text_type(group.id),\n 'project': group.project.slug,\n 'project_name': group.project.name,\n 'logger': event.get_tag('logger'),\n 'level': event.get_tag('level'),\n 'culprit': group.culprit,\n 'message': event.get_legacy_message(),\n 'url': group.get_absolute_url(),\n }\n data['event'] = dict(event.data or {})\n data['event']['tags'] = event.get_tags()\n return data\n\n def get_webhook_urls(self, project):\n urls = self.get_option('urls', project)\n if not urls:\n return ()\n return filter(bool, urls.strip().splitlines())\n\n def send_webhook(self, url, payload):\n return safe_urlopen(\n url=url,\n json=payload,\n timeout=self.timeout,\n verify_ssl=False,\n )\n\n def notify_users(self, group, event, fail_silently=False):\n payload = self.get_group_data(group, event)\n for url in self.get_webhook_urls(group.project):\n safe_execute(self.send_webhook, url, payload, _with_transaction=False)\n", "path": "src/sentry/plugins/sentry_webhooks/plugin.py"}], "after_files": [{"content": "from __future__ import absolute_import\n\nimport logging\nimport six\nimport sentry\n\nfrom django import forms\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom sentry.exceptions import PluginError\nfrom sentry.plugins.bases import notify\nfrom sentry.http import is_valid_url, safe_urlopen\nfrom sentry.utils.safe import safe_execute\n\n\ndef validate_urls(value, **kwargs):\n output = []\n for url in value.split('\\n'):\n url = url.strip()\n if not url:\n continue\n if not url.startswith(('http://', 'https://')):\n raise PluginError('Not a valid URL.')\n if not is_valid_url(url):\n raise PluginError('Not a valid URL.')\n output.append(url)\n return '\\n'.join(output)\n\n\nclass WebHooksOptionsForm(notify.NotificationConfigurationForm):\n urls = forms.CharField(\n label=_('Callback URLs'),\n widget=forms.Textarea(attrs={\n 'class': 'span6', 'placeholder': 'https://sentry.io/callback/url'}),\n help_text=_('Enter callback URLs to POST new events to (one per line).'))\n\n def clean_url(self):\n value = self.cleaned_data.get('url')\n return validate_urls(value)\n\n\nclass WebHooksPlugin(notify.NotificationPlugin):\n author = 'Sentry Team'\n author_url = 'https://github.com/getsentry/sentry'\n version = sentry.VERSION\n description = \"Integrates web hooks.\"\n resource_links = [\n ('Bug Tracker', 'https://github.com/getsentry/sentry/issues'),\n ('Source', 'https://github.com/getsentry/sentry'),\n ]\n\n slug = 'webhooks'\n title = 'WebHooks'\n conf_title = title\n conf_key = 'webhooks'\n # TODO(dcramer): remove when this is migrated to React\n project_conf_form = WebHooksOptionsForm\n timeout = getattr(settings, 'SENTRY_WEBHOOK_TIMEOUT', 3)\n logger = logging.getLogger('sentry.plugins.webhooks')\n user_agent = 'sentry-webhooks/%s' % version\n\n def is_configured(self, project, **kwargs):\n return bool(self.get_option('urls', project))\n\n def get_config(self, project, **kwargs):\n return [{\n 'name': 'urls',\n 'label': 'Callback URLs',\n 'type': 'textarea',\n 'help': 'Enter callback URLs to POST new events to (one per line).',\n 'placeholder': 'https://sentry.io/callback/url',\n 'validators': [validate_urls],\n 'required': False\n }]\n\n def get_group_data(self, group, event):\n data = {\n 'id': six.text_type(group.id),\n 'project': group.project.slug,\n 'project_name': group.project.name,\n 'logger': event.get_tag('logger'),\n 'level': event.get_tag('level'),\n 'culprit': group.culprit,\n 'message': event.get_legacy_message(),\n 'url': group.get_absolute_url(),\n }\n data['event'] = dict(event.data or {})\n data['event']['tags'] = event.get_tags()\n data['event']['event_id'] = event.event_id\n data['event']['id'] = event.id\n return data\n\n def get_webhook_urls(self, project):\n urls = self.get_option('urls', project)\n if not urls:\n return ()\n return filter(bool, urls.strip().splitlines())\n\n def send_webhook(self, url, payload):\n return safe_urlopen(\n url=url,\n json=payload,\n timeout=self.timeout,\n verify_ssl=False,\n )\n\n def notify_users(self, group, event, fail_silently=False):\n payload = self.get_group_data(group, event)\n for url in self.get_webhook_urls(group.project):\n safe_execute(self.send_webhook, url, payload, _with_transaction=False)\n", "path": "src/sentry/plugins/sentry_webhooks/plugin.py"}]}
1,350
129
gh_patches_debug_604
rasdani/github-patches
git_diff
pex-tool__pex-1419
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Release 2.1.46 On the docket: + [x] Fix Pip proprietary URL env marker handling. #1417 + [x] Un-reify installed wheel script shebangs. #1410 + [x] Support deterministic repository extract tool. #1411 + [x] support setuptools scripts #1379 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pex/version.py` Content: ``` 1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). 2 # Licensed under the Apache License, Version 2.0 (see LICENSE). 3 4 __version__ = "2.1.45" 5 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pex/version.py b/pex/version.py --- a/pex/version.py +++ b/pex/version.py @@ -1,4 +1,4 @@ # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). -__version__ = "2.1.45" +__version__ = "2.1.46"
{"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.45\"\n+__version__ = \"2.1.46\"\n", "issue": "Release 2.1.46\nOn the docket:\r\n+ [x] Fix Pip proprietary URL env marker handling. #1417 \r\n+ [x] Un-reify installed wheel script shebangs. #1410\r\n+ [x] Support deterministic repository extract tool. #1411\r\n+ [x] support setuptools scripts #1379\r\n\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.45\"\n", "path": "pex/version.py"}], "after_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.46\"\n", "path": "pex/version.py"}]}
389
96
gh_patches_debug_17600
rasdani/github-patches
git_diff
akvo__akvo-rsr-3513
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Creating new organisations from the project editor fails --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `akvo/rest/views/organisation.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 # Akvo RSR is covered by the GNU Affero General Public License. 4 # See more details in the license.txt file located at the root folder of the Akvo RSR module. 5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >. 6 7 from django.conf import settings 8 from django.db.models import Q 9 from django.utils import six 10 from rest_framework.decorators import api_view 11 from rest_framework.exceptions import ParseError 12 from rest_framework.parsers import JSONParser 13 from rest_framework.response import Response 14 from rest_framework_xml.parsers import XMLParser 15 from rest_framework_xml.compat import etree 16 17 from akvo.rest.views.utils import int_or_none, get_qs_elements_for_page 18 from akvo.rsr.filters import location_choices, get_m49_filter 19 from akvo.rsr.models import Project, Organisation, Country 20 from akvo.rsr.views.utils import apply_keywords, org_projects 21 from ..serializers import OrganisationSerializer, OrganisationDirectorySerializer 22 from ..viewsets import BaseRSRViewSet 23 24 25 class AkvoOrganisationParser(XMLParser): 26 def parse(self, stream, media_type=None, parser_context=None): 27 assert etree, 'XMLParser requires defusedxml to be installed' 28 29 parser_context = parser_context or {} 30 encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET) 31 parser = etree.DefusedXMLParser(encoding=encoding) 32 try: 33 tree = etree.parse(stream, parser=parser, forbid_dtd=True) 34 except (etree.ParseError, ValueError) as exc: 35 raise ParseError('XML parse error - %s' % six.text_type(exc)) 36 return self.organisation_data_from_etree(tree.getroot()) 37 38 def organisation_data_from_etree(self, tree): 39 def find_text(tree, str): 40 element = tree.find(str) 41 if element is None: 42 return '' 43 return element.text.strip() if element.text else "" 44 45 def location_data(location_tree): 46 if location_tree is None: 47 return [] 48 iso_code = find_text(location_tree, 'iso_code').lower() 49 country, created = Country.objects.get_or_create(**Country.fields_from_iso_code(iso_code)) 50 country = country.id 51 latitude = find_text(location_tree, 'latitude') or 0 52 longitude = find_text(location_tree, 'longitude') or 0 53 primary = True 54 return [dict(latitude=latitude, longitude=longitude, country=country, primary=primary)] 55 56 long_name = find_text(tree, 'name') 57 name = long_name[:25] 58 description = find_text(tree, 'description') 59 url = find_text(tree, 'url') 60 iati_type = find_text(tree, 'iati_organisation_type') 61 new_organisation_type = int(iati_type) if iati_type else 22 62 organisation_type = Organisation.org_type_from_iati_type(new_organisation_type) 63 locations = location_data(tree.find('location/object')) 64 return dict( 65 name=name, long_name=long_name, description=description, url=url, 66 organisation_type=organisation_type, new_organisation_type=new_organisation_type, 67 locations=locations 68 ) 69 70 71 class OrganisationViewSet(BaseRSRViewSet): 72 """ 73 API endpoint that allows organisations to be viewed or edited. 74 """ 75 queryset = Organisation.objects.all() 76 serializer_class = OrganisationSerializer 77 parser_classes = (AkvoOrganisationParser, JSONParser,) 78 79 80 @api_view(['GET']) 81 def organisation_directory(request): 82 """REST view for the update directory.""" 83 84 page = request.rsr_page 85 all_organisations = Organisation.objects.all() if not page else _page_organisations(page) 86 87 # Filter updates based on query parameters 88 filter_, text_filter = _create_filters_query(request) 89 organisations = ( 90 all_organisations.filter(filter_).distinct() if filter_ is not None else all_organisations 91 ) 92 organisations_text_filtered = ( 93 organisations.filter(text_filter) if text_filter is not None else organisations 94 ) 95 if organisations_text_filtered.exists(): 96 organisations = organisations_text_filtered 97 98 # Get the relevant data for typeaheads based on filtered organisations (minus 99 # text filtering, if no organisations were found) 100 locations = [ 101 {'id': choice[0], 'name': choice[1]} 102 for choice in location_choices(organisations) 103 ] 104 105 display_organisations = get_qs_elements_for_page(organisations_text_filtered, request) 106 107 # Get related objects of page at once 108 response = { 109 'project_count': organisations_text_filtered.count(), 110 'projects': OrganisationDirectorySerializer(display_organisations, many=True).data, 111 'location': locations, 112 'page_size_default': settings.PROJECT_DIRECTORY_PAGE_SIZES[0], 113 } 114 return Response(response) 115 116 117 def _public_projects(): 118 """Return all public projects.""" 119 return Project.objects.public().published().select_related('partners') 120 121 122 def _page_organisations(page): 123 """Dig out the list or organisations to use.""" 124 projects = org_projects(page.organisation) if page.partner_projects else _public_projects() 125 keyword_projects = apply_keywords(page, projects) 126 return keyword_projects.all_partners() 127 128 129 def _create_filters_query(request): 130 """Returns a Q object expression based on query parameters.""" 131 location_param = int_or_none(request.GET.get('location')) 132 title_or_subtitle_param = request.GET.get('title_or_subtitle') 133 134 location_filter = ( 135 get_m49_filter(location_param, use_recipient_country=False) if location_param else None 136 ) 137 title_filter = ( 138 Q(name__icontains=title_or_subtitle_param) | 139 Q(long_name__icontains=title_or_subtitle_param) 140 ) if title_or_subtitle_param else None 141 all_filters = [ 142 location_filter, 143 ] 144 filters = filter(None, all_filters) 145 return reduce(lambda x, y: x & y, filters) if filters else None, title_filter 146 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/akvo/rest/views/organisation.py b/akvo/rest/views/organisation.py --- a/akvo/rest/views/organisation.py +++ b/akvo/rest/views/organisation.py @@ -9,8 +9,8 @@ from django.utils import six from rest_framework.decorators import api_view from rest_framework.exceptions import ParseError -from rest_framework.parsers import JSONParser from rest_framework.response import Response +from rest_framework.settings import api_settings from rest_framework_xml.parsers import XMLParser from rest_framework_xml.compat import etree @@ -74,7 +74,7 @@ """ queryset = Organisation.objects.all() serializer_class = OrganisationSerializer - parser_classes = (AkvoOrganisationParser, JSONParser,) + parser_classes = [AkvoOrganisationParser] + api_settings.DEFAULT_PARSER_CLASSES @api_view(['GET'])
{"golden_diff": "diff --git a/akvo/rest/views/organisation.py b/akvo/rest/views/organisation.py\n--- a/akvo/rest/views/organisation.py\n+++ b/akvo/rest/views/organisation.py\n@@ -9,8 +9,8 @@\n from django.utils import six\n from rest_framework.decorators import api_view\n from rest_framework.exceptions import ParseError\n-from rest_framework.parsers import JSONParser\n from rest_framework.response import Response\n+from rest_framework.settings import api_settings\n from rest_framework_xml.parsers import XMLParser\n from rest_framework_xml.compat import etree\n \n@@ -74,7 +74,7 @@\n \"\"\"\n queryset = Organisation.objects.all()\n serializer_class = OrganisationSerializer\n- parser_classes = (AkvoOrganisationParser, JSONParser,)\n+ parser_classes = [AkvoOrganisationParser] + api_settings.DEFAULT_PARSER_CLASSES\n \n \n @api_view(['GET'])\n", "issue": "Creating new organisations from the project editor fails\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom django.conf import settings\nfrom django.db.models import Q\nfrom django.utils import six\nfrom rest_framework.decorators import api_view\nfrom rest_framework.exceptions import ParseError\nfrom rest_framework.parsers import JSONParser\nfrom rest_framework.response import Response\nfrom rest_framework_xml.parsers import XMLParser\nfrom rest_framework_xml.compat import etree\n\nfrom akvo.rest.views.utils import int_or_none, get_qs_elements_for_page\nfrom akvo.rsr.filters import location_choices, get_m49_filter\nfrom akvo.rsr.models import Project, Organisation, Country\nfrom akvo.rsr.views.utils import apply_keywords, org_projects\nfrom ..serializers import OrganisationSerializer, OrganisationDirectorySerializer\nfrom ..viewsets import BaseRSRViewSet\n\n\nclass AkvoOrganisationParser(XMLParser):\n def parse(self, stream, media_type=None, parser_context=None):\n assert etree, 'XMLParser requires defusedxml to be installed'\n\n parser_context = parser_context or {}\n encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)\n parser = etree.DefusedXMLParser(encoding=encoding)\n try:\n tree = etree.parse(stream, parser=parser, forbid_dtd=True)\n except (etree.ParseError, ValueError) as exc:\n raise ParseError('XML parse error - %s' % six.text_type(exc))\n return self.organisation_data_from_etree(tree.getroot())\n\n def organisation_data_from_etree(self, tree):\n def find_text(tree, str):\n element = tree.find(str)\n if element is None:\n return ''\n return element.text.strip() if element.text else \"\"\n\n def location_data(location_tree):\n if location_tree is None:\n return []\n iso_code = find_text(location_tree, 'iso_code').lower()\n country, created = Country.objects.get_or_create(**Country.fields_from_iso_code(iso_code))\n country = country.id\n latitude = find_text(location_tree, 'latitude') or 0\n longitude = find_text(location_tree, 'longitude') or 0\n primary = True\n return [dict(latitude=latitude, longitude=longitude, country=country, primary=primary)]\n\n long_name = find_text(tree, 'name')\n name = long_name[:25]\n description = find_text(tree, 'description')\n url = find_text(tree, 'url')\n iati_type = find_text(tree, 'iati_organisation_type')\n new_organisation_type = int(iati_type) if iati_type else 22\n organisation_type = Organisation.org_type_from_iati_type(new_organisation_type)\n locations = location_data(tree.find('location/object'))\n return dict(\n name=name, long_name=long_name, description=description, url=url,\n organisation_type=organisation_type, new_organisation_type=new_organisation_type,\n locations=locations\n )\n\n\nclass OrganisationViewSet(BaseRSRViewSet):\n \"\"\"\n API endpoint that allows organisations to be viewed or edited.\n \"\"\"\n queryset = Organisation.objects.all()\n serializer_class = OrganisationSerializer\n parser_classes = (AkvoOrganisationParser, JSONParser,)\n\n\n@api_view(['GET'])\ndef organisation_directory(request):\n \"\"\"REST view for the update directory.\"\"\"\n\n page = request.rsr_page\n all_organisations = Organisation.objects.all() if not page else _page_organisations(page)\n\n # Filter updates based on query parameters\n filter_, text_filter = _create_filters_query(request)\n organisations = (\n all_organisations.filter(filter_).distinct() if filter_ is not None else all_organisations\n )\n organisations_text_filtered = (\n organisations.filter(text_filter) if text_filter is not None else organisations\n )\n if organisations_text_filtered.exists():\n organisations = organisations_text_filtered\n\n # Get the relevant data for typeaheads based on filtered organisations (minus\n # text filtering, if no organisations were found)\n locations = [\n {'id': choice[0], 'name': choice[1]}\n for choice in location_choices(organisations)\n ]\n\n display_organisations = get_qs_elements_for_page(organisations_text_filtered, request)\n\n # Get related objects of page at once\n response = {\n 'project_count': organisations_text_filtered.count(),\n 'projects': OrganisationDirectorySerializer(display_organisations, many=True).data,\n 'location': locations,\n 'page_size_default': settings.PROJECT_DIRECTORY_PAGE_SIZES[0],\n }\n return Response(response)\n\n\ndef _public_projects():\n \"\"\"Return all public projects.\"\"\"\n return Project.objects.public().published().select_related('partners')\n\n\ndef _page_organisations(page):\n \"\"\"Dig out the list or organisations to use.\"\"\"\n projects = org_projects(page.organisation) if page.partner_projects else _public_projects()\n keyword_projects = apply_keywords(page, projects)\n return keyword_projects.all_partners()\n\n\ndef _create_filters_query(request):\n \"\"\"Returns a Q object expression based on query parameters.\"\"\"\n location_param = int_or_none(request.GET.get('location'))\n title_or_subtitle_param = request.GET.get('title_or_subtitle')\n\n location_filter = (\n get_m49_filter(location_param, use_recipient_country=False) if location_param else None\n )\n title_filter = (\n Q(name__icontains=title_or_subtitle_param) |\n Q(long_name__icontains=title_or_subtitle_param)\n ) if title_or_subtitle_param else None\n all_filters = [\n location_filter,\n ]\n filters = filter(None, all_filters)\n return reduce(lambda x, y: x & y, filters) if filters else None, title_filter\n", "path": "akvo/rest/views/organisation.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom django.conf import settings\nfrom django.db.models import Q\nfrom django.utils import six\nfrom rest_framework.decorators import api_view\nfrom rest_framework.exceptions import ParseError\nfrom rest_framework.response import Response\nfrom rest_framework.settings import api_settings\nfrom rest_framework_xml.parsers import XMLParser\nfrom rest_framework_xml.compat import etree\n\nfrom akvo.rest.views.utils import int_or_none, get_qs_elements_for_page\nfrom akvo.rsr.filters import location_choices, get_m49_filter\nfrom akvo.rsr.models import Project, Organisation, Country\nfrom akvo.rsr.views.utils import apply_keywords, org_projects\nfrom ..serializers import OrganisationSerializer, OrganisationDirectorySerializer\nfrom ..viewsets import BaseRSRViewSet\n\n\nclass AkvoOrganisationParser(XMLParser):\n def parse(self, stream, media_type=None, parser_context=None):\n assert etree, 'XMLParser requires defusedxml to be installed'\n\n parser_context = parser_context or {}\n encoding = parser_context.get('encoding', settings.DEFAULT_CHARSET)\n parser = etree.DefusedXMLParser(encoding=encoding)\n try:\n tree = etree.parse(stream, parser=parser, forbid_dtd=True)\n except (etree.ParseError, ValueError) as exc:\n raise ParseError('XML parse error - %s' % six.text_type(exc))\n return self.organisation_data_from_etree(tree.getroot())\n\n def organisation_data_from_etree(self, tree):\n def find_text(tree, str):\n element = tree.find(str)\n if element is None:\n return ''\n return element.text.strip() if element.text else \"\"\n\n def location_data(location_tree):\n if location_tree is None:\n return []\n iso_code = find_text(location_tree, 'iso_code').lower()\n country, created = Country.objects.get_or_create(**Country.fields_from_iso_code(iso_code))\n country = country.id\n latitude = find_text(location_tree, 'latitude') or 0\n longitude = find_text(location_tree, 'longitude') or 0\n primary = True\n return [dict(latitude=latitude, longitude=longitude, country=country, primary=primary)]\n\n long_name = find_text(tree, 'name')\n name = long_name[:25]\n description = find_text(tree, 'description')\n url = find_text(tree, 'url')\n iati_type = find_text(tree, 'iati_organisation_type')\n new_organisation_type = int(iati_type) if iati_type else 22\n organisation_type = Organisation.org_type_from_iati_type(new_organisation_type)\n locations = location_data(tree.find('location/object'))\n return dict(\n name=name, long_name=long_name, description=description, url=url,\n organisation_type=organisation_type, new_organisation_type=new_organisation_type,\n locations=locations\n )\n\n\nclass OrganisationViewSet(BaseRSRViewSet):\n \"\"\"\n API endpoint that allows organisations to be viewed or edited.\n \"\"\"\n queryset = Organisation.objects.all()\n serializer_class = OrganisationSerializer\n parser_classes = [AkvoOrganisationParser] + api_settings.DEFAULT_PARSER_CLASSES\n\n\n@api_view(['GET'])\ndef organisation_directory(request):\n \"\"\"REST view for the update directory.\"\"\"\n\n page = request.rsr_page\n all_organisations = Organisation.objects.all() if not page else _page_organisations(page)\n\n # Filter updates based on query parameters\n filter_, text_filter = _create_filters_query(request)\n organisations = (\n all_organisations.filter(filter_).distinct() if filter_ is not None else all_organisations\n )\n organisations_text_filtered = (\n organisations.filter(text_filter) if text_filter is not None else organisations\n )\n if organisations_text_filtered.exists():\n organisations = organisations_text_filtered\n\n # Get the relevant data for typeaheads based on filtered organisations (minus\n # text filtering, if no organisations were found)\n locations = [\n {'id': choice[0], 'name': choice[1]}\n for choice in location_choices(organisations)\n ]\n\n display_organisations = get_qs_elements_for_page(organisations_text_filtered, request)\n\n # Get related objects of page at once\n response = {\n 'project_count': organisations_text_filtered.count(),\n 'projects': OrganisationDirectorySerializer(display_organisations, many=True).data,\n 'location': locations,\n 'page_size_default': settings.PROJECT_DIRECTORY_PAGE_SIZES[0],\n }\n return Response(response)\n\n\ndef _public_projects():\n \"\"\"Return all public projects.\"\"\"\n return Project.objects.public().published().select_related('partners')\n\n\ndef _page_organisations(page):\n \"\"\"Dig out the list or organisations to use.\"\"\"\n projects = org_projects(page.organisation) if page.partner_projects else _public_projects()\n keyword_projects = apply_keywords(page, projects)\n return keyword_projects.all_partners()\n\n\ndef _create_filters_query(request):\n \"\"\"Returns a Q object expression based on query parameters.\"\"\"\n location_param = int_or_none(request.GET.get('location'))\n title_or_subtitle_param = request.GET.get('title_or_subtitle')\n\n location_filter = (\n get_m49_filter(location_param, use_recipient_country=False) if location_param else None\n )\n title_filter = (\n Q(name__icontains=title_or_subtitle_param) |\n Q(long_name__icontains=title_or_subtitle_param)\n ) if title_or_subtitle_param else None\n all_filters = [\n location_filter,\n ]\n filters = filter(None, all_filters)\n return reduce(lambda x, y: x & y, filters) if filters else None, title_filter\n", "path": "akvo/rest/views/organisation.py"}]}
1,851
186
gh_patches_debug_22416
rasdani/github-patches
git_diff
privacyidea__privacyidea-2563
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Container audit fails in 3.5 The container audit will fail in version 3.5 due to a missing parameter in the constructor. https://community.privacyidea.org/t/logging-error-after-update-to-3-5/1811/2 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `privacyidea/lib/auditmodules/containeraudit.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # 3 # 2019-11-07 Cornelius Kölbel <[email protected]> 4 # initial code for writing audit information to a file 5 # 6 # This code is free software; you can redistribute it and/or 7 # modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE 8 # License as published by the Free Software Foundation; either 9 # version 3 of the License, or any later version. 10 # 11 # This code is distributed in the hope that it will be useful, 12 # but WITHOUT ANY WARRANTY; without even the implied warranty of 13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 # GNU AFFERO GENERAL PUBLIC LICENSE for more details. 15 # 16 # You should have received a copy of the GNU Affero General Public 17 # License along with this program. If not, see <http://www.gnu.org/licenses/>. 18 # 19 # 20 __doc__ = """The Container Audit Module allows to write audit information to several different 21 audit modules at the same time. E.g. it can write audit information to the SQL Audit Module and to the 22 Logger Audit Module. This way audit information can be saved in the SQL database and at the same time 23 be passed to a file or external services via the Python logging facility. 24 25 The Container Audit Module is configured like this: 26 27 PI_AUDIT_MODULE = 'privacyidea.lib.auditmodules.containeraudit' 28 PI_AUDIT_CONTAINER_WRITE = ['privacyidea.lib.auditmodules.sqlaudit','privacyidea.lib.auditmodules.loggeraudit'] 29 PI_AUDIT_CONTAINER_READ = 'privacyidea.lib.auditmodules.sqlaudit' 30 31 You also have to provide the configuration parameters for the referenced audit modules. 32 33 """ 34 35 import logging 36 from privacyidea.lib.auditmodules.base import (Audit as AuditBase) 37 from privacyidea.lib.utils import get_module_class 38 39 40 log = logging.getLogger(__name__) 41 42 43 class Audit(AuditBase): 44 """ 45 This is the ContainerAudit module, which writes the audit entries 46 to a list of audit modules. 47 """ 48 49 def __init__(self, config=None): 50 super(Audit, self).__init__(config) 51 self.name = "containeraudit" 52 write_conf = self.config.get('PI_AUDIT_CONTAINER_WRITE') 53 read_conf = self.config.get('PI_AUDIT_CONTAINER_READ') 54 # Initialize all modules 55 self.write_modules = [get_module_class(audit_module, "Audit", "log")(config) for audit_module in write_conf] 56 self.read_module = get_module_class(read_conf, "Audit", "log")(config) 57 if not self.read_module.is_readable: 58 log.warning(u"The specified PI_AUDIT_CONTAINER_READ {0!s} is not readable.".format(self.read_module)) 59 60 @property 61 def has_data(self): 62 return any([x.has_data for x in self.write_modules]) 63 64 def log(self, param): 65 """ 66 Call the log method for all writeable modules 67 """ 68 for module in self.write_modules: 69 module.log(param) 70 71 def add_to_log(self, param, add_with_comma=False): 72 """ 73 Call the add_to_log method for all writeable modules 74 """ 75 for module in self.write_modules: 76 module.add_to_log(param, add_with_comma) 77 78 def add_policy(self, policyname): 79 """ 80 Call the add_policy method for all writeable modules 81 """ 82 for module in self.write_modules: 83 module.add_policy(policyname) 84 85 def search(self, search_dict, page_size=15, page=1, sortorder="asc", 86 timelimit=None): 87 """ 88 Call the search method for the one readable module 89 """ 90 return self.read_module.search(search_dict, page_size=page_size, page=page, 91 sortorder=sortorder, timelimit=timelimit) 92 93 def get_count(self, search_dict, timedelta=None, success=None): 94 """ 95 Call the count method for the one readable module 96 """ 97 return self.read_module.get_count(search_dict, timedelta=timedelta, success=success) 98 99 def csv_generator(self, param=None, user=None, timelimit=None): 100 """ 101 Call the csv_generator method for the one readable module 102 """ 103 return self.read_module.csv_generator(param=param, user=user, 104 timelimit=timelimit) 105 106 def get_total(self, param, AND=True, display_error=True, timelimit=None): 107 """ 108 Call the total method for the one readable module 109 """ 110 return self.read_module.get_total(param, AND=AND, display_error=display_error, timelimit=timelimit) 111 112 def finalize_log(self): 113 """ 114 Call the finalize method of all writeable audit modules 115 """ 116 for module in self.write_modules: 117 module.finalize_log() 118 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/privacyidea/lib/auditmodules/containeraudit.py b/privacyidea/lib/auditmodules/containeraudit.py --- a/privacyidea/lib/auditmodules/containeraudit.py +++ b/privacyidea/lib/auditmodules/containeraudit.py @@ -46,14 +46,15 @@ to a list of audit modules. """ - def __init__(self, config=None): - super(Audit, self).__init__(config) + def __init__(self, config=None, startdate=None): + super(Audit, self).__init__(config, startdate) self.name = "containeraudit" write_conf = self.config.get('PI_AUDIT_CONTAINER_WRITE') read_conf = self.config.get('PI_AUDIT_CONTAINER_READ') # Initialize all modules - self.write_modules = [get_module_class(audit_module, "Audit", "log")(config) for audit_module in write_conf] - self.read_module = get_module_class(read_conf, "Audit", "log")(config) + self.write_modules = [get_module_class(audit_module, "Audit", "log")(config, startdate) + for audit_module in write_conf] + self.read_module = get_module_class(read_conf, "Audit", "log")(config, startdate) if not self.read_module.is_readable: log.warning(u"The specified PI_AUDIT_CONTAINER_READ {0!s} is not readable.".format(self.read_module))
{"golden_diff": "diff --git a/privacyidea/lib/auditmodules/containeraudit.py b/privacyidea/lib/auditmodules/containeraudit.py\n--- a/privacyidea/lib/auditmodules/containeraudit.py\n+++ b/privacyidea/lib/auditmodules/containeraudit.py\n@@ -46,14 +46,15 @@\n to a list of audit modules.\n \"\"\"\n \n- def __init__(self, config=None):\n- super(Audit, self).__init__(config)\n+ def __init__(self, config=None, startdate=None):\n+ super(Audit, self).__init__(config, startdate)\n self.name = \"containeraudit\"\n write_conf = self.config.get('PI_AUDIT_CONTAINER_WRITE')\n read_conf = self.config.get('PI_AUDIT_CONTAINER_READ')\n # Initialize all modules\n- self.write_modules = [get_module_class(audit_module, \"Audit\", \"log\")(config) for audit_module in write_conf]\n- self.read_module = get_module_class(read_conf, \"Audit\", \"log\")(config)\n+ self.write_modules = [get_module_class(audit_module, \"Audit\", \"log\")(config, startdate)\n+ for audit_module in write_conf]\n+ self.read_module = get_module_class(read_conf, \"Audit\", \"log\")(config, startdate)\n if not self.read_module.is_readable:\n log.warning(u\"The specified PI_AUDIT_CONTAINER_READ {0!s} is not readable.\".format(self.read_module))\n", "issue": "Container audit fails in 3.5\nThe container audit will fail in version 3.5 due to a missing parameter in the constructor.\r\n\r\nhttps://community.privacyidea.org/t/logging-error-after-update-to-3-5/1811/2\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# 2019-11-07 Cornelius K\u00f6lbel <[email protected]>\n# initial code for writing audit information to a file\n#\n# This code is free software; you can redistribute it and/or\n# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE\n# License as published by the Free Software Foundation; either\n# version 3 of the License, or any later version.\n#\n# This code is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU AFFERO GENERAL PUBLIC LICENSE for more details.\n#\n# You should have received a copy of the GNU Affero General Public\n# License along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n#\n__doc__ = \"\"\"The Container Audit Module allows to write audit information to several different\naudit modules at the same time. E.g. it can write audit information to the SQL Audit Module and to the \nLogger Audit Module. This way audit information can be saved in the SQL database and at the same time\nbe passed to a file or external services via the Python logging facility. \n\nThe Container Audit Module is configured like this:\n\n PI_AUDIT_MODULE = 'privacyidea.lib.auditmodules.containeraudit'\n PI_AUDIT_CONTAINER_WRITE = ['privacyidea.lib.auditmodules.sqlaudit','privacyidea.lib.auditmodules.loggeraudit']\n PI_AUDIT_CONTAINER_READ = 'privacyidea.lib.auditmodules.sqlaudit'\n\nYou also have to provide the configuration parameters for the referenced audit modules.\n\n\"\"\"\n\nimport logging\nfrom privacyidea.lib.auditmodules.base import (Audit as AuditBase)\nfrom privacyidea.lib.utils import get_module_class\n\n\nlog = logging.getLogger(__name__)\n\n\nclass Audit(AuditBase):\n \"\"\"\n This is the ContainerAudit module, which writes the audit entries\n to a list of audit modules.\n \"\"\"\n\n def __init__(self, config=None):\n super(Audit, self).__init__(config)\n self.name = \"containeraudit\"\n write_conf = self.config.get('PI_AUDIT_CONTAINER_WRITE')\n read_conf = self.config.get('PI_AUDIT_CONTAINER_READ')\n # Initialize all modules\n self.write_modules = [get_module_class(audit_module, \"Audit\", \"log\")(config) for audit_module in write_conf]\n self.read_module = get_module_class(read_conf, \"Audit\", \"log\")(config)\n if not self.read_module.is_readable:\n log.warning(u\"The specified PI_AUDIT_CONTAINER_READ {0!s} is not readable.\".format(self.read_module))\n\n @property\n def has_data(self):\n return any([x.has_data for x in self.write_modules])\n\n def log(self, param):\n \"\"\"\n Call the log method for all writeable modules\n \"\"\"\n for module in self.write_modules:\n module.log(param)\n\n def add_to_log(self, param, add_with_comma=False):\n \"\"\"\n Call the add_to_log method for all writeable modules\n \"\"\"\n for module in self.write_modules:\n module.add_to_log(param, add_with_comma)\n\n def add_policy(self, policyname):\n \"\"\"\n Call the add_policy method for all writeable modules\n \"\"\"\n for module in self.write_modules:\n module.add_policy(policyname)\n\n def search(self, search_dict, page_size=15, page=1, sortorder=\"asc\",\n timelimit=None):\n \"\"\"\n Call the search method for the one readable module\n \"\"\"\n return self.read_module.search(search_dict, page_size=page_size, page=page,\n sortorder=sortorder, timelimit=timelimit)\n\n def get_count(self, search_dict, timedelta=None, success=None):\n \"\"\"\n Call the count method for the one readable module\n \"\"\"\n return self.read_module.get_count(search_dict, timedelta=timedelta, success=success)\n\n def csv_generator(self, param=None, user=None, timelimit=None):\n \"\"\"\n Call the csv_generator method for the one readable module\n \"\"\"\n return self.read_module.csv_generator(param=param, user=user,\n timelimit=timelimit)\n\n def get_total(self, param, AND=True, display_error=True, timelimit=None):\n \"\"\"\n Call the total method for the one readable module\n \"\"\"\n return self.read_module.get_total(param, AND=AND, display_error=display_error, timelimit=timelimit)\n\n def finalize_log(self):\n \"\"\"\n Call the finalize method of all writeable audit modules\n \"\"\"\n for module in self.write_modules:\n module.finalize_log()\n", "path": "privacyidea/lib/auditmodules/containeraudit.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# 2019-11-07 Cornelius K\u00f6lbel <[email protected]>\n# initial code for writing audit information to a file\n#\n# This code is free software; you can redistribute it and/or\n# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE\n# License as published by the Free Software Foundation; either\n# version 3 of the License, or any later version.\n#\n# This code is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU AFFERO GENERAL PUBLIC LICENSE for more details.\n#\n# You should have received a copy of the GNU Affero General Public\n# License along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n#\n__doc__ = \"\"\"The Container Audit Module allows to write audit information to several different\naudit modules at the same time. E.g. it can write audit information to the SQL Audit Module and to the \nLogger Audit Module. This way audit information can be saved in the SQL database and at the same time\nbe passed to a file or external services via the Python logging facility. \n\nThe Container Audit Module is configured like this:\n\n PI_AUDIT_MODULE = 'privacyidea.lib.auditmodules.containeraudit'\n PI_AUDIT_CONTAINER_WRITE = ['privacyidea.lib.auditmodules.sqlaudit','privacyidea.lib.auditmodules.loggeraudit']\n PI_AUDIT_CONTAINER_READ = 'privacyidea.lib.auditmodules.sqlaudit'\n\nYou also have to provide the configuration parameters for the referenced audit modules.\n\n\"\"\"\n\nimport logging\nfrom privacyidea.lib.auditmodules.base import (Audit as AuditBase)\nfrom privacyidea.lib.utils import get_module_class\n\n\nlog = logging.getLogger(__name__)\n\n\nclass Audit(AuditBase):\n \"\"\"\n This is the ContainerAudit module, which writes the audit entries\n to a list of audit modules.\n \"\"\"\n\n def __init__(self, config=None, startdate=None):\n super(Audit, self).__init__(config, startdate)\n self.name = \"containeraudit\"\n write_conf = self.config.get('PI_AUDIT_CONTAINER_WRITE')\n read_conf = self.config.get('PI_AUDIT_CONTAINER_READ')\n # Initialize all modules\n self.write_modules = [get_module_class(audit_module, \"Audit\", \"log\")(config, startdate)\n for audit_module in write_conf]\n self.read_module = get_module_class(read_conf, \"Audit\", \"log\")(config, startdate)\n if not self.read_module.is_readable:\n log.warning(u\"The specified PI_AUDIT_CONTAINER_READ {0!s} is not readable.\".format(self.read_module))\n\n @property\n def has_data(self):\n return any([x.has_data for x in self.write_modules])\n\n def log(self, param):\n \"\"\"\n Call the log method for all writeable modules\n \"\"\"\n for module in self.write_modules:\n module.log(param)\n\n def add_to_log(self, param, add_with_comma=False):\n \"\"\"\n Call the add_to_log method for all writeable modules\n \"\"\"\n for module in self.write_modules:\n module.add_to_log(param, add_with_comma)\n\n def add_policy(self, policyname):\n \"\"\"\n Call the add_policy method for all writeable modules\n \"\"\"\n for module in self.write_modules:\n module.add_policy(policyname)\n\n def search(self, search_dict, page_size=15, page=1, sortorder=\"asc\",\n timelimit=None):\n \"\"\"\n Call the search method for the one readable module\n \"\"\"\n return self.read_module.search(search_dict, page_size=page_size, page=page,\n sortorder=sortorder, timelimit=timelimit)\n\n def get_count(self, search_dict, timedelta=None, success=None):\n \"\"\"\n Call the count method for the one readable module\n \"\"\"\n return self.read_module.get_count(search_dict, timedelta=timedelta, success=success)\n\n def csv_generator(self, param=None, user=None, timelimit=None):\n \"\"\"\n Call the csv_generator method for the one readable module\n \"\"\"\n return self.read_module.csv_generator(param=param, user=user,\n timelimit=timelimit)\n\n def get_total(self, param, AND=True, display_error=True, timelimit=None):\n \"\"\"\n Call the total method for the one readable module\n \"\"\"\n return self.read_module.get_total(param, AND=AND, display_error=display_error, timelimit=timelimit)\n\n def finalize_log(self):\n \"\"\"\n Call the finalize method of all writeable audit modules\n \"\"\"\n for module in self.write_modules:\n module.finalize_log()\n", "path": "privacyidea/lib/auditmodules/containeraudit.py"}]}
1,583
312
gh_patches_debug_47859
rasdani/github-patches
git_diff
saleor__saleor-903
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Server Error (500) when adding attribute Hi, First of all thanks for this excellent software, makes my life easier. I deployed it on heroku using the heroku elements (https://elements.heroku.com/buttons/mirumee/saleor). Then I tried to add an attribute but that resulted in Server Error (500) page. Is this expected behavior? Any settings that have to be changed? If yes then any way to do that on heroku? Thanks! --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `saleor/product/models/utils.py` Content: ``` 1 from django.utils.encoding import smart_text 2 3 4 def get_attributes_display_map(obj, attributes): 5 display_map = {} 6 for attribute in attributes: 7 value = obj.attributes.get(smart_text(attribute.pk)) 8 if value: 9 choices = {smart_text(a.pk): a for a in attribute.values.all()} 10 choice_obj = choices.get(value) 11 if choice_obj: 12 display_map[attribute.pk] = choice_obj 13 else: 14 display_map[attribute.pk] = value_pk 15 return display_map 16 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/saleor/product/models/utils.py b/saleor/product/models/utils.py --- a/saleor/product/models/utils.py +++ b/saleor/product/models/utils.py @@ -11,5 +11,5 @@ if choice_obj: display_map[attribute.pk] = choice_obj else: - display_map[attribute.pk] = value_pk + display_map[attribute.pk] = value return display_map
{"golden_diff": "diff --git a/saleor/product/models/utils.py b/saleor/product/models/utils.py\n--- a/saleor/product/models/utils.py\n+++ b/saleor/product/models/utils.py\n@@ -11,5 +11,5 @@\n if choice_obj:\n display_map[attribute.pk] = choice_obj\n else:\n- display_map[attribute.pk] = value_pk\n+ display_map[attribute.pk] = value\n return display_map\n", "issue": "Server Error (500) when adding attribute\nHi,\r\n\r\nFirst of all thanks for this excellent software, makes my life easier.\r\n\r\nI deployed it on heroku using the heroku elements (https://elements.heroku.com/buttons/mirumee/saleor).\r\n\r\nThen I tried to add an attribute but that resulted in Server Error (500) page. Is this expected behavior? Any settings that have to be changed? If yes then any way to do that on heroku?\r\n\r\nThanks!\r\n\n", "before_files": [{"content": "from django.utils.encoding import smart_text\n\n\ndef get_attributes_display_map(obj, attributes):\n display_map = {}\n for attribute in attributes:\n value = obj.attributes.get(smart_text(attribute.pk))\n if value:\n choices = {smart_text(a.pk): a for a in attribute.values.all()}\n choice_obj = choices.get(value)\n if choice_obj:\n display_map[attribute.pk] = choice_obj\n else:\n display_map[attribute.pk] = value_pk\n return display_map\n", "path": "saleor/product/models/utils.py"}], "after_files": [{"content": "from django.utils.encoding import smart_text\n\n\ndef get_attributes_display_map(obj, attributes):\n display_map = {}\n for attribute in attributes:\n value = obj.attributes.get(smart_text(attribute.pk))\n if value:\n choices = {smart_text(a.pk): a for a in attribute.values.all()}\n choice_obj = choices.get(value)\n if choice_obj:\n display_map[attribute.pk] = choice_obj\n else:\n display_map[attribute.pk] = value\n return display_map\n", "path": "saleor/product/models/utils.py"}]}
493
99
gh_patches_debug_48735
rasdani/github-patches
git_diff
microsoft__torchgeo-309
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Open in Colab URL broken in 0.1.1 The latest 0.1.1 release broke the "Open in Colab" URL in our tutorials. Still trying to fix this. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `docs/conf.py` Content: ``` 1 # Copyright (c) Microsoft Corporation. All rights reserved. 2 # Licensed under the MIT License. 3 4 # Configuration file for the Sphinx documentation builder. 5 # 6 # This file only contains a selection of the most common options. For a full 7 # list see the documentation: 8 # https://www.sphinx-doc.org/en/master/usage/configuration.html 9 10 # -- Path setup -------------------------------------------------------------- 11 12 import os 13 import sys 14 15 import pytorch_sphinx_theme 16 17 # If extensions (or modules to document with autodoc) are in another directory, 18 # add these directories to sys.path here. If the directory is relative to the 19 # documentation root, use os.path.abspath to make it absolute, like shown here. 20 sys.path.insert(0, os.path.abspath("..")) 21 22 import torchgeo # noqa: E402 23 24 # -- Project information ----------------------------------------------------- 25 26 project = "torchgeo" 27 copyright = "2021, Microsoft Corporation" 28 author = torchgeo.__author__ 29 version = ".".join(torchgeo.__version__.split(".")[:2]) 30 release = torchgeo.__version__ 31 32 33 # -- General configuration --------------------------------------------------- 34 35 # Add any Sphinx extension module names here, as strings. They can be 36 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 37 # ones. 38 extensions = [ 39 "sphinx.ext.autodoc", 40 "sphinx.ext.intersphinx", 41 "sphinx.ext.napoleon", 42 "sphinx.ext.todo", 43 "sphinx.ext.viewcode", 44 "nbsphinx", 45 ] 46 47 # List of patterns, relative to source directory, that match files and 48 # directories to ignore when looking for source files. 49 # This pattern also affects html_static_path and html_extra_path. 50 exclude_patterns = ["_build"] 51 52 # Sphinx 3.0+ required for: 53 # autodoc_typehints = "description" 54 needs_sphinx = "3.0" 55 56 nitpicky = True 57 nitpick_ignore = [ 58 # https://github.com/sphinx-doc/sphinx/issues/8127 59 ("py:class", ".."), 60 # TODO: can't figure out why this isn't found 61 ("py:class", "LightningDataModule"), 62 # Undocumented class 63 ("py:class", "torchvision.models.resnet.ResNet"), 64 ] 65 66 67 # -- Options for HTML output ------------------------------------------------- 68 69 # The theme to use for HTML and HTML Help pages. See the documentation for 70 # a list of builtin themes. 71 html_theme = "pytorch_sphinx_theme" 72 html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()] 73 74 # Theme options are theme-specific and customize the look and feel of a theme 75 # further. For a list of options available for each theme, see the 76 # documentation. 77 html_theme_options = { 78 "collapse_navigation": False, 79 "display_version": True, 80 "logo_only": True, 81 "pytorch_project": "docs", 82 "navigation_with_keys": True, 83 "analytics_id": "UA-209075005-1", 84 } 85 86 html_favicon = os.path.join("..", "logo", "favicon.ico") 87 88 html_static_path = ["_static"] 89 html_css_files = ["workaround.css"] 90 91 # -- Extension configuration ------------------------------------------------- 92 93 # sphinx.ext.autodoc 94 autodoc_default_options = { 95 "members": True, 96 "special-members": True, 97 "show-inheritance": True, 98 } 99 autodoc_member_order = "bysource" 100 autodoc_typehints = "description" 101 102 # sphinx.ext.intersphinx 103 intersphinx_mapping = { 104 "matplotlib": ("https://matplotlib.org/stable/", None), 105 "python": ("https://docs.python.org/3", None), 106 "pytorch-lightning": ("https://pytorch-lightning.readthedocs.io/en/latest/", None), 107 "rasterio": ("https://rasterio.readthedocs.io/en/latest/", None), 108 "rtree": ("https://rtree.readthedocs.io/en/latest/", None), 109 "torch": ("https://pytorch.org/docs/stable", None), 110 "torchvision": ("https://pytorch.org/vision/stable", None), 111 } 112 113 # nbsphinx 114 nbsphinx_execute = "never" 115 # TODO: branch/tag should change depending on which version of docs you look at 116 # TODO: width option of image directive is broken, see: 117 # https://github.com/pytorch/pytorch_sphinx_theme/issues/140 118 nbsphinx_prolog = """ 119 {% set colab = "https://colab.research.google.com" %} 120 {% set repo = "microsoft/torchgeo" %} 121 {% set branch = "main" %} 122 123 .. image:: {{ colab }}/assets/colab-badge.svg 124 :class: colabbadge 125 :alt: Open in Colab 126 :target: {{ colab }}/github/{{ repo }}/blob/{{ branch }}/docs/{{ env.docname }}.ipynb 127 """ 128 129 # Disables requirejs in nbsphinx to enable compatibility with the pytorch_sphinx_theme 130 # See more information here https://github.com/spatialaudio/nbsphinx/issues/599 131 # NOTE: This will likely break nbsphinx widgets 132 nbsphinx_requirejs_path = "" 133 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/docs/conf.py b/docs/conf.py --- a/docs/conf.py +++ b/docs/conf.py @@ -118,7 +118,11 @@ nbsphinx_prolog = """ {% set colab = "https://colab.research.google.com" %} {% set repo = "microsoft/torchgeo" %} -{% set branch = "main" %} +{% if "dev" in env.config.release %} + {% set branch = "main" %} +{% else %} + {% set branch = "releases/v" ~ env.config.version %} +{% endif %} .. image:: {{ colab }}/assets/colab-badge.svg :class: colabbadge
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -118,7 +118,11 @@\n nbsphinx_prolog = \"\"\"\n {% set colab = \"https://colab.research.google.com\" %}\n {% set repo = \"microsoft/torchgeo\" %}\n-{% set branch = \"main\" %}\n+{% if \"dev\" in env.config.release %}\n+ {% set branch = \"main\" %}\n+{% else %}\n+ {% set branch = \"releases/v\" ~ env.config.version %}\n+{% endif %}\n \n .. image:: {{ colab }}/assets/colab-badge.svg\n :class: colabbadge\n", "issue": "Open in Colab URL broken in 0.1.1\nThe latest 0.1.1 release broke the \"Open in Colab\" URL in our tutorials. Still trying to fix this.\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\nimport os\nimport sys\n\nimport pytorch_sphinx_theme\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath(\"..\"))\n\nimport torchgeo # noqa: E402\n\n# -- Project information -----------------------------------------------------\n\nproject = \"torchgeo\"\ncopyright = \"2021, Microsoft Corporation\"\nauthor = torchgeo.__author__\nversion = \".\".join(torchgeo.__version__.split(\".\")[:2])\nrelease = torchgeo.__version__\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.viewcode\",\n \"nbsphinx\",\n]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\"_build\"]\n\n# Sphinx 3.0+ required for:\n# autodoc_typehints = \"description\"\nneeds_sphinx = \"3.0\"\n\nnitpicky = True\nnitpick_ignore = [\n # https://github.com/sphinx-doc/sphinx/issues/8127\n (\"py:class\", \"..\"),\n # TODO: can't figure out why this isn't found\n (\"py:class\", \"LightningDataModule\"),\n # Undocumented class\n (\"py:class\", \"torchvision.models.resnet.ResNet\"),\n]\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = \"pytorch_sphinx_theme\"\nhtml_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\nhtml_theme_options = {\n \"collapse_navigation\": False,\n \"display_version\": True,\n \"logo_only\": True,\n \"pytorch_project\": \"docs\",\n \"navigation_with_keys\": True,\n \"analytics_id\": \"UA-209075005-1\",\n}\n\nhtml_favicon = os.path.join(\"..\", \"logo\", \"favicon.ico\")\n\nhtml_static_path = [\"_static\"]\nhtml_css_files = [\"workaround.css\"]\n\n# -- Extension configuration -------------------------------------------------\n\n# sphinx.ext.autodoc\nautodoc_default_options = {\n \"members\": True,\n \"special-members\": True,\n \"show-inheritance\": True,\n}\nautodoc_member_order = \"bysource\"\nautodoc_typehints = \"description\"\n\n# sphinx.ext.intersphinx\nintersphinx_mapping = {\n \"matplotlib\": (\"https://matplotlib.org/stable/\", None),\n \"python\": (\"https://docs.python.org/3\", None),\n \"pytorch-lightning\": (\"https://pytorch-lightning.readthedocs.io/en/latest/\", None),\n \"rasterio\": (\"https://rasterio.readthedocs.io/en/latest/\", None),\n \"rtree\": (\"https://rtree.readthedocs.io/en/latest/\", None),\n \"torch\": (\"https://pytorch.org/docs/stable\", None),\n \"torchvision\": (\"https://pytorch.org/vision/stable\", None),\n}\n\n# nbsphinx\nnbsphinx_execute = \"never\"\n# TODO: branch/tag should change depending on which version of docs you look at\n# TODO: width option of image directive is broken, see:\n# https://github.com/pytorch/pytorch_sphinx_theme/issues/140\nnbsphinx_prolog = \"\"\"\n{% set colab = \"https://colab.research.google.com\" %}\n{% set repo = \"microsoft/torchgeo\" %}\n{% set branch = \"main\" %}\n\n.. image:: {{ colab }}/assets/colab-badge.svg\n :class: colabbadge\n :alt: Open in Colab\n :target: {{ colab }}/github/{{ repo }}/blob/{{ branch }}/docs/{{ env.docname }}.ipynb\n\"\"\"\n\n# Disables requirejs in nbsphinx to enable compatibility with the pytorch_sphinx_theme\n# See more information here https://github.com/spatialaudio/nbsphinx/issues/599\n# NOTE: This will likely break nbsphinx widgets\nnbsphinx_requirejs_path = \"\"\n", "path": "docs/conf.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\nimport os\nimport sys\n\nimport pytorch_sphinx_theme\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath(\"..\"))\n\nimport torchgeo # noqa: E402\n\n# -- Project information -----------------------------------------------------\n\nproject = \"torchgeo\"\ncopyright = \"2021, Microsoft Corporation\"\nauthor = torchgeo.__author__\nversion = \".\".join(torchgeo.__version__.split(\".\")[:2])\nrelease = torchgeo.__version__\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.intersphinx\",\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.todo\",\n \"sphinx.ext.viewcode\",\n \"nbsphinx\",\n]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\"_build\"]\n\n# Sphinx 3.0+ required for:\n# autodoc_typehints = \"description\"\nneeds_sphinx = \"3.0\"\n\nnitpicky = True\nnitpick_ignore = [\n # https://github.com/sphinx-doc/sphinx/issues/8127\n (\"py:class\", \"..\"),\n # TODO: can't figure out why this isn't found\n (\"py:class\", \"LightningDataModule\"),\n # Undocumented class\n (\"py:class\", \"torchvision.models.resnet.ResNet\"),\n]\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = \"pytorch_sphinx_theme\"\nhtml_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\nhtml_theme_options = {\n \"collapse_navigation\": False,\n \"display_version\": True,\n \"logo_only\": True,\n \"pytorch_project\": \"docs\",\n \"navigation_with_keys\": True,\n \"analytics_id\": \"UA-209075005-1\",\n}\n\nhtml_favicon = os.path.join(\"..\", \"logo\", \"favicon.ico\")\n\nhtml_static_path = [\"_static\"]\nhtml_css_files = [\"workaround.css\"]\n\n# -- Extension configuration -------------------------------------------------\n\n# sphinx.ext.autodoc\nautodoc_default_options = {\n \"members\": True,\n \"special-members\": True,\n \"show-inheritance\": True,\n}\nautodoc_member_order = \"bysource\"\nautodoc_typehints = \"description\"\n\n# sphinx.ext.intersphinx\nintersphinx_mapping = {\n \"matplotlib\": (\"https://matplotlib.org/stable/\", None),\n \"python\": (\"https://docs.python.org/3\", None),\n \"pytorch-lightning\": (\"https://pytorch-lightning.readthedocs.io/en/latest/\", None),\n \"rasterio\": (\"https://rasterio.readthedocs.io/en/latest/\", None),\n \"rtree\": (\"https://rtree.readthedocs.io/en/latest/\", None),\n \"torch\": (\"https://pytorch.org/docs/stable\", None),\n \"torchvision\": (\"https://pytorch.org/vision/stable\", None),\n}\n\n# nbsphinx\nnbsphinx_execute = \"never\"\n# TODO: branch/tag should change depending on which version of docs you look at\n# TODO: width option of image directive is broken, see:\n# https://github.com/pytorch/pytorch_sphinx_theme/issues/140\nnbsphinx_prolog = \"\"\"\n{% set colab = \"https://colab.research.google.com\" %}\n{% set repo = \"microsoft/torchgeo\" %}\n{% if \"dev\" in env.config.release %}\n {% set branch = \"main\" %}\n{% else %}\n {% set branch = \"releases/v\" ~ env.config.version %}\n{% endif %}\n\n.. image:: {{ colab }}/assets/colab-badge.svg\n :class: colabbadge\n :alt: Open in Colab\n :target: {{ colab }}/github/{{ repo }}/blob/{{ branch }}/docs/{{ env.docname }}.ipynb\n\"\"\"\n\n# Disables requirejs in nbsphinx to enable compatibility with the pytorch_sphinx_theme\n# See more information here https://github.com/spatialaudio/nbsphinx/issues/599\n# NOTE: This will likely break nbsphinx widgets\nnbsphinx_requirejs_path = \"\"\n", "path": "docs/conf.py"}]}
1,697
151
gh_patches_debug_18980
rasdani/github-patches
git_diff
mne-tools__mne-bids-320
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [joss] list supported python versions in pypi piggy-backing off of one of @TomDonoghue's comments: > Is there a minimum version of Python3 required? I guess that there is some minimum required sub-version of Py3 required, but it's not clear what that would be, and it might be useful to note that. you can add supported python versions for pypi, you can add these: ``` Programming Language :: Python :: 3.5 Programming Language :: Python :: 3.6 Programming Language :: Python :: 3.7 ``` [in your setup.py](https://github.com/mne-tools/mne-bids/blob/20fbb881afaab75206db3d8c2d4b226c4c3bc212/setup.py#L50) you can also set [`python_requires`](https://packaging.python.org/guides/distributing-packages-using-setuptools/#python-requires) in `setup.py` to ensure people do not use the incorrect python version with your package. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 #! /usr/bin/env python 2 """Setup MNE-BIDS.""" 3 import os 4 from setuptools import setup, find_packages 5 6 # get the version 7 version = None 8 with open(os.path.join('mne_bids', '__init__.py'), 'r') as fid: 9 for line in (line.strip() for line in fid): 10 if line.startswith('__version__'): 11 version = line.split('=')[1].strip().strip('\'') 12 break 13 if version is None: 14 raise RuntimeError('Could not determine version') 15 16 17 descr = ('MNE-BIDS: Organizing MEG, EEG, and iEEG data according to the BIDS ' 18 'specification and facilitating their analysis with MNE-Python') 19 20 DISTNAME = 'mne-bids' 21 DESCRIPTION = descr 22 MAINTAINER = 'Mainak Jas' 23 MAINTAINER_EMAIL = '[email protected]' 24 URL = 'https://mne.tools/mne-bids/' 25 LICENSE = 'BSD (3-clause)' 26 DOWNLOAD_URL = 'https://github.com/mne-tools/mne-bids.git' 27 VERSION = version 28 29 if __name__ == "__main__": 30 setup(name=DISTNAME, 31 maintainer=MAINTAINER, 32 maintainer_email=MAINTAINER_EMAIL, 33 description=DESCRIPTION, 34 license=LICENSE, 35 url=URL, 36 version=VERSION, 37 download_url=DOWNLOAD_URL, 38 long_description=open('README.rst').read(), 39 long_description_content_type='text/x-rst', 40 classifiers=[ 41 'Intended Audience :: Science/Research', 42 'Intended Audience :: Developers', 43 'License :: OSI Approved', 44 'Programming Language :: Python', 45 'Topic :: Software Development', 46 'Topic :: Scientific/Engineering', 47 'Operating System :: Microsoft :: Windows', 48 'Operating System :: POSIX', 49 'Operating System :: Unix', 50 'Operating System :: MacOS', 51 ], 52 platforms='any', 53 packages=find_packages(), 54 entry_points={'console_scripts': [ 55 'mne_bids = mne_bids.commands.run:main', 56 ]}, 57 project_urls={ 58 'Documentation': 'https://mne.tools/mne-bids', 59 'Bug Reports': 'https://github.com/mne-tools/mne-bids/issues', 60 'Source': 'https://github.com/mne-tools/mne-bids', 61 }, 62 ) 63 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -37,6 +37,7 @@ download_url=DOWNLOAD_URL, long_description=open('README.rst').read(), long_description_content_type='text/x-rst', + python_requires='~=3.5', classifiers=[ 'Intended Audience :: Science/Research', 'Intended Audience :: Developers', @@ -48,6 +49,9 @@ 'Operating System :: POSIX', 'Operating System :: Unix', 'Operating System :: MacOS', + 'Programming Language :: Python :: 3.5', + 'Programming Language :: Python :: 3.6', + 'Programming Language :: Python :: 3.7', ], platforms='any', packages=find_packages(),
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -37,6 +37,7 @@\n download_url=DOWNLOAD_URL,\n long_description=open('README.rst').read(),\n long_description_content_type='text/x-rst',\n+ python_requires='~=3.5',\n classifiers=[\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Developers',\n@@ -48,6 +49,9 @@\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Operating System :: MacOS',\n+ 'Programming Language :: Python :: 3.5',\n+ 'Programming Language :: Python :: 3.6',\n+ 'Programming Language :: Python :: 3.7',\n ],\n platforms='any',\n packages=find_packages(),\n", "issue": "[joss] list supported python versions in pypi\npiggy-backing off of one of @TomDonoghue's comments:\r\n> Is there a minimum version of Python3 required? I guess that there is some minimum required sub-version of Py3 required, but it's not clear what that would be, and it might be useful to note that.\r\n\r\nyou can add supported python versions for pypi, you can add these:\r\n```\r\nProgramming Language :: Python :: 3.5\r\nProgramming Language :: Python :: 3.6\r\nProgramming Language :: Python :: 3.7\r\n```\r\n[in your setup.py](https://github.com/mne-tools/mne-bids/blob/20fbb881afaab75206db3d8c2d4b226c4c3bc212/setup.py#L50)\r\nyou can also set [`python_requires`](https://packaging.python.org/guides/distributing-packages-using-setuptools/#python-requires) in `setup.py` to ensure people do not use the incorrect python version with your package.\n", "before_files": [{"content": "#! /usr/bin/env python\n\"\"\"Setup MNE-BIDS.\"\"\"\nimport os\nfrom setuptools import setup, find_packages\n\n# get the version\nversion = None\nwith open(os.path.join('mne_bids', '__init__.py'), 'r') as fid:\n for line in (line.strip() for line in fid):\n if line.startswith('__version__'):\n version = line.split('=')[1].strip().strip('\\'')\n break\nif version is None:\n raise RuntimeError('Could not determine version')\n\n\ndescr = ('MNE-BIDS: Organizing MEG, EEG, and iEEG data according to the BIDS '\n 'specification and facilitating their analysis with MNE-Python')\n\nDISTNAME = 'mne-bids'\nDESCRIPTION = descr\nMAINTAINER = 'Mainak Jas'\nMAINTAINER_EMAIL = '[email protected]'\nURL = 'https://mne.tools/mne-bids/'\nLICENSE = 'BSD (3-clause)'\nDOWNLOAD_URL = 'https://github.com/mne-tools/mne-bids.git'\nVERSION = version\n\nif __name__ == \"__main__\":\n setup(name=DISTNAME,\n maintainer=MAINTAINER,\n maintainer_email=MAINTAINER_EMAIL,\n description=DESCRIPTION,\n license=LICENSE,\n url=URL,\n version=VERSION,\n download_url=DOWNLOAD_URL,\n long_description=open('README.rst').read(),\n long_description_content_type='text/x-rst',\n classifiers=[\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved',\n 'Programming Language :: Python',\n 'Topic :: Software Development',\n 'Topic :: Scientific/Engineering',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Operating System :: MacOS',\n ],\n platforms='any',\n packages=find_packages(),\n entry_points={'console_scripts': [\n 'mne_bids = mne_bids.commands.run:main',\n ]},\n project_urls={\n 'Documentation': 'https://mne.tools/mne-bids',\n 'Bug Reports': 'https://github.com/mne-tools/mne-bids/issues',\n 'Source': 'https://github.com/mne-tools/mne-bids',\n },\n )\n", "path": "setup.py"}], "after_files": [{"content": "#! /usr/bin/env python\n\"\"\"Setup MNE-BIDS.\"\"\"\nimport os\nfrom setuptools import setup, find_packages\n\n# get the version\nversion = None\nwith open(os.path.join('mne_bids', '__init__.py'), 'r') as fid:\n for line in (line.strip() for line in fid):\n if line.startswith('__version__'):\n version = line.split('=')[1].strip().strip('\\'')\n break\nif version is None:\n raise RuntimeError('Could not determine version')\n\n\ndescr = ('MNE-BIDS: Organizing MEG, EEG, and iEEG data according to the BIDS '\n 'specification and facilitating their analysis with MNE-Python')\n\nDISTNAME = 'mne-bids'\nDESCRIPTION = descr\nMAINTAINER = 'Mainak Jas'\nMAINTAINER_EMAIL = '[email protected]'\nURL = 'https://mne.tools/mne-bids/'\nLICENSE = 'BSD (3-clause)'\nDOWNLOAD_URL = 'https://github.com/mne-tools/mne-bids.git'\nVERSION = version\n\nif __name__ == \"__main__\":\n setup(name=DISTNAME,\n maintainer=MAINTAINER,\n maintainer_email=MAINTAINER_EMAIL,\n description=DESCRIPTION,\n license=LICENSE,\n url=URL,\n version=VERSION,\n download_url=DOWNLOAD_URL,\n long_description=open('README.rst').read(),\n long_description_content_type='text/x-rst',\n python_requires='~=3.5',\n classifiers=[\n 'Intended Audience :: Science/Research',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved',\n 'Programming Language :: Python',\n 'Topic :: Software Development',\n 'Topic :: Scientific/Engineering',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Operating System :: MacOS',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n ],\n platforms='any',\n packages=find_packages(),\n entry_points={'console_scripts': [\n 'mne_bids = mne_bids.commands.run:main',\n ]},\n project_urls={\n 'Documentation': 'https://mne.tools/mne-bids',\n 'Bug Reports': 'https://github.com/mne-tools/mne-bids/issues',\n 'Source': 'https://github.com/mne-tools/mne-bids',\n },\n )\n", "path": "setup.py"}]}
1,093
176
gh_patches_debug_25171
rasdani/github-patches
git_diff
fidals__shopelectro-778
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Cart issues on prod This is PO comments: ----- заходишь на сайт, переходишь в товар. добавляешь его в корзину. уходишь на другой товар, добавляешь его и переходишь. в корзину. ..., а его там нет --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `shopelectro/urls.py` Content: ``` 1 from datetime import timedelta 2 from collections import OrderedDict 3 4 from django.conf import settings 5 from django.conf.urls import url, include 6 from django.conf.urls.static import static 7 from django.contrib.sitemaps.views import sitemap 8 from django.views.decorators.cache import cache_page 9 10 from pages.views import RobotsView, SitemapPage 11 from pages.urls import custom_page_url 12 13 from shopelectro import sitemaps, views 14 from shopelectro.admin import se_admin 15 16 17 def cached_time(*args, **kwargs) -> int: 18 """Return value of time for caching in seconds.""" 19 return int(timedelta(*args, **kwargs).total_seconds()) 20 21 22 # Orders sitemaps instances 23 sitemaps = OrderedDict([ 24 ('index', sitemaps.IndexSitemap), 25 ('category', sitemaps.CategorySitemap), 26 ('category-with-tags', sitemaps.CategoryWithTagsSitemap), 27 ('products', sitemaps.ProductSitemap), 28 ('site', sitemaps.PagesSitemap) 29 ]) 30 31 # disable cache 32 if settings.DEBUG: 33 def cache_page(arg): # Ignore PyFlakesBear 34 if callable(arg): 35 return arg 36 return cache_page 37 38 cached_60d = cache_page(cached_time(days=60)) 39 cached_2h = cache_page(cached_time(hours=2)) 40 41 admin_urls = [ 42 url(r'^', se_admin.urls), 43 url(r'^autocomplete/$', views.AdminAutocomplete.as_view(), name='admin_autocomplete'), 44 url(r'^get-tree-items/$', views.Tree.as_view()), 45 url(r'^redirect-to-product/$', views.RedirectToProduct.as_view()), 46 url(r'^table-editor-api/$', views.TableEditorAPI.as_view()), 47 url(r'^select2/', include('django_select2.urls')), 48 ] 49 50 catalog_urls = [ 51 # "category" group 52 url(r'^categories/(?P<slug>[\w-]+)/$', 53 cached_2h(views.CategoryPage.as_view()), name='category'), 54 url(r'^categories/(?P<slug>[\w-]+)/tags/(?P<tags>[\w_-]+)/$', 55 cached_2h(views.CategoryPage.as_view()), name='category'), 56 url(r'^categories/(?P<slug>[\w-]+)/(?P<sorting>[0-9]*)/$', 57 views.CategoryPage.as_view(), name='category'), 58 url(r'^categories/(?P<slug>[\w-]+)/(?P<sorting>[0-9]*)/tags/(?P<tags>[\w_-]+)/$', 59 views.CategoryPage.as_view(), name='category'), 60 # "load more" group 61 url(r'categories/(?P<slug>[\w-]+)/load-more/' 62 r'(?P<offset>[0-9]+)/(?P<sorting>[0-9]*)/$', 63 views.load_more, name='load_more'), 64 url(r'categories/(?P<slug>[\w-]+)/load-more/' 65 r'(?P<offset>[0-9]+)/(?P<sorting>[0-9]*)/tags/(?P<tags>[\w_-]+)/$', 66 views.load_more, name='load_more'), 67 # rest of urls 68 url(r'^no-images/$', views.ProductsWithoutImages.as_view(), 69 name='products_without_images'), 70 url(r'^no-text/$', views.ProductsWithoutText.as_view(), 71 name='products_without_text'), 72 url(r'^products/(?P<product_vendor_code>[0-9]+)/$', 73 views.ProductPage.as_view(), name='product'), 74 ] 75 76 service_urls = [ 77 url(r'^ya-kassa/aviso/$', views.yandex_aviso, name='yandex_aviso'), 78 url(r'^ya-kassa/check/$', views.yandex_check, name='yandex_check'), 79 url(r'^ya-feedback/redirect/$', 80 views.ya_feedback_with_redirect, name='ya_feedback_with_redirect'), 81 url(r'^ya-feedback/request/$', 82 views.ya_feedback_request, name='ya_feedback_request'), 83 ] 84 85 search_urls = [ 86 url(r'^autocomplete/$', views.Autocomplete.as_view(), name='autocomplete'), 87 ] 88 89 ecommerce_urls = [ 90 url(r'^cart-add/$', views.AddToCart.as_view(), name='cart_add'), 91 url(r'^cart-change/$', views.ChangeCount.as_view(), name='cart_set_count'), 92 url(r'^cart-flush/$', views.FlushCart.as_view(), name='cart_flush'), 93 url(r'^cart-remove/$', views.RemoveFromCart.as_view(), name='cart_remove'), 94 url(r'^order-call/$', views.order_call), 95 url(r'^one-click-buy/$', views.one_click_buy), 96 url(r'^yandex-order/$', views.YandexOrder.as_view()), 97 url(r'', include('ecommerce.urls')), 98 ] 99 100 custom_pages = [ 101 # can't use just `r'^(?P<page>)$'` with no args to views, because reverse don't work 102 custom_page_url(r'^$', cached_2h(views.IndexPage.as_view()), {'page': ''}, name='index'), 103 custom_page_url(r'^(?P<page>robots\.txt)$', RobotsView.as_view()), 104 custom_page_url(r'^(?P<page>search)/$', views.Search.as_view()), 105 custom_page_url(r'^(?P<page>catalog)/$', cached_2h(views.CategoryTree.as_view())), 106 custom_page_url(r'^shop/(?P<page>order)/$', views.OrderPage.as_view()), 107 custom_page_url(r'^shop/(?P<page>order-success)/$', views.OrderSuccess.as_view()), 108 custom_page_url(r'^(?P<page>sitemap)/$', SitemapPage.as_view()), 109 ] 110 111 urlpatterns = [ 112 url('', include(custom_pages)), 113 url(r'^admin/', include(admin_urls)), 114 url(r'^catalog/', include(catalog_urls)), 115 url(r'^pages/', include('pages.urls')), 116 url(r'^save-feedback/$', views.save_feedback), 117 url(r'^delete-feedback/$', views.delete_feedback), 118 url(r'^set-view-type/$', views.set_view_type, name='set_view_type'), 119 url(r'^shop/', include(ecommerce_urls)), 120 url(r'^search/', include(search_urls)), 121 url(r'^service/', include(service_urls)), 122 url(r'^sitemap\.xml$', cached_60d(sitemap), {'sitemaps': sitemaps}, name='sitemap'), 123 ] 124 125 if settings.DEBUG: 126 import debug_toolbar 127 128 urlpatterns += [ 129 url(r'^__debug__/', include(debug_toolbar.urls)), 130 *static(settings.STATIC_URL, document_root=settings.STATIC_ROOT), 131 *static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT), 132 ] 133 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/shopelectro/urls.py b/shopelectro/urls.py --- a/shopelectro/urls.py +++ b/shopelectro/urls.py @@ -5,7 +5,7 @@ from django.conf.urls import url, include from django.conf.urls.static import static from django.contrib.sitemaps.views import sitemap -from django.views.decorators.cache import cache_page +from django.views.decorators.cache import cache_page, never_cache from pages.views import RobotsView, SitemapPage from pages.urls import custom_page_url @@ -103,9 +103,10 @@ custom_page_url(r'^(?P<page>robots\.txt)$', RobotsView.as_view()), custom_page_url(r'^(?P<page>search)/$', views.Search.as_view()), custom_page_url(r'^(?P<page>catalog)/$', cached_2h(views.CategoryTree.as_view())), - custom_page_url(r'^shop/(?P<page>order)/$', views.OrderPage.as_view()), - custom_page_url(r'^shop/(?P<page>order-success)/$', views.OrderSuccess.as_view()), custom_page_url(r'^(?P<page>sitemap)/$', SitemapPage.as_view()), + # these pages should show only actual state + custom_page_url(r'^shop/(?P<page>order)/$', never_cache(views.OrderPage.as_view())), + custom_page_url(r'^shop/(?P<page>order-success)/$', never_cache(views.OrderSuccess.as_view())), ] urlpatterns = [
{"golden_diff": "diff --git a/shopelectro/urls.py b/shopelectro/urls.py\n--- a/shopelectro/urls.py\n+++ b/shopelectro/urls.py\n@@ -5,7 +5,7 @@\n from django.conf.urls import url, include\n from django.conf.urls.static import static\n from django.contrib.sitemaps.views import sitemap\n-from django.views.decorators.cache import cache_page\n+from django.views.decorators.cache import cache_page, never_cache\n \n from pages.views import RobotsView, SitemapPage\n from pages.urls import custom_page_url\n@@ -103,9 +103,10 @@\n custom_page_url(r'^(?P<page>robots\\.txt)$', RobotsView.as_view()),\n custom_page_url(r'^(?P<page>search)/$', views.Search.as_view()),\n custom_page_url(r'^(?P<page>catalog)/$', cached_2h(views.CategoryTree.as_view())),\n- custom_page_url(r'^shop/(?P<page>order)/$', views.OrderPage.as_view()),\n- custom_page_url(r'^shop/(?P<page>order-success)/$', views.OrderSuccess.as_view()),\n custom_page_url(r'^(?P<page>sitemap)/$', SitemapPage.as_view()),\n+ # these pages should show only actual state\n+ custom_page_url(r'^shop/(?P<page>order)/$', never_cache(views.OrderPage.as_view())),\n+ custom_page_url(r'^shop/(?P<page>order-success)/$', never_cache(views.OrderSuccess.as_view())),\n ]\n \n urlpatterns = [\n", "issue": "Cart issues on prod\nThis is PO comments:\r\n\r\n-----\r\n\r\n\u0437\u0430\u0445\u043e\u0434\u0438\u0448\u044c \u043d\u0430 \u0441\u0430\u0439\u0442, \u043f\u0435\u0440\u0435\u0445\u043e\u0434\u0438\u0448\u044c \u0432 \u0442\u043e\u0432\u0430\u0440. \u0434\u043e\u0431\u0430\u0432\u043b\u044f\u0435\u0448\u044c \u0435\u0433\u043e \u0432 \u043a\u043e\u0440\u0437\u0438\u043d\u0443. \u0443\u0445\u043e\u0434\u0438\u0448\u044c \u043d\u0430 \u0434\u0440\u0443\u0433\u043e\u0439 \u0442\u043e\u0432\u0430\u0440, \u0434\u043e\u0431\u0430\u0432\u043b\u044f\u0435\u0448\u044c \u0435\u0433\u043e \u0438 \u043f\u0435\u0440\u0435\u0445\u043e\u0434\u0438\u0448\u044c. \u0432 \u043a\u043e\u0440\u0437\u0438\u043d\u0443. ..., \u0430 \u0435\u0433\u043e \u0442\u0430\u043c \u043d\u0435\u0442\r\n\n", "before_files": [{"content": "from datetime import timedelta\nfrom collections import OrderedDict\n\nfrom django.conf import settings\nfrom django.conf.urls import url, include\nfrom django.conf.urls.static import static\nfrom django.contrib.sitemaps.views import sitemap\nfrom django.views.decorators.cache import cache_page\n\nfrom pages.views import RobotsView, SitemapPage\nfrom pages.urls import custom_page_url\n\nfrom shopelectro import sitemaps, views\nfrom shopelectro.admin import se_admin\n\n\ndef cached_time(*args, **kwargs) -> int:\n \"\"\"Return value of time for caching in seconds.\"\"\"\n return int(timedelta(*args, **kwargs).total_seconds())\n\n\n# Orders sitemaps instances\nsitemaps = OrderedDict([\n ('index', sitemaps.IndexSitemap),\n ('category', sitemaps.CategorySitemap),\n ('category-with-tags', sitemaps.CategoryWithTagsSitemap),\n ('products', sitemaps.ProductSitemap),\n ('site', sitemaps.PagesSitemap)\n])\n\n# disable cache\nif settings.DEBUG:\n def cache_page(arg): # Ignore PyFlakesBear\n if callable(arg):\n return arg\n return cache_page\n\ncached_60d = cache_page(cached_time(days=60))\ncached_2h = cache_page(cached_time(hours=2))\n\nadmin_urls = [\n url(r'^', se_admin.urls),\n url(r'^autocomplete/$', views.AdminAutocomplete.as_view(), name='admin_autocomplete'),\n url(r'^get-tree-items/$', views.Tree.as_view()),\n url(r'^redirect-to-product/$', views.RedirectToProduct.as_view()),\n url(r'^table-editor-api/$', views.TableEditorAPI.as_view()),\n url(r'^select2/', include('django_select2.urls')),\n]\n\ncatalog_urls = [\n # \"category\" group\n url(r'^categories/(?P<slug>[\\w-]+)/$',\n cached_2h(views.CategoryPage.as_view()), name='category'),\n url(r'^categories/(?P<slug>[\\w-]+)/tags/(?P<tags>[\\w_-]+)/$',\n cached_2h(views.CategoryPage.as_view()), name='category'),\n url(r'^categories/(?P<slug>[\\w-]+)/(?P<sorting>[0-9]*)/$',\n views.CategoryPage.as_view(), name='category'),\n url(r'^categories/(?P<slug>[\\w-]+)/(?P<sorting>[0-9]*)/tags/(?P<tags>[\\w_-]+)/$',\n views.CategoryPage.as_view(), name='category'),\n # \"load more\" group\n url(r'categories/(?P<slug>[\\w-]+)/load-more/'\n r'(?P<offset>[0-9]+)/(?P<sorting>[0-9]*)/$',\n views.load_more, name='load_more'),\n url(r'categories/(?P<slug>[\\w-]+)/load-more/'\n r'(?P<offset>[0-9]+)/(?P<sorting>[0-9]*)/tags/(?P<tags>[\\w_-]+)/$',\n views.load_more, name='load_more'),\n # rest of urls\n url(r'^no-images/$', views.ProductsWithoutImages.as_view(),\n name='products_without_images'),\n url(r'^no-text/$', views.ProductsWithoutText.as_view(),\n name='products_without_text'),\n url(r'^products/(?P<product_vendor_code>[0-9]+)/$',\n views.ProductPage.as_view(), name='product'),\n]\n\nservice_urls = [\n url(r'^ya-kassa/aviso/$', views.yandex_aviso, name='yandex_aviso'),\n url(r'^ya-kassa/check/$', views.yandex_check, name='yandex_check'),\n url(r'^ya-feedback/redirect/$',\n views.ya_feedback_with_redirect, name='ya_feedback_with_redirect'),\n url(r'^ya-feedback/request/$',\n views.ya_feedback_request, name='ya_feedback_request'),\n]\n\nsearch_urls = [\n url(r'^autocomplete/$', views.Autocomplete.as_view(), name='autocomplete'),\n]\n\necommerce_urls = [\n url(r'^cart-add/$', views.AddToCart.as_view(), name='cart_add'),\n url(r'^cart-change/$', views.ChangeCount.as_view(), name='cart_set_count'),\n url(r'^cart-flush/$', views.FlushCart.as_view(), name='cart_flush'),\n url(r'^cart-remove/$', views.RemoveFromCart.as_view(), name='cart_remove'),\n url(r'^order-call/$', views.order_call),\n url(r'^one-click-buy/$', views.one_click_buy),\n url(r'^yandex-order/$', views.YandexOrder.as_view()),\n url(r'', include('ecommerce.urls')),\n]\n\ncustom_pages = [\n # can't use just `r'^(?P<page>)$'` with no args to views, because reverse don't work\n custom_page_url(r'^$', cached_2h(views.IndexPage.as_view()), {'page': ''}, name='index'),\n custom_page_url(r'^(?P<page>robots\\.txt)$', RobotsView.as_view()),\n custom_page_url(r'^(?P<page>search)/$', views.Search.as_view()),\n custom_page_url(r'^(?P<page>catalog)/$', cached_2h(views.CategoryTree.as_view())),\n custom_page_url(r'^shop/(?P<page>order)/$', views.OrderPage.as_view()),\n custom_page_url(r'^shop/(?P<page>order-success)/$', views.OrderSuccess.as_view()),\n custom_page_url(r'^(?P<page>sitemap)/$', SitemapPage.as_view()),\n]\n\nurlpatterns = [\n url('', include(custom_pages)),\n url(r'^admin/', include(admin_urls)),\n url(r'^catalog/', include(catalog_urls)),\n url(r'^pages/', include('pages.urls')),\n url(r'^save-feedback/$', views.save_feedback),\n url(r'^delete-feedback/$', views.delete_feedback),\n url(r'^set-view-type/$', views.set_view_type, name='set_view_type'),\n url(r'^shop/', include(ecommerce_urls)),\n url(r'^search/', include(search_urls)),\n url(r'^service/', include(service_urls)),\n url(r'^sitemap\\.xml$', cached_60d(sitemap), {'sitemaps': sitemaps}, name='sitemap'),\n]\n\nif settings.DEBUG:\n import debug_toolbar\n\n urlpatterns += [\n url(r'^__debug__/', include(debug_toolbar.urls)),\n *static(settings.STATIC_URL, document_root=settings.STATIC_ROOT),\n *static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT),\n ]\n", "path": "shopelectro/urls.py"}], "after_files": [{"content": "from datetime import timedelta\nfrom collections import OrderedDict\n\nfrom django.conf import settings\nfrom django.conf.urls import url, include\nfrom django.conf.urls.static import static\nfrom django.contrib.sitemaps.views import sitemap\nfrom django.views.decorators.cache import cache_page, never_cache\n\nfrom pages.views import RobotsView, SitemapPage\nfrom pages.urls import custom_page_url\n\nfrom shopelectro import sitemaps, views\nfrom shopelectro.admin import se_admin\n\n\ndef cached_time(*args, **kwargs) -> int:\n \"\"\"Return value of time for caching in seconds.\"\"\"\n return int(timedelta(*args, **kwargs).total_seconds())\n\n\n# Orders sitemaps instances\nsitemaps = OrderedDict([\n ('index', sitemaps.IndexSitemap),\n ('category', sitemaps.CategorySitemap),\n ('category-with-tags', sitemaps.CategoryWithTagsSitemap),\n ('products', sitemaps.ProductSitemap),\n ('site', sitemaps.PagesSitemap)\n])\n\n# disable cache\nif settings.DEBUG:\n def cache_page(arg): # Ignore PyFlakesBear\n if callable(arg):\n return arg\n return cache_page\n\ncached_60d = cache_page(cached_time(days=60))\ncached_2h = cache_page(cached_time(hours=2))\n\nadmin_urls = [\n url(r'^', se_admin.urls),\n url(r'^autocomplete/$', views.AdminAutocomplete.as_view(), name='admin_autocomplete'),\n url(r'^get-tree-items/$', views.Tree.as_view()),\n url(r'^redirect-to-product/$', views.RedirectToProduct.as_view()),\n url(r'^table-editor-api/$', views.TableEditorAPI.as_view()),\n url(r'^select2/', include('django_select2.urls')),\n]\n\ncatalog_urls = [\n # \"category\" group\n url(r'^categories/(?P<slug>[\\w-]+)/$',\n cached_2h(views.CategoryPage.as_view()), name='category'),\n url(r'^categories/(?P<slug>[\\w-]+)/tags/(?P<tags>[\\w_-]+)/$',\n cached_2h(views.CategoryPage.as_view()), name='category'),\n url(r'^categories/(?P<slug>[\\w-]+)/(?P<sorting>[0-9]*)/$',\n views.CategoryPage.as_view(), name='category'),\n url(r'^categories/(?P<slug>[\\w-]+)/(?P<sorting>[0-9]*)/tags/(?P<tags>[\\w_-]+)/$',\n views.CategoryPage.as_view(), name='category'),\n # \"load more\" group\n url(r'categories/(?P<slug>[\\w-]+)/load-more/'\n r'(?P<offset>[0-9]+)/(?P<sorting>[0-9]*)/$',\n views.load_more, name='load_more'),\n url(r'categories/(?P<slug>[\\w-]+)/load-more/'\n r'(?P<offset>[0-9]+)/(?P<sorting>[0-9]*)/tags/(?P<tags>[\\w_-]+)/$',\n views.load_more, name='load_more'),\n # rest of urls\n url(r'^no-images/$', views.ProductsWithoutImages.as_view(),\n name='products_without_images'),\n url(r'^no-text/$', views.ProductsWithoutText.as_view(),\n name='products_without_text'),\n url(r'^products/(?P<product_vendor_code>[0-9]+)/$',\n views.ProductPage.as_view(), name='product'),\n]\n\nservice_urls = [\n url(r'^ya-kassa/aviso/$', views.yandex_aviso, name='yandex_aviso'),\n url(r'^ya-kassa/check/$', views.yandex_check, name='yandex_check'),\n url(r'^ya-feedback/redirect/$',\n views.ya_feedback_with_redirect, name='ya_feedback_with_redirect'),\n url(r'^ya-feedback/request/$',\n views.ya_feedback_request, name='ya_feedback_request'),\n]\n\nsearch_urls = [\n url(r'^autocomplete/$', views.Autocomplete.as_view(), name='autocomplete'),\n]\n\necommerce_urls = [\n url(r'^cart-add/$', views.AddToCart.as_view(), name='cart_add'),\n url(r'^cart-change/$', views.ChangeCount.as_view(), name='cart_set_count'),\n url(r'^cart-flush/$', views.FlushCart.as_view(), name='cart_flush'),\n url(r'^cart-remove/$', views.RemoveFromCart.as_view(), name='cart_remove'),\n url(r'^order-call/$', views.order_call),\n url(r'^one-click-buy/$', views.one_click_buy),\n url(r'^yandex-order/$', views.YandexOrder.as_view()),\n url(r'', include('ecommerce.urls')),\n]\n\ncustom_pages = [\n # can't use just `r'^(?P<page>)$'` with no args to views, because reverse don't work\n custom_page_url(r'^$', cached_2h(views.IndexPage.as_view()), {'page': ''}, name='index'),\n custom_page_url(r'^(?P<page>robots\\.txt)$', RobotsView.as_view()),\n custom_page_url(r'^(?P<page>search)/$', views.Search.as_view()),\n custom_page_url(r'^(?P<page>catalog)/$', cached_2h(views.CategoryTree.as_view())),\n custom_page_url(r'^(?P<page>sitemap)/$', SitemapPage.as_view()),\n # these pages should show only actual state\n custom_page_url(r'^shop/(?P<page>order)/$', never_cache(views.OrderPage.as_view())),\n custom_page_url(r'^shop/(?P<page>order-success)/$', never_cache(views.OrderSuccess.as_view())),\n]\n\nurlpatterns = [\n url('', include(custom_pages)),\n url(r'^admin/', include(admin_urls)),\n url(r'^catalog/', include(catalog_urls)),\n url(r'^pages/', include('pages.urls')),\n url(r'^save-feedback/$', views.save_feedback),\n url(r'^delete-feedback/$', views.delete_feedback),\n url(r'^set-view-type/$', views.set_view_type, name='set_view_type'),\n url(r'^shop/', include(ecommerce_urls)),\n url(r'^search/', include(search_urls)),\n url(r'^service/', include(service_urls)),\n url(r'^sitemap\\.xml$', cached_60d(sitemap), {'sitemaps': sitemaps}, name='sitemap'),\n]\n\nif settings.DEBUG:\n import debug_toolbar\n\n urlpatterns += [\n url(r'^__debug__/', include(debug_toolbar.urls)),\n *static(settings.STATIC_URL, document_root=settings.STATIC_ROOT),\n *static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT),\n ]\n", "path": "shopelectro/urls.py"}]}
2,006
335
gh_patches_debug_31138
rasdani/github-patches
git_diff
learningequality__kolibri-11049
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Catch and handle EmptyResultSet error when trying to cache metadata labels EmptyResultSet Sentry Issue: [KOLIBRI-BACKEND-2E9](https://learningequality.sentry.io/issues/4005137733/?referrer=github_integration) ``` EmptyResultSet: (16 additional frame(s) were not displayed) ... File "django/db/models/sql/query.py", line 233, in __str__ sql, params = self.sql_with_params() File "django/db/models/sql/query.py", line 241, in sql_with_params return self.get_compiler(DEFAULT_DB_ALIAS).as_sql() File "django/db/models/sql/compiler.py", line 441, in as_sql where, w_params = self.compile(self.where) if self.where is not None else ("", []) File "django/db/models/sql/compiler.py", line 373, in compile sql, params = node.as_sql(self, self.connection) File "django/db/models/sql/where.py", line 97, in as_sql raise EmptyResultSet ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `kolibri/core/content/utils/search.py` Content: ``` 1 """ 2 Avoiding direct model imports in here so that we can import these functions into places 3 that should not initiate the Django app registry. 4 """ 5 import hashlib 6 7 try: 8 from django.contrib.postgres.aggregates import BitOr 9 except ImportError: 10 BitOr = None 11 12 from django.db import connections 13 from django.db.models import Aggregate 14 from django.db.models import Case 15 from django.db.models import Value 16 from django.db.models import When 17 from django.db.models.fields import IntegerField 18 from le_utils.constants.labels.accessibility_categories import ( 19 ACCESSIBILITYCATEGORIESLIST, 20 ) 21 from le_utils.constants.labels.learning_activities import LEARNINGACTIVITIESLIST 22 from le_utils.constants.labels.levels import LEVELSLIST 23 from le_utils.constants.labels.needs import NEEDSLIST 24 from le_utils.constants.labels.subjects import SUBJECTSLIST 25 26 from kolibri.core.utils.cache import process_cache as cache 27 28 29 metadata_lookup = { 30 "learning_activities": LEARNINGACTIVITIESLIST, 31 "categories": SUBJECTSLIST, 32 "grade_levels": LEVELSLIST, 33 "accessibility_labels": ACCESSIBILITYCATEGORIESLIST, 34 "learner_needs": NEEDSLIST, 35 } 36 37 38 metadata_bitmasks = {} 39 40 bitmask_fieldnames = {} 41 42 43 for key, labels in metadata_lookup.items(): 44 bitmask_lookup = {} 45 i = 0 46 while labels[i : i + 64]: 47 bitmask_field_name = "{}_bitmask_{}".format(key, i) 48 bitmask_fieldnames[bitmask_field_name] = [] 49 for j, label in enumerate(labels): 50 info = { 51 "bitmask_field_name": bitmask_field_name, 52 "field_name": key, 53 "bits": 2 ** j, 54 "label": label, 55 } 56 bitmask_lookup[label] = info 57 bitmask_fieldnames[bitmask_field_name].append(info) 58 i += 64 59 metadata_bitmasks[key] = bitmask_lookup 60 61 62 def _get_available_languages(base_queryset): 63 from kolibri.core.content.models import Language 64 65 langs = Language.objects.filter( 66 id__in=base_queryset.exclude(lang=None) 67 .values_list("lang_id", flat=True) 68 .distinct() 69 ).values("id", "lang_name") 70 return list(langs) 71 72 73 def _get_available_channels(base_queryset): 74 from kolibri.core.content.models import ChannelMetadata 75 76 return list( 77 ChannelMetadata.objects.filter( 78 id__in=base_queryset.values_list("channel_id", flat=True).distinct() 79 ).values("id", "name") 80 ) 81 82 83 class SQLiteBitwiseORAggregate(Aggregate): 84 name = "BitwiseOR" 85 86 def __init__(self, expression, num_bits=None, **extra): 87 if not num_bits: 88 raise ValueError("num_bits must be a positive integer") 89 self.num_bits = num_bits 90 super(SQLiteBitwiseORAggregate, self).__init__( 91 expression, output_field=IntegerField(), **extra 92 ) 93 94 @property 95 def template(self): 96 return " + ".join( 97 "max(%(expressions)s&{})".format(2 ** i) for i in range(0, self.num_bits) 98 ) 99 100 101 def get_available_metadata_labels(base_queryset): 102 from kolibri.core.device.models import ContentCacheKey 103 104 content_cache_key = ContentCacheKey.get_cache_key() 105 cache_key = "search-labels:{}:{}".format( 106 content_cache_key, 107 hashlib.md5(str(base_queryset.query).encode("utf8")).hexdigest(), 108 ) 109 if cache_key not in cache: 110 base_queryset = base_queryset.order_by() 111 aggregates = {} 112 for field in bitmask_fieldnames: 113 field_agg = field + "_agg" 114 if connections[base_queryset.db].vendor == "sqlite" or BitOr is None: 115 aggregates[field_agg] = SQLiteBitwiseORAggregate( 116 field, num_bits=len(bitmask_fieldnames[field]) 117 ) 118 elif connections[base_queryset.db].vendor == "postgresql": 119 aggregates[field_agg] = BitOr(field) 120 output = {} 121 agg = base_queryset.aggregate(**aggregates) 122 for field, values in bitmask_fieldnames.items(): 123 bit_value = agg[field + "_agg"] 124 for value in values: 125 if value["field_name"] not in output: 126 output[value["field_name"]] = [] 127 if bit_value is not None and bit_value & value["bits"]: 128 output[value["field_name"]].append(value["label"]) 129 output["languages"] = _get_available_languages(base_queryset) 130 output["channels"] = _get_available_channels(base_queryset) 131 cache.set(cache_key, output, timeout=None) 132 return cache.get(cache_key) 133 134 135 def get_all_contentnode_label_metadata(): 136 from kolibri.core.content.models import ContentNode 137 138 return get_available_metadata_labels(ContentNode.objects.filter(available=True)) 139 140 141 def annotate_label_bitmasks(queryset): 142 update_statements = {} 143 for bitmask_fieldname, label_info in bitmask_fieldnames.items(): 144 update_statements[bitmask_fieldname] = sum( 145 Case( 146 When( 147 **{ 148 info["field_name"] + "__contains": info["label"], 149 "then": Value(info["bits"]), 150 } 151 ), 152 default=Value(0), 153 ) 154 for info in label_info 155 ) 156 queryset.update(**update_statements) 157 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/kolibri/core/content/utils/search.py b/kolibri/core/content/utils/search.py --- a/kolibri/core/content/utils/search.py +++ b/kolibri/core/content/utils/search.py @@ -9,6 +9,7 @@ except ImportError: BitOr = None +from django.core.exceptions import EmptyResultSet from django.db import connections from django.db.models import Aggregate from django.db.models import Case @@ -39,6 +40,11 @@ bitmask_fieldnames = {} +empty_labels = { + "languages": [], + "channels": [], +} + for key, labels in metadata_lookup.items(): bitmask_lookup = {} @@ -57,6 +63,7 @@ bitmask_fieldnames[bitmask_field_name].append(info) i += 64 metadata_bitmasks[key] = bitmask_lookup + empty_labels[key] = [] def _get_available_languages(base_queryset): @@ -98,14 +105,17 @@ ) -def get_available_metadata_labels(base_queryset): +def get_available_metadata_labels(base_queryset): # noqa: C901 from kolibri.core.device.models import ContentCacheKey content_cache_key = ContentCacheKey.get_cache_key() - cache_key = "search-labels:{}:{}".format( - content_cache_key, - hashlib.md5(str(base_queryset.query).encode("utf8")).hexdigest(), - ) + try: + cache_key = "search-labels:{}:{}".format( + content_cache_key, + hashlib.md5(str(base_queryset.query).encode("utf8")).hexdigest(), + ) + except EmptyResultSet: + return empty_labels if cache_key not in cache: base_queryset = base_queryset.order_by() aggregates = {}
{"golden_diff": "diff --git a/kolibri/core/content/utils/search.py b/kolibri/core/content/utils/search.py\n--- a/kolibri/core/content/utils/search.py\n+++ b/kolibri/core/content/utils/search.py\n@@ -9,6 +9,7 @@\n except ImportError:\n BitOr = None\n \n+from django.core.exceptions import EmptyResultSet\n from django.db import connections\n from django.db.models import Aggregate\n from django.db.models import Case\n@@ -39,6 +40,11 @@\n \n bitmask_fieldnames = {}\n \n+empty_labels = {\n+ \"languages\": [],\n+ \"channels\": [],\n+}\n+\n \n for key, labels in metadata_lookup.items():\n bitmask_lookup = {}\n@@ -57,6 +63,7 @@\n bitmask_fieldnames[bitmask_field_name].append(info)\n i += 64\n metadata_bitmasks[key] = bitmask_lookup\n+ empty_labels[key] = []\n \n \n def _get_available_languages(base_queryset):\n@@ -98,14 +105,17 @@\n )\n \n \n-def get_available_metadata_labels(base_queryset):\n+def get_available_metadata_labels(base_queryset): # noqa: C901\n from kolibri.core.device.models import ContentCacheKey\n \n content_cache_key = ContentCacheKey.get_cache_key()\n- cache_key = \"search-labels:{}:{}\".format(\n- content_cache_key,\n- hashlib.md5(str(base_queryset.query).encode(\"utf8\")).hexdigest(),\n- )\n+ try:\n+ cache_key = \"search-labels:{}:{}\".format(\n+ content_cache_key,\n+ hashlib.md5(str(base_queryset.query).encode(\"utf8\")).hexdigest(),\n+ )\n+ except EmptyResultSet:\n+ return empty_labels\n if cache_key not in cache:\n base_queryset = base_queryset.order_by()\n aggregates = {}\n", "issue": "Catch and handle EmptyResultSet error when trying to cache metadata labels\nEmptyResultSet\n\nSentry Issue: [KOLIBRI-BACKEND-2E9](https://learningequality.sentry.io/issues/4005137733/?referrer=github_integration)\n\n```\nEmptyResultSet: \n(16 additional frame(s) were not displayed)\n...\n File \"django/db/models/sql/query.py\", line 233, in __str__\n sql, params = self.sql_with_params()\n File \"django/db/models/sql/query.py\", line 241, in sql_with_params\n return self.get_compiler(DEFAULT_DB_ALIAS).as_sql()\n File \"django/db/models/sql/compiler.py\", line 441, in as_sql\n where, w_params = self.compile(self.where) if self.where is not None else (\"\", [])\n File \"django/db/models/sql/compiler.py\", line 373, in compile\n sql, params = node.as_sql(self, self.connection)\n File \"django/db/models/sql/where.py\", line 97, in as_sql\n raise EmptyResultSet\n```\n", "before_files": [{"content": "\"\"\"\nAvoiding direct model imports in here so that we can import these functions into places\nthat should not initiate the Django app registry.\n\"\"\"\nimport hashlib\n\ntry:\n from django.contrib.postgres.aggregates import BitOr\nexcept ImportError:\n BitOr = None\n\nfrom django.db import connections\nfrom django.db.models import Aggregate\nfrom django.db.models import Case\nfrom django.db.models import Value\nfrom django.db.models import When\nfrom django.db.models.fields import IntegerField\nfrom le_utils.constants.labels.accessibility_categories import (\n ACCESSIBILITYCATEGORIESLIST,\n)\nfrom le_utils.constants.labels.learning_activities import LEARNINGACTIVITIESLIST\nfrom le_utils.constants.labels.levels import LEVELSLIST\nfrom le_utils.constants.labels.needs import NEEDSLIST\nfrom le_utils.constants.labels.subjects import SUBJECTSLIST\n\nfrom kolibri.core.utils.cache import process_cache as cache\n\n\nmetadata_lookup = {\n \"learning_activities\": LEARNINGACTIVITIESLIST,\n \"categories\": SUBJECTSLIST,\n \"grade_levels\": LEVELSLIST,\n \"accessibility_labels\": ACCESSIBILITYCATEGORIESLIST,\n \"learner_needs\": NEEDSLIST,\n}\n\n\nmetadata_bitmasks = {}\n\nbitmask_fieldnames = {}\n\n\nfor key, labels in metadata_lookup.items():\n bitmask_lookup = {}\n i = 0\n while labels[i : i + 64]:\n bitmask_field_name = \"{}_bitmask_{}\".format(key, i)\n bitmask_fieldnames[bitmask_field_name] = []\n for j, label in enumerate(labels):\n info = {\n \"bitmask_field_name\": bitmask_field_name,\n \"field_name\": key,\n \"bits\": 2 ** j,\n \"label\": label,\n }\n bitmask_lookup[label] = info\n bitmask_fieldnames[bitmask_field_name].append(info)\n i += 64\n metadata_bitmasks[key] = bitmask_lookup\n\n\ndef _get_available_languages(base_queryset):\n from kolibri.core.content.models import Language\n\n langs = Language.objects.filter(\n id__in=base_queryset.exclude(lang=None)\n .values_list(\"lang_id\", flat=True)\n .distinct()\n ).values(\"id\", \"lang_name\")\n return list(langs)\n\n\ndef _get_available_channels(base_queryset):\n from kolibri.core.content.models import ChannelMetadata\n\n return list(\n ChannelMetadata.objects.filter(\n id__in=base_queryset.values_list(\"channel_id\", flat=True).distinct()\n ).values(\"id\", \"name\")\n )\n\n\nclass SQLiteBitwiseORAggregate(Aggregate):\n name = \"BitwiseOR\"\n\n def __init__(self, expression, num_bits=None, **extra):\n if not num_bits:\n raise ValueError(\"num_bits must be a positive integer\")\n self.num_bits = num_bits\n super(SQLiteBitwiseORAggregate, self).__init__(\n expression, output_field=IntegerField(), **extra\n )\n\n @property\n def template(self):\n return \" + \".join(\n \"max(%(expressions)s&{})\".format(2 ** i) for i in range(0, self.num_bits)\n )\n\n\ndef get_available_metadata_labels(base_queryset):\n from kolibri.core.device.models import ContentCacheKey\n\n content_cache_key = ContentCacheKey.get_cache_key()\n cache_key = \"search-labels:{}:{}\".format(\n content_cache_key,\n hashlib.md5(str(base_queryset.query).encode(\"utf8\")).hexdigest(),\n )\n if cache_key not in cache:\n base_queryset = base_queryset.order_by()\n aggregates = {}\n for field in bitmask_fieldnames:\n field_agg = field + \"_agg\"\n if connections[base_queryset.db].vendor == \"sqlite\" or BitOr is None:\n aggregates[field_agg] = SQLiteBitwiseORAggregate(\n field, num_bits=len(bitmask_fieldnames[field])\n )\n elif connections[base_queryset.db].vendor == \"postgresql\":\n aggregates[field_agg] = BitOr(field)\n output = {}\n agg = base_queryset.aggregate(**aggregates)\n for field, values in bitmask_fieldnames.items():\n bit_value = agg[field + \"_agg\"]\n for value in values:\n if value[\"field_name\"] not in output:\n output[value[\"field_name\"]] = []\n if bit_value is not None and bit_value & value[\"bits\"]:\n output[value[\"field_name\"]].append(value[\"label\"])\n output[\"languages\"] = _get_available_languages(base_queryset)\n output[\"channels\"] = _get_available_channels(base_queryset)\n cache.set(cache_key, output, timeout=None)\n return cache.get(cache_key)\n\n\ndef get_all_contentnode_label_metadata():\n from kolibri.core.content.models import ContentNode\n\n return get_available_metadata_labels(ContentNode.objects.filter(available=True))\n\n\ndef annotate_label_bitmasks(queryset):\n update_statements = {}\n for bitmask_fieldname, label_info in bitmask_fieldnames.items():\n update_statements[bitmask_fieldname] = sum(\n Case(\n When(\n **{\n info[\"field_name\"] + \"__contains\": info[\"label\"],\n \"then\": Value(info[\"bits\"]),\n }\n ),\n default=Value(0),\n )\n for info in label_info\n )\n queryset.update(**update_statements)\n", "path": "kolibri/core/content/utils/search.py"}], "after_files": [{"content": "\"\"\"\nAvoiding direct model imports in here so that we can import these functions into places\nthat should not initiate the Django app registry.\n\"\"\"\nimport hashlib\n\ntry:\n from django.contrib.postgres.aggregates import BitOr\nexcept ImportError:\n BitOr = None\n\nfrom django.core.exceptions import EmptyResultSet\nfrom django.db import connections\nfrom django.db.models import Aggregate\nfrom django.db.models import Case\nfrom django.db.models import Value\nfrom django.db.models import When\nfrom django.db.models.fields import IntegerField\nfrom le_utils.constants.labels.accessibility_categories import (\n ACCESSIBILITYCATEGORIESLIST,\n)\nfrom le_utils.constants.labels.learning_activities import LEARNINGACTIVITIESLIST\nfrom le_utils.constants.labels.levels import LEVELSLIST\nfrom le_utils.constants.labels.needs import NEEDSLIST\nfrom le_utils.constants.labels.subjects import SUBJECTSLIST\n\nfrom kolibri.core.utils.cache import process_cache as cache\n\n\nmetadata_lookup = {\n \"learning_activities\": LEARNINGACTIVITIESLIST,\n \"categories\": SUBJECTSLIST,\n \"grade_levels\": LEVELSLIST,\n \"accessibility_labels\": ACCESSIBILITYCATEGORIESLIST,\n \"learner_needs\": NEEDSLIST,\n}\n\n\nmetadata_bitmasks = {}\n\nbitmask_fieldnames = {}\n\nempty_labels = {\n \"languages\": [],\n \"channels\": [],\n}\n\n\nfor key, labels in metadata_lookup.items():\n bitmask_lookup = {}\n i = 0\n while labels[i : i + 64]:\n bitmask_field_name = \"{}_bitmask_{}\".format(key, i)\n bitmask_fieldnames[bitmask_field_name] = []\n for j, label in enumerate(labels):\n info = {\n \"bitmask_field_name\": bitmask_field_name,\n \"field_name\": key,\n \"bits\": 2 ** j,\n \"label\": label,\n }\n bitmask_lookup[label] = info\n bitmask_fieldnames[bitmask_field_name].append(info)\n i += 64\n metadata_bitmasks[key] = bitmask_lookup\n empty_labels[key] = []\n\n\ndef _get_available_languages(base_queryset):\n from kolibri.core.content.models import Language\n\n langs = Language.objects.filter(\n id__in=base_queryset.exclude(lang=None)\n .values_list(\"lang_id\", flat=True)\n .distinct()\n ).values(\"id\", \"lang_name\")\n return list(langs)\n\n\ndef _get_available_channels(base_queryset):\n from kolibri.core.content.models import ChannelMetadata\n\n return list(\n ChannelMetadata.objects.filter(\n id__in=base_queryset.values_list(\"channel_id\", flat=True).distinct()\n ).values(\"id\", \"name\")\n )\n\n\nclass SQLiteBitwiseORAggregate(Aggregate):\n name = \"BitwiseOR\"\n\n def __init__(self, expression, num_bits=None, **extra):\n if not num_bits:\n raise ValueError(\"num_bits must be a positive integer\")\n self.num_bits = num_bits\n super(SQLiteBitwiseORAggregate, self).__init__(\n expression, output_field=IntegerField(), **extra\n )\n\n @property\n def template(self):\n return \" + \".join(\n \"max(%(expressions)s&{})\".format(2 ** i) for i in range(0, self.num_bits)\n )\n\n\ndef get_available_metadata_labels(base_queryset): # noqa: C901\n from kolibri.core.device.models import ContentCacheKey\n\n content_cache_key = ContentCacheKey.get_cache_key()\n try:\n cache_key = \"search-labels:{}:{}\".format(\n content_cache_key,\n hashlib.md5(str(base_queryset.query).encode(\"utf8\")).hexdigest(),\n )\n except EmptyResultSet:\n return empty_labels\n if cache_key not in cache:\n base_queryset = base_queryset.order_by()\n aggregates = {}\n for field in bitmask_fieldnames:\n field_agg = field + \"_agg\"\n if connections[base_queryset.db].vendor == \"sqlite\" or BitOr is None:\n aggregates[field_agg] = SQLiteBitwiseORAggregate(\n field, num_bits=len(bitmask_fieldnames[field])\n )\n elif connections[base_queryset.db].vendor == \"postgresql\":\n aggregates[field_agg] = BitOr(field)\n output = {}\n agg = base_queryset.aggregate(**aggregates)\n for field, values in bitmask_fieldnames.items():\n bit_value = agg[field + \"_agg\"]\n for value in values:\n if value[\"field_name\"] not in output:\n output[value[\"field_name\"]] = []\n if bit_value is not None and bit_value & value[\"bits\"]:\n output[value[\"field_name\"]].append(value[\"label\"])\n output[\"languages\"] = _get_available_languages(base_queryset)\n output[\"channels\"] = _get_available_channels(base_queryset)\n cache.set(cache_key, output, timeout=None)\n return cache.get(cache_key)\n\n\ndef get_all_contentnode_label_metadata():\n from kolibri.core.content.models import ContentNode\n\n return get_available_metadata_labels(ContentNode.objects.filter(available=True))\n\n\ndef annotate_label_bitmasks(queryset):\n update_statements = {}\n for bitmask_fieldname, label_info in bitmask_fieldnames.items():\n update_statements[bitmask_fieldname] = sum(\n Case(\n When(\n **{\n info[\"field_name\"] + \"__contains\": info[\"label\"],\n \"then\": Value(info[\"bits\"]),\n }\n ),\n default=Value(0),\n )\n for info in label_info\n )\n queryset.update(**update_statements)\n", "path": "kolibri/core/content/utils/search.py"}]}
1,983
392
gh_patches_debug_18943
rasdani/github-patches
git_diff
ansible__ansible-lint-996
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- E208 is too aggressive ##### Summary When E208 is triggered, it should be sure that a file is being created. There are modules which only optionally create the file and, when not used in that form, then they should not require a mode be set. Such an example are ini_file, lineinfile, or blockinfile. These modules are frequently used in their default mode with `create: false`. Someone should not be required to set `mode: "0644"` or such in this situation. ##### Issue Type - Bug Report ##### Ansible and Ansible Lint details <!--- Paste verbatim output between tripple backticks --> ```console (paste below) ansible --version ansible 2.9.12 config file = /home/greg/.ansible.cfg configured module search path = ['/home/greg/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules'] ansible python module location = /home/greg/src/ansible_collections/devroles/system/.tox/lint_all/lib/python3.8/site-packages/ansible executable location = .tox/lint_all/bin/ansible python version = 3.8.5 (default, Aug 12 2020, 00:00:00) [GCC 10.2.1 20200723 (Red Hat 10.2.1-1)] ansible-lint --version ansible-lint 4.3.1 ``` - ansible installation method: one of source, pip, OS package pip - ansible-lint installation method: one of source, pip, OS package pip ##### OS / ENVIRONMENT Fedora 32 ##### STEPS TO REPRODUCE ```yaml - name: should not produce E208 lineinfile: line: "# some content here" file: "{{ ansible_user_dir }}/.bashrc" - name: SHOULD produce E208 lineinfile: line: "# more content file: "{{ ansible_user_dir }}/.bash_profile" create: true ``` ##### Desired Behaviour ansible-lint should not error when it encounters a missing "mode" parameter for these modules, when `create: false`. Note that `create: false` is the default behavior of those modules ##### Actual Behaviour ansible-lint throws E208 on both of the above steps --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `lib/ansiblelint/rules/MissingFilePermissionsRule.py` Content: ``` 1 # Copyright (c) 2020 Sorin Sbarnea <[email protected]> 2 # 3 # Permission is hereby granted, free of charge, to any person obtaining a copy 4 # of this software and associated documentation files (the "Software"), to deal 5 # in the Software without restriction, including without limitation the rights 6 # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 7 # copies of the Software, and to permit persons to whom the Software is 8 # furnished to do so, subject to the following conditions: 9 # 10 # The above copyright notice and this permission notice shall be included in 11 # all copies or substantial portions of the Software. 12 # 13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 18 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN 19 # THE SOFTWARE. 20 from ansiblelint.rules import AnsibleLintRule 21 22 23 class MissingFilePermissionsRule(AnsibleLintRule): 24 id = "208" 25 shortdesc = 'File permissions not mentioned' 26 description = ( 27 "Missing mode parameter can cause unexpected file permissions based " 28 "on version of Ansible being used. Be explicit, or if you still " 29 "want the default behavior you can use ``mode: preserve`` to avoid " 30 "hitting this rule. See " 31 "https://github.com/ansible/ansible/issues/71200" 32 ) 33 severity = 'VERY_HIGH' 34 tags = ['unpredictability'] 35 version_added = 'v4.3.0' 36 37 _modules = ( 38 'copy', 39 'file', 40 'ini_file', 41 'lineinfile', 42 'replace', 43 'template', 44 'unarchive', 45 ) 46 47 def matchtask(self, file, task): 48 if task["action"]["__ansible_module__"] not in self._modules: 49 return False 50 51 if task['action'].get('state', None) == "absent": 52 return False 53 54 if task['action'].get('state', None) == "link": 55 return False 56 57 mode = task['action'].get('mode', None) 58 return mode is None 59 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/lib/ansiblelint/rules/MissingFilePermissionsRule.py b/lib/ansiblelint/rules/MissingFilePermissionsRule.py --- a/lib/ansiblelint/rules/MissingFilePermissionsRule.py +++ b/lib/ansiblelint/rules/MissingFilePermissionsRule.py @@ -35,17 +35,28 @@ version_added = 'v4.3.0' _modules = ( + 'assemble', + 'archive', 'copy', 'file', - 'ini_file', - 'lineinfile', 'replace', 'template', 'unarchive', ) + _modules_with_create = ( + 'blockinfile', + 'ini_file', + 'lineinfile' + ) + def matchtask(self, file, task): - if task["action"]["__ansible_module__"] not in self._modules: + if task["action"]["__ansible_module__"] not in self._modules and \ + task["action"]["__ansible_module__"] not in self._modules_with_create: + return False + + if task["action"]["__ansible_module__"] in self._modules_with_create and \ + not task["action"].get("create", False): return False if task['action'].get('state', None) == "absent":
{"golden_diff": "diff --git a/lib/ansiblelint/rules/MissingFilePermissionsRule.py b/lib/ansiblelint/rules/MissingFilePermissionsRule.py\n--- a/lib/ansiblelint/rules/MissingFilePermissionsRule.py\n+++ b/lib/ansiblelint/rules/MissingFilePermissionsRule.py\n@@ -35,17 +35,28 @@\n version_added = 'v4.3.0'\n \n _modules = (\n+ 'assemble',\n+ 'archive',\n 'copy',\n 'file',\n- 'ini_file',\n- 'lineinfile',\n 'replace',\n 'template',\n 'unarchive',\n )\n \n+ _modules_with_create = (\n+ 'blockinfile',\n+ 'ini_file',\n+ 'lineinfile'\n+ )\n+\n def matchtask(self, file, task):\n- if task[\"action\"][\"__ansible_module__\"] not in self._modules:\n+ if task[\"action\"][\"__ansible_module__\"] not in self._modules and \\\n+ task[\"action\"][\"__ansible_module__\"] not in self._modules_with_create:\n+ return False\n+\n+ if task[\"action\"][\"__ansible_module__\"] in self._modules_with_create and \\\n+ not task[\"action\"].get(\"create\", False):\n return False\n \n if task['action'].get('state', None) == \"absent\":\n", "issue": "E208 is too aggressive\n##### Summary\r\nWhen E208 is triggered, it should be sure that a file is being created. There are modules which only optionally create the file and, when not used in that form, then they should not require a mode be set. Such an example are ini_file, lineinfile, or blockinfile. These modules are frequently used in their default mode with `create: false`. Someone should not be required to set `mode: \"0644\"` or such in this situation.\r\n\r\n\r\n##### Issue Type\r\n\r\n- Bug Report\r\n\r\n##### Ansible and Ansible Lint details\r\n<!--- Paste verbatim output between tripple backticks -->\r\n```console (paste below)\r\nansible --version\r\nansible 2.9.12\r\n config file = /home/greg/.ansible.cfg\r\n configured module search path = ['/home/greg/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']\r\n ansible python module location = /home/greg/src/ansible_collections/devroles/system/.tox/lint_all/lib/python3.8/site-packages/ansible\r\n executable location = .tox/lint_all/bin/ansible\r\n python version = 3.8.5 (default, Aug 12 2020, 00:00:00) [GCC 10.2.1 20200723 (Red Hat 10.2.1-1)]\r\n\r\nansible-lint --version\r\nansible-lint 4.3.1\r\n```\r\n\r\n- ansible installation method: one of source, pip, OS package\r\npip\r\n- ansible-lint installation method: one of source, pip, OS package\r\npip\r\n\r\n##### OS / ENVIRONMENT\r\nFedora 32\r\n\r\n\r\n##### STEPS TO REPRODUCE\r\n```yaml\r\n- name: should not produce E208\r\n lineinfile:\r\n line: \"# some content here\"\r\n file: \"{{ ansible_user_dir }}/.bashrc\"\r\n\r\n- name: SHOULD produce E208\r\n lineinfile:\r\n line: \"# more content\r\n file: \"{{ ansible_user_dir }}/.bash_profile\"\r\n create: true\r\n```\r\n##### Desired Behaviour\r\nansible-lint should not error when it encounters a missing \"mode\" parameter for these modules, when `create: false`. Note that `create: false` is the default behavior of those modules\r\n\r\n##### Actual Behaviour\r\nansible-lint throws E208 on both of the above steps\n", "before_files": [{"content": "# Copyright (c) 2020 Sorin Sbarnea <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\nfrom ansiblelint.rules import AnsibleLintRule\n\n\nclass MissingFilePermissionsRule(AnsibleLintRule):\n id = \"208\"\n shortdesc = 'File permissions not mentioned'\n description = (\n \"Missing mode parameter can cause unexpected file permissions based \"\n \"on version of Ansible being used. Be explicit, or if you still \"\n \"want the default behavior you can use ``mode: preserve`` to avoid \"\n \"hitting this rule. See \"\n \"https://github.com/ansible/ansible/issues/71200\"\n )\n severity = 'VERY_HIGH'\n tags = ['unpredictability']\n version_added = 'v4.3.0'\n\n _modules = (\n 'copy',\n 'file',\n 'ini_file',\n 'lineinfile',\n 'replace',\n 'template',\n 'unarchive',\n )\n\n def matchtask(self, file, task):\n if task[\"action\"][\"__ansible_module__\"] not in self._modules:\n return False\n\n if task['action'].get('state', None) == \"absent\":\n return False\n\n if task['action'].get('state', None) == \"link\":\n return False\n\n mode = task['action'].get('mode', None)\n return mode is None\n", "path": "lib/ansiblelint/rules/MissingFilePermissionsRule.py"}], "after_files": [{"content": "# Copyright (c) 2020 Sorin Sbarnea <[email protected]>\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\nfrom ansiblelint.rules import AnsibleLintRule\n\n\nclass MissingFilePermissionsRule(AnsibleLintRule):\n id = \"208\"\n shortdesc = 'File permissions not mentioned'\n description = (\n \"Missing mode parameter can cause unexpected file permissions based \"\n \"on version of Ansible being used. Be explicit, or if you still \"\n \"want the default behavior you can use ``mode: preserve`` to avoid \"\n \"hitting this rule. See \"\n \"https://github.com/ansible/ansible/issues/71200\"\n )\n severity = 'VERY_HIGH'\n tags = ['unpredictability']\n version_added = 'v4.3.0'\n\n _modules = (\n 'assemble',\n 'archive',\n 'copy',\n 'file',\n 'replace',\n 'template',\n 'unarchive',\n )\n\n _modules_with_create = (\n 'blockinfile',\n 'ini_file',\n 'lineinfile'\n )\n\n def matchtask(self, file, task):\n if task[\"action\"][\"__ansible_module__\"] not in self._modules and \\\n task[\"action\"][\"__ansible_module__\"] not in self._modules_with_create:\n return False\n\n if task[\"action\"][\"__ansible_module__\"] in self._modules_with_create and \\\n not task[\"action\"].get(\"create\", False):\n return False\n\n if task['action'].get('state', None) == \"absent\":\n return False\n\n if task['action'].get('state', None) == \"link\":\n return False\n\n mode = task['action'].get('mode', None)\n return mode is None\n", "path": "lib/ansiblelint/rules/MissingFilePermissionsRule.py"}]}
1,422
290
gh_patches_debug_22774
rasdani/github-patches
git_diff
vispy__vispy-1596
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- XYZAxisVisuals Override Defaults It looks like XYZAxisVisual is not overridable in the **init** function for the verts and color arguments? Passing in `pos=my_custom_verts` results in `TypeError: __init__() got multiple values for keyword argument 'pos'`. The `**kwds` argument looks like it is being passed through to the Line class, via LineVisual. Does a method exist to specify the verts, color, and / or connect kwargs? I am hesitant to submit a PR modifying **kwds since I am not 100% sure how the passing is working. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `vispy/visuals/xyz_axis.py` Content: ``` 1 2 import numpy as np 3 4 from .line import LineVisual 5 6 7 class XYZAxisVisual(LineVisual): 8 """ 9 Simple 3D axis for indicating coordinate system orientation. Axes are 10 x=red, y=green, z=blue. 11 """ 12 def __init__(self, **kwargs): 13 verts = np.array([[0, 0, 0], 14 [1, 0, 0], 15 [0, 0, 0], 16 [0, 1, 0], 17 [0, 0, 0], 18 [0, 0, 1]]) 19 color = np.array([[1, 0, 0, 1], 20 [1, 0, 0, 1], 21 [0, 1, 0, 1], 22 [0, 1, 0, 1], 23 [0, 0, 1, 1], 24 [0, 0, 1, 1]]) 25 LineVisual.__init__(self, pos=verts, color=color, connect='segments', 26 method='gl', **kwargs) 27 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/vispy/visuals/xyz_axis.py b/vispy/visuals/xyz_axis.py --- a/vispy/visuals/xyz_axis.py +++ b/vispy/visuals/xyz_axis.py @@ -10,17 +10,24 @@ x=red, y=green, z=blue. """ def __init__(self, **kwargs): - verts = np.array([[0, 0, 0], - [1, 0, 0], - [0, 0, 0], - [0, 1, 0], - [0, 0, 0], - [0, 0, 1]]) + pos = np.array([[0, 0, 0], + [1, 0, 0], + [0, 0, 0], + [0, 1, 0], + [0, 0, 0], + [0, 0, 1]]) color = np.array([[1, 0, 0, 1], [1, 0, 0, 1], [0, 1, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1], [0, 0, 1, 1]]) - LineVisual.__init__(self, pos=verts, color=color, connect='segments', - method='gl', **kwargs) + connect = 'segments' + method = 'gl' + + kwargs.setdefault('pos', pos) + kwargs.setdefault('color', color) + kwargs.setdefault('connect', connect) + kwargs.setdefault('method', method) + + LineVisual.__init__(self, **kwargs)
{"golden_diff": "diff --git a/vispy/visuals/xyz_axis.py b/vispy/visuals/xyz_axis.py\n--- a/vispy/visuals/xyz_axis.py\n+++ b/vispy/visuals/xyz_axis.py\n@@ -10,17 +10,24 @@\n x=red, y=green, z=blue.\n \"\"\"\n def __init__(self, **kwargs):\n- verts = np.array([[0, 0, 0],\n- [1, 0, 0],\n- [0, 0, 0],\n- [0, 1, 0],\n- [0, 0, 0],\n- [0, 0, 1]])\n+ pos = np.array([[0, 0, 0],\n+ [1, 0, 0],\n+ [0, 0, 0],\n+ [0, 1, 0],\n+ [0, 0, 0],\n+ [0, 0, 1]])\n color = np.array([[1, 0, 0, 1],\n [1, 0, 0, 1],\n [0, 1, 0, 1],\n [0, 1, 0, 1],\n [0, 0, 1, 1],\n [0, 0, 1, 1]])\n- LineVisual.__init__(self, pos=verts, color=color, connect='segments',\n- method='gl', **kwargs)\n+ connect = 'segments'\n+ method = 'gl'\n+\n+ kwargs.setdefault('pos', pos)\n+ kwargs.setdefault('color', color)\n+ kwargs.setdefault('connect', connect)\n+ kwargs.setdefault('method', method)\n+\n+ LineVisual.__init__(self, **kwargs)\n", "issue": "XYZAxisVisuals Override Defaults\nIt looks like XYZAxisVisual is not overridable in the **init** function for the verts and color arguments? Passing in `pos=my_custom_verts` results in `TypeError: __init__() got multiple values for keyword argument 'pos'`.\n\nThe `**kwds` argument looks like it is being passed through to the Line class, via LineVisual. Does a method exist to specify the verts, color, and / or connect kwargs? I am hesitant to submit a PR modifying **kwds since I am not 100% sure how the passing is working.\n\n", "before_files": [{"content": "\nimport numpy as np\n\nfrom .line import LineVisual\n\n\nclass XYZAxisVisual(LineVisual):\n \"\"\"\n Simple 3D axis for indicating coordinate system orientation. Axes are\n x=red, y=green, z=blue.\n \"\"\"\n def __init__(self, **kwargs):\n verts = np.array([[0, 0, 0],\n [1, 0, 0],\n [0, 0, 0],\n [0, 1, 0],\n [0, 0, 0],\n [0, 0, 1]])\n color = np.array([[1, 0, 0, 1],\n [1, 0, 0, 1],\n [0, 1, 0, 1],\n [0, 1, 0, 1],\n [0, 0, 1, 1],\n [0, 0, 1, 1]])\n LineVisual.__init__(self, pos=verts, color=color, connect='segments',\n method='gl', **kwargs)\n", "path": "vispy/visuals/xyz_axis.py"}], "after_files": [{"content": "\nimport numpy as np\n\nfrom .line import LineVisual\n\n\nclass XYZAxisVisual(LineVisual):\n \"\"\"\n Simple 3D axis for indicating coordinate system orientation. Axes are\n x=red, y=green, z=blue.\n \"\"\"\n def __init__(self, **kwargs):\n pos = np.array([[0, 0, 0],\n [1, 0, 0],\n [0, 0, 0],\n [0, 1, 0],\n [0, 0, 0],\n [0, 0, 1]])\n color = np.array([[1, 0, 0, 1],\n [1, 0, 0, 1],\n [0, 1, 0, 1],\n [0, 1, 0, 1],\n [0, 0, 1, 1],\n [0, 0, 1, 1]])\n connect = 'segments'\n method = 'gl'\n\n kwargs.setdefault('pos', pos)\n kwargs.setdefault('color', color)\n kwargs.setdefault('connect', connect)\n kwargs.setdefault('method', method)\n\n LineVisual.__init__(self, **kwargs)\n", "path": "vispy/visuals/xyz_axis.py"}]}
673
406
gh_patches_debug_30150
rasdani/github-patches
git_diff
ocf__ocfweb-141
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Docs don't auto-reload in dev Because we only read docs once, changes to the Markdown files require manually killing and restarting the server. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ocfweb/docs/markdown_based.py` Content: ``` 1 """Documents backed by Markdown. 2 3 This is the most common type of doc. It reads a Markdown fil and renders it in 4 a standard template. 5 6 Markdown documents can specify the meta attributes: 7 8 - [[!meta title="Page title"]] 9 Changes the page title; all templates must specify this. 10 11 - [[!meta template="my_template.html"]] 12 Changes the page template; most templates should *not* specify this, 13 unless they want to customize something (e.g. the sidebar) 14 """ 15 import os 16 from functools import partial 17 from pathlib import Path 18 19 from django.shortcuts import render 20 21 from ocfweb.component.markdown import markdown_and_toc 22 from ocfweb.component.markdown import text_and_meta 23 from ocfweb.docs.doc import Document 24 25 26 DOCS_DIR = Path(__file__).parent.joinpath('docs') 27 28 29 def render_markdown_doc(meta, text, doc, request): 30 html, toc = markdown_and_toc(text) 31 return render( 32 request, 33 meta.get('template', 'doc.html'), 34 { 35 'title': doc.title, 36 'doc': doc, 37 'html': html, 38 'toc': toc, 39 }, 40 ) 41 42 43 def get_markdown_docs(): 44 for path in DOCS_DIR.glob('**/*.md'): 45 name, _ = os.path.splitext(str(path.relative_to(DOCS_DIR))) 46 47 # sanity check that the file is under the directory we expect 48 assert DOCS_DIR in path.parents 49 50 with path.open() as f: 51 text, meta = text_and_meta(f) 52 53 if 'title' not in meta: 54 raise ValueError('Document {} lacks required title meta variable.'.format(name)) 55 56 yield Document( 57 name='/' + name, 58 title=meta['title'], 59 render=partial(render_markdown_doc, meta, text) 60 ) 61 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ocfweb/docs/markdown_based.py b/ocfweb/docs/markdown_based.py --- a/ocfweb/docs/markdown_based.py +++ b/ocfweb/docs/markdown_based.py @@ -1,6 +1,6 @@ """Documents backed by Markdown. -This is the most common type of doc. It reads a Markdown fil and renders it in +This is the most common type of doc. It reads a Markdown file and renders it in a standard template. Markdown documents can specify the meta attributes: @@ -16,6 +16,7 @@ from functools import partial from pathlib import Path +from django.conf import settings from django.shortcuts import render from ocfweb.component.markdown import markdown_and_toc @@ -26,13 +27,20 @@ DOCS_DIR = Path(__file__).parent.joinpath('docs') -def render_markdown_doc(meta, text, doc, request): +def render_markdown_doc(path, meta, text, doc, request): + + # Reload markdown docs if in development + if settings.DEBUG: + with path.open() as f: + text, meta = text_and_meta(f) + html, toc = markdown_and_toc(text) + return render( request, meta.get('template', 'doc.html'), { - 'title': doc.title, + 'title': meta['title'], 'doc': doc, 'html': html, 'toc': toc, @@ -56,5 +64,5 @@ yield Document( name='/' + name, title=meta['title'], - render=partial(render_markdown_doc, meta, text) + render=partial(render_markdown_doc, path, meta, text), )
{"golden_diff": "diff --git a/ocfweb/docs/markdown_based.py b/ocfweb/docs/markdown_based.py\n--- a/ocfweb/docs/markdown_based.py\n+++ b/ocfweb/docs/markdown_based.py\n@@ -1,6 +1,6 @@\n \"\"\"Documents backed by Markdown.\n \n-This is the most common type of doc. It reads a Markdown fil and renders it in\n+This is the most common type of doc. It reads a Markdown file and renders it in\n a standard template.\n \n Markdown documents can specify the meta attributes:\n@@ -16,6 +16,7 @@\n from functools import partial\n from pathlib import Path\n \n+from django.conf import settings\n from django.shortcuts import render\n \n from ocfweb.component.markdown import markdown_and_toc\n@@ -26,13 +27,20 @@\n DOCS_DIR = Path(__file__).parent.joinpath('docs')\n \n \n-def render_markdown_doc(meta, text, doc, request):\n+def render_markdown_doc(path, meta, text, doc, request):\n+\n+ # Reload markdown docs if in development\n+ if settings.DEBUG:\n+ with path.open() as f:\n+ text, meta = text_and_meta(f)\n+\n html, toc = markdown_and_toc(text)\n+\n return render(\n request,\n meta.get('template', 'doc.html'),\n {\n- 'title': doc.title,\n+ 'title': meta['title'],\n 'doc': doc,\n 'html': html,\n 'toc': toc,\n@@ -56,5 +64,5 @@\n yield Document(\n name='/' + name,\n title=meta['title'],\n- render=partial(render_markdown_doc, meta, text)\n+ render=partial(render_markdown_doc, path, meta, text),\n )\n", "issue": "Docs don't auto-reload in dev\nBecause we only read docs once, changes to the Markdown files require manually killing and restarting the server.\n\n", "before_files": [{"content": "\"\"\"Documents backed by Markdown.\n\nThis is the most common type of doc. It reads a Markdown fil and renders it in\na standard template.\n\nMarkdown documents can specify the meta attributes:\n\n - [[!meta title=\"Page title\"]]\n Changes the page title; all templates must specify this.\n\n - [[!meta template=\"my_template.html\"]]\n Changes the page template; most templates should *not* specify this,\n unless they want to customize something (e.g. the sidebar)\n\"\"\"\nimport os\nfrom functools import partial\nfrom pathlib import Path\n\nfrom django.shortcuts import render\n\nfrom ocfweb.component.markdown import markdown_and_toc\nfrom ocfweb.component.markdown import text_and_meta\nfrom ocfweb.docs.doc import Document\n\n\nDOCS_DIR = Path(__file__).parent.joinpath('docs')\n\n\ndef render_markdown_doc(meta, text, doc, request):\n html, toc = markdown_and_toc(text)\n return render(\n request,\n meta.get('template', 'doc.html'),\n {\n 'title': doc.title,\n 'doc': doc,\n 'html': html,\n 'toc': toc,\n },\n )\n\n\ndef get_markdown_docs():\n for path in DOCS_DIR.glob('**/*.md'):\n name, _ = os.path.splitext(str(path.relative_to(DOCS_DIR)))\n\n # sanity check that the file is under the directory we expect\n assert DOCS_DIR in path.parents\n\n with path.open() as f:\n text, meta = text_and_meta(f)\n\n if 'title' not in meta:\n raise ValueError('Document {} lacks required title meta variable.'.format(name))\n\n yield Document(\n name='/' + name,\n title=meta['title'],\n render=partial(render_markdown_doc, meta, text)\n )\n", "path": "ocfweb/docs/markdown_based.py"}], "after_files": [{"content": "\"\"\"Documents backed by Markdown.\n\nThis is the most common type of doc. It reads a Markdown file and renders it in\na standard template.\n\nMarkdown documents can specify the meta attributes:\n\n - [[!meta title=\"Page title\"]]\n Changes the page title; all templates must specify this.\n\n - [[!meta template=\"my_template.html\"]]\n Changes the page template; most templates should *not* specify this,\n unless they want to customize something (e.g. the sidebar)\n\"\"\"\nimport os\nfrom functools import partial\nfrom pathlib import Path\n\nfrom django.conf import settings\nfrom django.shortcuts import render\n\nfrom ocfweb.component.markdown import markdown_and_toc\nfrom ocfweb.component.markdown import text_and_meta\nfrom ocfweb.docs.doc import Document\n\n\nDOCS_DIR = Path(__file__).parent.joinpath('docs')\n\n\ndef render_markdown_doc(path, meta, text, doc, request):\n\n # Reload markdown docs if in development\n if settings.DEBUG:\n with path.open() as f:\n text, meta = text_and_meta(f)\n\n html, toc = markdown_and_toc(text)\n\n return render(\n request,\n meta.get('template', 'doc.html'),\n {\n 'title': meta['title'],\n 'doc': doc,\n 'html': html,\n 'toc': toc,\n },\n )\n\n\ndef get_markdown_docs():\n for path in DOCS_DIR.glob('**/*.md'):\n name, _ = os.path.splitext(str(path.relative_to(DOCS_DIR)))\n\n # sanity check that the file is under the directory we expect\n assert DOCS_DIR in path.parents\n\n with path.open() as f:\n text, meta = text_and_meta(f)\n\n if 'title' not in meta:\n raise ValueError('Document {} lacks required title meta variable.'.format(name))\n\n yield Document(\n name='/' + name,\n title=meta['title'],\n render=partial(render_markdown_doc, path, meta, text),\n )\n", "path": "ocfweb/docs/markdown_based.py"}]}
793
390
gh_patches_debug_12226
rasdani/github-patches
git_diff
googleapis__python-bigquery-643
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- deps: expand extras to support pyarrow v4 We're actually already testing with pyarrow v4 in some of the samples tests, so this should be safe to expand in our `setup.py` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 # Copyright 2018 Google LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import io 16 import os 17 18 import setuptools 19 20 21 # Package metadata. 22 23 name = "google-cloud-bigquery" 24 description = "Google BigQuery API client library" 25 26 # Should be one of: 27 # 'Development Status :: 3 - Alpha' 28 # 'Development Status :: 4 - Beta' 29 # 'Development Status :: 5 - Production/Stable' 30 release_status = "Development Status :: 5 - Production/Stable" 31 dependencies = [ 32 "google-api-core[grpc] >= 1.23.0, < 2.0.0dev", 33 "proto-plus >= 1.10.0", 34 "google-cloud-core >= 1.4.1, < 2.0dev", 35 "google-resumable-media >= 0.6.0, < 2.0dev", 36 "packaging >= 14.3", 37 "protobuf >= 3.12.0", 38 "requests >= 2.18.0, < 3.0.0dev", 39 ] 40 extras = { 41 "bqstorage": [ 42 "google-cloud-bigquery-storage >= 2.0.0, <3.0.0dev", 43 # Due to an issue in pip's dependency resolver, the `grpc` extra is not 44 # installed, even though `google-cloud-bigquery-storage` specifies it 45 # as `google-api-core[grpc]`. We thus need to explicitly specify it here. 46 # See: https://github.com/googleapis/python-bigquery/issues/83 The 47 # grpc.Channel.close() method isn't added until 1.32.0. 48 # https://github.com/grpc/grpc/pull/15254 49 "grpcio >= 1.32.0, < 2.0dev", 50 "pyarrow >= 1.0.0, < 4.0dev", 51 ], 52 "pandas": ["pandas>=0.23.0", "pyarrow >= 1.0.0, < 4.0dev"], 53 "bignumeric_type": ["pyarrow >= 3.0.0, < 4.0dev"], 54 "tqdm": ["tqdm >= 4.7.4, <5.0.0dev"], 55 "opentelemetry": [ 56 "opentelemetry-api >= 0.11b0", 57 "opentelemetry-sdk >= 0.11b0", 58 "opentelemetry-instrumentation >= 0.11b0", 59 ], 60 } 61 62 all_extras = [] 63 64 for extra in extras: 65 # Exclude this extra from all to avoid overly strict dependencies on core 66 # libraries such as pyarrow. 67 # https://github.com/googleapis/python-bigquery/issues/563 68 if extra in {"bignumeric_type"}: 69 continue 70 all_extras.extend(extras[extra]) 71 72 extras["all"] = all_extras 73 74 # Setup boilerplate below this line. 75 76 package_root = os.path.abspath(os.path.dirname(__file__)) 77 78 readme_filename = os.path.join(package_root, "README.rst") 79 with io.open(readme_filename, encoding="utf-8") as readme_file: 80 readme = readme_file.read() 81 82 version = {} 83 with open(os.path.join(package_root, "google/cloud/bigquery/version.py")) as fp: 84 exec(fp.read(), version) 85 version = version["__version__"] 86 87 # Only include packages under the 'google' namespace. Do not include tests, 88 # benchmarks, etc. 89 packages = [ 90 package 91 for package in setuptools.PEP420PackageFinder.find() 92 if package.startswith("google") 93 ] 94 95 # Determine which namespaces are needed. 96 namespaces = ["google"] 97 if "google.cloud" in packages: 98 namespaces.append("google.cloud") 99 100 101 setuptools.setup( 102 name=name, 103 version=version, 104 description=description, 105 long_description=readme, 106 author="Google LLC", 107 author_email="[email protected]", 108 license="Apache 2.0", 109 url="https://github.com/googleapis/python-bigquery", 110 classifiers=[ 111 release_status, 112 "Intended Audience :: Developers", 113 "License :: OSI Approved :: Apache Software License", 114 "Programming Language :: Python", 115 "Programming Language :: Python :: 3", 116 "Programming Language :: Python :: 3.6", 117 "Programming Language :: Python :: 3.7", 118 "Programming Language :: Python :: 3.8", 119 "Programming Language :: Python :: 3.9", 120 "Operating System :: OS Independent", 121 "Topic :: Internet", 122 ], 123 platforms="Posix; MacOS X; Windows", 124 packages=packages, 125 namespace_packages=namespaces, 126 install_requires=dependencies, 127 extras_require=extras, 128 python_requires=">=3.6, <3.10", 129 include_package_data=True, 130 zip_safe=False, 131 ) 132 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -47,10 +47,10 @@ # grpc.Channel.close() method isn't added until 1.32.0. # https://github.com/grpc/grpc/pull/15254 "grpcio >= 1.32.0, < 2.0dev", - "pyarrow >= 1.0.0, < 4.0dev", + "pyarrow >= 1.0.0, < 5.0dev", ], - "pandas": ["pandas>=0.23.0", "pyarrow >= 1.0.0, < 4.0dev"], - "bignumeric_type": ["pyarrow >= 3.0.0, < 4.0dev"], + "pandas": ["pandas>=0.23.0", "pyarrow >= 1.0.0, < 5.0dev"], + "bignumeric_type": ["pyarrow >= 3.0.0, < 5.0dev"], "tqdm": ["tqdm >= 4.7.4, <5.0.0dev"], "opentelemetry": [ "opentelemetry-api >= 0.11b0",
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -47,10 +47,10 @@\n # grpc.Channel.close() method isn't added until 1.32.0.\n # https://github.com/grpc/grpc/pull/15254\n \"grpcio >= 1.32.0, < 2.0dev\",\n- \"pyarrow >= 1.0.0, < 4.0dev\",\n+ \"pyarrow >= 1.0.0, < 5.0dev\",\n ],\n- \"pandas\": [\"pandas>=0.23.0\", \"pyarrow >= 1.0.0, < 4.0dev\"],\n- \"bignumeric_type\": [\"pyarrow >= 3.0.0, < 4.0dev\"],\n+ \"pandas\": [\"pandas>=0.23.0\", \"pyarrow >= 1.0.0, < 5.0dev\"],\n+ \"bignumeric_type\": [\"pyarrow >= 3.0.0, < 5.0dev\"],\n \"tqdm\": [\"tqdm >= 4.7.4, <5.0.0dev\"],\n \"opentelemetry\": [\n \"opentelemetry-api >= 0.11b0\",\n", "issue": "deps: expand extras to support pyarrow v4\nWe're actually already testing with pyarrow v4 in some of the samples tests, so this should be safe to expand in our `setup.py`\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = \"google-cloud-bigquery\"\ndescription = \"Google BigQuery API client library\"\n\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = \"Development Status :: 5 - Production/Stable\"\ndependencies = [\n \"google-api-core[grpc] >= 1.23.0, < 2.0.0dev\",\n \"proto-plus >= 1.10.0\",\n \"google-cloud-core >= 1.4.1, < 2.0dev\",\n \"google-resumable-media >= 0.6.0, < 2.0dev\",\n \"packaging >= 14.3\",\n \"protobuf >= 3.12.0\",\n \"requests >= 2.18.0, < 3.0.0dev\",\n]\nextras = {\n \"bqstorage\": [\n \"google-cloud-bigquery-storage >= 2.0.0, <3.0.0dev\",\n # Due to an issue in pip's dependency resolver, the `grpc` extra is not\n # installed, even though `google-cloud-bigquery-storage` specifies it\n # as `google-api-core[grpc]`. We thus need to explicitly specify it here.\n # See: https://github.com/googleapis/python-bigquery/issues/83 The\n # grpc.Channel.close() method isn't added until 1.32.0.\n # https://github.com/grpc/grpc/pull/15254\n \"grpcio >= 1.32.0, < 2.0dev\",\n \"pyarrow >= 1.0.0, < 4.0dev\",\n ],\n \"pandas\": [\"pandas>=0.23.0\", \"pyarrow >= 1.0.0, < 4.0dev\"],\n \"bignumeric_type\": [\"pyarrow >= 3.0.0, < 4.0dev\"],\n \"tqdm\": [\"tqdm >= 4.7.4, <5.0.0dev\"],\n \"opentelemetry\": [\n \"opentelemetry-api >= 0.11b0\",\n \"opentelemetry-sdk >= 0.11b0\",\n \"opentelemetry-instrumentation >= 0.11b0\",\n ],\n}\n\nall_extras = []\n\nfor extra in extras:\n # Exclude this extra from all to avoid overly strict dependencies on core\n # libraries such as pyarrow.\n # https://github.com/googleapis/python-bigquery/issues/563\n if extra in {\"bignumeric_type\"}:\n continue\n all_extras.extend(extras[extra])\n\nextras[\"all\"] = all_extras\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.rst\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\nversion = {}\nwith open(os.path.join(package_root, \"google/cloud/bigquery/version.py\")) as fp:\n exec(fp.read(), version)\nversion = version[\"__version__\"]\n\n# Only include packages under the 'google' namespace. Do not include tests,\n# benchmarks, etc.\npackages = [\n package\n for package in setuptools.PEP420PackageFinder.find()\n if package.startswith(\"google\")\n]\n\n# Determine which namespaces are needed.\nnamespaces = [\"google\"]\nif \"google.cloud\" in packages:\n namespaces.append(\"google.cloud\")\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n license=\"Apache 2.0\",\n url=\"https://github.com/googleapis/python-bigquery\",\n classifiers=[\n release_status,\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet\",\n ],\n platforms=\"Posix; MacOS X; Windows\",\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n python_requires=\">=3.6, <3.10\",\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "setup.py"}], "after_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nimport os\n\nimport setuptools\n\n\n# Package metadata.\n\nname = \"google-cloud-bigquery\"\ndescription = \"Google BigQuery API client library\"\n\n# Should be one of:\n# 'Development Status :: 3 - Alpha'\n# 'Development Status :: 4 - Beta'\n# 'Development Status :: 5 - Production/Stable'\nrelease_status = \"Development Status :: 5 - Production/Stable\"\ndependencies = [\n \"google-api-core[grpc] >= 1.23.0, < 2.0.0dev\",\n \"proto-plus >= 1.10.0\",\n \"google-cloud-core >= 1.4.1, < 2.0dev\",\n \"google-resumable-media >= 0.6.0, < 2.0dev\",\n \"packaging >= 14.3\",\n \"protobuf >= 3.12.0\",\n \"requests >= 2.18.0, < 3.0.0dev\",\n]\nextras = {\n \"bqstorage\": [\n \"google-cloud-bigquery-storage >= 2.0.0, <3.0.0dev\",\n # Due to an issue in pip's dependency resolver, the `grpc` extra is not\n # installed, even though `google-cloud-bigquery-storage` specifies it\n # as `google-api-core[grpc]`. We thus need to explicitly specify it here.\n # See: https://github.com/googleapis/python-bigquery/issues/83 The\n # grpc.Channel.close() method isn't added until 1.32.0.\n # https://github.com/grpc/grpc/pull/15254\n \"grpcio >= 1.32.0, < 2.0dev\",\n \"pyarrow >= 1.0.0, < 5.0dev\",\n ],\n \"pandas\": [\"pandas>=0.23.0\", \"pyarrow >= 1.0.0, < 5.0dev\"],\n \"bignumeric_type\": [\"pyarrow >= 3.0.0, < 5.0dev\"],\n \"tqdm\": [\"tqdm >= 4.7.4, <5.0.0dev\"],\n \"opentelemetry\": [\n \"opentelemetry-api >= 0.11b0\",\n \"opentelemetry-sdk >= 0.11b0\",\n \"opentelemetry-instrumentation >= 0.11b0\",\n ],\n}\n\nall_extras = []\n\nfor extra in extras:\n # Exclude this extra from all to avoid overly strict dependencies on core\n # libraries such as pyarrow.\n # https://github.com/googleapis/python-bigquery/issues/563\n if extra in {\"bignumeric_type\"}:\n continue\n all_extras.extend(extras[extra])\n\nextras[\"all\"] = all_extras\n\n# Setup boilerplate below this line.\n\npackage_root = os.path.abspath(os.path.dirname(__file__))\n\nreadme_filename = os.path.join(package_root, \"README.rst\")\nwith io.open(readme_filename, encoding=\"utf-8\") as readme_file:\n readme = readme_file.read()\n\nversion = {}\nwith open(os.path.join(package_root, \"google/cloud/bigquery/version.py\")) as fp:\n exec(fp.read(), version)\nversion = version[\"__version__\"]\n\n# Only include packages under the 'google' namespace. Do not include tests,\n# benchmarks, etc.\npackages = [\n package\n for package in setuptools.PEP420PackageFinder.find()\n if package.startswith(\"google\")\n]\n\n# Determine which namespaces are needed.\nnamespaces = [\"google\"]\nif \"google.cloud\" in packages:\n namespaces.append(\"google.cloud\")\n\n\nsetuptools.setup(\n name=name,\n version=version,\n description=description,\n long_description=readme,\n author=\"Google LLC\",\n author_email=\"[email protected]\",\n license=\"Apache 2.0\",\n url=\"https://github.com/googleapis/python-bigquery\",\n classifiers=[\n release_status,\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Operating System :: OS Independent\",\n \"Topic :: Internet\",\n ],\n platforms=\"Posix; MacOS X; Windows\",\n packages=packages,\n namespace_packages=namespaces,\n install_requires=dependencies,\n extras_require=extras,\n python_requires=\">=3.6, <3.10\",\n include_package_data=True,\n zip_safe=False,\n)\n", "path": "setup.py"}]}
1,756
302
gh_patches_debug_15693
rasdani/github-patches
git_diff
pypa__pip-2396
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- pip.utils.filesystem.check_path_owner should treat group membership properly I have two users who share same .pip/cache directory: `a` and `b`. Both users are members of the group `pip`. There is third user, `pip` who is also member of group `pip` and that user handles all download and caching for users a and b. `.pip/cache` directory configured to have group writeable permission and sticky bit, to make sure all newly created files and folders are owned by group `pip`. Function [check_path_owner](https://github.com/pypa/pip/blob/6.0.6/pip/utils/filesystem.py#L7) only checks if owner is same user, but it is somewhat wrong. You should check group membership as well. Another option, is to create new command-line option, which will allow to ignore this check. Either solution will work. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pip/utils/filesystem.py` Content: ``` 1 import os 2 import os.path 3 4 from pip.compat import get_path_uid 5 6 7 def check_path_owner(path): 8 # If we don't have a way to check the effective uid of this process, then 9 # we'll just assume that we own the directory. 10 if not hasattr(os, "geteuid"): 11 return True 12 13 previous = None 14 while path != previous: 15 if os.path.lexists(path): 16 # Actually do the ownership check 17 try: 18 if get_path_uid(path) != os.geteuid(): 19 return False 20 except OSError: 21 return False 22 return True 23 else: 24 previous, path = path, os.path.dirname(path) 25 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pip/utils/filesystem.py b/pip/utils/filesystem.py --- a/pip/utils/filesystem.py +++ b/pip/utils/filesystem.py @@ -13,12 +13,16 @@ previous = None while path != previous: if os.path.lexists(path): - # Actually do the ownership check - try: - if get_path_uid(path) != os.geteuid(): + # Check if path is writable by current user. + if os.geteuid() == 0: + # Special handling for root user in order to handle properly + # cases where users use sudo without -H flag. + try: + path_uid = get_path_uid(path) + except OSError: return False - except OSError: - return False - return True + return path_uid == 0 + else: + return os.access(path, os.W_OK) else: previous, path = path, os.path.dirname(path)
{"golden_diff": "diff --git a/pip/utils/filesystem.py b/pip/utils/filesystem.py\n--- a/pip/utils/filesystem.py\n+++ b/pip/utils/filesystem.py\n@@ -13,12 +13,16 @@\n previous = None\n while path != previous:\n if os.path.lexists(path):\n- # Actually do the ownership check\n- try:\n- if get_path_uid(path) != os.geteuid():\n+ # Check if path is writable by current user.\n+ if os.geteuid() == 0:\n+ # Special handling for root user in order to handle properly\n+ # cases where users use sudo without -H flag.\n+ try:\n+ path_uid = get_path_uid(path)\n+ except OSError:\n return False\n- except OSError:\n- return False\n- return True\n+ return path_uid == 0\n+ else:\n+ return os.access(path, os.W_OK)\n else:\n previous, path = path, os.path.dirname(path)\n", "issue": "pip.utils.filesystem.check_path_owner should treat group membership properly\nI have two users who share same .pip/cache directory: `a` and `b`. Both users are members of the group `pip`. There is third user, `pip` who is also member of group `pip` and that user handles all download and caching for users a and b. `.pip/cache` directory configured to have group writeable permission and sticky bit, to make sure all newly created files and folders are owned by group `pip`. \n\nFunction [check_path_owner](https://github.com/pypa/pip/blob/6.0.6/pip/utils/filesystem.py#L7) only checks if owner is same user, but it is somewhat wrong. You should check group membership as well. \n\nAnother option, is to create new command-line option, which will allow to ignore this check. \n\nEither solution will work.\n\n", "before_files": [{"content": "import os\nimport os.path\n\nfrom pip.compat import get_path_uid\n\n\ndef check_path_owner(path):\n # If we don't have a way to check the effective uid of this process, then\n # we'll just assume that we own the directory.\n if not hasattr(os, \"geteuid\"):\n return True\n\n previous = None\n while path != previous:\n if os.path.lexists(path):\n # Actually do the ownership check\n try:\n if get_path_uid(path) != os.geteuid():\n return False\n except OSError:\n return False\n return True\n else:\n previous, path = path, os.path.dirname(path)\n", "path": "pip/utils/filesystem.py"}], "after_files": [{"content": "import os\nimport os.path\n\nfrom pip.compat import get_path_uid\n\n\ndef check_path_owner(path):\n # If we don't have a way to check the effective uid of this process, then\n # we'll just assume that we own the directory.\n if not hasattr(os, \"geteuid\"):\n return True\n\n previous = None\n while path != previous:\n if os.path.lexists(path):\n # Check if path is writable by current user.\n if os.geteuid() == 0:\n # Special handling for root user in order to handle properly\n # cases where users use sudo without -H flag.\n try:\n path_uid = get_path_uid(path)\n except OSError:\n return False\n return path_uid == 0\n else:\n return os.access(path, os.W_OK)\n else:\n previous, path = path, os.path.dirname(path)\n", "path": "pip/utils/filesystem.py"}]}
632
224
gh_patches_debug_1046
rasdani/github-patches
git_diff
enthought__chaco-424
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Demo quiver.py not working **Problem Description** Zooming in will ends with the following and blank plot. **Reproduction Steps:** Run the file and zoom in until the plot breaks. **Expected behavior:** Plot disappear if keep zooming in and ends with following trace. ``` Traceback (most recent call last): File "/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/enable/qt4/base_window.py", line 202, in paintEvent self.handler.paintEvent(event) File "/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/enable/qt4/base_window.py", line 54, in paintEvent self._enable_window._paint(event) File "/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/enable/abstract_window.py", line 468, in _paint self.component.draw(gc, view_bounds=(0, 0, size[0], size[1])) File "/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/enable/component.py", line 427, in draw self._draw(gc, view_bounds, mode) File "/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/enable/component.py", line 779, in _draw self._dispatch_draw(layer, gc, view_bounds, mode) File "/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/enable/container.py", line 272, in _dispatch_draw component._dispatch_draw(layer, gc, new_bounds, mode) File "/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/enable/container.py", line 272, in _dispatch_draw component._dispatch_draw(layer, gc, new_bounds, mode) File "/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/enable/component.py", line 799, in _dispatch_draw handler(gc, view_bounds, mode) File "/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/chaco/base_xy_plot.py", line 466, in _draw_plot self._draw_component(gc, view_bounds, mode) File "/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/chaco/base_xy_plot.py", line 474, in _draw_component self._render(gc, pts) File "/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/chaco/quiverplot.py", line 80, in _render ends = points + self._cached_vector_data ValueError: operands could not be broadcast together with shapes (0,) (0,2) ``` **OS, Python version:** OSX, Python 2.7 splits from #385 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `chaco/quiverplot.py` Content: ``` 1 2 from __future__ import with_statement 3 4 from numpy import array, compress, matrix, newaxis, sqrt, zeros 5 6 # Enthought library imports 7 from enable.api import ColorTrait 8 from traits.api import Array, Enum, Float, Instance, Int 9 10 # Chaco relative imports 11 from .abstract_data_source import AbstractDataSource 12 from .scatterplot import ScatterPlot 13 14 class QuiverPlot(ScatterPlot): 15 16 # Determines how to interpret the data in the **vectors** data source. 17 # "vector": each tuple is a (dx, dy) 18 # "radial": each tuple is an (r, theta) 19 data_type = Enum("vector", "radial") # TODO: implement "radial" 20 21 # A datasource that returns an Nx2 array array indicating directions 22 # of the vectors. The interpretation of this array is dependent on 23 # the setting of the **data_type** attribute. 24 # 25 # Usually this will be a MultiArrayDataSource. 26 vectors = Instance(AbstractDataSource) 27 28 #------------------------------------------------------------------------ 29 # Visual attributes of the vector 30 #------------------------------------------------------------------------ 31 32 # The color of the lines 33 line_color = ColorTrait("black") 34 35 # The width of the lines 36 line_width = Float(1.0) 37 38 # The length, in pixels, of the arrowhead 39 arrow_size = Int(5) 40 41 #------------------------------------------------------------------------ 42 # Private traits 43 #------------------------------------------------------------------------ 44 45 _cached_vector_data = Array 46 _selected_vector_data = Array 47 48 def _gather_points_old(self): 49 # In addition to the standard scatterplot _gather_points, we need 50 # to also grab the vectors that fall inside the view range 51 super(QuiverPlot, self)._gather_points_old() 52 53 if not self.index or not self.value: 54 return 55 56 if len(self._cached_point_mask) == 0: 57 self._cached_vector_data = [] 58 return 59 60 vectors = self.vectors.get_data() 61 self._cached_vector_data = compress(self._cached_point_mask, vectors, axis=0) 62 63 if self._cached_selected_pts is not None: 64 indices = self._cached_selection_point_mask 65 self._selected_vector_data = compress(indices, vectors, axis=0) 66 else: 67 self._selected_vector_data = None 68 return 69 70 71 def _render(self, gc, points, icon_mode=False): 72 with gc: 73 gc.clip_to_rect(self.x, self.y, self.width, self.height) 74 75 gc.set_stroke_color(self.line_color_) 76 gc.set_line_width(self.line_width) 77 78 # Draw the body of the arrow 79 starts = points 80 ends = points + self._cached_vector_data 81 gc.begin_path() 82 gc.line_set(starts, ends) 83 gc.stroke_path() 84 85 if self.arrow_size > 0: 86 vec = self._cached_vector_data 87 unit_vec = vec / sqrt(vec[:,0] ** 2 + vec[:,1] ** 2)[:, newaxis] 88 a = 0.707106781 # sqrt(2)/2 89 90 # Draw the left arrowhead (for an arrow pointing straight up) 91 arrow_ends = ends - array(unit_vec * matrix([[a, a], [-a, a]])) * self.arrow_size 92 gc.begin_path() 93 gc.line_set(ends, arrow_ends) 94 gc.stroke_path() 95 96 # Draw the left arrowhead (for an arrow pointing straight up) 97 arrow_ends = ends - array(unit_vec * matrix([[a, -a], [a, a]])) * self.arrow_size 98 gc.begin_path() 99 gc.line_set(ends, arrow_ends) 100 gc.stroke_path() 101 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/chaco/quiverplot.py b/chaco/quiverplot.py --- a/chaco/quiverplot.py +++ b/chaco/quiverplot.py @@ -69,6 +69,9 @@ def _render(self, gc, points, icon_mode=False): + if len(points) < 1: + return + with gc: gc.clip_to_rect(self.x, self.y, self.width, self.height)
{"golden_diff": "diff --git a/chaco/quiverplot.py b/chaco/quiverplot.py\n--- a/chaco/quiverplot.py\n+++ b/chaco/quiverplot.py\n@@ -69,6 +69,9 @@\n \n \n def _render(self, gc, points, icon_mode=False):\n+ if len(points) < 1:\n+ return\n+\n with gc:\n gc.clip_to_rect(self.x, self.y, self.width, self.height)\n", "issue": "Demo quiver.py not working\n**Problem Description**\r\nZooming in will ends with the following and blank plot.\r\n\r\n**Reproduction Steps:**\r\n\r\nRun the file and zoom in until the plot breaks.\r\n\r\n**Expected behavior:**\r\n\r\nPlot disappear if keep zooming in and ends with following trace.\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/enable/qt4/base_window.py\", line 202, in paintEvent\r\n self.handler.paintEvent(event)\r\n File \"/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/enable/qt4/base_window.py\", line 54, in paintEvent\r\n self._enable_window._paint(event)\r\n File \"/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/enable/abstract_window.py\", line 468, in _paint\r\n self.component.draw(gc, view_bounds=(0, 0, size[0], size[1]))\r\n File \"/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/enable/component.py\", line 427, in draw\r\n self._draw(gc, view_bounds, mode)\r\n File \"/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/enable/component.py\", line 779, in _draw\r\n self._dispatch_draw(layer, gc, view_bounds, mode)\r\n File \"/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/enable/container.py\", line 272, in _dispatch_draw\r\n component._dispatch_draw(layer, gc, new_bounds, mode)\r\n File \"/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/enable/container.py\", line 272, in _dispatch_draw\r\n component._dispatch_draw(layer, gc, new_bounds, mode)\r\n File \"/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/enable/component.py\", line 799, in _dispatch_draw\r\n handler(gc, view_bounds, mode)\r\n File \"/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/chaco/base_xy_plot.py\", line 466, in _draw_plot\r\n self._draw_component(gc, view_bounds, mode)\r\n File \"/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/chaco/base_xy_plot.py\", line 474, in _draw_component\r\n self._render(gc, pts)\r\n File \"/Users/ktakami/.edm/envs/chaco-dev/lib/python2.7/site-packages/chaco/quiverplot.py\", line 80, in _render\r\n ends = points + self._cached_vector_data\r\nValueError: operands could not be broadcast together with shapes (0,) (0,2)\r\n```\r\n\r\n**OS, Python version:** \r\nOSX, Python 2.7\r\n\r\nsplits from #385 \n", "before_files": [{"content": "\nfrom __future__ import with_statement\n\nfrom numpy import array, compress, matrix, newaxis, sqrt, zeros\n\n# Enthought library imports\nfrom enable.api import ColorTrait\nfrom traits.api import Array, Enum, Float, Instance, Int\n\n# Chaco relative imports\nfrom .abstract_data_source import AbstractDataSource\nfrom .scatterplot import ScatterPlot\n\nclass QuiverPlot(ScatterPlot):\n\n # Determines how to interpret the data in the **vectors** data source.\n # \"vector\": each tuple is a (dx, dy)\n # \"radial\": each tuple is an (r, theta)\n data_type = Enum(\"vector\", \"radial\") # TODO: implement \"radial\"\n\n # A datasource that returns an Nx2 array array indicating directions\n # of the vectors. The interpretation of this array is dependent on\n # the setting of the **data_type** attribute.\n #\n # Usually this will be a MultiArrayDataSource.\n vectors = Instance(AbstractDataSource)\n\n #------------------------------------------------------------------------\n # Visual attributes of the vector\n #------------------------------------------------------------------------\n\n # The color of the lines\n line_color = ColorTrait(\"black\")\n\n # The width of the lines\n line_width = Float(1.0)\n\n # The length, in pixels, of the arrowhead\n arrow_size = Int(5)\n\n #------------------------------------------------------------------------\n # Private traits\n #------------------------------------------------------------------------\n\n _cached_vector_data = Array\n _selected_vector_data = Array\n\n def _gather_points_old(self):\n # In addition to the standard scatterplot _gather_points, we need\n # to also grab the vectors that fall inside the view range\n super(QuiverPlot, self)._gather_points_old()\n\n if not self.index or not self.value:\n return\n\n if len(self._cached_point_mask) == 0:\n self._cached_vector_data = []\n return\n\n vectors = self.vectors.get_data()\n self._cached_vector_data = compress(self._cached_point_mask, vectors, axis=0)\n\n if self._cached_selected_pts is not None:\n indices = self._cached_selection_point_mask\n self._selected_vector_data = compress(indices, vectors, axis=0)\n else:\n self._selected_vector_data = None\n return\n\n\n def _render(self, gc, points, icon_mode=False):\n with gc:\n gc.clip_to_rect(self.x, self.y, self.width, self.height)\n\n gc.set_stroke_color(self.line_color_)\n gc.set_line_width(self.line_width)\n\n # Draw the body of the arrow\n starts = points\n ends = points + self._cached_vector_data\n gc.begin_path()\n gc.line_set(starts, ends)\n gc.stroke_path()\n\n if self.arrow_size > 0:\n vec = self._cached_vector_data\n unit_vec = vec / sqrt(vec[:,0] ** 2 + vec[:,1] ** 2)[:, newaxis]\n a = 0.707106781 # sqrt(2)/2\n\n # Draw the left arrowhead (for an arrow pointing straight up)\n arrow_ends = ends - array(unit_vec * matrix([[a, a], [-a, a]])) * self.arrow_size\n gc.begin_path()\n gc.line_set(ends, arrow_ends)\n gc.stroke_path()\n\n # Draw the left arrowhead (for an arrow pointing straight up)\n arrow_ends = ends - array(unit_vec * matrix([[a, -a], [a, a]])) * self.arrow_size\n gc.begin_path()\n gc.line_set(ends, arrow_ends)\n gc.stroke_path()\n", "path": "chaco/quiverplot.py"}], "after_files": [{"content": "\nfrom __future__ import with_statement\n\nfrom numpy import array, compress, matrix, newaxis, sqrt, zeros\n\n# Enthought library imports\nfrom enable.api import ColorTrait\nfrom traits.api import Array, Enum, Float, Instance, Int\n\n# Chaco relative imports\nfrom .abstract_data_source import AbstractDataSource\nfrom .scatterplot import ScatterPlot\n\nclass QuiverPlot(ScatterPlot):\n\n # Determines how to interpret the data in the **vectors** data source.\n # \"vector\": each tuple is a (dx, dy)\n # \"radial\": each tuple is an (r, theta)\n data_type = Enum(\"vector\", \"radial\") # TODO: implement \"radial\"\n\n # A datasource that returns an Nx2 array array indicating directions\n # of the vectors. The interpretation of this array is dependent on\n # the setting of the **data_type** attribute.\n #\n # Usually this will be a MultiArrayDataSource.\n vectors = Instance(AbstractDataSource)\n\n #------------------------------------------------------------------------\n # Visual attributes of the vector\n #------------------------------------------------------------------------\n\n # The color of the lines\n line_color = ColorTrait(\"black\")\n\n # The width of the lines\n line_width = Float(1.0)\n\n # The length, in pixels, of the arrowhead\n arrow_size = Int(5)\n\n #------------------------------------------------------------------------\n # Private traits\n #------------------------------------------------------------------------\n\n _cached_vector_data = Array\n _selected_vector_data = Array\n\n def _gather_points_old(self):\n # In addition to the standard scatterplot _gather_points, we need\n # to also grab the vectors that fall inside the view range\n super(QuiverPlot, self)._gather_points_old()\n\n if not self.index or not self.value:\n return\n\n if len(self._cached_point_mask) == 0:\n self._cached_vector_data = []\n return\n\n vectors = self.vectors.get_data()\n self._cached_vector_data = compress(self._cached_point_mask, vectors, axis=0)\n\n if self._cached_selected_pts is not None:\n indices = self._cached_selection_point_mask\n self._selected_vector_data = compress(indices, vectors, axis=0)\n else:\n self._selected_vector_data = None\n return\n\n\n def _render(self, gc, points, icon_mode=False):\n if len(points) < 1:\n return\n\n with gc:\n gc.clip_to_rect(self.x, self.y, self.width, self.height)\n\n gc.set_stroke_color(self.line_color_)\n gc.set_line_width(self.line_width)\n\n # Draw the body of the arrow\n starts = points\n ends = points + self._cached_vector_data\n gc.begin_path()\n gc.line_set(starts, ends)\n gc.stroke_path()\n\n if self.arrow_size > 0:\n vec = self._cached_vector_data\n unit_vec = vec / sqrt(vec[:,0] ** 2 + vec[:,1] ** 2)[:, newaxis]\n a = 0.707106781 # sqrt(2)/2\n\n # Draw the left arrowhead (for an arrow pointing straight up)\n arrow_ends = ends - array(unit_vec * matrix([[a, a], [-a, a]])) * self.arrow_size\n gc.begin_path()\n gc.line_set(ends, arrow_ends)\n gc.stroke_path()\n\n # Draw the left arrowhead (for an arrow pointing straight up)\n arrow_ends = ends - array(unit_vec * matrix([[a, -a], [a, a]])) * self.arrow_size\n gc.begin_path()\n gc.line_set(ends, arrow_ends)\n gc.stroke_path()\n", "path": "chaco/quiverplot.py"}]}
1,921
99
gh_patches_debug_21949
rasdani/github-patches
git_diff
scoutapp__scout_apm_python-205
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Capture Celery Queue Name --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/scout_apm/celery.py` Content: ``` 1 # coding=utf-8 2 from __future__ import absolute_import, division, print_function, unicode_literals 3 4 from celery.signals import task_postrun, task_prerun 5 6 import scout_apm.core 7 from scout_apm.core.tracked_request import TrackedRequest 8 9 10 # TODO: Capture queue. 11 # https://stackoverflow.com/questions/22385297/how-to-get-the-queue-in-which-a-task-was-run-celery?utm_medium=organic&utm_source=google_rich_qa&utm_campaign=google_rich_qa 12 def prerun_callback(sender=None, headers=None, body=None, **kwargs): 13 name = kwargs["task"].name 14 15 tr = TrackedRequest.instance() 16 tr.mark_real_request() 17 span = tr.start_span(operation=("Job/" + name)) 18 span.tag("queue", "default") 19 20 21 def postrun_callback(sender=None, headers=None, body=None, **kwargs): 22 tr = TrackedRequest.instance() 23 tr.stop_span() 24 25 26 def install(): 27 installed = scout_apm.core.install() 28 if not installed: 29 return 30 31 task_prerun.connect(prerun_callback) 32 task_postrun.connect(postrun_callback) 33 34 35 def uninstall(): 36 task_prerun.disconnect(prerun_callback) 37 task_postrun.disconnect(postrun_callback) 38 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/scout_apm/celery.py b/src/scout_apm/celery.py --- a/src/scout_apm/celery.py +++ b/src/scout_apm/celery.py @@ -7,20 +7,22 @@ from scout_apm.core.tracked_request import TrackedRequest -# TODO: Capture queue. -# https://stackoverflow.com/questions/22385297/how-to-get-the-queue-in-which-a-task-was-run-celery?utm_medium=organic&utm_source=google_rich_qa&utm_campaign=google_rich_qa -def prerun_callback(sender=None, headers=None, body=None, **kwargs): - name = kwargs["task"].name +def prerun_callback(task=None, **kwargs): + tracked_request = TrackedRequest.instance() + tracked_request.mark_real_request() - tr = TrackedRequest.instance() - tr.mark_real_request() - span = tr.start_span(operation=("Job/" + name)) - span.tag("queue", "default") + delivery_info = task.request.delivery_info + tracked_request.tag("is_eager", delivery_info.get("is_eager", False)) + tracked_request.tag("exchange", delivery_info.get("exchange", "unknown")) + tracked_request.tag("routing_key", delivery_info.get("routing_key", "unknown")) + tracked_request.tag("queue", delivery_info.get("queue", "unknown")) + tracked_request.start_span(operation=("Job/" + task.name)) -def postrun_callback(sender=None, headers=None, body=None, **kwargs): - tr = TrackedRequest.instance() - tr.stop_span() + +def postrun_callback(task=None, **kwargs): + tracked_request = TrackedRequest.instance() + tracked_request.stop_span() def install():
{"golden_diff": "diff --git a/src/scout_apm/celery.py b/src/scout_apm/celery.py\n--- a/src/scout_apm/celery.py\n+++ b/src/scout_apm/celery.py\n@@ -7,20 +7,22 @@\n from scout_apm.core.tracked_request import TrackedRequest\n \n \n-# TODO: Capture queue.\n-# https://stackoverflow.com/questions/22385297/how-to-get-the-queue-in-which-a-task-was-run-celery?utm_medium=organic&utm_source=google_rich_qa&utm_campaign=google_rich_qa\n-def prerun_callback(sender=None, headers=None, body=None, **kwargs):\n- name = kwargs[\"task\"].name\n+def prerun_callback(task=None, **kwargs):\n+ tracked_request = TrackedRequest.instance()\n+ tracked_request.mark_real_request()\n \n- tr = TrackedRequest.instance()\n- tr.mark_real_request()\n- span = tr.start_span(operation=(\"Job/\" + name))\n- span.tag(\"queue\", \"default\")\n+ delivery_info = task.request.delivery_info\n+ tracked_request.tag(\"is_eager\", delivery_info.get(\"is_eager\", False))\n+ tracked_request.tag(\"exchange\", delivery_info.get(\"exchange\", \"unknown\"))\n+ tracked_request.tag(\"routing_key\", delivery_info.get(\"routing_key\", \"unknown\"))\n+ tracked_request.tag(\"queue\", delivery_info.get(\"queue\", \"unknown\"))\n \n+ tracked_request.start_span(operation=(\"Job/\" + task.name))\n \n-def postrun_callback(sender=None, headers=None, body=None, **kwargs):\n- tr = TrackedRequest.instance()\n- tr.stop_span()\n+\n+def postrun_callback(task=None, **kwargs):\n+ tracked_request = TrackedRequest.instance()\n+ tracked_request.stop_span()\n \n \n def install():\n", "issue": "Capture Celery Queue Name\n\n", "before_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom celery.signals import task_postrun, task_prerun\n\nimport scout_apm.core\nfrom scout_apm.core.tracked_request import TrackedRequest\n\n\n# TODO: Capture queue.\n# https://stackoverflow.com/questions/22385297/how-to-get-the-queue-in-which-a-task-was-run-celery?utm_medium=organic&utm_source=google_rich_qa&utm_campaign=google_rich_qa\ndef prerun_callback(sender=None, headers=None, body=None, **kwargs):\n name = kwargs[\"task\"].name\n\n tr = TrackedRequest.instance()\n tr.mark_real_request()\n span = tr.start_span(operation=(\"Job/\" + name))\n span.tag(\"queue\", \"default\")\n\n\ndef postrun_callback(sender=None, headers=None, body=None, **kwargs):\n tr = TrackedRequest.instance()\n tr.stop_span()\n\n\ndef install():\n installed = scout_apm.core.install()\n if not installed:\n return\n\n task_prerun.connect(prerun_callback)\n task_postrun.connect(postrun_callback)\n\n\ndef uninstall():\n task_prerun.disconnect(prerun_callback)\n task_postrun.disconnect(postrun_callback)\n", "path": "src/scout_apm/celery.py"}], "after_files": [{"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom celery.signals import task_postrun, task_prerun\n\nimport scout_apm.core\nfrom scout_apm.core.tracked_request import TrackedRequest\n\n\ndef prerun_callback(task=None, **kwargs):\n tracked_request = TrackedRequest.instance()\n tracked_request.mark_real_request()\n\n delivery_info = task.request.delivery_info\n tracked_request.tag(\"is_eager\", delivery_info.get(\"is_eager\", False))\n tracked_request.tag(\"exchange\", delivery_info.get(\"exchange\", \"unknown\"))\n tracked_request.tag(\"routing_key\", delivery_info.get(\"routing_key\", \"unknown\"))\n tracked_request.tag(\"queue\", delivery_info.get(\"queue\", \"unknown\"))\n\n tracked_request.start_span(operation=(\"Job/\" + task.name))\n\n\ndef postrun_callback(task=None, **kwargs):\n tracked_request = TrackedRequest.instance()\n tracked_request.stop_span()\n\n\ndef install():\n installed = scout_apm.core.install()\n if not installed:\n return\n\n task_prerun.connect(prerun_callback)\n task_postrun.connect(postrun_callback)\n\n\ndef uninstall():\n task_prerun.disconnect(prerun_callback)\n task_postrun.disconnect(postrun_callback)\n", "path": "src/scout_apm/celery.py"}]}
621
397
gh_patches_debug_2698
rasdani/github-patches
git_diff
learningequality__kolibri-4343
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Enable ePUB plugin to run by default ### Observed behavior ePUB plugin is not enabled by default, and it prevents from importing & viewing ePUB files, until the command `kolibri plugin kolibri.plugins.document_epub_render enable` is run. ### User-facing consequences Inability to view and import ePUB files. ### Context dev environment, tried on `develop` and `0.11.a7` branches --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `kolibri/utils/conf.py` Content: ``` 1 """ 2 Kolibri configuration data 3 ========================== 4 5 .. warning:: 6 Do not load any django.conf.settings stuff here. This configuration data 7 precedes loading of settings, it is not part of the settings stack. 8 9 TODO: We need to figure out our conf API. Do we store in ini/json/yaml? 10 11 * How do we retrieve config data? 12 * When should configuration files be loaded and written? 13 14 This module should be easier to document, for instance by having VARIABLES 15 instead of a dict. 16 17 """ 18 from __future__ import absolute_import 19 from __future__ import print_function 20 from __future__ import unicode_literals 21 22 import json 23 import logging 24 import os 25 26 from .compat import module_exists 27 from .options import read_options_file 28 29 logger = logging.getLogger(__name__) 30 31 # use default OS encoding 32 with open(os.path.join(os.path.dirname(__file__), 'KOLIBRI_CORE_JS_NAME')) as f: 33 KOLIBRI_CORE_JS_NAME = f.read().strip() 34 35 #: Absolute path of the main user data directory. 36 #: Will be created automatically if it doesn't exist. 37 KOLIBRI_HOME = os.path.abspath(os.path.expanduser(os.environ["KOLIBRI_HOME"])) 38 39 # Creating KOLIBRI_HOME atm. has to happen here as for instance utils.cli is not 40 # called through py.test. This file is the first basic entry point of 41 # Kolibri, although utils.cli may or may not precede it. 42 if not os.path.exists(KOLIBRI_HOME): 43 parent = os.path.dirname(KOLIBRI_HOME) 44 if not os.path.exists(parent): 45 raise RuntimeError("The parent of your KOLIBRI_HOME does not exist: {}".format(parent)) 46 os.mkdir(KOLIBRI_HOME) 47 48 #: Set defaults before updating the dict 49 config = {} 50 51 try: 52 # The default list for this is populated from build_tools/default_plugins.txt 53 # in the root of the Kolibri repository. The default list is identical to the list below, 54 # except that the style_guide plugin is not enabled in production builds. 55 # Caveat: this list may have been changed at build time to specify a different list of plugins. 56 from .build_config.default_plugins import plugins 57 DEFAULT_PLUGINS = plugins 58 except ImportError: 59 DEFAULT_PLUGINS = [ 60 "kolibri.plugins.facility_management", 61 "kolibri.plugins.device_management", 62 "kolibri.plugins.learn", 63 "kolibri.plugins.document_pdf_render", 64 "kolibri.plugins.html5_app_renderer", 65 "kolibri.plugins.media_player", 66 "kolibri.plugins.setup_wizard", 67 "kolibri.plugins.coach", 68 "kolibri.plugins.user", 69 "kolibri_exercise_perseus_plugin", 70 "kolibri.plugins.style_guide", 71 ] 72 73 #: Everything in this list is added to django.conf.settings.INSTALLED_APPS 74 config['INSTALLED_APPS'] = DEFAULT_PLUGINS 75 76 #: Well-known plugin names that are automatically searched for and enabled on 77 #: first-run. 78 config['AUTO_SEARCH_PLUGINS'] = [] 79 80 #: If a config file does not exist, we assume it's the first run 81 config['FIRST_RUN'] = True 82 83 conf_file = os.path.join(KOLIBRI_HOME, "kolibri_settings.json") 84 85 86 def update(new_values): 87 """ 88 Updates current configuration with ``new_values``. Does not save to file. 89 """ 90 config.update(new_values) 91 92 93 def save(first_run=False): 94 """Saves the current state of the configuration""" 95 config['FIRST_RUN'] = first_run 96 # use default OS encoding 97 with open(conf_file, 'w') as kolibri_conf_file: 98 json.dump(config, kolibri_conf_file, indent=2, sort_keys=True) 99 100 101 if not os.path.isfile(conf_file): 102 logger.info("Initialize kolibri_settings.json..") 103 save(True) 104 else: 105 # Open up the config file and overwrite defaults 106 # use default OS encoding 107 with open(conf_file, 'r') as kolibri_conf_file: 108 config.update(json.load(kolibri_conf_file)) 109 110 111 def autoremove_unavailable_plugins(): 112 """ 113 Sanitize INSTALLED_APPS - something that should be done separately for all 114 build in plugins, but we should not auto-remove plugins that are actually 115 configured by the user or some other kind of hard dependency that should 116 make execution stop if not loadable. 117 """ 118 global config 119 changed = False 120 # Iterate over a copy of the list so that it is not modified during the loop 121 for module_path in config['INSTALLED_APPS'][:]: 122 if not module_exists(module_path): 123 config['INSTALLED_APPS'].remove(module_path) 124 logger.error( 125 ( 126 "Plugin {mod} not found and disabled. To re-enable it, run:\n" 127 " $ kolibri plugin {mod} enable" 128 ).format(mod=module_path) 129 ) 130 changed = True 131 if changed: 132 save() 133 134 135 def enable_default_plugins(): 136 """ 137 Enable new plugins that have been added between versions 138 This will have the undesired side effect of reactivating 139 default plugins that have been explicitly disabled by a user. 140 However, until we add disabled plugins to a blacklist, this is 141 unavoidable. 142 """ 143 global config 144 changed = False 145 for module_path in DEFAULT_PLUGINS: 146 if module_path not in config['INSTALLED_APPS']: 147 config['INSTALLED_APPS'].append(module_path) 148 logger.warning( 149 ( 150 "Default plugin {mod} not found in configuration. To re-disable it, run:\n" 151 " $ kolibri plugin {mod} disable" 152 ).format(mod=module_path) 153 ) 154 changed = True 155 156 if changed: 157 save() 158 159 160 # read the config file options in here so they can be accessed from a standard location 161 OPTIONS = read_options_file(KOLIBRI_HOME) 162 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/kolibri/utils/conf.py b/kolibri/utils/conf.py --- a/kolibri/utils/conf.py +++ b/kolibri/utils/conf.py @@ -68,6 +68,7 @@ "kolibri.plugins.user", "kolibri_exercise_perseus_plugin", "kolibri.plugins.style_guide", + "kolibri.plugins.document_epub_render", ] #: Everything in this list is added to django.conf.settings.INSTALLED_APPS
{"golden_diff": "diff --git a/kolibri/utils/conf.py b/kolibri/utils/conf.py\n--- a/kolibri/utils/conf.py\n+++ b/kolibri/utils/conf.py\n@@ -68,6 +68,7 @@\n \"kolibri.plugins.user\",\n \"kolibri_exercise_perseus_plugin\",\n \"kolibri.plugins.style_guide\",\n+ \"kolibri.plugins.document_epub_render\",\n ]\n \n #: Everything in this list is added to django.conf.settings.INSTALLED_APPS\n", "issue": "Enable ePUB plugin to run by default\n\r\n### Observed behavior\r\n\r\nePUB plugin is not enabled by default, and it prevents from importing & viewing ePUB files, until the command `kolibri plugin kolibri.plugins.document_epub_render enable` is run.\r\n\r\n### User-facing consequences\r\nInability to view and import ePUB files.\r\n\r\n\r\n### Context\r\ndev environment, tried on `develop` and `0.11.a7` branches\r\n\n", "before_files": [{"content": "\"\"\"\nKolibri configuration data\n==========================\n\n.. warning::\n Do not load any django.conf.settings stuff here. This configuration data\n precedes loading of settings, it is not part of the settings stack.\n\nTODO: We need to figure out our conf API. Do we store in ini/json/yaml?\n\n * How do we retrieve config data?\n * When should configuration files be loaded and written?\n\nThis module should be easier to document, for instance by having VARIABLES\ninstead of a dict.\n\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport json\nimport logging\nimport os\n\nfrom .compat import module_exists\nfrom .options import read_options_file\n\nlogger = logging.getLogger(__name__)\n\n# use default OS encoding\nwith open(os.path.join(os.path.dirname(__file__), 'KOLIBRI_CORE_JS_NAME')) as f:\n KOLIBRI_CORE_JS_NAME = f.read().strip()\n\n#: Absolute path of the main user data directory.\n#: Will be created automatically if it doesn't exist.\nKOLIBRI_HOME = os.path.abspath(os.path.expanduser(os.environ[\"KOLIBRI_HOME\"]))\n\n# Creating KOLIBRI_HOME atm. has to happen here as for instance utils.cli is not\n# called through py.test. This file is the first basic entry point of\n# Kolibri, although utils.cli may or may not precede it.\nif not os.path.exists(KOLIBRI_HOME):\n parent = os.path.dirname(KOLIBRI_HOME)\n if not os.path.exists(parent):\n raise RuntimeError(\"The parent of your KOLIBRI_HOME does not exist: {}\".format(parent))\n os.mkdir(KOLIBRI_HOME)\n\n#: Set defaults before updating the dict\nconfig = {}\n\ntry:\n # The default list for this is populated from build_tools/default_plugins.txt\n # in the root of the Kolibri repository. The default list is identical to the list below,\n # except that the style_guide plugin is not enabled in production builds.\n # Caveat: this list may have been changed at build time to specify a different list of plugins.\n from .build_config.default_plugins import plugins\n DEFAULT_PLUGINS = plugins\nexcept ImportError:\n DEFAULT_PLUGINS = [\n \"kolibri.plugins.facility_management\",\n \"kolibri.plugins.device_management\",\n \"kolibri.plugins.learn\",\n \"kolibri.plugins.document_pdf_render\",\n \"kolibri.plugins.html5_app_renderer\",\n \"kolibri.plugins.media_player\",\n \"kolibri.plugins.setup_wizard\",\n \"kolibri.plugins.coach\",\n \"kolibri.plugins.user\",\n \"kolibri_exercise_perseus_plugin\",\n \"kolibri.plugins.style_guide\",\n ]\n\n#: Everything in this list is added to django.conf.settings.INSTALLED_APPS\nconfig['INSTALLED_APPS'] = DEFAULT_PLUGINS\n\n#: Well-known plugin names that are automatically searched for and enabled on\n#: first-run.\nconfig['AUTO_SEARCH_PLUGINS'] = []\n\n#: If a config file does not exist, we assume it's the first run\nconfig['FIRST_RUN'] = True\n\nconf_file = os.path.join(KOLIBRI_HOME, \"kolibri_settings.json\")\n\n\ndef update(new_values):\n \"\"\"\n Updates current configuration with ``new_values``. Does not save to file.\n \"\"\"\n config.update(new_values)\n\n\ndef save(first_run=False):\n \"\"\"Saves the current state of the configuration\"\"\"\n config['FIRST_RUN'] = first_run\n # use default OS encoding\n with open(conf_file, 'w') as kolibri_conf_file:\n json.dump(config, kolibri_conf_file, indent=2, sort_keys=True)\n\n\nif not os.path.isfile(conf_file):\n logger.info(\"Initialize kolibri_settings.json..\")\n save(True)\nelse:\n # Open up the config file and overwrite defaults\n # use default OS encoding\n with open(conf_file, 'r') as kolibri_conf_file:\n config.update(json.load(kolibri_conf_file))\n\n\ndef autoremove_unavailable_plugins():\n \"\"\"\n Sanitize INSTALLED_APPS - something that should be done separately for all\n build in plugins, but we should not auto-remove plugins that are actually\n configured by the user or some other kind of hard dependency that should\n make execution stop if not loadable.\n \"\"\"\n global config\n changed = False\n # Iterate over a copy of the list so that it is not modified during the loop\n for module_path in config['INSTALLED_APPS'][:]:\n if not module_exists(module_path):\n config['INSTALLED_APPS'].remove(module_path)\n logger.error(\n (\n \"Plugin {mod} not found and disabled. To re-enable it, run:\\n\"\n \" $ kolibri plugin {mod} enable\"\n ).format(mod=module_path)\n )\n changed = True\n if changed:\n save()\n\n\ndef enable_default_plugins():\n \"\"\"\n Enable new plugins that have been added between versions\n This will have the undesired side effect of reactivating\n default plugins that have been explicitly disabled by a user.\n However, until we add disabled plugins to a blacklist, this is\n unavoidable.\n \"\"\"\n global config\n changed = False\n for module_path in DEFAULT_PLUGINS:\n if module_path not in config['INSTALLED_APPS']:\n config['INSTALLED_APPS'].append(module_path)\n logger.warning(\n (\n \"Default plugin {mod} not found in configuration. To re-disable it, run:\\n\"\n \" $ kolibri plugin {mod} disable\"\n ).format(mod=module_path)\n )\n changed = True\n\n if changed:\n save()\n\n\n# read the config file options in here so they can be accessed from a standard location\nOPTIONS = read_options_file(KOLIBRI_HOME)\n", "path": "kolibri/utils/conf.py"}], "after_files": [{"content": "\"\"\"\nKolibri configuration data\n==========================\n\n.. warning::\n Do not load any django.conf.settings stuff here. This configuration data\n precedes loading of settings, it is not part of the settings stack.\n\nTODO: We need to figure out our conf API. Do we store in ini/json/yaml?\n\n * How do we retrieve config data?\n * When should configuration files be loaded and written?\n\nThis module should be easier to document, for instance by having VARIABLES\ninstead of a dict.\n\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport json\nimport logging\nimport os\n\nfrom .compat import module_exists\nfrom .options import read_options_file\n\nlogger = logging.getLogger(__name__)\n\n# use default OS encoding\nwith open(os.path.join(os.path.dirname(__file__), 'KOLIBRI_CORE_JS_NAME')) as f:\n KOLIBRI_CORE_JS_NAME = f.read().strip()\n\n#: Absolute path of the main user data directory.\n#: Will be created automatically if it doesn't exist.\nKOLIBRI_HOME = os.path.abspath(os.path.expanduser(os.environ[\"KOLIBRI_HOME\"]))\n\n# Creating KOLIBRI_HOME atm. has to happen here as for instance utils.cli is not\n# called through py.test. This file is the first basic entry point of\n# Kolibri, although utils.cli may or may not precede it.\nif not os.path.exists(KOLIBRI_HOME):\n parent = os.path.dirname(KOLIBRI_HOME)\n if not os.path.exists(parent):\n raise RuntimeError(\"The parent of your KOLIBRI_HOME does not exist: {}\".format(parent))\n os.mkdir(KOLIBRI_HOME)\n\n#: Set defaults before updating the dict\nconfig = {}\n\ntry:\n # The default list for this is populated from build_tools/default_plugins.txt\n # in the root of the Kolibri repository. The default list is identical to the list below,\n # except that the style_guide plugin is not enabled in production builds.\n # Caveat: this list may have been changed at build time to specify a different list of plugins.\n from .build_config.default_plugins import plugins\n DEFAULT_PLUGINS = plugins\nexcept ImportError:\n DEFAULT_PLUGINS = [\n \"kolibri.plugins.facility_management\",\n \"kolibri.plugins.device_management\",\n \"kolibri.plugins.learn\",\n \"kolibri.plugins.document_pdf_render\",\n \"kolibri.plugins.html5_app_renderer\",\n \"kolibri.plugins.media_player\",\n \"kolibri.plugins.setup_wizard\",\n \"kolibri.plugins.coach\",\n \"kolibri.plugins.user\",\n \"kolibri_exercise_perseus_plugin\",\n \"kolibri.plugins.style_guide\",\n \"kolibri.plugins.document_epub_render\",\n ]\n\n#: Everything in this list is added to django.conf.settings.INSTALLED_APPS\nconfig['INSTALLED_APPS'] = DEFAULT_PLUGINS\n\n#: Well-known plugin names that are automatically searched for and enabled on\n#: first-run.\nconfig['AUTO_SEARCH_PLUGINS'] = []\n\n#: If a config file does not exist, we assume it's the first run\nconfig['FIRST_RUN'] = True\n\nconf_file = os.path.join(KOLIBRI_HOME, \"kolibri_settings.json\")\n\n\ndef update(new_values):\n \"\"\"\n Updates current configuration with ``new_values``. Does not save to file.\n \"\"\"\n config.update(new_values)\n\n\ndef save(first_run=False):\n \"\"\"Saves the current state of the configuration\"\"\"\n config['FIRST_RUN'] = first_run\n # use default OS encoding\n with open(conf_file, 'w') as kolibri_conf_file:\n json.dump(config, kolibri_conf_file, indent=2, sort_keys=True)\n\n\nif not os.path.isfile(conf_file):\n logger.info(\"Initialize kolibri_settings.json..\")\n save(True)\nelse:\n # Open up the config file and overwrite defaults\n # use default OS encoding\n with open(conf_file, 'r') as kolibri_conf_file:\n config.update(json.load(kolibri_conf_file))\n\n\ndef autoremove_unavailable_plugins():\n \"\"\"\n Sanitize INSTALLED_APPS - something that should be done separately for all\n build in plugins, but we should not auto-remove plugins that are actually\n configured by the user or some other kind of hard dependency that should\n make execution stop if not loadable.\n \"\"\"\n global config\n changed = False\n # Iterate over a copy of the list so that it is not modified during the loop\n for module_path in config['INSTALLED_APPS'][:]:\n if not module_exists(module_path):\n config['INSTALLED_APPS'].remove(module_path)\n logger.error(\n (\n \"Plugin {mod} not found and disabled. To re-enable it, run:\\n\"\n \" $ kolibri plugin {mod} enable\"\n ).format(mod=module_path)\n )\n changed = True\n if changed:\n save()\n\n\ndef enable_default_plugins():\n \"\"\"\n Enable new plugins that have been added between versions\n This will have the undesired side effect of reactivating\n default plugins that have been explicitly disabled by a user.\n However, until we add disabled plugins to a blacklist, this is\n unavoidable.\n \"\"\"\n global config\n changed = False\n for module_path in DEFAULT_PLUGINS:\n if module_path not in config['INSTALLED_APPS']:\n config['INSTALLED_APPS'].append(module_path)\n logger.warning(\n (\n \"Default plugin {mod} not found in configuration. To re-disable it, run:\\n\"\n \" $ kolibri plugin {mod} disable\"\n ).format(mod=module_path)\n )\n changed = True\n\n if changed:\n save()\n\n\n# read the config file options in here so they can be accessed from a standard location\nOPTIONS = read_options_file(KOLIBRI_HOME)\n", "path": "kolibri/utils/conf.py"}]}
1,976
104
gh_patches_debug_2999
rasdani/github-patches
git_diff
iterative__dvc-2457
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- dvc remove CLI documentation inconsistency `dvc remove` (without `targets`) prints help which states that `targets` are optional, and if not specified will remove all DVC-files. Clearly not the case. ```bash $ dvc remove [...] targets DVC-files to remove. Optional. (Finds all DVC-files in the workspace by default.) ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `dvc/command/remove.py` Content: ``` 1 from __future__ import unicode_literals 2 3 import argparse 4 import logging 5 6 import dvc.prompt as prompt 7 from dvc.exceptions import DvcException 8 from dvc.command.base import CmdBase, append_doc_link 9 10 11 logger = logging.getLogger(__name__) 12 13 14 class CmdRemove(CmdBase): 15 def _is_outs_only(self, target): 16 if not self.args.purge: 17 return True 18 19 if self.args.force: 20 return False 21 22 msg = "Are you sure you want to remove {} with its outputs?".format( 23 target 24 ) 25 26 if prompt.confirm(msg): 27 return False 28 29 raise DvcException( 30 "Cannot purge without a confirmation from the user." 31 " Use '-f' to force." 32 ) 33 34 def run(self): 35 for target in self.args.targets: 36 try: 37 outs_only = self._is_outs_only(target) 38 self.repo.remove(target, outs_only=outs_only) 39 except DvcException: 40 logger.exception("failed to remove {}".format(target)) 41 return 1 42 return 0 43 44 45 def add_parser(subparsers, parent_parser): 46 REMOVE_HELP = "Remove DVC-file outputs." 47 remove_parser = subparsers.add_parser( 48 "remove", 49 parents=[parent_parser], 50 description=append_doc_link(REMOVE_HELP, "remove"), 51 help=REMOVE_HELP, 52 formatter_class=argparse.RawDescriptionHelpFormatter, 53 ) 54 remove_parser_group = remove_parser.add_mutually_exclusive_group() 55 remove_parser_group.add_argument( 56 "-o", 57 "--outs", 58 action="store_true", 59 default=True, 60 help="Only remove DVC-file outputs. (Default)", 61 ) 62 remove_parser_group.add_argument( 63 "-p", 64 "--purge", 65 action="store_true", 66 default=False, 67 help="Remove DVC-file and all its outputs.", 68 ) 69 remove_parser.add_argument( 70 "-f", 71 "--force", 72 action="store_true", 73 default=False, 74 help="Force purge.", 75 ) 76 remove_parser.add_argument( 77 "targets", 78 nargs="+", 79 help="DVC-files to remove. Optional. " 80 "(Finds all DVC-files in the workspace by default.)", 81 ) 82 remove_parser.set_defaults(func=CmdRemove) 83 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/dvc/command/remove.py b/dvc/command/remove.py --- a/dvc/command/remove.py +++ b/dvc/command/remove.py @@ -74,9 +74,6 @@ help="Force purge.", ) remove_parser.add_argument( - "targets", - nargs="+", - help="DVC-files to remove. Optional. " - "(Finds all DVC-files in the workspace by default.)", + "targets", nargs="+", help="DVC-files to remove." ) remove_parser.set_defaults(func=CmdRemove)
{"golden_diff": "diff --git a/dvc/command/remove.py b/dvc/command/remove.py\n--- a/dvc/command/remove.py\n+++ b/dvc/command/remove.py\n@@ -74,9 +74,6 @@\n help=\"Force purge.\",\n )\n remove_parser.add_argument(\n- \"targets\",\n- nargs=\"+\",\n- help=\"DVC-files to remove. Optional. \"\n- \"(Finds all DVC-files in the workspace by default.)\",\n+ \"targets\", nargs=\"+\", help=\"DVC-files to remove.\"\n )\n remove_parser.set_defaults(func=CmdRemove)\n", "issue": "dvc remove CLI documentation inconsistency\n`dvc remove` (without `targets`) prints help which states that `targets` are optional, and if not specified will remove all DVC-files. Clearly not the case.\r\n\r\n```bash\r\n$ dvc remove\r\n[...]\r\n targets DVC-files to remove. Optional. (Finds all DVC-files in the\r\n workspace by default.)\r\n```\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport argparse\nimport logging\n\nimport dvc.prompt as prompt\nfrom dvc.exceptions import DvcException\nfrom dvc.command.base import CmdBase, append_doc_link\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass CmdRemove(CmdBase):\n def _is_outs_only(self, target):\n if not self.args.purge:\n return True\n\n if self.args.force:\n return False\n\n msg = \"Are you sure you want to remove {} with its outputs?\".format(\n target\n )\n\n if prompt.confirm(msg):\n return False\n\n raise DvcException(\n \"Cannot purge without a confirmation from the user.\"\n \" Use '-f' to force.\"\n )\n\n def run(self):\n for target in self.args.targets:\n try:\n outs_only = self._is_outs_only(target)\n self.repo.remove(target, outs_only=outs_only)\n except DvcException:\n logger.exception(\"failed to remove {}\".format(target))\n return 1\n return 0\n\n\ndef add_parser(subparsers, parent_parser):\n REMOVE_HELP = \"Remove DVC-file outputs.\"\n remove_parser = subparsers.add_parser(\n \"remove\",\n parents=[parent_parser],\n description=append_doc_link(REMOVE_HELP, \"remove\"),\n help=REMOVE_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n remove_parser_group = remove_parser.add_mutually_exclusive_group()\n remove_parser_group.add_argument(\n \"-o\",\n \"--outs\",\n action=\"store_true\",\n default=True,\n help=\"Only remove DVC-file outputs. (Default)\",\n )\n remove_parser_group.add_argument(\n \"-p\",\n \"--purge\",\n action=\"store_true\",\n default=False,\n help=\"Remove DVC-file and all its outputs.\",\n )\n remove_parser.add_argument(\n \"-f\",\n \"--force\",\n action=\"store_true\",\n default=False,\n help=\"Force purge.\",\n )\n remove_parser.add_argument(\n \"targets\",\n nargs=\"+\",\n help=\"DVC-files to remove. Optional. \"\n \"(Finds all DVC-files in the workspace by default.)\",\n )\n remove_parser.set_defaults(func=CmdRemove)\n", "path": "dvc/command/remove.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nimport argparse\nimport logging\n\nimport dvc.prompt as prompt\nfrom dvc.exceptions import DvcException\nfrom dvc.command.base import CmdBase, append_doc_link\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass CmdRemove(CmdBase):\n def _is_outs_only(self, target):\n if not self.args.purge:\n return True\n\n if self.args.force:\n return False\n\n msg = \"Are you sure you want to remove {} with its outputs?\".format(\n target\n )\n\n if prompt.confirm(msg):\n return False\n\n raise DvcException(\n \"Cannot purge without a confirmation from the user.\"\n \" Use '-f' to force.\"\n )\n\n def run(self):\n for target in self.args.targets:\n try:\n outs_only = self._is_outs_only(target)\n self.repo.remove(target, outs_only=outs_only)\n except DvcException:\n logger.exception(\"failed to remove {}\".format(target))\n return 1\n return 0\n\n\ndef add_parser(subparsers, parent_parser):\n REMOVE_HELP = \"Remove DVC-file outputs.\"\n remove_parser = subparsers.add_parser(\n \"remove\",\n parents=[parent_parser],\n description=append_doc_link(REMOVE_HELP, \"remove\"),\n help=REMOVE_HELP,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n remove_parser_group = remove_parser.add_mutually_exclusive_group()\n remove_parser_group.add_argument(\n \"-o\",\n \"--outs\",\n action=\"store_true\",\n default=True,\n help=\"Only remove DVC-file outputs. (Default)\",\n )\n remove_parser_group.add_argument(\n \"-p\",\n \"--purge\",\n action=\"store_true\",\n default=False,\n help=\"Remove DVC-file and all its outputs.\",\n )\n remove_parser.add_argument(\n \"-f\",\n \"--force\",\n action=\"store_true\",\n default=False,\n help=\"Force purge.\",\n )\n remove_parser.add_argument(\n \"targets\", nargs=\"+\", help=\"DVC-files to remove.\"\n )\n remove_parser.set_defaults(func=CmdRemove)\n", "path": "dvc/command/remove.py"}]}
982
125
gh_patches_debug_82
rasdani/github-patches
git_diff
fidals__shopelectro-719
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add canonicals to category page For example this two pages contains no canonicals: - https://www.shopelectro.ru/catalog/categories/akkumuliatory-270/tags/li-ro_hbced/?page=2 - ~https://www.shopelectro.ru/catalog/categories/akkumuliatory-270/?page=2~ checked - it contains canonical Add canonicals to category page For example this two pages contains no canonicals: - https://www.shopelectro.ru/catalog/categories/akkumuliatory-270/tags/li-ro_hbced/?page=2 - ~https://www.shopelectro.ru/catalog/categories/akkumuliatory-270/?page=2~ checked - it contains canonical --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `shopelectro/context.py` Content: ``` 1 from functools import partial 2 3 from catalog.newcontext import Context, Tags 4 5 6 class Page(Context): 7 8 def __init__(self, page, tags: Tags): 9 self._page = page 10 self._tags = tags 11 12 def context(self): 13 def template_context(page, tag_titles, tags): 14 return { 15 'page': page, 16 'tag_titles': tag_titles, 17 'tags': tags, 18 } 19 20 tags_qs = self._tags.qs() 21 self._page.get_template_render_context = partial( 22 template_context, self._page, tags_qs.as_title(), tags_qs 23 ) 24 25 return { 26 'page': self._page, 27 'skip_canonical': tags_qs.exists(), 28 } 29 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/shopelectro/context.py b/shopelectro/context.py --- a/shopelectro/context.py +++ b/shopelectro/context.py @@ -24,5 +24,4 @@ return { 'page': self._page, - 'skip_canonical': tags_qs.exists(), }
{"golden_diff": "diff --git a/shopelectro/context.py b/shopelectro/context.py\n--- a/shopelectro/context.py\n+++ b/shopelectro/context.py\n@@ -24,5 +24,4 @@\n \n return {\n 'page': self._page,\n- 'skip_canonical': tags_qs.exists(),\n }\n", "issue": "Add canonicals to category page\nFor example this two pages contains no canonicals:\r\n- https://www.shopelectro.ru/catalog/categories/akkumuliatory-270/tags/li-ro_hbced/?page=2\r\n- ~https://www.shopelectro.ru/catalog/categories/akkumuliatory-270/?page=2~ checked - it contains canonical\nAdd canonicals to category page\nFor example this two pages contains no canonicals:\r\n- https://www.shopelectro.ru/catalog/categories/akkumuliatory-270/tags/li-ro_hbced/?page=2\r\n- ~https://www.shopelectro.ru/catalog/categories/akkumuliatory-270/?page=2~ checked - it contains canonical\n", "before_files": [{"content": "from functools import partial\n\nfrom catalog.newcontext import Context, Tags\n\n\nclass Page(Context):\n\n def __init__(self, page, tags: Tags):\n self._page = page\n self._tags = tags\n\n def context(self):\n def template_context(page, tag_titles, tags):\n return {\n 'page': page,\n 'tag_titles': tag_titles,\n 'tags': tags,\n }\n\n tags_qs = self._tags.qs()\n self._page.get_template_render_context = partial(\n template_context, self._page, tags_qs.as_title(), tags_qs\n )\n\n return {\n 'page': self._page,\n 'skip_canonical': tags_qs.exists(),\n }\n", "path": "shopelectro/context.py"}], "after_files": [{"content": "from functools import partial\n\nfrom catalog.newcontext import Context, Tags\n\n\nclass Page(Context):\n\n def __init__(self, page, tags: Tags):\n self._page = page\n self._tags = tags\n\n def context(self):\n def template_context(page, tag_titles, tags):\n return {\n 'page': page,\n 'tag_titles': tag_titles,\n 'tags': tags,\n }\n\n tags_qs = self._tags.qs()\n self._page.get_template_render_context = partial(\n template_context, self._page, tags_qs.as_title(), tags_qs\n )\n\n return {\n 'page': self._page,\n }\n", "path": "shopelectro/context.py"}]}
624
73
gh_patches_debug_17860
rasdani/github-patches
git_diff
readthedocs__readthedocs.org-6112
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Cleanup exception that are not logged as error After #4495 got merged @agjohnson suggested to have an attribute in the Exception class and check for that attribute before log the exception, instead of defining a list for the warning exceptions as I did at: https://github.com/rtfd/readthedocs.org/pull/4495/files#diff-ca52b098301dd315a834b3556ab9a7d5R424 Also, there are more exceptions that have to treat in the same way: `ProjectConfigurationError` for example. https://sentry.io/read-the-docs/readthedocs-org/issues/668248681/ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `readthedocs/vcs_support/base.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 """Base classes for VCS backends.""" 4 import logging 5 import os 6 import shutil 7 8 9 log = logging.getLogger(__name__) 10 11 12 class VCSVersion: 13 14 """ 15 Represents a Version (tag or branch) in a VCS. 16 17 This class should only be instantiated in BaseVCS subclasses. 18 19 It can act as a context manager to temporarily switch to this tag (eg to 20 build docs for this tag). 21 """ 22 23 def __init__(self, repository, identifier, verbose_name): 24 self.repository = repository 25 self.identifier = identifier 26 self.verbose_name = verbose_name 27 28 def __repr__(self): 29 return '<VCSVersion: {}:{}'.format( 30 self.repository.repo_url, 31 self.verbose_name, 32 ) 33 34 35 class BaseVCS: 36 37 """ 38 Base for VCS Classes. 39 40 VCS commands are ran inside a ``LocalEnvironment``. 41 """ 42 43 supports_tags = False # Whether this VCS supports tags or not. 44 supports_branches = False # Whether this VCS supports branches or not. 45 supports_submodules = False 46 47 # ========================================================================= 48 # General methods 49 # ========================================================================= 50 51 # Defining a base API, so we'll have unused args 52 # pylint: disable=unused-argument 53 def __init__( 54 self, project, version_slug, environment=None, 55 verbose_name=None, version_type=None, **kwargs 56 ): 57 self.default_branch = project.default_branch 58 self.project = project 59 self.name = project.name 60 self.repo_url = project.clean_repo 61 self.working_dir = project.checkout_path(version_slug) 62 # required for External versions 63 self.verbose_name = verbose_name 64 self.version_type = version_type 65 66 from readthedocs.doc_builder.environments import LocalEnvironment 67 self.environment = environment or LocalEnvironment(project) 68 69 # Update the env variables with the proper VCS env variables 70 self.environment.environment.update(self.env) 71 72 def check_working_dir(self): 73 if not os.path.exists(self.working_dir): 74 os.makedirs(self.working_dir) 75 76 def make_clean_working_dir(self): 77 """Ensures that the working dir exists and is empty.""" 78 shutil.rmtree(self.working_dir, ignore_errors=True) 79 self.check_working_dir() 80 81 @property 82 def env(self): 83 environment = os.environ.copy() 84 85 # TODO: kind of a hack 86 del environment['PATH'] 87 88 return environment 89 90 def update(self): 91 """ 92 Update a local copy of the repository in self.working_dir. 93 94 If self.working_dir is already a valid local copy of the repository, 95 update the repository, else create a new local copy of the repository. 96 """ 97 self.check_working_dir() 98 99 def run(self, *cmd, **kwargs): 100 kwargs.update({ 101 'cwd': self.working_dir, 102 'shell': False, 103 }) 104 105 build_cmd = self.environment.run(*cmd, **kwargs) 106 # Return a tuple to keep compatibility 107 return (build_cmd.exit_code, build_cmd.output, build_cmd.error) 108 109 # ========================================================================= 110 # Tag / Branch related methods 111 # These methods only apply if supports_tags = True and/or 112 # support_branches = True 113 # ========================================================================= 114 115 @property 116 def tags(self): 117 """ 118 Returns a list of VCSVersion objects. 119 120 See VCSVersion for more information. 121 """ 122 raise NotImplementedError 123 124 @property 125 def branches(self): 126 """ 127 Returns a list of VCSVersion objects. 128 129 See VCSVersion for more information. 130 """ 131 raise NotImplementedError 132 133 @property 134 def commit(self): 135 """Returns a string representing the current commit.""" 136 raise NotImplementedError 137 138 def checkout(self, identifier=None): 139 """ 140 Set the state to the given identifier. 141 142 If identifier is None, checkout to the latest revision. 143 144 The type and format of identifier may change from VCS to VCS, so each 145 backend is responsible to understand it's identifiers. 146 """ 147 self.check_working_dir() 148 149 def update_submodules(self, config): 150 """ 151 Update the submodules of the current checkout. 152 153 :type config: readthedocs.config.BuildConfigBase 154 """ 155 raise NotImplementedError 156 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/readthedocs/vcs_support/base.py b/readthedocs/vcs_support/base.py --- a/readthedocs/vcs_support/base.py +++ b/readthedocs/vcs_support/base.py @@ -1,10 +1,11 @@ -# -*- coding: utf-8 -*- - """Base classes for VCS backends.""" import logging import os import shutil +from readthedocs.doc_builder.exceptions import BuildEnvironmentWarning +from readthedocs.projects.exceptions import RepositoryError + log = logging.getLogger(__name__) @@ -102,7 +103,13 @@ 'shell': False, }) - build_cmd = self.environment.run(*cmd, **kwargs) + try: + build_cmd = self.environment.run(*cmd, **kwargs) + except BuildEnvironmentWarning as e: + # Re raise as RepositoryError, + # so isn't logged as ERROR. + raise RepositoryError(str(e)) + # Return a tuple to keep compatibility return (build_cmd.exit_code, build_cmd.output, build_cmd.error)
{"golden_diff": "diff --git a/readthedocs/vcs_support/base.py b/readthedocs/vcs_support/base.py\n--- a/readthedocs/vcs_support/base.py\n+++ b/readthedocs/vcs_support/base.py\n@@ -1,10 +1,11 @@\n-# -*- coding: utf-8 -*-\n-\n \"\"\"Base classes for VCS backends.\"\"\"\n import logging\n import os\n import shutil\n \n+from readthedocs.doc_builder.exceptions import BuildEnvironmentWarning\n+from readthedocs.projects.exceptions import RepositoryError\n+\n \n log = logging.getLogger(__name__)\n \n@@ -102,7 +103,13 @@\n 'shell': False,\n })\n \n- build_cmd = self.environment.run(*cmd, **kwargs)\n+ try:\n+ build_cmd = self.environment.run(*cmd, **kwargs)\n+ except BuildEnvironmentWarning as e:\n+ # Re raise as RepositoryError,\n+ # so isn't logged as ERROR.\n+ raise RepositoryError(str(e))\n+\n # Return a tuple to keep compatibility\n return (build_cmd.exit_code, build_cmd.output, build_cmd.error)\n", "issue": "Cleanup exception that are not logged as error\nAfter #4495 got merged @agjohnson suggested to have an attribute in the Exception class and check for that attribute before log the exception, instead of defining a list for the warning exceptions as I did at:\r\n\r\nhttps://github.com/rtfd/readthedocs.org/pull/4495/files#diff-ca52b098301dd315a834b3556ab9a7d5R424\r\n\r\nAlso, there are more exceptions that have to treat in the same way: `ProjectConfigurationError` for example.\r\n\r\nhttps://sentry.io/read-the-docs/readthedocs-org/issues/668248681/\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Base classes for VCS backends.\"\"\"\nimport logging\nimport os\nimport shutil\n\n\nlog = logging.getLogger(__name__)\n\n\nclass VCSVersion:\n\n \"\"\"\n Represents a Version (tag or branch) in a VCS.\n\n This class should only be instantiated in BaseVCS subclasses.\n\n It can act as a context manager to temporarily switch to this tag (eg to\n build docs for this tag).\n \"\"\"\n\n def __init__(self, repository, identifier, verbose_name):\n self.repository = repository\n self.identifier = identifier\n self.verbose_name = verbose_name\n\n def __repr__(self):\n return '<VCSVersion: {}:{}'.format(\n self.repository.repo_url,\n self.verbose_name,\n )\n\n\nclass BaseVCS:\n\n \"\"\"\n Base for VCS Classes.\n\n VCS commands are ran inside a ``LocalEnvironment``.\n \"\"\"\n\n supports_tags = False # Whether this VCS supports tags or not.\n supports_branches = False # Whether this VCS supports branches or not.\n supports_submodules = False\n\n # =========================================================================\n # General methods\n # =========================================================================\n\n # Defining a base API, so we'll have unused args\n # pylint: disable=unused-argument\n def __init__(\n self, project, version_slug, environment=None,\n verbose_name=None, version_type=None, **kwargs\n ):\n self.default_branch = project.default_branch\n self.project = project\n self.name = project.name\n self.repo_url = project.clean_repo\n self.working_dir = project.checkout_path(version_slug)\n # required for External versions\n self.verbose_name = verbose_name\n self.version_type = version_type\n\n from readthedocs.doc_builder.environments import LocalEnvironment\n self.environment = environment or LocalEnvironment(project)\n\n # Update the env variables with the proper VCS env variables\n self.environment.environment.update(self.env)\n\n def check_working_dir(self):\n if not os.path.exists(self.working_dir):\n os.makedirs(self.working_dir)\n\n def make_clean_working_dir(self):\n \"\"\"Ensures that the working dir exists and is empty.\"\"\"\n shutil.rmtree(self.working_dir, ignore_errors=True)\n self.check_working_dir()\n\n @property\n def env(self):\n environment = os.environ.copy()\n\n # TODO: kind of a hack\n del environment['PATH']\n\n return environment\n\n def update(self):\n \"\"\"\n Update a local copy of the repository in self.working_dir.\n\n If self.working_dir is already a valid local copy of the repository,\n update the repository, else create a new local copy of the repository.\n \"\"\"\n self.check_working_dir()\n\n def run(self, *cmd, **kwargs):\n kwargs.update({\n 'cwd': self.working_dir,\n 'shell': False,\n })\n\n build_cmd = self.environment.run(*cmd, **kwargs)\n # Return a tuple to keep compatibility\n return (build_cmd.exit_code, build_cmd.output, build_cmd.error)\n\n # =========================================================================\n # Tag / Branch related methods\n # These methods only apply if supports_tags = True and/or\n # support_branches = True\n # =========================================================================\n\n @property\n def tags(self):\n \"\"\"\n Returns a list of VCSVersion objects.\n\n See VCSVersion for more information.\n \"\"\"\n raise NotImplementedError\n\n @property\n def branches(self):\n \"\"\"\n Returns a list of VCSVersion objects.\n\n See VCSVersion for more information.\n \"\"\"\n raise NotImplementedError\n\n @property\n def commit(self):\n \"\"\"Returns a string representing the current commit.\"\"\"\n raise NotImplementedError\n\n def checkout(self, identifier=None):\n \"\"\"\n Set the state to the given identifier.\n\n If identifier is None, checkout to the latest revision.\n\n The type and format of identifier may change from VCS to VCS, so each\n backend is responsible to understand it's identifiers.\n \"\"\"\n self.check_working_dir()\n\n def update_submodules(self, config):\n \"\"\"\n Update the submodules of the current checkout.\n\n :type config: readthedocs.config.BuildConfigBase\n \"\"\"\n raise NotImplementedError\n", "path": "readthedocs/vcs_support/base.py"}], "after_files": [{"content": "\"\"\"Base classes for VCS backends.\"\"\"\nimport logging\nimport os\nimport shutil\n\nfrom readthedocs.doc_builder.exceptions import BuildEnvironmentWarning\nfrom readthedocs.projects.exceptions import RepositoryError\n\n\nlog = logging.getLogger(__name__)\n\n\nclass VCSVersion:\n\n \"\"\"\n Represents a Version (tag or branch) in a VCS.\n\n This class should only be instantiated in BaseVCS subclasses.\n\n It can act as a context manager to temporarily switch to this tag (eg to\n build docs for this tag).\n \"\"\"\n\n def __init__(self, repository, identifier, verbose_name):\n self.repository = repository\n self.identifier = identifier\n self.verbose_name = verbose_name\n\n def __repr__(self):\n return '<VCSVersion: {}:{}'.format(\n self.repository.repo_url,\n self.verbose_name,\n )\n\n\nclass BaseVCS:\n\n \"\"\"\n Base for VCS Classes.\n\n VCS commands are ran inside a ``LocalEnvironment``.\n \"\"\"\n\n supports_tags = False # Whether this VCS supports tags or not.\n supports_branches = False # Whether this VCS supports branches or not.\n supports_submodules = False\n\n # =========================================================================\n # General methods\n # =========================================================================\n\n # Defining a base API, so we'll have unused args\n # pylint: disable=unused-argument\n def __init__(\n self, project, version_slug, environment=None,\n verbose_name=None, version_type=None, **kwargs\n ):\n self.default_branch = project.default_branch\n self.project = project\n self.name = project.name\n self.repo_url = project.clean_repo\n self.working_dir = project.checkout_path(version_slug)\n # required for External versions\n self.verbose_name = verbose_name\n self.version_type = version_type\n\n from readthedocs.doc_builder.environments import LocalEnvironment\n self.environment = environment or LocalEnvironment(project)\n\n # Update the env variables with the proper VCS env variables\n self.environment.environment.update(self.env)\n\n def check_working_dir(self):\n if not os.path.exists(self.working_dir):\n os.makedirs(self.working_dir)\n\n def make_clean_working_dir(self):\n \"\"\"Ensures that the working dir exists and is empty.\"\"\"\n shutil.rmtree(self.working_dir, ignore_errors=True)\n self.check_working_dir()\n\n @property\n def env(self):\n environment = os.environ.copy()\n\n # TODO: kind of a hack\n del environment['PATH']\n\n return environment\n\n def update(self):\n \"\"\"\n Update a local copy of the repository in self.working_dir.\n\n If self.working_dir is already a valid local copy of the repository,\n update the repository, else create a new local copy of the repository.\n \"\"\"\n self.check_working_dir()\n\n def run(self, *cmd, **kwargs):\n kwargs.update({\n 'cwd': self.working_dir,\n 'shell': False,\n })\n\n try:\n build_cmd = self.environment.run(*cmd, **kwargs)\n except BuildEnvironmentWarning as e:\n # Re raise as RepositoryError,\n # so isn't logged as ERROR.\n raise RepositoryError(str(e))\n\n # Return a tuple to keep compatibility\n return (build_cmd.exit_code, build_cmd.output, build_cmd.error)\n\n # =========================================================================\n # Tag / Branch related methods\n # These methods only apply if supports_tags = True and/or\n # support_branches = True\n # =========================================================================\n\n @property\n def tags(self):\n \"\"\"\n Returns a list of VCSVersion objects.\n\n See VCSVersion for more information.\n \"\"\"\n raise NotImplementedError\n\n @property\n def branches(self):\n \"\"\"\n Returns a list of VCSVersion objects.\n\n See VCSVersion for more information.\n \"\"\"\n raise NotImplementedError\n\n @property\n def commit(self):\n \"\"\"Returns a string representing the current commit.\"\"\"\n raise NotImplementedError\n\n def checkout(self, identifier=None):\n \"\"\"\n Set the state to the given identifier.\n\n If identifier is None, checkout to the latest revision.\n\n The type and format of identifier may change from VCS to VCS, so each\n backend is responsible to understand it's identifiers.\n \"\"\"\n self.check_working_dir()\n\n def update_submodules(self, config):\n \"\"\"\n Update the submodules of the current checkout.\n\n :type config: readthedocs.config.BuildConfigBase\n \"\"\"\n raise NotImplementedError\n", "path": "readthedocs/vcs_support/base.py"}]}
1,697
236
gh_patches_debug_42123
rasdani/github-patches
git_diff
scrapy__scrapy-2400
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Cookies from the Cookie request header are not processed I am new in scrapy, and I meet some problems which I can not get answer from google, so I post it here: 1 Cookie not work even set in DEFAULT_REQUEST_HEADERS: ``` DEFAULT_REQUEST_HEADERS = { 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'accept-encoding': 'gzip, deflate, sdch', 'cache-control': 'no-cache', 'cookie': 'xx=yy', 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.94 Safari/537.36' } ``` ``` class MySpider(scrapy.Spider): def make_requests_from_url(self, url): return scrapy.http.Request(url, headers=DEFAULT_REQUEST_HEADERS) ``` I know the `make_requests_from_url` will only called once for the start_urls, and in my opinion, the first request will send the cookie I set in the `DEFAULT_REQUEST_HEADERS`, however it does not. 2 Share settings between spiders. I have multiple spiders in the project which share most of the settings like `RandomAgentMiddleware` `RandomProxyMiddleware` `UserAgent` `DEFAULT_REQUEST_HEADERS` and etc, however they are configured inside the settings.py for each spider. Is it possible to share these settings? --- The `COOKIES_ENABLED` is set to true. Double-encoded cookies When cookies are passed as UTF8 encoded bytes to the `Request` constructor, they end up being encoded twice and escaped in the `Cookie` header. ``` $ scrapy shell (...) In [1]: fetch(scrapy.Request('https://httpbin.org/cookies', cookies={'a': u'á'.encode('utf8')})) In [2]: request.headers['Cookie'] Out[2]: b"a=b'\\xc3\\xa1'" In [3]: print(response.text) { "cookies": { "a": "b'\\xc3\\xa1'" } } ``` This seems to happen only in Python 3. ``` $ scrapy version -v Scrapy : 1.5.0 lxml : 4.2.6.0 libxml2 : 2.9.8 cssselect : 1.0.3 parsel : 1.5.1 w3lib : 1.19.0 Twisted : 18.9.0 Python : 3.6.0 (default, Sep 1 2017, 10:59:37) - [GCC 4.8.4] pyOpenSSL : 18.0.0 (OpenSSL 1.1.0j 20 Nov 2018) cryptography : 2.4.2 Platform : Linux-4.4.0-134-generic-x86_64-with-debian-jessie-sid ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `scrapy/downloadermiddlewares/cookies.py` Content: ``` 1 import logging 2 from collections import defaultdict 3 4 from scrapy.exceptions import NotConfigured 5 from scrapy.http import Response 6 from scrapy.http.cookies import CookieJar 7 from scrapy.utils.python import to_unicode 8 9 10 logger = logging.getLogger(__name__) 11 12 13 class CookiesMiddleware: 14 """This middleware enables working with sites that need cookies""" 15 16 def __init__(self, debug=False): 17 self.jars = defaultdict(CookieJar) 18 self.debug = debug 19 20 @classmethod 21 def from_crawler(cls, crawler): 22 if not crawler.settings.getbool('COOKIES_ENABLED'): 23 raise NotConfigured 24 return cls(crawler.settings.getbool('COOKIES_DEBUG')) 25 26 def process_request(self, request, spider): 27 if request.meta.get('dont_merge_cookies', False): 28 return 29 30 cookiejarkey = request.meta.get("cookiejar") 31 jar = self.jars[cookiejarkey] 32 cookies = self._get_request_cookies(jar, request) 33 for cookie in cookies: 34 jar.set_cookie_if_ok(cookie, request) 35 36 # set Cookie header 37 request.headers.pop('Cookie', None) 38 jar.add_cookie_header(request) 39 self._debug_cookie(request, spider) 40 41 def process_response(self, request, response, spider): 42 if request.meta.get('dont_merge_cookies', False): 43 return response 44 45 # extract cookies from Set-Cookie and drop invalid/expired cookies 46 cookiejarkey = request.meta.get("cookiejar") 47 jar = self.jars[cookiejarkey] 48 jar.extract_cookies(response, request) 49 self._debug_set_cookie(response, spider) 50 51 return response 52 53 def _debug_cookie(self, request, spider): 54 if self.debug: 55 cl = [to_unicode(c, errors='replace') 56 for c in request.headers.getlist('Cookie')] 57 if cl: 58 cookies = "\n".join("Cookie: {}\n".format(c) for c in cl) 59 msg = "Sending cookies to: {}\n{}".format(request, cookies) 60 logger.debug(msg, extra={'spider': spider}) 61 62 def _debug_set_cookie(self, response, spider): 63 if self.debug: 64 cl = [to_unicode(c, errors='replace') 65 for c in response.headers.getlist('Set-Cookie')] 66 if cl: 67 cookies = "\n".join("Set-Cookie: {}\n".format(c) for c in cl) 68 msg = "Received cookies from: {}\n{}".format(response, cookies) 69 logger.debug(msg, extra={'spider': spider}) 70 71 def _format_cookie(self, cookie): 72 # build cookie string 73 cookie_str = '%s=%s' % (cookie['name'], cookie['value']) 74 75 if cookie.get('path', None): 76 cookie_str += '; Path=%s' % cookie['path'] 77 if cookie.get('domain', None): 78 cookie_str += '; Domain=%s' % cookie['domain'] 79 80 return cookie_str 81 82 def _get_request_cookies(self, jar, request): 83 if isinstance(request.cookies, dict): 84 cookie_list = [ 85 {'name': k, 'value': v} 86 for k, v in request.cookies.items() 87 ] 88 else: 89 cookie_list = request.cookies 90 91 cookies = [self._format_cookie(x) for x in cookie_list] 92 headers = {'Set-Cookie': cookies} 93 response = Response(request.url, headers=headers) 94 95 return jar.make_cookies(response, request) 96 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scrapy/downloadermiddlewares/cookies.py b/scrapy/downloadermiddlewares/cookies.py --- a/scrapy/downloadermiddlewares/cookies.py +++ b/scrapy/downloadermiddlewares/cookies.py @@ -29,8 +29,7 @@ cookiejarkey = request.meta.get("cookiejar") jar = self.jars[cookiejarkey] - cookies = self._get_request_cookies(jar, request) - for cookie in cookies: + for cookie in self._get_request_cookies(jar, request): jar.set_cookie_if_ok(cookie, request) # set Cookie header @@ -68,28 +67,65 @@ msg = "Received cookies from: {}\n{}".format(response, cookies) logger.debug(msg, extra={'spider': spider}) - def _format_cookie(self, cookie): - # build cookie string - cookie_str = '%s=%s' % (cookie['name'], cookie['value']) - - if cookie.get('path', None): - cookie_str += '; Path=%s' % cookie['path'] - if cookie.get('domain', None): - cookie_str += '; Domain=%s' % cookie['domain'] - + def _format_cookie(self, cookie, request): + """ + Given a dict consisting of cookie components, return its string representation. + Decode from bytes if necessary. + """ + decoded = {} + for key in ("name", "value", "path", "domain"): + if not cookie.get(key): + if key in ("name", "value"): + msg = "Invalid cookie found in request {}: {} ('{}' is missing)" + logger.warning(msg.format(request, cookie, key)) + return + continue + if isinstance(cookie[key], str): + decoded[key] = cookie[key] + else: + try: + decoded[key] = cookie[key].decode("utf8") + except UnicodeDecodeError: + logger.warning("Non UTF-8 encoded cookie found in request %s: %s", + request, cookie) + decoded[key] = cookie[key].decode("latin1", errors="replace") + + cookie_str = "{}={}".format(decoded.pop("name"), decoded.pop("value")) + for key, value in decoded.items(): # path, domain + cookie_str += "; {}={}".format(key.capitalize(), value) return cookie_str def _get_request_cookies(self, jar, request): - if isinstance(request.cookies, dict): - cookie_list = [ - {'name': k, 'value': v} - for k, v in request.cookies.items() - ] - else: - cookie_list = request.cookies - - cookies = [self._format_cookie(x) for x in cookie_list] - headers = {'Set-Cookie': cookies} - response = Response(request.url, headers=headers) - - return jar.make_cookies(response, request) + """ + Extract cookies from a Request. Values from the `Request.cookies` attribute + take precedence over values from the `Cookie` request header. + """ + def get_cookies_from_header(jar, request): + cookie_header = request.headers.get("Cookie") + if not cookie_header: + return [] + cookie_gen_bytes = (s.strip() for s in cookie_header.split(b";")) + cookie_list_unicode = [] + for cookie_bytes in cookie_gen_bytes: + try: + cookie_unicode = cookie_bytes.decode("utf8") + except UnicodeDecodeError: + logger.warning("Non UTF-8 encoded cookie found in request %s: %s", + request, cookie_bytes) + cookie_unicode = cookie_bytes.decode("latin1", errors="replace") + cookie_list_unicode.append(cookie_unicode) + response = Response(request.url, headers={"Set-Cookie": cookie_list_unicode}) + return jar.make_cookies(response, request) + + def get_cookies_from_attribute(jar, request): + if not request.cookies: + return [] + elif isinstance(request.cookies, dict): + cookies = ({"name": k, "value": v} for k, v in request.cookies.items()) + else: + cookies = request.cookies + formatted = filter(None, (self._format_cookie(c, request) for c in cookies)) + response = Response(request.url, headers={"Set-Cookie": formatted}) + return jar.make_cookies(response, request) + + return get_cookies_from_header(jar, request) + get_cookies_from_attribute(jar, request)
{"golden_diff": "diff --git a/scrapy/downloadermiddlewares/cookies.py b/scrapy/downloadermiddlewares/cookies.py\n--- a/scrapy/downloadermiddlewares/cookies.py\n+++ b/scrapy/downloadermiddlewares/cookies.py\n@@ -29,8 +29,7 @@\n \n cookiejarkey = request.meta.get(\"cookiejar\")\n jar = self.jars[cookiejarkey]\n- cookies = self._get_request_cookies(jar, request)\n- for cookie in cookies:\n+ for cookie in self._get_request_cookies(jar, request):\n jar.set_cookie_if_ok(cookie, request)\n \n # set Cookie header\n@@ -68,28 +67,65 @@\n msg = \"Received cookies from: {}\\n{}\".format(response, cookies)\n logger.debug(msg, extra={'spider': spider})\n \n- def _format_cookie(self, cookie):\n- # build cookie string\n- cookie_str = '%s=%s' % (cookie['name'], cookie['value'])\n-\n- if cookie.get('path', None):\n- cookie_str += '; Path=%s' % cookie['path']\n- if cookie.get('domain', None):\n- cookie_str += '; Domain=%s' % cookie['domain']\n-\n+ def _format_cookie(self, cookie, request):\n+ \"\"\"\n+ Given a dict consisting of cookie components, return its string representation.\n+ Decode from bytes if necessary.\n+ \"\"\"\n+ decoded = {}\n+ for key in (\"name\", \"value\", \"path\", \"domain\"):\n+ if not cookie.get(key):\n+ if key in (\"name\", \"value\"):\n+ msg = \"Invalid cookie found in request {}: {} ('{}' is missing)\"\n+ logger.warning(msg.format(request, cookie, key))\n+ return\n+ continue\n+ if isinstance(cookie[key], str):\n+ decoded[key] = cookie[key]\n+ else:\n+ try:\n+ decoded[key] = cookie[key].decode(\"utf8\")\n+ except UnicodeDecodeError:\n+ logger.warning(\"Non UTF-8 encoded cookie found in request %s: %s\",\n+ request, cookie)\n+ decoded[key] = cookie[key].decode(\"latin1\", errors=\"replace\")\n+\n+ cookie_str = \"{}={}\".format(decoded.pop(\"name\"), decoded.pop(\"value\"))\n+ for key, value in decoded.items(): # path, domain\n+ cookie_str += \"; {}={}\".format(key.capitalize(), value)\n return cookie_str\n \n def _get_request_cookies(self, jar, request):\n- if isinstance(request.cookies, dict):\n- cookie_list = [\n- {'name': k, 'value': v}\n- for k, v in request.cookies.items()\n- ]\n- else:\n- cookie_list = request.cookies\n-\n- cookies = [self._format_cookie(x) for x in cookie_list]\n- headers = {'Set-Cookie': cookies}\n- response = Response(request.url, headers=headers)\n-\n- return jar.make_cookies(response, request)\n+ \"\"\"\n+ Extract cookies from a Request. Values from the `Request.cookies` attribute\n+ take precedence over values from the `Cookie` request header.\n+ \"\"\"\n+ def get_cookies_from_header(jar, request):\n+ cookie_header = request.headers.get(\"Cookie\")\n+ if not cookie_header:\n+ return []\n+ cookie_gen_bytes = (s.strip() for s in cookie_header.split(b\";\"))\n+ cookie_list_unicode = []\n+ for cookie_bytes in cookie_gen_bytes:\n+ try:\n+ cookie_unicode = cookie_bytes.decode(\"utf8\")\n+ except UnicodeDecodeError:\n+ logger.warning(\"Non UTF-8 encoded cookie found in request %s: %s\",\n+ request, cookie_bytes)\n+ cookie_unicode = cookie_bytes.decode(\"latin1\", errors=\"replace\")\n+ cookie_list_unicode.append(cookie_unicode)\n+ response = Response(request.url, headers={\"Set-Cookie\": cookie_list_unicode})\n+ return jar.make_cookies(response, request)\n+\n+ def get_cookies_from_attribute(jar, request):\n+ if not request.cookies:\n+ return []\n+ elif isinstance(request.cookies, dict):\n+ cookies = ({\"name\": k, \"value\": v} for k, v in request.cookies.items())\n+ else:\n+ cookies = request.cookies\n+ formatted = filter(None, (self._format_cookie(c, request) for c in cookies))\n+ response = Response(request.url, headers={\"Set-Cookie\": formatted})\n+ return jar.make_cookies(response, request)\n+\n+ return get_cookies_from_header(jar, request) + get_cookies_from_attribute(jar, request)\n", "issue": "Cookies from the Cookie request header are not processed\nI am new in scrapy, and I meet some problems which I can not get answer from google, so I post it here:\n\n1 Cookie not work even set in DEFAULT_REQUEST_HEADERS:\n\n```\nDEFAULT_REQUEST_HEADERS = {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'accept-encoding': 'gzip, deflate, sdch',\n 'cache-control': 'no-cache',\n 'cookie': 'xx=yy',\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.94 Safari/537.36'\n}\n```\n\n```\nclass MySpider(scrapy.Spider):\n def make_requests_from_url(self, url):\n return scrapy.http.Request(url, headers=DEFAULT_REQUEST_HEADERS)\n```\n\nI know the `make_requests_from_url` will only called once for the start_urls, and in my opinion, the first request will send the cookie I set in the `DEFAULT_REQUEST_HEADERS`, however it does not.\n\n2 Share settings between spiders.\n\nI have multiple spiders in the project which share most of the settings like `RandomAgentMiddleware` `RandomProxyMiddleware` `UserAgent` `DEFAULT_REQUEST_HEADERS` and etc, however they are configured inside the settings.py for each spider.\n\nIs it possible to share these settings?\n\n---\n\nThe \n`COOKIES_ENABLED` is set to true.\n\nDouble-encoded cookies\nWhen cookies are passed as UTF8 encoded bytes to the `Request` constructor, they end up being encoded twice and escaped in the `Cookie` header.\r\n\r\n```\r\n$ scrapy shell\r\n(...)\r\nIn [1]: fetch(scrapy.Request('https://httpbin.org/cookies', cookies={'a': u'\u00e1'.encode('utf8')}))\r\n\r\nIn [2]: request.headers['Cookie']\r\nOut[2]: b\"a=b'\\\\xc3\\\\xa1'\"\r\n\r\nIn [3]: print(response.text)\r\n{\r\n \"cookies\": {\r\n \"a\": \"b'\\\\xc3\\\\xa1'\"\r\n }\r\n}\r\n```\r\n\r\nThis seems to happen only in Python 3.\r\n```\r\n$ scrapy version -v\r\nScrapy : 1.5.0\r\nlxml : 4.2.6.0\r\nlibxml2 : 2.9.8\r\ncssselect : 1.0.3\r\nparsel : 1.5.1\r\nw3lib : 1.19.0\r\nTwisted : 18.9.0\r\nPython : 3.6.0 (default, Sep 1 2017, 10:59:37) - [GCC 4.8.4]\r\npyOpenSSL : 18.0.0 (OpenSSL 1.1.0j 20 Nov 2018)\r\ncryptography : 2.4.2\r\nPlatform : Linux-4.4.0-134-generic-x86_64-with-debian-jessie-sid\r\n```\n", "before_files": [{"content": "import logging\nfrom collections import defaultdict\n\nfrom scrapy.exceptions import NotConfigured\nfrom scrapy.http import Response\nfrom scrapy.http.cookies import CookieJar\nfrom scrapy.utils.python import to_unicode\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass CookiesMiddleware:\n \"\"\"This middleware enables working with sites that need cookies\"\"\"\n\n def __init__(self, debug=False):\n self.jars = defaultdict(CookieJar)\n self.debug = debug\n\n @classmethod\n def from_crawler(cls, crawler):\n if not crawler.settings.getbool('COOKIES_ENABLED'):\n raise NotConfigured\n return cls(crawler.settings.getbool('COOKIES_DEBUG'))\n\n def process_request(self, request, spider):\n if request.meta.get('dont_merge_cookies', False):\n return\n\n cookiejarkey = request.meta.get(\"cookiejar\")\n jar = self.jars[cookiejarkey]\n cookies = self._get_request_cookies(jar, request)\n for cookie in cookies:\n jar.set_cookie_if_ok(cookie, request)\n\n # set Cookie header\n request.headers.pop('Cookie', None)\n jar.add_cookie_header(request)\n self._debug_cookie(request, spider)\n\n def process_response(self, request, response, spider):\n if request.meta.get('dont_merge_cookies', False):\n return response\n\n # extract cookies from Set-Cookie and drop invalid/expired cookies\n cookiejarkey = request.meta.get(\"cookiejar\")\n jar = self.jars[cookiejarkey]\n jar.extract_cookies(response, request)\n self._debug_set_cookie(response, spider)\n\n return response\n\n def _debug_cookie(self, request, spider):\n if self.debug:\n cl = [to_unicode(c, errors='replace')\n for c in request.headers.getlist('Cookie')]\n if cl:\n cookies = \"\\n\".join(\"Cookie: {}\\n\".format(c) for c in cl)\n msg = \"Sending cookies to: {}\\n{}\".format(request, cookies)\n logger.debug(msg, extra={'spider': spider})\n\n def _debug_set_cookie(self, response, spider):\n if self.debug:\n cl = [to_unicode(c, errors='replace')\n for c in response.headers.getlist('Set-Cookie')]\n if cl:\n cookies = \"\\n\".join(\"Set-Cookie: {}\\n\".format(c) for c in cl)\n msg = \"Received cookies from: {}\\n{}\".format(response, cookies)\n logger.debug(msg, extra={'spider': spider})\n\n def _format_cookie(self, cookie):\n # build cookie string\n cookie_str = '%s=%s' % (cookie['name'], cookie['value'])\n\n if cookie.get('path', None):\n cookie_str += '; Path=%s' % cookie['path']\n if cookie.get('domain', None):\n cookie_str += '; Domain=%s' % cookie['domain']\n\n return cookie_str\n\n def _get_request_cookies(self, jar, request):\n if isinstance(request.cookies, dict):\n cookie_list = [\n {'name': k, 'value': v}\n for k, v in request.cookies.items()\n ]\n else:\n cookie_list = request.cookies\n\n cookies = [self._format_cookie(x) for x in cookie_list]\n headers = {'Set-Cookie': cookies}\n response = Response(request.url, headers=headers)\n\n return jar.make_cookies(response, request)\n", "path": "scrapy/downloadermiddlewares/cookies.py"}], "after_files": [{"content": "import logging\nfrom collections import defaultdict\n\nfrom scrapy.exceptions import NotConfigured\nfrom scrapy.http import Response\nfrom scrapy.http.cookies import CookieJar\nfrom scrapy.utils.python import to_unicode\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass CookiesMiddleware:\n \"\"\"This middleware enables working with sites that need cookies\"\"\"\n\n def __init__(self, debug=False):\n self.jars = defaultdict(CookieJar)\n self.debug = debug\n\n @classmethod\n def from_crawler(cls, crawler):\n if not crawler.settings.getbool('COOKIES_ENABLED'):\n raise NotConfigured\n return cls(crawler.settings.getbool('COOKIES_DEBUG'))\n\n def process_request(self, request, spider):\n if request.meta.get('dont_merge_cookies', False):\n return\n\n cookiejarkey = request.meta.get(\"cookiejar\")\n jar = self.jars[cookiejarkey]\n for cookie in self._get_request_cookies(jar, request):\n jar.set_cookie_if_ok(cookie, request)\n\n # set Cookie header\n request.headers.pop('Cookie', None)\n jar.add_cookie_header(request)\n self._debug_cookie(request, spider)\n\n def process_response(self, request, response, spider):\n if request.meta.get('dont_merge_cookies', False):\n return response\n\n # extract cookies from Set-Cookie and drop invalid/expired cookies\n cookiejarkey = request.meta.get(\"cookiejar\")\n jar = self.jars[cookiejarkey]\n jar.extract_cookies(response, request)\n self._debug_set_cookie(response, spider)\n\n return response\n\n def _debug_cookie(self, request, spider):\n if self.debug:\n cl = [to_unicode(c, errors='replace')\n for c in request.headers.getlist('Cookie')]\n if cl:\n cookies = \"\\n\".join(\"Cookie: {}\\n\".format(c) for c in cl)\n msg = \"Sending cookies to: {}\\n{}\".format(request, cookies)\n logger.debug(msg, extra={'spider': spider})\n\n def _debug_set_cookie(self, response, spider):\n if self.debug:\n cl = [to_unicode(c, errors='replace')\n for c in response.headers.getlist('Set-Cookie')]\n if cl:\n cookies = \"\\n\".join(\"Set-Cookie: {}\\n\".format(c) for c in cl)\n msg = \"Received cookies from: {}\\n{}\".format(response, cookies)\n logger.debug(msg, extra={'spider': spider})\n\n def _format_cookie(self, cookie, request):\n \"\"\"\n Given a dict consisting of cookie components, return its string representation.\n Decode from bytes if necessary.\n \"\"\"\n decoded = {}\n for key in (\"name\", \"value\", \"path\", \"domain\"):\n if not cookie.get(key):\n if key in (\"name\", \"value\"):\n msg = \"Invalid cookie found in request {}: {} ('{}' is missing)\"\n logger.warning(msg.format(request, cookie, key))\n return\n continue\n if isinstance(cookie[key], str):\n decoded[key] = cookie[key]\n else:\n try:\n decoded[key] = cookie[key].decode(\"utf8\")\n except UnicodeDecodeError:\n logger.warning(\"Non UTF-8 encoded cookie found in request %s: %s\",\n request, cookie)\n decoded[key] = cookie[key].decode(\"latin1\", errors=\"replace\")\n\n cookie_str = \"{}={}\".format(decoded.pop(\"name\"), decoded.pop(\"value\"))\n for key, value in decoded.items(): # path, domain\n cookie_str += \"; {}={}\".format(key.capitalize(), value)\n return cookie_str\n\n def _get_request_cookies(self, jar, request):\n \"\"\"\n Extract cookies from a Request. Values from the `Request.cookies` attribute\n take precedence over values from the `Cookie` request header.\n \"\"\"\n def get_cookies_from_header(jar, request):\n cookie_header = request.headers.get(\"Cookie\")\n if not cookie_header:\n return []\n cookie_gen_bytes = (s.strip() for s in cookie_header.split(b\";\"))\n cookie_list_unicode = []\n for cookie_bytes in cookie_gen_bytes:\n try:\n cookie_unicode = cookie_bytes.decode(\"utf8\")\n except UnicodeDecodeError:\n logger.warning(\"Non UTF-8 encoded cookie found in request %s: %s\",\n request, cookie_bytes)\n cookie_unicode = cookie_bytes.decode(\"latin1\", errors=\"replace\")\n cookie_list_unicode.append(cookie_unicode)\n response = Response(request.url, headers={\"Set-Cookie\": cookie_list_unicode})\n return jar.make_cookies(response, request)\n\n def get_cookies_from_attribute(jar, request):\n if not request.cookies:\n return []\n elif isinstance(request.cookies, dict):\n cookies = ({\"name\": k, \"value\": v} for k, v in request.cookies.items())\n else:\n cookies = request.cookies\n formatted = filter(None, (self._format_cookie(c, request) for c in cookies))\n response = Response(request.url, headers={\"Set-Cookie\": formatted})\n return jar.make_cookies(response, request)\n\n return get_cookies_from_header(jar, request) + get_cookies_from_attribute(jar, request)\n", "path": "scrapy/downloadermiddlewares/cookies.py"}]}
1,854
996
gh_patches_debug_19309
rasdani/github-patches
git_diff
mitmproxy__mitmproxy-2048
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Support absolute-form HTTP requests with IPv6 addresses ##### Steps to reproduce the problem: 1. MITMDump proxy IPv6 flow 2. Log ``` 172.17.15.1:53074: HTTP protocol error in client request: Bad HTTP request line: b'GET http://[::ffff:180.97.8.37]/mmsns/9KavCVwReibwDKBMmibrWUdVZZbHCQ0bV3R89mboKO6QDls7Sxcl4tfbHvLIHFbj3NASftTH2VAGw/150?tp=wxpc&length=2208&width=1242&idx=1&token=WSEN6qDsKwV8A02w3onOGQYfxnkibdqSOkmHhZGNB4DGicdGyTltMQXCTF7lr4IJR8Jz4lKQBBW47EV1CP33SGjg HTTP/1.1' 172.17.15.1:53075: HTTP protocol error in client request: Bad HTTP request line: b'GET http://[::ffff:b461:819]/mmcrhead/Q3auHgzwzM606QEH0kXoF60vMh5Iiay7B3DiauET3kCpbBwEfgzhNqOSeJ6y4geORGPxEcKf36Totd4sHQcwvBEg/0 HTTP/1.1' ``` ##### Any other comments? What have you tried so far? No ##### System information ``` Mitmproxy version: 1.0.2 Python version: 3.6.0 Platform: Darwin-15.6.0-x86_64-i386-64bit SSL version: OpenSSL 1.0.2j 26 Sep 2016 Mac version: 10.11.6 ('', '', '') x86_6 ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mitmproxy/net/check.py` Content: ``` 1 import re 2 3 # Allow underscore in host name 4 _label_valid = re.compile(b"(?!-)[A-Z\d\-_]{1,63}(?<!-)$", re.IGNORECASE) 5 6 7 def is_valid_host(host: bytes) -> bool: 8 """ 9 Checks if a hostname is valid. 10 """ 11 try: 12 host.decode("idna") 13 except ValueError: 14 return False 15 if len(host) > 255: 16 return False 17 if host and host[-1:] == b".": 18 host = host[:-1] 19 return all(_label_valid.match(x) for x in host.split(b".")) 20 21 22 def is_valid_port(port): 23 return 0 <= port <= 65535 24 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mitmproxy/net/check.py b/mitmproxy/net/check.py --- a/mitmproxy/net/check.py +++ b/mitmproxy/net/check.py @@ -1,3 +1,4 @@ +import ipaddress import re # Allow underscore in host name @@ -6,17 +7,26 @@ def is_valid_host(host: bytes) -> bool: """ - Checks if a hostname is valid. + Checks if the passed bytes are a valid DNS hostname or an IPv4/IPv6 address. """ try: host.decode("idna") except ValueError: return False + # RFC1035: 255 bytes or less. if len(host) > 255: return False if host and host[-1:] == b".": host = host[:-1] - return all(_label_valid.match(x) for x in host.split(b".")) + # DNS hostname + if all(_label_valid.match(x) for x in host.split(b".")): + return True + # IPv4/IPv6 address + try: + ipaddress.ip_address(host.decode('idna')) + return True + except ValueError: + return False def is_valid_port(port):
{"golden_diff": "diff --git a/mitmproxy/net/check.py b/mitmproxy/net/check.py\n--- a/mitmproxy/net/check.py\n+++ b/mitmproxy/net/check.py\n@@ -1,3 +1,4 @@\n+import ipaddress\n import re\n \n # Allow underscore in host name\n@@ -6,17 +7,26 @@\n \n def is_valid_host(host: bytes) -> bool:\n \"\"\"\n- Checks if a hostname is valid.\n+ Checks if the passed bytes are a valid DNS hostname or an IPv4/IPv6 address.\n \"\"\"\n try:\n host.decode(\"idna\")\n except ValueError:\n return False\n+ # RFC1035: 255 bytes or less.\n if len(host) > 255:\n return False\n if host and host[-1:] == b\".\":\n host = host[:-1]\n- return all(_label_valid.match(x) for x in host.split(b\".\"))\n+ # DNS hostname\n+ if all(_label_valid.match(x) for x in host.split(b\".\")):\n+ return True\n+ # IPv4/IPv6 address\n+ try:\n+ ipaddress.ip_address(host.decode('idna'))\n+ return True\n+ except ValueError:\n+ return False\n \n \n def is_valid_port(port):\n", "issue": "Support absolute-form HTTP requests with IPv6 addresses\n##### Steps to reproduce the problem:\r\n\r\n1. MITMDump proxy IPv6 flow\r\n2. Log\r\n```\r\n172.17.15.1:53074: HTTP protocol error in client request: Bad HTTP request line: b'GET http://[::ffff:180.97.8.37]/mmsns/9KavCVwReibwDKBMmibrWUdVZZbHCQ0bV3R89mboKO6QDls7Sxcl4tfbHvLIHFbj3NASftTH2VAGw/150?tp=wxpc&length=2208&width=1242&idx=1&token=WSEN6qDsKwV8A02w3onOGQYfxnkibdqSOkmHhZGNB4DGicdGyTltMQXCTF7lr4IJR8Jz4lKQBBW47EV1CP33SGjg HTTP/1.1'\r\n172.17.15.1:53075: HTTP protocol error in client request: Bad HTTP request line: b'GET http://[::ffff:b461:819]/mmcrhead/Q3auHgzwzM606QEH0kXoF60vMh5Iiay7B3DiauET3kCpbBwEfgzhNqOSeJ6y4geORGPxEcKf36Totd4sHQcwvBEg/0 HTTP/1.1'\r\n```\r\n\r\n\r\n##### Any other comments? What have you tried so far?\r\nNo\r\n\r\n\r\n##### System information\r\n```\r\nMitmproxy version: 1.0.2\r\nPython version: 3.6.0\r\nPlatform: Darwin-15.6.0-x86_64-i386-64bit\r\nSSL version: OpenSSL 1.0.2j 26 Sep 2016\r\nMac version: 10.11.6 ('', '', '') x86_6\r\n```\r\n\r\n\n", "before_files": [{"content": "import re\n\n# Allow underscore in host name\n_label_valid = re.compile(b\"(?!-)[A-Z\\d\\-_]{1,63}(?<!-)$\", re.IGNORECASE)\n\n\ndef is_valid_host(host: bytes) -> bool:\n \"\"\"\n Checks if a hostname is valid.\n \"\"\"\n try:\n host.decode(\"idna\")\n except ValueError:\n return False\n if len(host) > 255:\n return False\n if host and host[-1:] == b\".\":\n host = host[:-1]\n return all(_label_valid.match(x) for x in host.split(b\".\"))\n\n\ndef is_valid_port(port):\n return 0 <= port <= 65535\n", "path": "mitmproxy/net/check.py"}], "after_files": [{"content": "import ipaddress\nimport re\n\n# Allow underscore in host name\n_label_valid = re.compile(b\"(?!-)[A-Z\\d\\-_]{1,63}(?<!-)$\", re.IGNORECASE)\n\n\ndef is_valid_host(host: bytes) -> bool:\n \"\"\"\n Checks if the passed bytes are a valid DNS hostname or an IPv4/IPv6 address.\n \"\"\"\n try:\n host.decode(\"idna\")\n except ValueError:\n return False\n # RFC1035: 255 bytes or less.\n if len(host) > 255:\n return False\n if host and host[-1:] == b\".\":\n host = host[:-1]\n # DNS hostname\n if all(_label_valid.match(x) for x in host.split(b\".\")):\n return True\n # IPv4/IPv6 address\n try:\n ipaddress.ip_address(host.decode('idna'))\n return True\n except ValueError:\n return False\n\n\ndef is_valid_port(port):\n return 0 <= port <= 65535\n", "path": "mitmproxy/net/check.py"}]}
940
283
gh_patches_debug_16329
rasdani/github-patches
git_diff
gratipay__gratipay.com-3934
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Can't submit new team after changing image. Can't believe this didn't come up yet. I noticed this while exploring [create.json.spt](https://github.com/gratipay/gratipay.com/blob/master/www/teams/create.json.spt) which inspires the new [edit.json.spt](https://github.com/gratipay/gratipay.com/pull/3923/files#diff-6). The way it is written right now, we first write the team details to the db (with a unique generated `slug`) and _then_ try to save the team image. If a user uploads an image of size > 1Mb or an image which is not a jpg or png, the team creation won't be successful as far as the user is concerned and he'll resubmit the team application form with an appropriate image. But when he does again, we would have already created a slug for that team name resulting in a misleading message of `Sorry, there is already a team using <slug>.` when in fact the `slug` was created because we wrote the team details to the db first. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `gratipay/utils/images.py` Content: ``` 1 import zipfile 2 from cStringIO import StringIO 3 4 import requests 5 6 def imgize(image, image_type): 7 large = None 8 small = None 9 crops = requests.post( 'http://gip.rocks/v1', 10 data=image, 11 headers={'Content-Type': image_type} 12 ) 13 if crops.status_code == 200: 14 zf = zipfile.ZipFile(StringIO(crops.content)) 15 large = zf.open('160').read() 16 small = zf.open('48').read() 17 18 return crops.status_code, large, small ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/gratipay/utils/images.py b/gratipay/utils/images.py --- a/gratipay/utils/images.py +++ b/gratipay/utils/images.py @@ -8,11 +8,22 @@ small = None crops = requests.post( 'http://gip.rocks/v1', data=image, - headers={'Content-Type': image_type} - ) + headers={'Content-Type': image_type}) + if crops.status_code == 200: zf = zipfile.ZipFile(StringIO(crops.content)) large = zf.open('160').read() small = zf.open('48').read() + return large, small + elif crops.status_code == 413: + raise ImageTooLarge + elif crops.status_code == 415: + raise InvalidImageType + else: + raise UnknownImageError + +class ImageTooLarge(Exception): pass + +class InvalidImageType(Exception): pass - return crops.status_code, large, small \ No newline at end of file +class UnknownImageError(Exception): pass
{"golden_diff": "diff --git a/gratipay/utils/images.py b/gratipay/utils/images.py\n--- a/gratipay/utils/images.py\n+++ b/gratipay/utils/images.py\n@@ -8,11 +8,22 @@\n small = None\n crops = requests.post( 'http://gip.rocks/v1',\n data=image,\n- headers={'Content-Type': image_type}\n- )\n+ headers={'Content-Type': image_type})\n+\n if crops.status_code == 200:\n zf = zipfile.ZipFile(StringIO(crops.content))\n large = zf.open('160').read()\n small = zf.open('48').read()\n+ return large, small\n+ elif crops.status_code == 413:\n+ raise ImageTooLarge\n+ elif crops.status_code == 415:\n+ raise InvalidImageType\n+ else:\n+ raise UnknownImageError\n+\n+class ImageTooLarge(Exception): pass\n+\n+class InvalidImageType(Exception): pass\n \n- return crops.status_code, large, small\n\\ No newline at end of file\n+class UnknownImageError(Exception): pass\n", "issue": "Can't submit new team after changing image.\nCan't believe this didn't come up yet. I noticed this while exploring [create.json.spt](https://github.com/gratipay/gratipay.com/blob/master/www/teams/create.json.spt) which inspires the new [edit.json.spt](https://github.com/gratipay/gratipay.com/pull/3923/files#diff-6). \n\nThe way it is written right now, we first write the team details to the db (with a unique generated `slug`) and _then_ try to save the team image. If a user uploads an image of size > 1Mb or an image which is not a jpg or png, the team creation won't be successful as far as the user is concerned and he'll resubmit the team application form with an appropriate image. But when he does again, we would have already created a slug for that team name resulting in a misleading message of `Sorry, there is already a team using <slug>.` when in fact the `slug` was created because we wrote the team details to the db first.\n\n", "before_files": [{"content": "import zipfile\nfrom cStringIO import StringIO\n\nimport requests\n\ndef imgize(image, image_type):\n large = None\n small = None\n crops = requests.post( 'http://gip.rocks/v1',\n data=image,\n headers={'Content-Type': image_type}\n )\n if crops.status_code == 200:\n zf = zipfile.ZipFile(StringIO(crops.content))\n large = zf.open('160').read()\n small = zf.open('48').read()\n\n return crops.status_code, large, small", "path": "gratipay/utils/images.py"}], "after_files": [{"content": "import zipfile\nfrom cStringIO import StringIO\n\nimport requests\n\ndef imgize(image, image_type):\n large = None\n small = None\n crops = requests.post( 'http://gip.rocks/v1',\n data=image,\n headers={'Content-Type': image_type})\n\n if crops.status_code == 200:\n zf = zipfile.ZipFile(StringIO(crops.content))\n large = zf.open('160').read()\n small = zf.open('48').read()\n return large, small\n elif crops.status_code == 413:\n raise ImageTooLarge\n elif crops.status_code == 415:\n raise InvalidImageType\n else:\n raise UnknownImageError\n\nclass ImageTooLarge(Exception): pass\n\nclass InvalidImageType(Exception): pass\n\nclass UnknownImageError(Exception): pass\n", "path": "gratipay/utils/images.py"}]}
638
247
gh_patches_debug_8456
rasdani/github-patches
git_diff
microsoft__ptvsd-797
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add ability to launch the debugger in non-debug mode Currently we can only launch the debugger in non-debug mode when using `-m`. I'd like to have the same feature by importing PTVSD and invoking a function, similar to debugging using the `debug` function in `debugger.py` Basically this is necessary to launch the debugger in non-debug mode when using a launcher script. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ptvsd/debugger.py` Content: ``` 1 # Copyright (c) Microsoft Corporation. All rights reserved. 2 # Licensed under the MIT License. See LICENSE in the project root 3 # for license information. 4 5 import sys 6 7 from ptvsd._local import run_module, run_file 8 9 10 # TODO: not needed? 11 DONT_DEBUG = [] 12 13 LOCALHOST = 'localhost' 14 15 RUNNERS = { 16 'module': run_module, # python -m spam 17 'script': run_file, # python spam.py 18 'code': run_file, # python -c 'print("spam")' 19 None: run_file, # catchall 20 } 21 22 23 def debug(filename, port_num, debug_id, debug_options, run_as, 24 _runners=RUNNERS, _extra=None, *args, **kwargs): 25 # TODO: docstring 26 if _extra is None: 27 _extra = sys.argv[1:] 28 address = (LOCALHOST, port_num) 29 try: 30 run = _runners[run_as] 31 except KeyError: 32 # TODO: fail? 33 run = _runners[None] 34 if _extra: 35 args = _extra + list(args) 36 kwargs.setdefault('singlesession', True) 37 run(address, filename, *args, **kwargs) 38 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ptvsd/debugger.py b/ptvsd/debugger.py --- a/ptvsd/debugger.py +++ b/ptvsd/debugger.py @@ -4,7 +4,7 @@ import sys -from ptvsd._local import run_module, run_file +from ptvsd._local import run_module, run_file, run_main # TODO: not needed? @@ -35,3 +35,9 @@ args = _extra + list(args) kwargs.setdefault('singlesession', True) run(address, filename, *args, **kwargs) + + +def run(filename, port_num, run_as, + *args, **kwargs): + address = (LOCALHOST, port_num) + run_main(address, filename, run_as, *args, **kwargs)
{"golden_diff": "diff --git a/ptvsd/debugger.py b/ptvsd/debugger.py\n--- a/ptvsd/debugger.py\n+++ b/ptvsd/debugger.py\n@@ -4,7 +4,7 @@\n \n import sys\n \n-from ptvsd._local import run_module, run_file\n+from ptvsd._local import run_module, run_file, run_main\n \n \n # TODO: not needed?\n@@ -35,3 +35,9 @@\n args = _extra + list(args)\n kwargs.setdefault('singlesession', True)\n run(address, filename, *args, **kwargs)\n+\n+\n+def run(filename, port_num, run_as,\n+ *args, **kwargs):\n+ address = (LOCALHOST, port_num)\n+ run_main(address, filename, run_as, *args, **kwargs)\n", "issue": "Add ability to launch the debugger in non-debug mode\nCurrently we can only launch the debugger in non-debug mode when using `-m`.\r\nI'd like to have the same feature by importing PTVSD and invoking a function, similar to debugging using the `debug` function in `debugger.py`\r\n\r\nBasically this is necessary to launch the debugger in non-debug mode when using a launcher script.\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See LICENSE in the project root\n# for license information.\n\nimport sys\n\nfrom ptvsd._local import run_module, run_file\n\n\n# TODO: not needed?\nDONT_DEBUG = []\n\nLOCALHOST = 'localhost'\n\nRUNNERS = {\n 'module': run_module, # python -m spam\n 'script': run_file, # python spam.py\n 'code': run_file, # python -c 'print(\"spam\")'\n None: run_file, # catchall\n}\n\n\ndef debug(filename, port_num, debug_id, debug_options, run_as,\n _runners=RUNNERS, _extra=None, *args, **kwargs):\n # TODO: docstring\n if _extra is None:\n _extra = sys.argv[1:]\n address = (LOCALHOST, port_num)\n try:\n run = _runners[run_as]\n except KeyError:\n # TODO: fail?\n run = _runners[None]\n if _extra:\n args = _extra + list(args)\n kwargs.setdefault('singlesession', True)\n run(address, filename, *args, **kwargs)\n", "path": "ptvsd/debugger.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See LICENSE in the project root\n# for license information.\n\nimport sys\n\nfrom ptvsd._local import run_module, run_file, run_main\n\n\n# TODO: not needed?\nDONT_DEBUG = []\n\nLOCALHOST = 'localhost'\n\nRUNNERS = {\n 'module': run_module, # python -m spam\n 'script': run_file, # python spam.py\n 'code': run_file, # python -c 'print(\"spam\")'\n None: run_file, # catchall\n}\n\n\ndef debug(filename, port_num, debug_id, debug_options, run_as,\n _runners=RUNNERS, _extra=None, *args, **kwargs):\n # TODO: docstring\n if _extra is None:\n _extra = sys.argv[1:]\n address = (LOCALHOST, port_num)\n try:\n run = _runners[run_as]\n except KeyError:\n # TODO: fail?\n run = _runners[None]\n if _extra:\n args = _extra + list(args)\n kwargs.setdefault('singlesession', True)\n run(address, filename, *args, **kwargs)\n\n\ndef run(filename, port_num, run_as,\n *args, **kwargs):\n address = (LOCALHOST, port_num)\n run_main(address, filename, run_as, *args, **kwargs)\n", "path": "ptvsd/debugger.py"}]}
677
183
gh_patches_debug_1811
rasdani/github-patches
git_diff
iterative__dvc-2364
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- status: change nothing to reproduce message If I use DVC only to version data/models and don't care about pipelines, this message: `Pipelines are up to date. Nothing to reproduce.` looks really strange. Let's change it to something more generic: `Data and pipelines are up to date.` or something similar --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `dvc/command/status.py` Content: ``` 1 from __future__ import unicode_literals 2 3 import logging 4 5 from dvc.command.data_sync import CmdDataBase 6 from dvc.utils.compat import str 7 8 9 logger = logging.getLogger(__name__) 10 11 12 class CmdDataStatus(CmdDataBase): 13 STATUS_LEN = 20 14 STATUS_INDENT = "\t" 15 UP_TO_DATE_MSG = "Pipelines are up to date. Nothing to reproduce." 16 17 def _normalize(self, s): 18 s += ":" 19 assert len(s) < self.STATUS_LEN 20 return s + (self.STATUS_LEN - len(s)) * " " 21 22 def _show(self, status, indent=0): 23 ind = indent * self.STATUS_INDENT 24 25 if isinstance(status, str): 26 logger.info("{}{}".format(ind, status)) 27 return 28 29 if isinstance(status, list): 30 for entry in status: 31 self._show(entry, indent) 32 return 33 34 assert isinstance(status, dict) 35 36 for key, value in status.items(): 37 if isinstance(value, str): 38 logger.info("{}{}{}".format(ind, self._normalize(value), key)) 39 elif value: 40 logger.info("{}{}:".format(ind, key)) 41 self._show(value, indent + 1) 42 43 def run(self): 44 indent = 1 if self.args.cloud else 0 45 try: 46 st = self.repo.status( 47 targets=self.args.targets, 48 jobs=self.args.jobs, 49 cloud=self.args.cloud, 50 remote=self.args.remote, 51 all_branches=self.args.all_branches, 52 all_tags=self.args.all_tags, 53 with_deps=self.args.with_deps, 54 ) 55 if st: 56 if self.args.quiet: 57 return 1 58 else: 59 self._show(st, indent) 60 else: 61 logger.info(self.UP_TO_DATE_MSG) 62 63 except Exception: 64 logger.exception("failed to obtain data status") 65 return 1 66 return 0 67 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/dvc/command/status.py b/dvc/command/status.py --- a/dvc/command/status.py +++ b/dvc/command/status.py @@ -12,7 +12,7 @@ class CmdDataStatus(CmdDataBase): STATUS_LEN = 20 STATUS_INDENT = "\t" - UP_TO_DATE_MSG = "Pipelines are up to date. Nothing to reproduce." + UP_TO_DATE_MSG = "Data and pipelines are up to date." def _normalize(self, s): s += ":"
{"golden_diff": "diff --git a/dvc/command/status.py b/dvc/command/status.py\n--- a/dvc/command/status.py\n+++ b/dvc/command/status.py\n@@ -12,7 +12,7 @@\n class CmdDataStatus(CmdDataBase):\n STATUS_LEN = 20\n STATUS_INDENT = \"\\t\"\n- UP_TO_DATE_MSG = \"Pipelines are up to date. Nothing to reproduce.\"\n+ UP_TO_DATE_MSG = \"Data and pipelines are up to date.\"\n \n def _normalize(self, s):\n s += \":\"\n", "issue": "status: change nothing to reproduce message\nIf I use DVC only to version data/models and don't care about pipelines, this message:\r\n\r\n`Pipelines are up to date. Nothing to reproduce.` \r\n\r\nlooks really strange.\r\n\r\nLet's change it to something more generic:\r\n\r\n`Data and pipelines are up to date.` \r\n\r\nor something similar\r\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport logging\n\nfrom dvc.command.data_sync import CmdDataBase\nfrom dvc.utils.compat import str\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass CmdDataStatus(CmdDataBase):\n STATUS_LEN = 20\n STATUS_INDENT = \"\\t\"\n UP_TO_DATE_MSG = \"Pipelines are up to date. Nothing to reproduce.\"\n\n def _normalize(self, s):\n s += \":\"\n assert len(s) < self.STATUS_LEN\n return s + (self.STATUS_LEN - len(s)) * \" \"\n\n def _show(self, status, indent=0):\n ind = indent * self.STATUS_INDENT\n\n if isinstance(status, str):\n logger.info(\"{}{}\".format(ind, status))\n return\n\n if isinstance(status, list):\n for entry in status:\n self._show(entry, indent)\n return\n\n assert isinstance(status, dict)\n\n for key, value in status.items():\n if isinstance(value, str):\n logger.info(\"{}{}{}\".format(ind, self._normalize(value), key))\n elif value:\n logger.info(\"{}{}:\".format(ind, key))\n self._show(value, indent + 1)\n\n def run(self):\n indent = 1 if self.args.cloud else 0\n try:\n st = self.repo.status(\n targets=self.args.targets,\n jobs=self.args.jobs,\n cloud=self.args.cloud,\n remote=self.args.remote,\n all_branches=self.args.all_branches,\n all_tags=self.args.all_tags,\n with_deps=self.args.with_deps,\n )\n if st:\n if self.args.quiet:\n return 1\n else:\n self._show(st, indent)\n else:\n logger.info(self.UP_TO_DATE_MSG)\n\n except Exception:\n logger.exception(\"failed to obtain data status\")\n return 1\n return 0\n", "path": "dvc/command/status.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nimport logging\n\nfrom dvc.command.data_sync import CmdDataBase\nfrom dvc.utils.compat import str\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass CmdDataStatus(CmdDataBase):\n STATUS_LEN = 20\n STATUS_INDENT = \"\\t\"\n UP_TO_DATE_MSG = \"Data and pipelines are up to date.\"\n\n def _normalize(self, s):\n s += \":\"\n assert len(s) < self.STATUS_LEN\n return s + (self.STATUS_LEN - len(s)) * \" \"\n\n def _show(self, status, indent=0):\n ind = indent * self.STATUS_INDENT\n\n if isinstance(status, str):\n logger.info(\"{}{}\".format(ind, status))\n return\n\n if isinstance(status, list):\n for entry in status:\n self._show(entry, indent)\n return\n\n assert isinstance(status, dict)\n\n for key, value in status.items():\n if isinstance(value, str):\n logger.info(\"{}{}{}\".format(ind, self._normalize(value), key))\n elif value:\n logger.info(\"{}{}:\".format(ind, key))\n self._show(value, indent + 1)\n\n def run(self):\n indent = 1 if self.args.cloud else 0\n try:\n st = self.repo.status(\n targets=self.args.targets,\n jobs=self.args.jobs,\n cloud=self.args.cloud,\n remote=self.args.remote,\n all_branches=self.args.all_branches,\n all_tags=self.args.all_tags,\n with_deps=self.args.with_deps,\n )\n if st:\n if self.args.quiet:\n return 1\n else:\n self._show(st, indent)\n else:\n logger.info(self.UP_TO_DATE_MSG)\n\n except Exception:\n logger.exception(\"failed to obtain data status\")\n return 1\n return 0\n", "path": "dvc/command/status.py"}]}
857
117
gh_patches_debug_16628
rasdani/github-patches
git_diff
jazzband__pip-tools-595
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- README broken on PyPI (must be reStructuredText) The [package description](https://pypi.python.org/pypi/pip-tools/) on PyPI is unreadable since PyPI expects the README in [reStructuredText](http://www.sphinx-doc.org/en/stable/rest.html) file format and we use MarkDown. Solution A: Convert to reST --------------------- 1. Rename the current `README.md` to `README.rst` 1. Replace the markdown of the badges and the code samples ([example](https://github.com/Organice/djangocms-maps/blob/master/README.rst)) 1. Add a `long_description=read_file('README.rst')` line to `setup.py` ([example](https://github.com/Organice/djangocms-maps/blob/master/setup.py#L50)) Solution B: Process before Upload ------------------- 1. Integrate [pypandoc](https://pypi.python.org/pypi/pypandoc) in `setup.py` ([example](https://github.com/jrief/djangocms-cascade/blob/master/setup.py#L7-L14)) 1. Add a `long_description=convert('README.md', 'rst')` line to `setup.py` ([example](https://github.com/jrief/djangocms-cascade/blob/master/setup.py#L49)) ------------ Both solutions above will render a nicely formatted, HTML-styled package description on PyPI. Quality Assurance -------------- Optionally, you may check your README with [checkdocs](https://github.com/Organice/djangocms-maps/blob/master/tox.ini#L13-L19) before uploading the package to PyPI, because sometimes the reST-to-HTML conversion that PyPI uses fails -- and renders a still hard-to-read, broken, unformatted package description. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 """ 2 pip-tools keeps your pinned dependencies fresh. 3 """ 4 from setuptools import find_packages, setup 5 6 setup( 7 name='pip-tools', 8 use_scm_version=True, 9 url='https://github.com/jazzband/pip-tools/', 10 license='BSD', 11 author='Vincent Driessen', 12 author_email='[email protected]', 13 description=__doc__, 14 packages=find_packages(exclude=['tests']), 15 setup_requires=['setuptools_scm'], 16 install_requires=[ 17 'click>=6', 18 'first', 19 'six', 20 'setuptools' 21 ], 22 extras_require={ 23 ':python_version < "3.0"': ['contextlib2'] 24 }, 25 zip_safe=False, 26 entry_points={ 27 'console_scripts': [ 28 'pip-compile = piptools.scripts.compile:cli', 29 'pip-sync = piptools.scripts.sync:cli', 30 ], 31 }, 32 platforms='any', 33 classifiers=[ 34 'Development Status :: 5 - Production/Stable', 35 'Intended Audience :: Developers', 36 'Intended Audience :: System Administrators', 37 'License :: OSI Approved :: BSD License', 38 'Operating System :: OS Independent', 39 'Programming Language :: Python', 40 'Programming Language :: Python :: 2', 41 'Programming Language :: Python :: 2.7', 42 'Programming Language :: Python :: 3', 43 'Programming Language :: Python :: 3.4', 44 'Programming Language :: Python :: 3.5', 45 'Programming Language :: Python :: 3.6', 46 'Topic :: System :: Systems Administration', 47 ] 48 ) 49 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -1,8 +1,14 @@ """ pip-tools keeps your pinned dependencies fresh. """ +from os.path import abspath, dirname, join from setuptools import find_packages, setup +def read_file(filename): + """Read the contents of a file located relative to setup.py""" + with open(join(abspath(dirname(__file__)), filename)) as thefile: + return thefile.read() + setup( name='pip-tools', use_scm_version=True, @@ -11,6 +17,7 @@ author='Vincent Driessen', author_email='[email protected]', description=__doc__, + long_description=read_file('README.rst'), packages=find_packages(exclude=['tests']), setup_requires=['setuptools_scm'], install_requires=[
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,8 +1,14 @@\n \"\"\"\n pip-tools keeps your pinned dependencies fresh.\n \"\"\"\n+from os.path import abspath, dirname, join\n from setuptools import find_packages, setup\n \n+def read_file(filename):\n+ \"\"\"Read the contents of a file located relative to setup.py\"\"\"\n+ with open(join(abspath(dirname(__file__)), filename)) as thefile:\n+ return thefile.read()\n+\n setup(\n name='pip-tools',\n use_scm_version=True,\n@@ -11,6 +17,7 @@\n author='Vincent Driessen',\n author_email='[email protected]',\n description=__doc__,\n+ long_description=read_file('README.rst'),\n packages=find_packages(exclude=['tests']),\n setup_requires=['setuptools_scm'],\n install_requires=[\n", "issue": "README broken on PyPI (must be reStructuredText)\nThe [package description](https://pypi.python.org/pypi/pip-tools/) on PyPI is unreadable since PyPI expects the README in [reStructuredText](http://www.sphinx-doc.org/en/stable/rest.html) file format and we use MarkDown.\r\n\r\nSolution A: Convert to reST\r\n---------------------\r\n\r\n1. Rename the current `README.md` to `README.rst`\r\n1. Replace the markdown of the badges and the code samples ([example](https://github.com/Organice/djangocms-maps/blob/master/README.rst))\r\n1. Add a `long_description=read_file('README.rst')` line to `setup.py` ([example](https://github.com/Organice/djangocms-maps/blob/master/setup.py#L50))\r\n\r\nSolution B: Process before Upload\r\n-------------------\r\n\r\n1. Integrate [pypandoc](https://pypi.python.org/pypi/pypandoc) in `setup.py` ([example](https://github.com/jrief/djangocms-cascade/blob/master/setup.py#L7-L14))\r\n1. Add a `long_description=convert('README.md', 'rst')` line to `setup.py` ([example](https://github.com/jrief/djangocms-cascade/blob/master/setup.py#L49))\r\n\r\n------------\r\n\r\nBoth solutions above will render a nicely formatted, HTML-styled package description on PyPI.\r\n\r\nQuality Assurance\r\n--------------\r\n\r\nOptionally, you may check your README with [checkdocs](https://github.com/Organice/djangocms-maps/blob/master/tox.ini#L13-L19) before uploading the package to PyPI, because sometimes the reST-to-HTML conversion that PyPI uses fails -- and renders a still hard-to-read, broken, unformatted package description.\n", "before_files": [{"content": "\"\"\"\npip-tools keeps your pinned dependencies fresh.\n\"\"\"\nfrom setuptools import find_packages, setup\n\nsetup(\n name='pip-tools',\n use_scm_version=True,\n url='https://github.com/jazzband/pip-tools/',\n license='BSD',\n author='Vincent Driessen',\n author_email='[email protected]',\n description=__doc__,\n packages=find_packages(exclude=['tests']),\n setup_requires=['setuptools_scm'],\n install_requires=[\n 'click>=6',\n 'first',\n 'six',\n 'setuptools'\n ],\n extras_require={\n ':python_version < \"3.0\"': ['contextlib2']\n },\n zip_safe=False,\n entry_points={\n 'console_scripts': [\n 'pip-compile = piptools.scripts.compile:cli',\n 'pip-sync = piptools.scripts.sync:cli',\n ],\n },\n platforms='any',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: System :: Systems Administration',\n ]\n)\n", "path": "setup.py"}], "after_files": [{"content": "\"\"\"\npip-tools keeps your pinned dependencies fresh.\n\"\"\"\nfrom os.path import abspath, dirname, join\nfrom setuptools import find_packages, setup\n\ndef read_file(filename):\n \"\"\"Read the contents of a file located relative to setup.py\"\"\"\n with open(join(abspath(dirname(__file__)), filename)) as thefile:\n return thefile.read()\n\nsetup(\n name='pip-tools',\n use_scm_version=True,\n url='https://github.com/jazzband/pip-tools/',\n license='BSD',\n author='Vincent Driessen',\n author_email='[email protected]',\n description=__doc__,\n long_description=read_file('README.rst'),\n packages=find_packages(exclude=['tests']),\n setup_requires=['setuptools_scm'],\n install_requires=[\n 'click>=6',\n 'first',\n 'six',\n 'setuptools'\n ],\n extras_require={\n ':python_version < \"3.0\"': ['contextlib2']\n },\n zip_safe=False,\n entry_points={\n 'console_scripts': [\n 'pip-compile = piptools.scripts.compile:cli',\n 'pip-sync = piptools.scripts.sync:cli',\n ],\n },\n platforms='any',\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: System :: Systems Administration',\n ]\n)\n", "path": "setup.py"}]}
1,066
192
gh_patches_debug_21303
rasdani/github-patches
git_diff
nltk__nltk-2819
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- WordNetLemmatizer in nltk.stem module What's the parameter of WordNetLemmatizer.lemmatize() in nltk.stem module? Turn to the document, what are the candidate value of the parameter **'pos'**? ![image](https://user-images.githubusercontent.com/62245023/134791412-1ff85ba5-5eb9-4859-a3f1-3b48bdd5a6fa.png) The default value is 'Noun'. But use the function pos_tag() to get the pos of the word, the value appears to come from several options. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `nltk/stem/wordnet.py` Content: ``` 1 # Natural Language Toolkit: WordNet stemmer interface 2 # 3 # Copyright (C) 2001-2021 NLTK Project 4 # Author: Steven Bird <[email protected]> 5 # Edward Loper <[email protected]> 6 # URL: <http://nltk.org/> 7 # For license information, see LICENSE.TXT 8 9 from nltk.corpus import wordnet 10 from nltk.corpus.reader.wordnet import NOUN 11 12 13 class WordNetLemmatizer: 14 """ 15 WordNet Lemmatizer 16 17 Lemmatize using WordNet's built-in morphy function. 18 Returns the input word unchanged if it cannot be found in WordNet. 19 20 >>> from nltk.stem import WordNetLemmatizer 21 >>> wnl = WordNetLemmatizer() 22 >>> print(wnl.lemmatize('dogs')) 23 dog 24 >>> print(wnl.lemmatize('churches')) 25 church 26 >>> print(wnl.lemmatize('aardwolves')) 27 aardwolf 28 >>> print(wnl.lemmatize('abaci')) 29 abacus 30 >>> print(wnl.lemmatize('hardrock')) 31 hardrock 32 """ 33 34 def __init__(self): 35 pass 36 37 def lemmatize(self, word, pos=NOUN): 38 lemmas = wordnet._morphy(word, pos) 39 return min(lemmas, key=len) if lemmas else word 40 41 def __repr__(self): 42 return "<WordNetLemmatizer>" 43 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/nltk/stem/wordnet.py b/nltk/stem/wordnet.py --- a/nltk/stem/wordnet.py +++ b/nltk/stem/wordnet.py @@ -6,8 +6,7 @@ # URL: <http://nltk.org/> # For license information, see LICENSE.TXT -from nltk.corpus import wordnet -from nltk.corpus.reader.wordnet import NOUN +from nltk.corpus import wordnet as wn class WordNetLemmatizer: @@ -31,11 +30,19 @@ hardrock """ - def __init__(self): - pass - - def lemmatize(self, word, pos=NOUN): - lemmas = wordnet._morphy(word, pos) + def lemmatize(self, word: str, pos: str = wn.NOUN) -> str: + """Lemmatize `word` using WordNet's built-in morphy function. + Returns the input word unchanged if it cannot be found in WordNet. + + :param word: The input word to lemmatize. + :type word: str + :param pos: The Part Of Speech tag. Valid options are `"n"` for nouns, + `"v"` for verbs, `"a"` for adjectives, `"r"` for adverbs and `"s"` + for satellite adjectives. + :param pos: str + :return: The lemma of `word`, for the given `pos`. + """ + lemmas = wn._morphy(word, pos) return min(lemmas, key=len) if lemmas else word def __repr__(self):
{"golden_diff": "diff --git a/nltk/stem/wordnet.py b/nltk/stem/wordnet.py\n--- a/nltk/stem/wordnet.py\n+++ b/nltk/stem/wordnet.py\n@@ -6,8 +6,7 @@\n # URL: <http://nltk.org/>\n # For license information, see LICENSE.TXT\n \n-from nltk.corpus import wordnet\n-from nltk.corpus.reader.wordnet import NOUN\n+from nltk.corpus import wordnet as wn\n \n \n class WordNetLemmatizer:\n@@ -31,11 +30,19 @@\n hardrock\n \"\"\"\n \n- def __init__(self):\n- pass\n-\n- def lemmatize(self, word, pos=NOUN):\n- lemmas = wordnet._morphy(word, pos)\n+ def lemmatize(self, word: str, pos: str = wn.NOUN) -> str:\n+ \"\"\"Lemmatize `word` using WordNet's built-in morphy function.\n+ Returns the input word unchanged if it cannot be found in WordNet.\n+\n+ :param word: The input word to lemmatize.\n+ :type word: str\n+ :param pos: The Part Of Speech tag. Valid options are `\"n\"` for nouns,\n+ `\"v\"` for verbs, `\"a\"` for adjectives, `\"r\"` for adverbs and `\"s\"`\n+ for satellite adjectives.\n+ :param pos: str\n+ :return: The lemma of `word`, for the given `pos`.\n+ \"\"\"\n+ lemmas = wn._morphy(word, pos)\n return min(lemmas, key=len) if lemmas else word\n \n def __repr__(self):\n", "issue": "WordNetLemmatizer in nltk.stem module\nWhat's the parameter of WordNetLemmatizer.lemmatize() in nltk.stem module?\r\nTurn to the document, what are the candidate value of the parameter **'pos'**?\r\n![image](https://user-images.githubusercontent.com/62245023/134791412-1ff85ba5-5eb9-4859-a3f1-3b48bdd5a6fa.png)\r\nThe default value is 'Noun'. But use the function pos_tag() to get the pos of the word, the value appears to come from several options.\n", "before_files": [{"content": "# Natural Language Toolkit: WordNet stemmer interface\n#\n# Copyright (C) 2001-2021 NLTK Project\n# Author: Steven Bird <[email protected]>\n# Edward Loper <[email protected]>\n# URL: <http://nltk.org/>\n# For license information, see LICENSE.TXT\n\nfrom nltk.corpus import wordnet\nfrom nltk.corpus.reader.wordnet import NOUN\n\n\nclass WordNetLemmatizer:\n \"\"\"\n WordNet Lemmatizer\n\n Lemmatize using WordNet's built-in morphy function.\n Returns the input word unchanged if it cannot be found in WordNet.\n\n >>> from nltk.stem import WordNetLemmatizer\n >>> wnl = WordNetLemmatizer()\n >>> print(wnl.lemmatize('dogs'))\n dog\n >>> print(wnl.lemmatize('churches'))\n church\n >>> print(wnl.lemmatize('aardwolves'))\n aardwolf\n >>> print(wnl.lemmatize('abaci'))\n abacus\n >>> print(wnl.lemmatize('hardrock'))\n hardrock\n \"\"\"\n\n def __init__(self):\n pass\n\n def lemmatize(self, word, pos=NOUN):\n lemmas = wordnet._morphy(word, pos)\n return min(lemmas, key=len) if lemmas else word\n\n def __repr__(self):\n return \"<WordNetLemmatizer>\"\n", "path": "nltk/stem/wordnet.py"}], "after_files": [{"content": "# Natural Language Toolkit: WordNet stemmer interface\n#\n# Copyright (C) 2001-2021 NLTK Project\n# Author: Steven Bird <[email protected]>\n# Edward Loper <[email protected]>\n# URL: <http://nltk.org/>\n# For license information, see LICENSE.TXT\n\nfrom nltk.corpus import wordnet as wn\n\n\nclass WordNetLemmatizer:\n \"\"\"\n WordNet Lemmatizer\n\n Lemmatize using WordNet's built-in morphy function.\n Returns the input word unchanged if it cannot be found in WordNet.\n\n >>> from nltk.stem import WordNetLemmatizer\n >>> wnl = WordNetLemmatizer()\n >>> print(wnl.lemmatize('dogs'))\n dog\n >>> print(wnl.lemmatize('churches'))\n church\n >>> print(wnl.lemmatize('aardwolves'))\n aardwolf\n >>> print(wnl.lemmatize('abaci'))\n abacus\n >>> print(wnl.lemmatize('hardrock'))\n hardrock\n \"\"\"\n\n def lemmatize(self, word: str, pos: str = wn.NOUN) -> str:\n \"\"\"Lemmatize `word` using WordNet's built-in morphy function.\n Returns the input word unchanged if it cannot be found in WordNet.\n\n :param word: The input word to lemmatize.\n :type word: str\n :param pos: The Part Of Speech tag. Valid options are `\"n\"` for nouns,\n `\"v\"` for verbs, `\"a\"` for adjectives, `\"r\"` for adverbs and `\"s\"`\n for satellite adjectives.\n :param pos: str\n :return: The lemma of `word`, for the given `pos`.\n \"\"\"\n lemmas = wn._morphy(word, pos)\n return min(lemmas, key=len) if lemmas else word\n\n def __repr__(self):\n return \"<WordNetLemmatizer>\"\n", "path": "nltk/stem/wordnet.py"}]}
817
376
gh_patches_debug_21029
rasdani/github-patches
git_diff
PlasmaPy__PlasmaPy-655
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Create classes to represent ionization state distributions My plan for this PR is to create classes to represent the ionization state distributions of one or more elements. I am going to add in a bunch of dunder methods like `__getitem__` and maybe `__call__` to help making access to the ionization states more straightfoward and intuitive. Any suggestions on the naming convention will be helpful so that we can maximize readability. Eventually we'll need a way to calculate ionization state distributions assuming collisional ionization equilibrium, but that will be for a different PR. The purpose of this PR is to set up how to store and access the ionization distributions. This will be discussed in #352. This will address some of #352. It will probably be best to wait until after the `0.1.0` release to merge this, since this PR is only for a partial implementation anyway. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `plasmapy/examples/plot_dispersion_function.py` Content: ``` 1 """ 2 The plasma dispersion function 3 ============================== 4 5 Let's import some basics (and `PlasmaPy`!) 6 """ 7 8 9 import numpy as np 10 import matplotlib.pyplot as plt 11 import plasmapy 12 13 14 ####################################################################### 15 help(plasmapy.mathematics.plasma_dispersion_func) 16 17 18 ####################################################################### 19 # We'll now make some sample data to visualize the dispersion function: 20 21 x = np.linspace(-1, 1, 1000) 22 X, Y = np.meshgrid(x, x) 23 Z = X + 1j * Y 24 print(Z.shape) 25 26 ####################################################################### 27 # Before we start plotting, let's make a visualization function first: 28 29 30 def plot_complex(X, Y, Z, N=50): 31 fig, (real_axis, imag_axis) = plt.subplots(1, 2) 32 real_axis.contourf(X, Y, Z.real, N) 33 imag_axis.contourf(X, Y, Z.imag, N) 34 real_axis.set_title("Real values") 35 imag_axis.set_title("Imaginary values") 36 for ax in [real_axis, imag_axis]: 37 ax.set_xlabel("Real values") 38 ax.set_ylabel("Imaginary values") 39 fig.tight_layout() 40 41 42 plot_complex(X, Y, Z) 43 44 ####################################################################### 45 # We can now apply our visualization function to our simple 46 47 F = plasmapy.mathematics.plasma_dispersion_func(Z) 48 plot_complex(X, Y, F) 49 50 51 ####################################################################### 52 # So this is going to be a hack and I'm not 100% sure the dispersion function 53 # is quite what I think it is, but let's find the area where the dispersion 54 # function has a lesser than zero real part because I think it may be important 55 # (brb reading Fried and Conte): 56 57 plot_complex(X, Y, F.real < 0) 58 59 60 ####################################################################### 61 # We can also visualize the derivative: 62 63 F = plasmapy.mathematics.plasma_dispersion_func_deriv(Z) 64 plot_complex(X, Y, F) 65 66 ####################################################################### 67 # Plotting the same function on a larger area: 68 69 x = np.linspace(-2, 2, 2000) 70 X, Y = np.meshgrid(x, x) 71 Z = X + 1j * Y 72 print(Z.shape) 73 74 ####################################################################### 75 76 F = plasmapy.mathematics.plasma_dispersion_func(Z) 77 plot_complex(X, Y, F, 100) 78 79 ####################################################################### 80 # Now we examine the derivative of the dispersion function as a function 81 # of the phase velocity of an electromagnetic wave propagating through 82 # the plasma. This is recreating figure 5.1 in: 83 # J. Sheffield, D. Froula, S. H. Glenzer, and N. C. Luhmann Jr, 84 # Plasma scattering of electromagnetic radiation: theory and measurement 85 # techniques. Chapter 5 Pg 106 (Academic press, 2010). 86 87 xs = np.linspace(0, 4, 100) 88 ws = (-1 / 2) * plasmapy.mathematics.plasma_dispersion_func_deriv(xs) 89 wRe = np.real(ws) 90 wIm = np.imag(ws) 91 92 plt.plot(xs, wRe, label="Re") 93 plt.plot(xs, wIm, label="Im") 94 plt.axis([0, 4, -0.3, 1]) 95 plt.legend(loc='upper right', 96 frameon=False, 97 labelspacing=0.001, 98 fontsize=14, 99 borderaxespad=0.1) 100 plt.show() ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/plasmapy/examples/plot_dispersion_function.py b/plasmapy/examples/plot_dispersion_function.py --- a/plasmapy/examples/plot_dispersion_function.py +++ b/plasmapy/examples/plot_dispersion_function.py @@ -10,7 +10,6 @@ import matplotlib.pyplot as plt import plasmapy - ####################################################################### help(plasmapy.mathematics.plasma_dispersion_func) @@ -41,9 +40,10 @@ plot_complex(X, Y, Z) -####################################################################### -# We can now apply our visualization function to our simple +############################################################################### +# We can now apply our visualization function to our simple dispersion relation +# sphinx_gallery_thumbnail_number = 2 F = plasmapy.mathematics.plasma_dispersion_func(Z) plot_complex(X, Y, F) @@ -97,4 +97,4 @@ labelspacing=0.001, fontsize=14, borderaxespad=0.1) -plt.show() \ No newline at end of file +plt.show()
{"golden_diff": "diff --git a/plasmapy/examples/plot_dispersion_function.py b/plasmapy/examples/plot_dispersion_function.py\n--- a/plasmapy/examples/plot_dispersion_function.py\n+++ b/plasmapy/examples/plot_dispersion_function.py\n@@ -10,7 +10,6 @@\n import matplotlib.pyplot as plt\n import plasmapy\n \n-\n #######################################################################\n help(plasmapy.mathematics.plasma_dispersion_func)\n \n@@ -41,9 +40,10 @@\n \n plot_complex(X, Y, Z)\n \n-#######################################################################\n-# We can now apply our visualization function to our simple\n+###############################################################################\n+# We can now apply our visualization function to our simple dispersion relation\n \n+# sphinx_gallery_thumbnail_number = 2\n F = plasmapy.mathematics.plasma_dispersion_func(Z)\n plot_complex(X, Y, F)\n \n@@ -97,4 +97,4 @@\n labelspacing=0.001,\n fontsize=14,\n borderaxespad=0.1)\n-plt.show()\n\\ No newline at end of file\n+plt.show()\n", "issue": "Create classes to represent ionization state distributions\nMy plan for this PR is to create classes to represent the ionization state distributions of one or more elements. I am going to add in a bunch of dunder methods like `__getitem__` and maybe `__call__` to help making access to the ionization states more straightfoward and intuitive. Any suggestions on the naming convention will be helpful so that we can maximize readability. \r\n\r\nEventually we'll need a way to calculate ionization state distributions assuming collisional ionization equilibrium, but that will be for a different PR. The purpose of this PR is to set up how to store and access the ionization distributions. This will be discussed in #352.\r\n\r\nThis will address some of #352. It will probably be best to wait until after the `0.1.0` release to merge this, since this PR is only for a partial implementation anyway.\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nThe plasma dispersion function\n==============================\n\nLet's import some basics (and `PlasmaPy`!)\n\"\"\"\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport plasmapy\n\n\n#######################################################################\nhelp(plasmapy.mathematics.plasma_dispersion_func)\n\n\n#######################################################################\n# We'll now make some sample data to visualize the dispersion function:\n\nx = np.linspace(-1, 1, 1000)\nX, Y = np.meshgrid(x, x)\nZ = X + 1j * Y\nprint(Z.shape)\n\n#######################################################################\n# Before we start plotting, let's make a visualization function first:\n\n\ndef plot_complex(X, Y, Z, N=50):\n fig, (real_axis, imag_axis) = plt.subplots(1, 2)\n real_axis.contourf(X, Y, Z.real, N)\n imag_axis.contourf(X, Y, Z.imag, N)\n real_axis.set_title(\"Real values\")\n imag_axis.set_title(\"Imaginary values\")\n for ax in [real_axis, imag_axis]:\n ax.set_xlabel(\"Real values\")\n ax.set_ylabel(\"Imaginary values\")\n fig.tight_layout()\n\n\nplot_complex(X, Y, Z)\n\n#######################################################################\n# We can now apply our visualization function to our simple\n\nF = plasmapy.mathematics.plasma_dispersion_func(Z)\nplot_complex(X, Y, F)\n\n\n#######################################################################\n# So this is going to be a hack and I'm not 100% sure the dispersion function\n# is quite what I think it is, but let's find the area where the dispersion\n# function has a lesser than zero real part because I think it may be important\n# (brb reading Fried and Conte):\n\nplot_complex(X, Y, F.real < 0)\n\n\n#######################################################################\n# We can also visualize the derivative:\n\nF = plasmapy.mathematics.plasma_dispersion_func_deriv(Z)\nplot_complex(X, Y, F)\n\n#######################################################################\n# Plotting the same function on a larger area:\n\nx = np.linspace(-2, 2, 2000)\nX, Y = np.meshgrid(x, x)\nZ = X + 1j * Y\nprint(Z.shape)\n\n#######################################################################\n\nF = plasmapy.mathematics.plasma_dispersion_func(Z)\nplot_complex(X, Y, F, 100)\n\n#######################################################################\n# Now we examine the derivative of the dispersion function as a function\n# of the phase velocity of an electromagnetic wave propagating through\n# the plasma. This is recreating figure 5.1 in:\n# J. Sheffield, D. Froula, S. H. Glenzer, and N. C. Luhmann Jr,\n# Plasma scattering of electromagnetic radiation: theory and measurement\n# techniques. Chapter 5 Pg 106 (Academic press, 2010).\n\nxs = np.linspace(0, 4, 100)\nws = (-1 / 2) * plasmapy.mathematics.plasma_dispersion_func_deriv(xs)\nwRe = np.real(ws)\nwIm = np.imag(ws)\n\nplt.plot(xs, wRe, label=\"Re\")\nplt.plot(xs, wIm, label=\"Im\")\nplt.axis([0, 4, -0.3, 1])\nplt.legend(loc='upper right',\n frameon=False,\n labelspacing=0.001,\n fontsize=14,\n borderaxespad=0.1)\nplt.show()", "path": "plasmapy/examples/plot_dispersion_function.py"}], "after_files": [{"content": "\"\"\"\nThe plasma dispersion function\n==============================\n\nLet's import some basics (and `PlasmaPy`!)\n\"\"\"\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport plasmapy\n\n#######################################################################\nhelp(plasmapy.mathematics.plasma_dispersion_func)\n\n\n#######################################################################\n# We'll now make some sample data to visualize the dispersion function:\n\nx = np.linspace(-1, 1, 1000)\nX, Y = np.meshgrid(x, x)\nZ = X + 1j * Y\nprint(Z.shape)\n\n#######################################################################\n# Before we start plotting, let's make a visualization function first:\n\n\ndef plot_complex(X, Y, Z, N=50):\n fig, (real_axis, imag_axis) = plt.subplots(1, 2)\n real_axis.contourf(X, Y, Z.real, N)\n imag_axis.contourf(X, Y, Z.imag, N)\n real_axis.set_title(\"Real values\")\n imag_axis.set_title(\"Imaginary values\")\n for ax in [real_axis, imag_axis]:\n ax.set_xlabel(\"Real values\")\n ax.set_ylabel(\"Imaginary values\")\n fig.tight_layout()\n\n\nplot_complex(X, Y, Z)\n\n###############################################################################\n# We can now apply our visualization function to our simple dispersion relation\n\n# sphinx_gallery_thumbnail_number = 2\nF = plasmapy.mathematics.plasma_dispersion_func(Z)\nplot_complex(X, Y, F)\n\n\n#######################################################################\n# So this is going to be a hack and I'm not 100% sure the dispersion function\n# is quite what I think it is, but let's find the area where the dispersion\n# function has a lesser than zero real part because I think it may be important\n# (brb reading Fried and Conte):\n\nplot_complex(X, Y, F.real < 0)\n\n\n#######################################################################\n# We can also visualize the derivative:\n\nF = plasmapy.mathematics.plasma_dispersion_func_deriv(Z)\nplot_complex(X, Y, F)\n\n#######################################################################\n# Plotting the same function on a larger area:\n\nx = np.linspace(-2, 2, 2000)\nX, Y = np.meshgrid(x, x)\nZ = X + 1j * Y\nprint(Z.shape)\n\n#######################################################################\n\nF = plasmapy.mathematics.plasma_dispersion_func(Z)\nplot_complex(X, Y, F, 100)\n\n#######################################################################\n# Now we examine the derivative of the dispersion function as a function\n# of the phase velocity of an electromagnetic wave propagating through\n# the plasma. This is recreating figure 5.1 in:\n# J. Sheffield, D. Froula, S. H. Glenzer, and N. C. Luhmann Jr,\n# Plasma scattering of electromagnetic radiation: theory and measurement\n# techniques. Chapter 5 Pg 106 (Academic press, 2010).\n\nxs = np.linspace(0, 4, 100)\nws = (-1 / 2) * plasmapy.mathematics.plasma_dispersion_func_deriv(xs)\nwRe = np.real(ws)\nwIm = np.imag(ws)\n\nplt.plot(xs, wRe, label=\"Re\")\nplt.plot(xs, wIm, label=\"Im\")\nplt.axis([0, 4, -0.3, 1])\nplt.legend(loc='upper right',\n frameon=False,\n labelspacing=0.001,\n fontsize=14,\n borderaxespad=0.1)\nplt.show()\n", "path": "plasmapy/examples/plot_dispersion_function.py"}]}
1,404
239
gh_patches_debug_10872
rasdani/github-patches
git_diff
aws-cloudformation__cfn-lint-1887
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Exception processing E3037 for AWS::S3::Bucket.Transition.TransitionDate ``` $ cfn-lint --version cfn-lint 0.44.5 ``` The `TransitionDate` property is defined with `PrimitiveType: "Timestamp"`: ```yaml AWSTemplateFormatVersion: 2010-09-09 Resources: Bucket: Type: AWS::S3::Bucket Properties: LifecycleConfiguration: Rules: - Status: Enabled Transitions: - StorageClass: INTELLIGENT_TIERING TransitionDate: 2021-01-01T00:00:00.000Z ``` This is a valid template and can be successfully deployed, but `cfn-lint` fails with: ``` $ cfn-lint scratch.yml E0002 Unknown exception while processing rule E3037: Object of type datetime is not JSON serializable scratch.yml:1:1 ``` Running with `--debug` shows the exception is generated at https://github.com/aws-cloudformation/cfn-python-lint/blob/c7658511bd7066417682103f21f71983c67ea6d0/src/cfnlint/rules/resources/properties/ListDuplicates.py#L36 Quoting the TransitionDate value suppresses this error, e.g. `TransitionDate: "2021-01-01T00:00:00.000Z"` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/cfnlint/rules/resources/properties/ListDuplicates.py` Content: ``` 1 """ 2 Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 SPDX-License-Identifier: MIT-0 4 """ 5 import hashlib 6 import json 7 from cfnlint.rules import CloudFormationLintRule 8 from cfnlint.rules import RuleMatch 9 10 from cfnlint.helpers import RESOURCE_SPECS 11 12 13 class ListDuplicates(CloudFormationLintRule): 14 """Check if duplicates exist in a List""" 15 id = 'E3037' 16 shortdesc = 'Check if a list has duplicate values' 17 description = 'Certain lists don\'t support duplicate items. ' \ 18 'Check when duplicates are provided but not supported.' 19 source_url = 'https://github.com/aws-cloudformation/cfn-python-lint/blob/master/docs/cfn-resource-specification.md#allowedvalue' 20 tags = ['resources', 'property', 'list'] 21 22 def initialize(self, cfn): 23 """Initialize the rule""" 24 for resource_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes'): 25 self.resource_property_types.append(resource_type_spec) 26 for property_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes'): 27 self.resource_sub_property_types.append(property_type_spec) 28 29 def _check_duplicates(self, values, path, scenario=None): 30 """ Check for Duplicates """ 31 matches = [] 32 33 list_items = [] 34 if isinstance(values, list): 35 for index, value in enumerate(values): 36 value_hash = hashlib.sha1(json.dumps( 37 value, sort_keys=True).encode('utf-8')).hexdigest() 38 if value_hash in list_items: 39 if not scenario: 40 message = 'List has a duplicate value at {0}' 41 matches.append( 42 RuleMatch(path + [index], message.format('/'.join(map(str, path + [index]))))) 43 else: 44 scenario_text = ' and '.join( 45 ['condition "%s" is %s' % (k, v) for (k, v) in scenario.items()]) 46 message = 'List has a duplicate value at {0} when {1}' 47 matches.append(RuleMatch(path, message.format( 48 '/'.join(map(str, path)), scenario_text))) 49 50 list_items.append(value_hash) 51 52 return matches 53 54 def check_duplicates(self, values, path, cfn): 55 """ Check for duplicates """ 56 matches = [] 57 58 if isinstance(values, list): 59 matches.extend(self._check_duplicates(values, path)) 60 elif isinstance(values, dict): 61 props = cfn.get_object_without_conditions(values) 62 for prop in props: 63 matches.extend(self._check_duplicates( 64 prop.get('Object'), path, prop.get('Scenario'))) 65 66 return matches 67 68 def check(self, cfn, properties, value_specs, path): 69 """Check itself""" 70 matches = list() 71 for p_value, p_path in properties.items_safe(path[:]): 72 for prop in p_value: 73 if prop in value_specs: 74 property_type = value_specs.get(prop).get('Type') 75 duplicates_allowed = value_specs.get(prop).get('DuplicatesAllowed', True) 76 if property_type == 'List' and not duplicates_allowed: 77 matches.extend( 78 self.check_duplicates( 79 p_value[prop], p_path + [prop], cfn 80 ) 81 ) 82 83 return matches 84 85 def match_resource_sub_properties(self, properties, property_type, path, cfn): 86 """Match for sub properties""" 87 matches = list() 88 89 specs = RESOURCE_SPECS.get(cfn.regions[0]).get( 90 'PropertyTypes').get(property_type, {}).get('Properties', {}) 91 matches.extend(self.check(cfn, properties, specs, path)) 92 93 return matches 94 95 def match_resource_properties(self, properties, resource_type, path, cfn): 96 """Check CloudFormation Properties""" 97 matches = list() 98 99 specs = RESOURCE_SPECS.get(cfn.regions[0]).get( 100 'ResourceTypes').get(resource_type, {}).get('Properties', {}) 101 matches.extend(self.check(cfn, properties, specs, path)) 102 103 return matches 104 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/cfnlint/rules/resources/properties/ListDuplicates.py b/src/cfnlint/rules/resources/properties/ListDuplicates.py --- a/src/cfnlint/rules/resources/properties/ListDuplicates.py +++ b/src/cfnlint/rules/resources/properties/ListDuplicates.py @@ -34,7 +34,7 @@ if isinstance(values, list): for index, value in enumerate(values): value_hash = hashlib.sha1(json.dumps( - value, sort_keys=True).encode('utf-8')).hexdigest() + value, sort_keys=True, default=str).encode('utf-8')).hexdigest() if value_hash in list_items: if not scenario: message = 'List has a duplicate value at {0}'
{"golden_diff": "diff --git a/src/cfnlint/rules/resources/properties/ListDuplicates.py b/src/cfnlint/rules/resources/properties/ListDuplicates.py\n--- a/src/cfnlint/rules/resources/properties/ListDuplicates.py\n+++ b/src/cfnlint/rules/resources/properties/ListDuplicates.py\n@@ -34,7 +34,7 @@\n if isinstance(values, list):\n for index, value in enumerate(values):\n value_hash = hashlib.sha1(json.dumps(\n- value, sort_keys=True).encode('utf-8')).hexdigest()\n+ value, sort_keys=True, default=str).encode('utf-8')).hexdigest()\n if value_hash in list_items:\n if not scenario:\n message = 'List has a duplicate value at {0}'\n", "issue": "Exception processing E3037 for AWS::S3::Bucket.Transition.TransitionDate\n```\r\n$ cfn-lint --version\r\ncfn-lint 0.44.5\r\n```\r\n\r\nThe `TransitionDate` property is defined with `PrimitiveType: \"Timestamp\"`:\r\n\r\n```yaml\r\nAWSTemplateFormatVersion: 2010-09-09\r\n\r\nResources:\r\n Bucket:\r\n Type: AWS::S3::Bucket\r\n Properties:\r\n LifecycleConfiguration:\r\n Rules:\r\n - Status: Enabled\r\n Transitions:\r\n - StorageClass: INTELLIGENT_TIERING\r\n TransitionDate: 2021-01-01T00:00:00.000Z\r\n```\r\n\r\nThis is a valid template and can be successfully deployed, but `cfn-lint` fails with:\r\n\r\n```\r\n$ cfn-lint scratch.yml\r\nE0002 Unknown exception while processing rule E3037: Object of type datetime is not JSON serializable\r\nscratch.yml:1:1\r\n```\r\n\r\nRunning with `--debug` shows the exception is generated at https://github.com/aws-cloudformation/cfn-python-lint/blob/c7658511bd7066417682103f21f71983c67ea6d0/src/cfnlint/rules/resources/properties/ListDuplicates.py#L36\r\n\r\nQuoting the TransitionDate value suppresses this error, e.g. `TransitionDate: \"2021-01-01T00:00:00.000Z\"`\n", "before_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport hashlib\nimport json\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\n\nfrom cfnlint.helpers import RESOURCE_SPECS\n\n\nclass ListDuplicates(CloudFormationLintRule):\n \"\"\"Check if duplicates exist in a List\"\"\"\n id = 'E3037'\n shortdesc = 'Check if a list has duplicate values'\n description = 'Certain lists don\\'t support duplicate items. ' \\\n 'Check when duplicates are provided but not supported.'\n source_url = 'https://github.com/aws-cloudformation/cfn-python-lint/blob/master/docs/cfn-resource-specification.md#allowedvalue'\n tags = ['resources', 'property', 'list']\n\n def initialize(self, cfn):\n \"\"\"Initialize the rule\"\"\"\n for resource_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes'):\n self.resource_property_types.append(resource_type_spec)\n for property_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes'):\n self.resource_sub_property_types.append(property_type_spec)\n\n def _check_duplicates(self, values, path, scenario=None):\n \"\"\" Check for Duplicates \"\"\"\n matches = []\n\n list_items = []\n if isinstance(values, list):\n for index, value in enumerate(values):\n value_hash = hashlib.sha1(json.dumps(\n value, sort_keys=True).encode('utf-8')).hexdigest()\n if value_hash in list_items:\n if not scenario:\n message = 'List has a duplicate value at {0}'\n matches.append(\n RuleMatch(path + [index], message.format('/'.join(map(str, path + [index])))))\n else:\n scenario_text = ' and '.join(\n ['condition \"%s\" is %s' % (k, v) for (k, v) in scenario.items()])\n message = 'List has a duplicate value at {0} when {1}'\n matches.append(RuleMatch(path, message.format(\n '/'.join(map(str, path)), scenario_text)))\n\n list_items.append(value_hash)\n\n return matches\n\n def check_duplicates(self, values, path, cfn):\n \"\"\" Check for duplicates \"\"\"\n matches = []\n\n if isinstance(values, list):\n matches.extend(self._check_duplicates(values, path))\n elif isinstance(values, dict):\n props = cfn.get_object_without_conditions(values)\n for prop in props:\n matches.extend(self._check_duplicates(\n prop.get('Object'), path, prop.get('Scenario')))\n\n return matches\n\n def check(self, cfn, properties, value_specs, path):\n \"\"\"Check itself\"\"\"\n matches = list()\n for p_value, p_path in properties.items_safe(path[:]):\n for prop in p_value:\n if prop in value_specs:\n property_type = value_specs.get(prop).get('Type')\n duplicates_allowed = value_specs.get(prop).get('DuplicatesAllowed', True)\n if property_type == 'List' and not duplicates_allowed:\n matches.extend(\n self.check_duplicates(\n p_value[prop], p_path + [prop], cfn\n )\n )\n\n return matches\n\n def match_resource_sub_properties(self, properties, property_type, path, cfn):\n \"\"\"Match for sub properties\"\"\"\n matches = list()\n\n specs = RESOURCE_SPECS.get(cfn.regions[0]).get(\n 'PropertyTypes').get(property_type, {}).get('Properties', {})\n matches.extend(self.check(cfn, properties, specs, path))\n\n return matches\n\n def match_resource_properties(self, properties, resource_type, path, cfn):\n \"\"\"Check CloudFormation Properties\"\"\"\n matches = list()\n\n specs = RESOURCE_SPECS.get(cfn.regions[0]).get(\n 'ResourceTypes').get(resource_type, {}).get('Properties', {})\n matches.extend(self.check(cfn, properties, specs, path))\n\n return matches\n", "path": "src/cfnlint/rules/resources/properties/ListDuplicates.py"}], "after_files": [{"content": "\"\"\"\nCopyright Amazon.com, Inc. or its affiliates. All Rights Reserved.\nSPDX-License-Identifier: MIT-0\n\"\"\"\nimport hashlib\nimport json\nfrom cfnlint.rules import CloudFormationLintRule\nfrom cfnlint.rules import RuleMatch\n\nfrom cfnlint.helpers import RESOURCE_SPECS\n\n\nclass ListDuplicates(CloudFormationLintRule):\n \"\"\"Check if duplicates exist in a List\"\"\"\n id = 'E3037'\n shortdesc = 'Check if a list has duplicate values'\n description = 'Certain lists don\\'t support duplicate items. ' \\\n 'Check when duplicates are provided but not supported.'\n source_url = 'https://github.com/aws-cloudformation/cfn-python-lint/blob/master/docs/cfn-resource-specification.md#allowedvalue'\n tags = ['resources', 'property', 'list']\n\n def initialize(self, cfn):\n \"\"\"Initialize the rule\"\"\"\n for resource_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('ResourceTypes'):\n self.resource_property_types.append(resource_type_spec)\n for property_type_spec in RESOURCE_SPECS.get(cfn.regions[0]).get('PropertyTypes'):\n self.resource_sub_property_types.append(property_type_spec)\n\n def _check_duplicates(self, values, path, scenario=None):\n \"\"\" Check for Duplicates \"\"\"\n matches = []\n\n list_items = []\n if isinstance(values, list):\n for index, value in enumerate(values):\n value_hash = hashlib.sha1(json.dumps(\n value, sort_keys=True, default=str).encode('utf-8')).hexdigest()\n if value_hash in list_items:\n if not scenario:\n message = 'List has a duplicate value at {0}'\n matches.append(\n RuleMatch(path + [index], message.format('/'.join(map(str, path + [index])))))\n else:\n scenario_text = ' and '.join(\n ['condition \"%s\" is %s' % (k, v) for (k, v) in scenario.items()])\n message = 'List has a duplicate value at {0} when {1}'\n matches.append(RuleMatch(path, message.format(\n '/'.join(map(str, path)), scenario_text)))\n\n list_items.append(value_hash)\n\n return matches\n\n def check_duplicates(self, values, path, cfn):\n \"\"\" Check for duplicates \"\"\"\n matches = []\n\n if isinstance(values, list):\n matches.extend(self._check_duplicates(values, path))\n elif isinstance(values, dict):\n props = cfn.get_object_without_conditions(values)\n for prop in props:\n matches.extend(self._check_duplicates(\n prop.get('Object'), path, prop.get('Scenario')))\n\n return matches\n\n def check(self, cfn, properties, value_specs, path):\n \"\"\"Check itself\"\"\"\n matches = list()\n for p_value, p_path in properties.items_safe(path[:]):\n for prop in p_value:\n if prop in value_specs:\n property_type = value_specs.get(prop).get('Type')\n duplicates_allowed = value_specs.get(prop).get('DuplicatesAllowed', True)\n if property_type == 'List' and not duplicates_allowed:\n matches.extend(\n self.check_duplicates(\n p_value[prop], p_path + [prop], cfn\n )\n )\n\n return matches\n\n def match_resource_sub_properties(self, properties, property_type, path, cfn):\n \"\"\"Match for sub properties\"\"\"\n matches = list()\n\n specs = RESOURCE_SPECS.get(cfn.regions[0]).get(\n 'PropertyTypes').get(property_type, {}).get('Properties', {})\n matches.extend(self.check(cfn, properties, specs, path))\n\n return matches\n\n def match_resource_properties(self, properties, resource_type, path, cfn):\n \"\"\"Check CloudFormation Properties\"\"\"\n matches = list()\n\n specs = RESOURCE_SPECS.get(cfn.regions[0]).get(\n 'ResourceTypes').get(resource_type, {}).get('Properties', {})\n matches.extend(self.check(cfn, properties, specs, path))\n\n return matches\n", "path": "src/cfnlint/rules/resources/properties/ListDuplicates.py"}]}
1,674
155
gh_patches_debug_33558
rasdani/github-patches
git_diff
wagtail__wagtail-170
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Broken URL for jquery.ui.datepicker when 'en-US' used as lang This isn't a big deal at all, but wanted to post just in case anyone wants to take a look. When loading a page with `jquery.ui.datepicker.js`, I notice in console that a call to http://jquery-ui.googlecode.com/svn/tags/latest/ui/i18n/jquery.ui.datepicker-en-US.js returns a 404. I searched out the CDN for the directory in which the file is attempting to be called: http://jquery-ui.googlecode.com/svn/tags/latest/ui/i18n/ As you can see, there is no `../jquery.ui.datepicker-en-US.js` present (not that there necessarily ought to be) The call stems from: https://github.com/torchbox/wagtail/blob/master/wagtail/wagtailadmin/templatetags/localize.py#L42 The interpolation inserts `en-US` into the URI Again, no big deal... just FYI Cheers, all! Edit: I should add, this issue does _not_ break usability - a fallback seems to be in place. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `wagtail/wagtailadmin/templatetags/localize.py` Content: ``` 1 from django import template 2 from django.conf import settings 3 from django.utils import formats 4 from django.utils.translation import get_language 5 6 register = template.Library() 7 8 # For reasons unkown, the el (greek) locale in django/conf/locale/el/formats.py 9 # *did not* contain a DATE_INPUT_FORMATS -- so it fell back to using the US 10 # date format (mm/dd/yy) which is not the correct one for Greece (dd/mm/yy). 11 # This means that if we used a localized datepicker django *won't* be able to 12 # parse the dates! So a test here checks if DATE_INPUT_FORMATS is actually 13 # defined in a format module. If yes then it will just return an empty string 14 # so that the normal, localized date format from datepicker will be used. 15 # If DATE_INPUT_FORMATS is not defined then it will return 16 @register.assignment_tag 17 def get_date_format_override(): 18 if hasattr(settings, 'USE_I18N') and settings.USE_I18N==True: 19 20 for m in formats.get_format_modules(): 21 if hasattr(m, 'DATE_INPUT_FORMATS'): 22 return '' 23 else: # fall back to the ISO to be sure date will be parsed 24 return 'yy-mm-dd' 25 else: # Fall back to ISO if I18N is *not* used 26 return 'yy-mm-dd' 27 28 # Get the correct i18n + l10n settings for datepicker depending on current 29 # thread language 30 @register.simple_tag 31 def get_localized_datepicker_js(): 32 if hasattr(settings, 'USE_I18N') and settings.USE_I18N==True and \ 33 hasattr(settings, 'USE_L10N') and settings.USE_L10N==True: 34 35 lang = get_language() 36 37 if '-' in lang: 38 lang_parts = lang.split('-') 39 lang = lang_parts[0].lower() +'-'+ lang_parts[1].upper() 40 else: 41 lang=lang.lower() 42 return '<script src="//jquery-ui.googlecode.com/svn/tags/latest/ui/i18n/jquery.ui.datepicker-{0}.js"></script>'.format( 43 lang 44 ) 45 46 else: # Don't write anything if we don't use I18N and L10N 47 return '' 48 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/wagtail/wagtailadmin/templatetags/localize.py b/wagtail/wagtailadmin/templatetags/localize.py --- a/wagtail/wagtailadmin/templatetags/localize.py +++ b/wagtail/wagtailadmin/templatetags/localize.py @@ -1,5 +1,6 @@ from django import template from django.conf import settings +from django.templatetags.static import static from django.utils import formats from django.utils.translation import get_language @@ -25,6 +26,15 @@ else: # Fall back to ISO if I18N is *not* used return 'yy-mm-dd' +# This is a list of all supported langs for jquery-ui datepicker which exist in +# wagtailadmin/js/venor/i18n/. In case any new translations are added there the +# language code should also be added in this list. +SUPPORTED_DATEPICKER_LANGS = ['af', 'ar-DZ', 'ar', 'az', 'be', 'bg', 'bs', 'ca', 'cs', 'cy-GB', 'da', 'de', + 'el', 'en-AU', 'en-GB', 'en-NZ', 'eo', 'es', 'et', 'eu', 'fa', 'fi', 'fo', 'fr-CA', 'fr-CH', 'fr', 'gl', + 'he', 'hi', 'hr', 'hu', 'hy', 'id', 'is', 'it', 'ja', 'ka', 'kk', 'km', 'ko', 'ky', 'lb', 'lt', 'lv', + 'mk', 'ml', 'ms', 'nb', 'nl-BE', 'nl', 'nn', 'no', 'pl', 'pt-BR', 'pt', 'rm', 'ro', 'ru', 'sk', 'sl', 'sq', + 'sr-SR', 'sr', 'sv', 'ta', 'th', 'tj', 'tr', 'uk', 'vi', 'zh-CN', 'zh-HK', 'zh-TW' +] # Get the correct i18n + l10n settings for datepicker depending on current # thread language @register.simple_tag @@ -39,10 +49,14 @@ lang = lang_parts[0].lower() +'-'+ lang_parts[1].upper() else: lang=lang.lower() - return '<script src="//jquery-ui.googlecode.com/svn/tags/latest/ui/i18n/jquery.ui.datepicker-{0}.js"></script>'.format( - lang - ) + if lang in SUPPORTED_DATEPICKER_LANGS: + translation_file = static("wagtailadmin/js/vendor/i18n/jquery.ui.datepicker-{0}.js".format( + lang + )) + return '<script src="{0}"></script>'.format(translation_file) + else: # Don't return anything if language is not supported + return '' - else: # Don't write anything if we don't use I18N and L10N + else: # Don't return anything if we don't use I18N and L10N return '' \ No newline at end of file
{"golden_diff": "diff --git a/wagtail/wagtailadmin/templatetags/localize.py b/wagtail/wagtailadmin/templatetags/localize.py\n--- a/wagtail/wagtailadmin/templatetags/localize.py\n+++ b/wagtail/wagtailadmin/templatetags/localize.py\n@@ -1,5 +1,6 @@\n from django import template\n from django.conf import settings\n+from django.templatetags.static import static\n from django.utils import formats\n from django.utils.translation import get_language\n \n@@ -25,6 +26,15 @@\n else: # Fall back to ISO if I18N is *not* used\n return 'yy-mm-dd'\n \n+# This is a list of all supported langs for jquery-ui datepicker which exist in\n+# wagtailadmin/js/venor/i18n/. In case any new translations are added there the\n+# language code should also be added in this list.\n+SUPPORTED_DATEPICKER_LANGS = ['af', 'ar-DZ', 'ar', 'az', 'be', 'bg', 'bs', 'ca', 'cs', 'cy-GB', 'da', 'de',\n+ 'el', 'en-AU', 'en-GB', 'en-NZ', 'eo', 'es', 'et', 'eu', 'fa', 'fi', 'fo', 'fr-CA', 'fr-CH', 'fr', 'gl',\n+ 'he', 'hi', 'hr', 'hu', 'hy', 'id', 'is', 'it', 'ja', 'ka', 'kk', 'km', 'ko', 'ky', 'lb', 'lt', 'lv',\n+ 'mk', 'ml', 'ms', 'nb', 'nl-BE', 'nl', 'nn', 'no', 'pl', 'pt-BR', 'pt', 'rm', 'ro', 'ru', 'sk', 'sl', 'sq',\n+ 'sr-SR', 'sr', 'sv', 'ta', 'th', 'tj', 'tr', 'uk', 'vi', 'zh-CN', 'zh-HK', 'zh-TW'\n+]\n # Get the correct i18n + l10n settings for datepicker depending on current \n # thread language \n @register.simple_tag\n@@ -39,10 +49,14 @@\n lang = lang_parts[0].lower() +'-'+ lang_parts[1].upper()\n else:\n lang=lang.lower()\n- return '<script src=\"//jquery-ui.googlecode.com/svn/tags/latest/ui/i18n/jquery.ui.datepicker-{0}.js\"></script>'.format(\n- lang\n- )\n+ if lang in SUPPORTED_DATEPICKER_LANGS:\n+ translation_file = static(\"wagtailadmin/js/vendor/i18n/jquery.ui.datepicker-{0}.js\".format(\n+ lang\n+ ))\n+ return '<script src=\"{0}\"></script>'.format(translation_file)\n+ else: # Don't return anything if language is not supported\n+ return ''\n \n- else: # Don't write anything if we don't use I18N and L10N\n+ else: # Don't return anything if we don't use I18N and L10N\n return '' \n \n\\ No newline at end of file\n", "issue": "Broken URL for jquery.ui.datepicker when 'en-US' used as lang \nThis isn't a big deal at all, but wanted to post just in case anyone wants to take a look.\n\nWhen loading a page with `jquery.ui.datepicker.js`, I notice in console that a call to http://jquery-ui.googlecode.com/svn/tags/latest/ui/i18n/jquery.ui.datepicker-en-US.js returns a 404.\n\nI searched out the CDN for the directory in which the file is attempting to be called:\nhttp://jquery-ui.googlecode.com/svn/tags/latest/ui/i18n/\n\nAs you can see, there is no `../jquery.ui.datepicker-en-US.js` present (not that there necessarily ought to be)\n\nThe call stems from:\nhttps://github.com/torchbox/wagtail/blob/master/wagtail/wagtailadmin/templatetags/localize.py#L42\n\nThe interpolation inserts `en-US` into the URI\n\nAgain, no big deal... just FYI\n\nCheers, all!\n\nEdit:\n\nI should add, this issue does _not_ break usability - a fallback seems to be in place.\n\n", "before_files": [{"content": "from django import template\nfrom django.conf import settings\nfrom django.utils import formats\nfrom django.utils.translation import get_language\n\nregister = template.Library()\n\n# For reasons unkown, the el (greek) locale in django/conf/locale/el/formats.py \n# *did not* contain a DATE_INPUT_FORMATS -- so it fell back to using the US \n# date format (mm/dd/yy) which is not the correct one for Greece (dd/mm/yy). \n# This means that if we used a localized datepicker django *won't* be able to\n# parse the dates! So a test here checks if DATE_INPUT_FORMATS is actually \n# defined in a format module. If yes then it will just return an empty string \n# so that the normal, localized date format from datepicker will be used.\n# If DATE_INPUT_FORMATS is not defined then it will return\[email protected]_tag\ndef get_date_format_override():\n if hasattr(settings, 'USE_I18N') and settings.USE_I18N==True:\n \n for m in formats.get_format_modules():\n if hasattr(m, 'DATE_INPUT_FORMATS'):\n return ''\n else: # fall back to the ISO to be sure date will be parsed\n return 'yy-mm-dd'\n else: # Fall back to ISO if I18N is *not* used\n return 'yy-mm-dd'\n\n# Get the correct i18n + l10n settings for datepicker depending on current \n# thread language \[email protected]_tag\ndef get_localized_datepicker_js():\n if hasattr(settings, 'USE_I18N') and settings.USE_I18N==True and \\\n hasattr(settings, 'USE_L10N') and settings.USE_L10N==True:\n \n lang = get_language()\n \n if '-' in lang:\n lang_parts = lang.split('-')\n lang = lang_parts[0].lower() +'-'+ lang_parts[1].upper()\n else:\n lang=lang.lower()\n return '<script src=\"//jquery-ui.googlecode.com/svn/tags/latest/ui/i18n/jquery.ui.datepicker-{0}.js\"></script>'.format(\n lang\n )\n \n else: # Don't write anything if we don't use I18N and L10N\n return '' \n ", "path": "wagtail/wagtailadmin/templatetags/localize.py"}], "after_files": [{"content": "from django import template\nfrom django.conf import settings\nfrom django.templatetags.static import static\nfrom django.utils import formats\nfrom django.utils.translation import get_language\n\nregister = template.Library()\n\n# For reasons unkown, the el (greek) locale in django/conf/locale/el/formats.py \n# *did not* contain a DATE_INPUT_FORMATS -- so it fell back to using the US \n# date format (mm/dd/yy) which is not the correct one for Greece (dd/mm/yy). \n# This means that if we used a localized datepicker django *won't* be able to\n# parse the dates! So a test here checks if DATE_INPUT_FORMATS is actually \n# defined in a format module. If yes then it will just return an empty string \n# so that the normal, localized date format from datepicker will be used.\n# If DATE_INPUT_FORMATS is not defined then it will return\[email protected]_tag\ndef get_date_format_override():\n if hasattr(settings, 'USE_I18N') and settings.USE_I18N==True:\n \n for m in formats.get_format_modules():\n if hasattr(m, 'DATE_INPUT_FORMATS'):\n return ''\n else: # fall back to the ISO to be sure date will be parsed\n return 'yy-mm-dd'\n else: # Fall back to ISO if I18N is *not* used\n return 'yy-mm-dd'\n\n# This is a list of all supported langs for jquery-ui datepicker which exist in\n# wagtailadmin/js/venor/i18n/. In case any new translations are added there the\n# language code should also be added in this list.\nSUPPORTED_DATEPICKER_LANGS = ['af', 'ar-DZ', 'ar', 'az', 'be', 'bg', 'bs', 'ca', 'cs', 'cy-GB', 'da', 'de',\n 'el', 'en-AU', 'en-GB', 'en-NZ', 'eo', 'es', 'et', 'eu', 'fa', 'fi', 'fo', 'fr-CA', 'fr-CH', 'fr', 'gl',\n 'he', 'hi', 'hr', 'hu', 'hy', 'id', 'is', 'it', 'ja', 'ka', 'kk', 'km', 'ko', 'ky', 'lb', 'lt', 'lv',\n 'mk', 'ml', 'ms', 'nb', 'nl-BE', 'nl', 'nn', 'no', 'pl', 'pt-BR', 'pt', 'rm', 'ro', 'ru', 'sk', 'sl', 'sq',\n 'sr-SR', 'sr', 'sv', 'ta', 'th', 'tj', 'tr', 'uk', 'vi', 'zh-CN', 'zh-HK', 'zh-TW'\n]\n# Get the correct i18n + l10n settings for datepicker depending on current \n# thread language \[email protected]_tag\ndef get_localized_datepicker_js():\n if hasattr(settings, 'USE_I18N') and settings.USE_I18N==True and \\\n hasattr(settings, 'USE_L10N') and settings.USE_L10N==True:\n \n lang = get_language()\n \n if '-' in lang:\n lang_parts = lang.split('-')\n lang = lang_parts[0].lower() +'-'+ lang_parts[1].upper()\n else:\n lang=lang.lower()\n if lang in SUPPORTED_DATEPICKER_LANGS:\n translation_file = static(\"wagtailadmin/js/vendor/i18n/jquery.ui.datepicker-{0}.js\".format(\n lang\n ))\n return '<script src=\"{0}\"></script>'.format(translation_file)\n else: # Don't return anything if language is not supported\n return ''\n \n else: # Don't return anything if we don't use I18N and L10N\n return '' \n ", "path": "wagtail/wagtailadmin/templatetags/localize.py"}]}
1,086
735
gh_patches_debug_6828
rasdani/github-patches
git_diff
kartoza__prj.app-162
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Landing page gives a 404 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `django_project/base/views/error_views.py` Content: ``` 1 # coding=utf-8 2 """Our custom error views""" 3 from django.shortcuts import render_to_response 4 from django.template import RequestContext 5 from base.models.project import Project 6 7 8 def custom_404(request, template_name='404.html'): 9 """Our custom 404 view 10 11 We want to include a list of all public and approved Projects in the 404 12 view 13 :param request: Request obj 14 :type request: HttpRequest 15 16 :param template_name: The template to render 17 :type template_name: str 18 19 :return: Response obj 20 :rtype: HttpResponse 21 22 """ 23 public_projects = Project.objects.filter(approved=True, private=False) 24 return render_to_response(template_name, { 25 'request_path': request.path, 26 'projects': public_projects 27 }, context_instance=RequestContext(request)) 28 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/django_project/base/views/error_views.py b/django_project/base/views/error_views.py --- a/django_project/base/views/error_views.py +++ b/django_project/base/views/error_views.py @@ -21,7 +21,11 @@ """ public_projects = Project.objects.filter(approved=True, private=False) - return render_to_response(template_name, { - 'request_path': request.path, - 'projects': public_projects - }, context_instance=RequestContext(request)) + + response = render_to_response( + template_name, { + 'request_path': request.path, + 'projects': public_projects}, + context_instance=RequestContext(request)) + response.status_code = 404 + return response
{"golden_diff": "diff --git a/django_project/base/views/error_views.py b/django_project/base/views/error_views.py\n--- a/django_project/base/views/error_views.py\n+++ b/django_project/base/views/error_views.py\n@@ -21,7 +21,11 @@\n \n \"\"\"\n public_projects = Project.objects.filter(approved=True, private=False)\n- return render_to_response(template_name, {\n- 'request_path': request.path,\n- 'projects': public_projects\n- }, context_instance=RequestContext(request))\n+\n+ response = render_to_response(\n+ template_name, {\n+ 'request_path': request.path,\n+ 'projects': public_projects},\n+ context_instance=RequestContext(request))\n+ response.status_code = 404\n+ return response\n", "issue": "Landing page gives a 404\n\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"Our custom error views\"\"\"\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom base.models.project import Project\n\n\ndef custom_404(request, template_name='404.html'):\n \"\"\"Our custom 404 view\n\n We want to include a list of all public and approved Projects in the 404\n view\n :param request: Request obj\n :type request: HttpRequest\n\n :param template_name: The template to render\n :type template_name: str\n\n :return: Response obj\n :rtype: HttpResponse\n\n \"\"\"\n public_projects = Project.objects.filter(approved=True, private=False)\n return render_to_response(template_name, {\n 'request_path': request.path,\n 'projects': public_projects\n }, context_instance=RequestContext(request))\n", "path": "django_project/base/views/error_views.py"}], "after_files": [{"content": "# coding=utf-8\n\"\"\"Our custom error views\"\"\"\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext\nfrom base.models.project import Project\n\n\ndef custom_404(request, template_name='404.html'):\n \"\"\"Our custom 404 view\n\n We want to include a list of all public and approved Projects in the 404\n view\n :param request: Request obj\n :type request: HttpRequest\n\n :param template_name: The template to render\n :type template_name: str\n\n :return: Response obj\n :rtype: HttpResponse\n\n \"\"\"\n public_projects = Project.objects.filter(approved=True, private=False)\n\n response = render_to_response(\n template_name, {\n 'request_path': request.path,\n 'projects': public_projects},\n context_instance=RequestContext(request))\n response.status_code = 404\n return response\n", "path": "django_project/base/views/error_views.py"}]}
503
169
gh_patches_debug_38348
rasdani/github-patches
git_diff
PaddlePaddle__models-312
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- resnet模型配置的问题 目前resnet的配置有一些问题,可见 https://github.com/PaddlePaddle/models/issues/308#issuecomment-331384031 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `image_classification/resnet.py` Content: ``` 1 import paddle.v2 as paddle 2 3 __all__ = ['resnet_imagenet', 'resnet_cifar10'] 4 5 6 def conv_bn_layer(input, 7 ch_out, 8 filter_size, 9 stride, 10 padding, 11 active_type=paddle.activation.Relu(), 12 ch_in=None): 13 tmp = paddle.layer.img_conv( 14 input=input, 15 filter_size=filter_size, 16 num_channels=ch_in, 17 num_filters=ch_out, 18 stride=stride, 19 padding=padding, 20 act=paddle.activation.Linear(), 21 bias_attr=False) 22 return paddle.layer.batch_norm(input=tmp, act=active_type) 23 24 25 def shortcut(input, ch_in, ch_out, stride): 26 if ch_in != ch_out: 27 return conv_bn_layer(input, ch_out, 1, stride, 0, 28 paddle.activation.Linear()) 29 else: 30 return input 31 32 33 def basicblock(input, ch_in, ch_out, stride): 34 short = shortcut(input, ch_in, ch_out, stride) 35 conv1 = conv_bn_layer(input, ch_out, 3, stride, 1) 36 conv2 = conv_bn_layer(conv1, ch_out, 3, 1, 1, paddle.activation.Linear()) 37 return paddle.layer.addto( 38 input=[short, conv2], act=paddle.activation.Relu()) 39 40 41 def bottleneck(input, ch_in, ch_out, stride): 42 short = shortcut(input, ch_in, ch_out * 4, stride) 43 conv1 = conv_bn_layer(input, ch_out, 1, stride, 0) 44 conv2 = conv_bn_layer(conv1, ch_out, 3, 1, 1) 45 conv3 = conv_bn_layer(conv2, ch_out * 4, 1, 1, 0, 46 paddle.activation.Linear()) 47 return paddle.layer.addto( 48 input=[short, conv3], act=paddle.activation.Relu()) 49 50 51 def layer_warp(block_func, input, ch_in, ch_out, count, stride): 52 conv = block_func(input, ch_in, ch_out, stride) 53 for i in range(1, count): 54 conv = block_func(conv, ch_out, ch_out, 1) 55 return conv 56 57 58 def resnet_imagenet(input, class_dim, depth=50): 59 cfg = { 60 18: ([2, 2, 2, 1], basicblock), 61 34: ([3, 4, 6, 3], basicblock), 62 50: ([3, 4, 6, 3], bottleneck), 63 101: ([3, 4, 23, 3], bottleneck), 64 152: ([3, 8, 36, 3], bottleneck) 65 } 66 stages, block_func = cfg[depth] 67 conv1 = conv_bn_layer( 68 input, ch_in=3, ch_out=64, filter_size=7, stride=2, padding=3) 69 pool1 = paddle.layer.img_pool(input=conv1, pool_size=3, stride=2) 70 res1 = layer_warp(block_func, pool1, 64, 64, stages[0], 1) 71 res2 = layer_warp(block_func, res1, 64, 128, stages[1], 2) 72 res3 = layer_warp(block_func, res2, 128, 256, stages[2], 2) 73 res4 = layer_warp(block_func, res3, 256, 512, stages[3], 2) 74 pool2 = paddle.layer.img_pool( 75 input=res4, pool_size=7, stride=1, pool_type=paddle.pooling.Avg()) 76 out = paddle.layer.fc( 77 input=pool2, size=class_dim, act=paddle.activation.Softmax()) 78 return out 79 80 81 def resnet_cifar10(input, class_dim, depth=32): 82 # depth should be one of 20, 32, 44, 56, 110, 1202 83 assert (depth - 2) % 6 == 0 84 n = (depth - 2) / 6 85 nStages = {16, 64, 128} 86 conv1 = conv_bn_layer( 87 input, ch_in=3, ch_out=16, filter_size=3, stride=1, padding=1) 88 res1 = layer_warp(basicblock, conv1, 16, 16, n, 1) 89 res2 = layer_warp(basicblock, res1, 16, 32, n, 2) 90 res3 = layer_warp(basicblock, res2, 32, 64, n, 2) 91 pool = paddle.layer.img_pool( 92 input=res3, pool_size=8, stride=1, pool_type=paddle.pooling.Avg()) 93 out = paddle.layer.fc( 94 input=pool, size=class_dim, act=paddle.activation.Softmax()) 95 return out 96 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/image_classification/resnet.py b/image_classification/resnet.py --- a/image_classification/resnet.py +++ b/image_classification/resnet.py @@ -22,24 +22,24 @@ return paddle.layer.batch_norm(input=tmp, act=active_type) -def shortcut(input, ch_in, ch_out, stride): - if ch_in != ch_out: +def shortcut(input, ch_out, stride): + if input.num_filters != ch_out: return conv_bn_layer(input, ch_out, 1, stride, 0, paddle.activation.Linear()) else: return input -def basicblock(input, ch_in, ch_out, stride): - short = shortcut(input, ch_in, ch_out, stride) +def basicblock(input, ch_out, stride): + short = shortcut(input, ch_out, stride) conv1 = conv_bn_layer(input, ch_out, 3, stride, 1) conv2 = conv_bn_layer(conv1, ch_out, 3, 1, 1, paddle.activation.Linear()) return paddle.layer.addto( input=[short, conv2], act=paddle.activation.Relu()) -def bottleneck(input, ch_in, ch_out, stride): - short = shortcut(input, ch_in, ch_out * 4, stride) +def bottleneck(input, ch_out, stride): + short = shortcut(input, ch_out * 4, stride) conv1 = conv_bn_layer(input, ch_out, 1, stride, 0) conv2 = conv_bn_layer(conv1, ch_out, 3, 1, 1) conv3 = conv_bn_layer(conv2, ch_out * 4, 1, 1, 0, @@ -48,10 +48,10 @@ input=[short, conv3], act=paddle.activation.Relu()) -def layer_warp(block_func, input, ch_in, ch_out, count, stride): - conv = block_func(input, ch_in, ch_out, stride) +def layer_warp(block_func, input, ch_out, count, stride): + conv = block_func(input, ch_out, stride) for i in range(1, count): - conv = block_func(conv, ch_out, ch_out, 1) + conv = block_func(conv, ch_out, 1) return conv @@ -67,10 +67,10 @@ conv1 = conv_bn_layer( input, ch_in=3, ch_out=64, filter_size=7, stride=2, padding=3) pool1 = paddle.layer.img_pool(input=conv1, pool_size=3, stride=2) - res1 = layer_warp(block_func, pool1, 64, 64, stages[0], 1) - res2 = layer_warp(block_func, res1, 64, 128, stages[1], 2) - res3 = layer_warp(block_func, res2, 128, 256, stages[2], 2) - res4 = layer_warp(block_func, res3, 256, 512, stages[3], 2) + res1 = layer_warp(block_func, pool1, 64, stages[0], 1) + res2 = layer_warp(block_func, res1, 128, stages[1], 2) + res3 = layer_warp(block_func, res2, 256, stages[2], 2) + res4 = layer_warp(block_func, res3, 512, stages[3], 2) pool2 = paddle.layer.img_pool( input=res4, pool_size=7, stride=1, pool_type=paddle.pooling.Avg()) out = paddle.layer.fc(
{"golden_diff": "diff --git a/image_classification/resnet.py b/image_classification/resnet.py\n--- a/image_classification/resnet.py\n+++ b/image_classification/resnet.py\n@@ -22,24 +22,24 @@\n return paddle.layer.batch_norm(input=tmp, act=active_type)\n \n \n-def shortcut(input, ch_in, ch_out, stride):\n- if ch_in != ch_out:\n+def shortcut(input, ch_out, stride):\n+ if input.num_filters != ch_out:\n return conv_bn_layer(input, ch_out, 1, stride, 0,\n paddle.activation.Linear())\n else:\n return input\n \n \n-def basicblock(input, ch_in, ch_out, stride):\n- short = shortcut(input, ch_in, ch_out, stride)\n+def basicblock(input, ch_out, stride):\n+ short = shortcut(input, ch_out, stride)\n conv1 = conv_bn_layer(input, ch_out, 3, stride, 1)\n conv2 = conv_bn_layer(conv1, ch_out, 3, 1, 1, paddle.activation.Linear())\n return paddle.layer.addto(\n input=[short, conv2], act=paddle.activation.Relu())\n \n \n-def bottleneck(input, ch_in, ch_out, stride):\n- short = shortcut(input, ch_in, ch_out * 4, stride)\n+def bottleneck(input, ch_out, stride):\n+ short = shortcut(input, ch_out * 4, stride)\n conv1 = conv_bn_layer(input, ch_out, 1, stride, 0)\n conv2 = conv_bn_layer(conv1, ch_out, 3, 1, 1)\n conv3 = conv_bn_layer(conv2, ch_out * 4, 1, 1, 0,\n@@ -48,10 +48,10 @@\n input=[short, conv3], act=paddle.activation.Relu())\n \n \n-def layer_warp(block_func, input, ch_in, ch_out, count, stride):\n- conv = block_func(input, ch_in, ch_out, stride)\n+def layer_warp(block_func, input, ch_out, count, stride):\n+ conv = block_func(input, ch_out, stride)\n for i in range(1, count):\n- conv = block_func(conv, ch_out, ch_out, 1)\n+ conv = block_func(conv, ch_out, 1)\n return conv\n \n \n@@ -67,10 +67,10 @@\n conv1 = conv_bn_layer(\n input, ch_in=3, ch_out=64, filter_size=7, stride=2, padding=3)\n pool1 = paddle.layer.img_pool(input=conv1, pool_size=3, stride=2)\n- res1 = layer_warp(block_func, pool1, 64, 64, stages[0], 1)\n- res2 = layer_warp(block_func, res1, 64, 128, stages[1], 2)\n- res3 = layer_warp(block_func, res2, 128, 256, stages[2], 2)\n- res4 = layer_warp(block_func, res3, 256, 512, stages[3], 2)\n+ res1 = layer_warp(block_func, pool1, 64, stages[0], 1)\n+ res2 = layer_warp(block_func, res1, 128, stages[1], 2)\n+ res3 = layer_warp(block_func, res2, 256, stages[2], 2)\n+ res4 = layer_warp(block_func, res3, 512, stages[3], 2)\n pool2 = paddle.layer.img_pool(\n input=res4, pool_size=7, stride=1, pool_type=paddle.pooling.Avg())\n out = paddle.layer.fc(\n", "issue": "resnet\u6a21\u578b\u914d\u7f6e\u7684\u95ee\u9898\n\u76ee\u524dresnet\u7684\u914d\u7f6e\u6709\u4e00\u4e9b\u95ee\u9898\uff0c\u53ef\u89c1 https://github.com/PaddlePaddle/models/issues/308#issuecomment-331384031\n", "before_files": [{"content": "import paddle.v2 as paddle\n\n__all__ = ['resnet_imagenet', 'resnet_cifar10']\n\n\ndef conv_bn_layer(input,\n ch_out,\n filter_size,\n stride,\n padding,\n active_type=paddle.activation.Relu(),\n ch_in=None):\n tmp = paddle.layer.img_conv(\n input=input,\n filter_size=filter_size,\n num_channels=ch_in,\n num_filters=ch_out,\n stride=stride,\n padding=padding,\n act=paddle.activation.Linear(),\n bias_attr=False)\n return paddle.layer.batch_norm(input=tmp, act=active_type)\n\n\ndef shortcut(input, ch_in, ch_out, stride):\n if ch_in != ch_out:\n return conv_bn_layer(input, ch_out, 1, stride, 0,\n paddle.activation.Linear())\n else:\n return input\n\n\ndef basicblock(input, ch_in, ch_out, stride):\n short = shortcut(input, ch_in, ch_out, stride)\n conv1 = conv_bn_layer(input, ch_out, 3, stride, 1)\n conv2 = conv_bn_layer(conv1, ch_out, 3, 1, 1, paddle.activation.Linear())\n return paddle.layer.addto(\n input=[short, conv2], act=paddle.activation.Relu())\n\n\ndef bottleneck(input, ch_in, ch_out, stride):\n short = shortcut(input, ch_in, ch_out * 4, stride)\n conv1 = conv_bn_layer(input, ch_out, 1, stride, 0)\n conv2 = conv_bn_layer(conv1, ch_out, 3, 1, 1)\n conv3 = conv_bn_layer(conv2, ch_out * 4, 1, 1, 0,\n paddle.activation.Linear())\n return paddle.layer.addto(\n input=[short, conv3], act=paddle.activation.Relu())\n\n\ndef layer_warp(block_func, input, ch_in, ch_out, count, stride):\n conv = block_func(input, ch_in, ch_out, stride)\n for i in range(1, count):\n conv = block_func(conv, ch_out, ch_out, 1)\n return conv\n\n\ndef resnet_imagenet(input, class_dim, depth=50):\n cfg = {\n 18: ([2, 2, 2, 1], basicblock),\n 34: ([3, 4, 6, 3], basicblock),\n 50: ([3, 4, 6, 3], bottleneck),\n 101: ([3, 4, 23, 3], bottleneck),\n 152: ([3, 8, 36, 3], bottleneck)\n }\n stages, block_func = cfg[depth]\n conv1 = conv_bn_layer(\n input, ch_in=3, ch_out=64, filter_size=7, stride=2, padding=3)\n pool1 = paddle.layer.img_pool(input=conv1, pool_size=3, stride=2)\n res1 = layer_warp(block_func, pool1, 64, 64, stages[0], 1)\n res2 = layer_warp(block_func, res1, 64, 128, stages[1], 2)\n res3 = layer_warp(block_func, res2, 128, 256, stages[2], 2)\n res4 = layer_warp(block_func, res3, 256, 512, stages[3], 2)\n pool2 = paddle.layer.img_pool(\n input=res4, pool_size=7, stride=1, pool_type=paddle.pooling.Avg())\n out = paddle.layer.fc(\n input=pool2, size=class_dim, act=paddle.activation.Softmax())\n return out\n\n\ndef resnet_cifar10(input, class_dim, depth=32):\n # depth should be one of 20, 32, 44, 56, 110, 1202\n assert (depth - 2) % 6 == 0\n n = (depth - 2) / 6\n nStages = {16, 64, 128}\n conv1 = conv_bn_layer(\n input, ch_in=3, ch_out=16, filter_size=3, stride=1, padding=1)\n res1 = layer_warp(basicblock, conv1, 16, 16, n, 1)\n res2 = layer_warp(basicblock, res1, 16, 32, n, 2)\n res3 = layer_warp(basicblock, res2, 32, 64, n, 2)\n pool = paddle.layer.img_pool(\n input=res3, pool_size=8, stride=1, pool_type=paddle.pooling.Avg())\n out = paddle.layer.fc(\n input=pool, size=class_dim, act=paddle.activation.Softmax())\n return out\n", "path": "image_classification/resnet.py"}], "after_files": [{"content": "import paddle.v2 as paddle\n\n__all__ = ['resnet_imagenet', 'resnet_cifar10']\n\n\ndef conv_bn_layer(input,\n ch_out,\n filter_size,\n stride,\n padding,\n active_type=paddle.activation.Relu(),\n ch_in=None):\n tmp = paddle.layer.img_conv(\n input=input,\n filter_size=filter_size,\n num_channels=ch_in,\n num_filters=ch_out,\n stride=stride,\n padding=padding,\n act=paddle.activation.Linear(),\n bias_attr=False)\n return paddle.layer.batch_norm(input=tmp, act=active_type)\n\n\ndef shortcut(input, ch_out, stride):\n if input.num_filters != ch_out:\n return conv_bn_layer(input, ch_out, 1, stride, 0,\n paddle.activation.Linear())\n else:\n return input\n\n\ndef basicblock(input, ch_out, stride):\n short = shortcut(input, ch_out, stride)\n conv1 = conv_bn_layer(input, ch_out, 3, stride, 1)\n conv2 = conv_bn_layer(conv1, ch_out, 3, 1, 1, paddle.activation.Linear())\n return paddle.layer.addto(\n input=[short, conv2], act=paddle.activation.Relu())\n\n\ndef bottleneck(input, ch_out, stride):\n short = shortcut(input, ch_out * 4, stride)\n conv1 = conv_bn_layer(input, ch_out, 1, stride, 0)\n conv2 = conv_bn_layer(conv1, ch_out, 3, 1, 1)\n conv3 = conv_bn_layer(conv2, ch_out * 4, 1, 1, 0,\n paddle.activation.Linear())\n return paddle.layer.addto(\n input=[short, conv3], act=paddle.activation.Relu())\n\n\ndef layer_warp(block_func, input, ch_out, count, stride):\n conv = block_func(input, ch_out, stride)\n for i in range(1, count):\n conv = block_func(conv, ch_out, 1)\n return conv\n\n\ndef resnet_imagenet(input, class_dim, depth=50):\n cfg = {\n 18: ([2, 2, 2, 1], basicblock),\n 34: ([3, 4, 6, 3], basicblock),\n 50: ([3, 4, 6, 3], bottleneck),\n 101: ([3, 4, 23, 3], bottleneck),\n 152: ([3, 8, 36, 3], bottleneck)\n }\n stages, block_func = cfg[depth]\n conv1 = conv_bn_layer(\n input, ch_in=3, ch_out=64, filter_size=7, stride=2, padding=3)\n pool1 = paddle.layer.img_pool(input=conv1, pool_size=3, stride=2)\n res1 = layer_warp(block_func, pool1, 64, stages[0], 1)\n res2 = layer_warp(block_func, res1, 128, stages[1], 2)\n res3 = layer_warp(block_func, res2, 256, stages[2], 2)\n res4 = layer_warp(block_func, res3, 512, stages[3], 2)\n pool2 = paddle.layer.img_pool(\n input=res4, pool_size=7, stride=1, pool_type=paddle.pooling.Avg())\n out = paddle.layer.fc(\n input=pool2, size=class_dim, act=paddle.activation.Softmax())\n return out\n\n\ndef resnet_cifar10(input, class_dim, depth=32):\n # depth should be one of 20, 32, 44, 56, 110, 1202\n assert (depth - 2) % 6 == 0\n n = (depth - 2) / 6\n nStages = {16, 64, 128}\n conv1 = conv_bn_layer(\n input, ch_in=3, ch_out=16, filter_size=3, stride=1, padding=1)\n res1 = layer_warp(basicblock, conv1, 16, 16, n, 1)\n res2 = layer_warp(basicblock, res1, 16, 32, n, 2)\n res3 = layer_warp(basicblock, res2, 32, 64, n, 2)\n pool = paddle.layer.img_pool(\n input=res3, pool_size=8, stride=1, pool_type=paddle.pooling.Avg())\n out = paddle.layer.fc(\n input=pool, size=class_dim, act=paddle.activation.Softmax())\n return out\n", "path": "image_classification/resnet.py"}]}
1,603
850
gh_patches_debug_30359
rasdani/github-patches
git_diff
apluslms__a-plus-1293
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Course staff may create duplicate student groups Course staff may create student groups (course/models.py class StudentGroup) that contain exactly the same group members as an existing group. Duplicate groups should not be allowed. The course staff UI for editing groups is in the URL http://localhost:8000/def/current/teachers/groups/ (in the left navigation menu, it is the "Groups" link under the heading Course staff). Course staff may also create new groups (or edit existing groups) that are empty (no members) or only have one member. Groups should always have at least two members. When students create groups in the "form a group" page (with user personal codes), A+ already prevents empty and duplicate groups. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `course/forms.py` Content: ``` 1 from typing import Any 2 3 from django import forms 4 from django.contrib.humanize.templatetags.humanize import ordinal 5 from django.utils.safestring import mark_safe 6 from django.utils.text import format_lazy 7 from django.utils.translation import gettext_lazy as _ 8 9 from aplus.api import api_reverse 10 from exercise.models import SubmissionDraft 11 from lib.fields import UsersSearchSelectField 12 from .models import Enrollment, StudentGroup 13 from userprofile.models import UserProfile 14 15 16 class GroupsForm(forms.Form): 17 18 def __init__(self, *args, **kwargs): 19 self.profile = kwargs.pop('profile') 20 self.instance = kwargs.pop('instance') 21 self.content = kwargs.pop('content') 22 super().__init__(*args, **kwargs) 23 total = self.content.total() 24 min_size = max(total.min_group_size, 2) 25 max_size = total.max_group_size 26 27 for n in range(2, max_size + 1): 28 widget = forms.TextInput(attrs={'class':'form-control'}) 29 field = forms.CharField(widget=widget, required=(n <= min_size)) 30 field.label = mark_safe(format_lazy(_('GROUP_MEMBER_LABEL -- {num}'), num=ordinal(n))) 31 self.fields['member{:d}'.format(n)] = field 32 33 def clean(self): 34 super().clean() 35 36 self.member_profiles = [self.profile] 37 for key in self.fields.keys(): 38 if key in self.cleaned_data and self.cleaned_data[key]: 39 enrollment = Enrollment.objects.filter( 40 course_instance=self.instance, 41 personal_code=self.cleaned_data[key].upper() 42 ).first() 43 if not enrollment: 44 self.add_error(key, _('ERROR_CODE_NOT_RECOGNIZED')) 45 elif enrollment.user_profile in self.member_profiles: 46 self.add_error(key, _('ERROR_USER_ALREADY_IN_GROUP')) 47 else: 48 self.member_profiles.append(enrollment.user_profile) 49 50 if not self.errors and len(self.member_profiles) > 1: 51 if StudentGroup.get_exact(self.instance, self.member_profiles): 52 self.add_error(None, _('ERROR_GROUP_ALREADY_EXISTS')) 53 54 return self.cleaned_data 55 56 def save(self): 57 group = StudentGroup(course_instance=self.instance) 58 group.save() 59 group.members.add(*self.member_profiles) 60 return group 61 62 63 class GroupSelectForm(forms.Form): 64 group = forms.IntegerField(required=True) 65 66 def __init__(self, *args, **kwargs): 67 self.profile = kwargs.pop('profile') 68 self.instance = kwargs.pop('instance') 69 super().__init__(*args, **kwargs) 70 71 def clean(self): 72 super().clean() 73 self.selected_group = None 74 if 'group' in self.cleaned_data: 75 gid = self.cleaned_data['group'] 76 if gid != 0: 77 group = self.profile.groups.filter(id=gid, course_instance=self.instance).first() 78 if group: 79 self.selected_group = group 80 else: 81 self.add_error('group', 'Invalid group id') 82 return self.cleaned_data 83 84 def save(self) -> Enrollment: 85 enrollment = self.instance.get_enrollment_for(self.profile.user) 86 enrollment.selected_group = self.selected_group 87 enrollment.save() 88 # Deactivate all drafts when changing groups. 89 SubmissionDraft.objects.filter( 90 exercise__course_module__course_instance=self.instance, 91 submitter=self.profile, 92 active=True, 93 ).update(active=False) 94 return enrollment 95 96 97 class GroupEditForm(forms.ModelForm): 98 99 members = UsersSearchSelectField(queryset=UserProfile.objects.none(), 100 initial_queryset=UserProfile.objects.none(), 101 label=_('LABEL_MEMBERS'), 102 ) 103 104 def __init__(self, *args: Any, **kwargs: Any) -> None: 105 course_instance = kwargs.get('instance').course_instance 106 super().__init__(*args, **kwargs) 107 self.fields['members'].widget.search_api_url = api_reverse( 108 "course-students-list", 109 kwargs={'course_id': course_instance.id}, 110 ) 111 self.fields["members"].queryset = course_instance.get_student_profiles() 112 # Course staff may use this form for modifying and creating student groups. 113 # If an existing group is being modified, its current members must be 114 # set to the initial queryset. 115 if self.instance.id: 116 self.fields["members"].initial_queryset = self.instance.members.all() 117 118 class Meta: 119 model = StudentGroup 120 fields = ['members'] 121 122 123 class EnrollStudentsForm(forms.Form): 124 125 user_profiles = UsersSearchSelectField(queryset=UserProfile.objects.all(), 126 initial_queryset=UserProfile.objects.none(), 127 label=_('LABEL_USERS'), 128 required=False, 129 ) 130 131 def __init__(self, *args: Any, **kwargs: Any) -> None: 132 self.instance = kwargs.pop('instance') 133 super().__init__(*args, **kwargs) 134 self.fields['user_profiles'].widget.search_api_url = api_reverse("user-list") 135 if self.instance.sis_id: 136 self.fields['sis'] = forms.BooleanField( 137 required=False, 138 label=_('LABEL_ENROLL_FROM_SIS'), 139 ) 140 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/course/forms.py b/course/forms.py --- a/course/forms.py +++ b/course/forms.py @@ -5,6 +5,7 @@ from django.utils.safestring import mark_safe from django.utils.text import format_lazy from django.utils.translation import gettext_lazy as _ +from django.db.models import Count from aplus.api import api_reverse from exercise.models import SubmissionDraft @@ -115,6 +116,30 @@ if self.instance.id: self.fields["members"].initial_queryset = self.instance.members.all() + def clean(self): + super().clean() + members = self.cleaned_data.get('members') + if members: + if len(members) == 1: + self.add_error('members', _('MUST_HAVE_TWO_MEMBERS')) + course_instance = self.instance.course_instance + # Filter all groups with course instance and that have one or more similar members as in the members list + filtered_groups = StudentGroup.objects.filter(course_instance=course_instance, members__in=members) + # Count number of members in each group + groups_with_member_count = filtered_groups.annotate(member_count=Count('members')) + # Filter only those groups that have same number of members + groups_with_exact_member_count = groups_with_member_count.filter(member_count=len(members)) + # Loop through the returned groups and check if any group with exact same members exist + group_exists = False + for group in groups_with_exact_member_count: + group_members = group.members.all() + if list(group_members) == list(members): + group_exists = True + if group_exists: + self.add_error('members', _('ERROR_GROUP_ALREADY_EXISTS')) + return self.cleaned_data + + class Meta: model = StudentGroup fields = ['members']
{"golden_diff": "diff --git a/course/forms.py b/course/forms.py\n--- a/course/forms.py\n+++ b/course/forms.py\n@@ -5,6 +5,7 @@\n from django.utils.safestring import mark_safe\n from django.utils.text import format_lazy\n from django.utils.translation import gettext_lazy as _\n+from django.db.models import Count\n \n from aplus.api import api_reverse\n from exercise.models import SubmissionDraft\n@@ -115,6 +116,30 @@\n if self.instance.id:\n self.fields[\"members\"].initial_queryset = self.instance.members.all()\n \n+ def clean(self):\n+ super().clean()\n+ members = self.cleaned_data.get('members')\n+ if members:\n+ if len(members) == 1:\n+ self.add_error('members', _('MUST_HAVE_TWO_MEMBERS'))\n+ course_instance = self.instance.course_instance\n+ # Filter all groups with course instance and that have one or more similar members as in the members list\n+ filtered_groups = StudentGroup.objects.filter(course_instance=course_instance, members__in=members)\n+ # Count number of members in each group\n+ groups_with_member_count = filtered_groups.annotate(member_count=Count('members'))\n+ # Filter only those groups that have same number of members\n+ groups_with_exact_member_count = groups_with_member_count.filter(member_count=len(members))\n+ # Loop through the returned groups and check if any group with exact same members exist\n+ group_exists = False\n+ for group in groups_with_exact_member_count:\n+ group_members = group.members.all()\n+ if list(group_members) == list(members):\n+ group_exists = True\n+ if group_exists:\n+ self.add_error('members', _('ERROR_GROUP_ALREADY_EXISTS'))\n+ return self.cleaned_data\n+\n+\n class Meta:\n model = StudentGroup\n fields = ['members']\n", "issue": "Course staff may create duplicate student groups\nCourse staff may create student groups (course/models.py class StudentGroup) that contain exactly the same group members as an existing group. Duplicate groups should not be allowed. The course staff UI for editing groups is in the URL http://localhost:8000/def/current/teachers/groups/ (in the left navigation menu, it is the \"Groups\" link under the heading Course staff).\r\n\r\nCourse staff may also create new groups (or edit existing groups) that are empty (no members) or only have one member. Groups should always have at least two members.\r\n\r\nWhen students create groups in the \"form a group\" page (with user personal codes), A+ already prevents empty and duplicate groups.\n", "before_files": [{"content": "from typing import Any\n\nfrom django import forms\nfrom django.contrib.humanize.templatetags.humanize import ordinal\nfrom django.utils.safestring import mark_safe\nfrom django.utils.text import format_lazy\nfrom django.utils.translation import gettext_lazy as _\n\nfrom aplus.api import api_reverse\nfrom exercise.models import SubmissionDraft\nfrom lib.fields import UsersSearchSelectField\nfrom .models import Enrollment, StudentGroup\nfrom userprofile.models import UserProfile\n\n\nclass GroupsForm(forms.Form):\n\n def __init__(self, *args, **kwargs):\n self.profile = kwargs.pop('profile')\n self.instance = kwargs.pop('instance')\n self.content = kwargs.pop('content')\n super().__init__(*args, **kwargs)\n total = self.content.total()\n min_size = max(total.min_group_size, 2)\n max_size = total.max_group_size\n\n for n in range(2, max_size + 1):\n widget = forms.TextInput(attrs={'class':'form-control'})\n field = forms.CharField(widget=widget, required=(n <= min_size))\n field.label = mark_safe(format_lazy(_('GROUP_MEMBER_LABEL -- {num}'), num=ordinal(n)))\n self.fields['member{:d}'.format(n)] = field\n\n def clean(self):\n super().clean()\n\n self.member_profiles = [self.profile]\n for key in self.fields.keys():\n if key in self.cleaned_data and self.cleaned_data[key]:\n enrollment = Enrollment.objects.filter(\n course_instance=self.instance,\n personal_code=self.cleaned_data[key].upper()\n ).first()\n if not enrollment:\n self.add_error(key, _('ERROR_CODE_NOT_RECOGNIZED'))\n elif enrollment.user_profile in self.member_profiles:\n self.add_error(key, _('ERROR_USER_ALREADY_IN_GROUP'))\n else:\n self.member_profiles.append(enrollment.user_profile)\n\n if not self.errors and len(self.member_profiles) > 1:\n if StudentGroup.get_exact(self.instance, self.member_profiles):\n self.add_error(None, _('ERROR_GROUP_ALREADY_EXISTS'))\n\n return self.cleaned_data\n\n def save(self):\n group = StudentGroup(course_instance=self.instance)\n group.save()\n group.members.add(*self.member_profiles)\n return group\n\n\nclass GroupSelectForm(forms.Form):\n group = forms.IntegerField(required=True)\n\n def __init__(self, *args, **kwargs):\n self.profile = kwargs.pop('profile')\n self.instance = kwargs.pop('instance')\n super().__init__(*args, **kwargs)\n\n def clean(self):\n super().clean()\n self.selected_group = None\n if 'group' in self.cleaned_data:\n gid = self.cleaned_data['group']\n if gid != 0:\n group = self.profile.groups.filter(id=gid, course_instance=self.instance).first()\n if group:\n self.selected_group = group\n else:\n self.add_error('group', 'Invalid group id')\n return self.cleaned_data\n\n def save(self) -> Enrollment:\n enrollment = self.instance.get_enrollment_for(self.profile.user)\n enrollment.selected_group = self.selected_group\n enrollment.save()\n # Deactivate all drafts when changing groups.\n SubmissionDraft.objects.filter(\n exercise__course_module__course_instance=self.instance,\n submitter=self.profile,\n active=True,\n ).update(active=False)\n return enrollment\n\n\nclass GroupEditForm(forms.ModelForm):\n\n members = UsersSearchSelectField(queryset=UserProfile.objects.none(),\n initial_queryset=UserProfile.objects.none(),\n label=_('LABEL_MEMBERS'),\n )\n\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n course_instance = kwargs.get('instance').course_instance\n super().__init__(*args, **kwargs)\n self.fields['members'].widget.search_api_url = api_reverse(\n \"course-students-list\",\n kwargs={'course_id': course_instance.id},\n )\n self.fields[\"members\"].queryset = course_instance.get_student_profiles()\n # Course staff may use this form for modifying and creating student groups.\n # If an existing group is being modified, its current members must be\n # set to the initial queryset.\n if self.instance.id:\n self.fields[\"members\"].initial_queryset = self.instance.members.all()\n\n class Meta:\n model = StudentGroup\n fields = ['members']\n\n\nclass EnrollStudentsForm(forms.Form):\n\n user_profiles = UsersSearchSelectField(queryset=UserProfile.objects.all(),\n initial_queryset=UserProfile.objects.none(),\n label=_('LABEL_USERS'),\n required=False,\n )\n\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n self.instance = kwargs.pop('instance')\n super().__init__(*args, **kwargs)\n self.fields['user_profiles'].widget.search_api_url = api_reverse(\"user-list\")\n if self.instance.sis_id:\n self.fields['sis'] = forms.BooleanField(\n required=False,\n label=_('LABEL_ENROLL_FROM_SIS'),\n )\n", "path": "course/forms.py"}], "after_files": [{"content": "from typing import Any\n\nfrom django import forms\nfrom django.contrib.humanize.templatetags.humanize import ordinal\nfrom django.utils.safestring import mark_safe\nfrom django.utils.text import format_lazy\nfrom django.utils.translation import gettext_lazy as _\nfrom django.db.models import Count\n\nfrom aplus.api import api_reverse\nfrom exercise.models import SubmissionDraft\nfrom lib.fields import UsersSearchSelectField\nfrom .models import Enrollment, StudentGroup\nfrom userprofile.models import UserProfile\n\n\nclass GroupsForm(forms.Form):\n\n def __init__(self, *args, **kwargs):\n self.profile = kwargs.pop('profile')\n self.instance = kwargs.pop('instance')\n self.content = kwargs.pop('content')\n super().__init__(*args, **kwargs)\n total = self.content.total()\n min_size = max(total.min_group_size, 2)\n max_size = total.max_group_size\n\n for n in range(2, max_size + 1):\n widget = forms.TextInput(attrs={'class':'form-control'})\n field = forms.CharField(widget=widget, required=(n <= min_size))\n field.label = mark_safe(format_lazy(_('GROUP_MEMBER_LABEL -- {num}'), num=ordinal(n)))\n self.fields['member{:d}'.format(n)] = field\n\n def clean(self):\n super().clean()\n\n self.member_profiles = [self.profile]\n for key in self.fields.keys():\n if key in self.cleaned_data and self.cleaned_data[key]:\n enrollment = Enrollment.objects.filter(\n course_instance=self.instance,\n personal_code=self.cleaned_data[key].upper()\n ).first()\n if not enrollment:\n self.add_error(key, _('ERROR_CODE_NOT_RECOGNIZED'))\n elif enrollment.user_profile in self.member_profiles:\n self.add_error(key, _('ERROR_USER_ALREADY_IN_GROUP'))\n else:\n self.member_profiles.append(enrollment.user_profile)\n\n if not self.errors and len(self.member_profiles) > 1:\n if StudentGroup.get_exact(self.instance, self.member_profiles):\n self.add_error(None, _('ERROR_GROUP_ALREADY_EXISTS'))\n\n return self.cleaned_data\n\n def save(self):\n group = StudentGroup(course_instance=self.instance)\n group.save()\n group.members.add(*self.member_profiles)\n return group\n\n\nclass GroupSelectForm(forms.Form):\n group = forms.IntegerField(required=True)\n\n def __init__(self, *args, **kwargs):\n self.profile = kwargs.pop('profile')\n self.instance = kwargs.pop('instance')\n super().__init__(*args, **kwargs)\n\n def clean(self):\n super().clean()\n self.selected_group = None\n if 'group' in self.cleaned_data:\n gid = self.cleaned_data['group']\n if gid != 0:\n group = self.profile.groups.filter(id=gid, course_instance=self.instance).first()\n if group:\n self.selected_group = group\n else:\n self.add_error('group', 'Invalid group id')\n return self.cleaned_data\n\n def save(self) -> Enrollment:\n enrollment = self.instance.get_enrollment_for(self.profile.user)\n enrollment.selected_group = self.selected_group\n enrollment.save()\n # Deactivate all drafts when changing groups.\n SubmissionDraft.objects.filter(\n exercise__course_module__course_instance=self.instance,\n submitter=self.profile,\n active=True,\n ).update(active=False)\n return enrollment\n\n\nclass GroupEditForm(forms.ModelForm):\n\n members = UsersSearchSelectField(queryset=UserProfile.objects.none(),\n initial_queryset=UserProfile.objects.none(),\n label=_('LABEL_MEMBERS'),\n )\n\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n course_instance = kwargs.get('instance').course_instance\n super().__init__(*args, **kwargs)\n self.fields['members'].widget.search_api_url = api_reverse(\n \"course-students-list\",\n kwargs={'course_id': course_instance.id},\n )\n self.fields[\"members\"].queryset = course_instance.get_student_profiles()\n # Course staff may use this form for modifying and creating student groups.\n # If an existing group is being modified, its current members must be\n # set to the initial queryset.\n if self.instance.id:\n self.fields[\"members\"].initial_queryset = self.instance.members.all()\n\n def clean(self):\n super().clean()\n members = self.cleaned_data.get('members')\n if members:\n if len(members) == 1:\n self.add_error('members', _('MUST_HAVE_TWO_MEMBERS'))\n course_instance = self.instance.course_instance\n # Filter all groups with course instance and that have one or more similar members as in the members list\n filtered_groups = StudentGroup.objects.filter(course_instance=course_instance, members__in=members)\n # Count number of members in each group\n groups_with_member_count = filtered_groups.annotate(member_count=Count('members'))\n # Filter only those groups that have same number of members\n groups_with_exact_member_count = groups_with_member_count.filter(member_count=len(members))\n # Loop through the returned groups and check if any group with exact same members exist\n group_exists = False\n for group in groups_with_exact_member_count:\n group_members = group.members.all()\n if list(group_members) == list(members):\n group_exists = True\n if group_exists:\n self.add_error('members', _('ERROR_GROUP_ALREADY_EXISTS'))\n return self.cleaned_data\n\n\n class Meta:\n model = StudentGroup\n fields = ['members']\n\n\nclass EnrollStudentsForm(forms.Form):\n\n user_profiles = UsersSearchSelectField(queryset=UserProfile.objects.all(),\n initial_queryset=UserProfile.objects.none(),\n label=_('LABEL_USERS'),\n required=False,\n )\n\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n self.instance = kwargs.pop('instance')\n super().__init__(*args, **kwargs)\n self.fields['user_profiles'].widget.search_api_url = api_reverse(\"user-list\")\n if self.instance.sis_id:\n self.fields['sis'] = forms.BooleanField(\n required=False,\n label=_('LABEL_ENROLL_FROM_SIS'),\n )\n", "path": "course/forms.py"}]}
1,768
399
gh_patches_debug_24936
rasdani/github-patches
git_diff
DDMAL__CantusDB-733
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Links to unpublished sources should not appear on Provenance detail pages Example: visit http://206.12.93.196/provenance/3665 (while logged out), click on first link. We get a 403 Forbidden error, since the source is unpublished. Unpublished sources should not be listed on the Provenance Detail page. Credit to @zhannaklimanova and her link checker script for catching this bug! --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `django/cantusdb_project/main_app/views/provenance.py` Content: ``` 1 from django.views.generic import DetailView 2 from main_app.models import Provenance 3 4 5 class ProvenanceDetailView(DetailView): 6 model = Provenance 7 context_object_name = "provenance" 8 template_name = "provenance_detail.html" 9 ``` Path: `django/cantusdb_project/main_app/views/century.py` Content: ``` 1 from django.views.generic import DetailView 2 from main_app.models import Century, Source 3 from typing import Any 4 5 6 class CenturyDetailView(DetailView): 7 model = Century 8 context_object_name = "century" 9 template_name = "century_detail.html" 10 11 def get_context_data(self, **kwargs: Any) -> dict[str, Any]: 12 context = super().get_context_data(**kwargs) 13 century = self.get_object() 14 user = self.request.user 15 display_unpublished = user.is_authenticated 16 sources = Source.objects.filter(century=century) 17 if not display_unpublished: 18 sources = sources.filter(published=True) 19 sources = sources.only("title", "id") 20 context["sources"] = sources 21 return context 22 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/django/cantusdb_project/main_app/views/century.py b/django/cantusdb_project/main_app/views/century.py --- a/django/cantusdb_project/main_app/views/century.py +++ b/django/cantusdb_project/main_app/views/century.py @@ -16,6 +16,6 @@ sources = Source.objects.filter(century=century) if not display_unpublished: sources = sources.filter(published=True) - sources = sources.only("title", "id") + sources = sources.only("title", "id", "siglum") context["sources"] = sources return context diff --git a/django/cantusdb_project/main_app/views/provenance.py b/django/cantusdb_project/main_app/views/provenance.py --- a/django/cantusdb_project/main_app/views/provenance.py +++ b/django/cantusdb_project/main_app/views/provenance.py @@ -1,8 +1,21 @@ from django.views.generic import DetailView -from main_app.models import Provenance +from main_app.models import Provenance, Source +from typing import Any class ProvenanceDetailView(DetailView): model = Provenance context_object_name = "provenance" template_name = "provenance_detail.html" + + def get_context_data(self, **kwargs: Any) -> dict[str, Any]: + context = super().get_context_data(**kwargs) + provenance = self.get_object() + user = self.request.user + display_unpublished = user.is_authenticated + sources = Source.objects.filter(provenance=provenance) + if not display_unpublished: + sources = sources.filter(published=True) + sources = sources.only("title", "id", "siglum") + context["sources"] = sources + return context
{"golden_diff": "diff --git a/django/cantusdb_project/main_app/views/century.py b/django/cantusdb_project/main_app/views/century.py\n--- a/django/cantusdb_project/main_app/views/century.py\n+++ b/django/cantusdb_project/main_app/views/century.py\n@@ -16,6 +16,6 @@\n sources = Source.objects.filter(century=century)\n if not display_unpublished:\n sources = sources.filter(published=True)\n- sources = sources.only(\"title\", \"id\")\n+ sources = sources.only(\"title\", \"id\", \"siglum\")\n context[\"sources\"] = sources\n return context\ndiff --git a/django/cantusdb_project/main_app/views/provenance.py b/django/cantusdb_project/main_app/views/provenance.py\n--- a/django/cantusdb_project/main_app/views/provenance.py\n+++ b/django/cantusdb_project/main_app/views/provenance.py\n@@ -1,8 +1,21 @@\n from django.views.generic import DetailView\n-from main_app.models import Provenance\n+from main_app.models import Provenance, Source\n+from typing import Any\n \n \n class ProvenanceDetailView(DetailView):\n model = Provenance\n context_object_name = \"provenance\"\n template_name = \"provenance_detail.html\"\n+\n+ def get_context_data(self, **kwargs: Any) -> dict[str, Any]:\n+ context = super().get_context_data(**kwargs)\n+ provenance = self.get_object()\n+ user = self.request.user\n+ display_unpublished = user.is_authenticated\n+ sources = Source.objects.filter(provenance=provenance)\n+ if not display_unpublished:\n+ sources = sources.filter(published=True)\n+ sources = sources.only(\"title\", \"id\", \"siglum\")\n+ context[\"sources\"] = sources\n+ return context\n", "issue": "Links to unpublished sources should not appear on Provenance detail pages\nExample: visit http://206.12.93.196/provenance/3665 (while logged out), click on first link. We get a 403 Forbidden error, since the source is unpublished.\r\n\r\nUnpublished sources should not be listed on the Provenance Detail page.\r\n\r\nCredit to @zhannaklimanova and her link checker script for catching this bug!\n", "before_files": [{"content": "from django.views.generic import DetailView\nfrom main_app.models import Provenance\n\n\nclass ProvenanceDetailView(DetailView):\n model = Provenance\n context_object_name = \"provenance\"\n template_name = \"provenance_detail.html\"\n", "path": "django/cantusdb_project/main_app/views/provenance.py"}, {"content": "from django.views.generic import DetailView\nfrom main_app.models import Century, Source\nfrom typing import Any\n\n\nclass CenturyDetailView(DetailView):\n model = Century\n context_object_name = \"century\"\n template_name = \"century_detail.html\"\n\n def get_context_data(self, **kwargs: Any) -> dict[str, Any]:\n context = super().get_context_data(**kwargs)\n century = self.get_object()\n user = self.request.user\n display_unpublished = user.is_authenticated\n sources = Source.objects.filter(century=century)\n if not display_unpublished:\n sources = sources.filter(published=True)\n sources = sources.only(\"title\", \"id\")\n context[\"sources\"] = sources\n return context\n", "path": "django/cantusdb_project/main_app/views/century.py"}], "after_files": [{"content": "from django.views.generic import DetailView\nfrom main_app.models import Provenance, Source\nfrom typing import Any\n\n\nclass ProvenanceDetailView(DetailView):\n model = Provenance\n context_object_name = \"provenance\"\n template_name = \"provenance_detail.html\"\n\n def get_context_data(self, **kwargs: Any) -> dict[str, Any]:\n context = super().get_context_data(**kwargs)\n provenance = self.get_object()\n user = self.request.user\n display_unpublished = user.is_authenticated\n sources = Source.objects.filter(provenance=provenance)\n if not display_unpublished:\n sources = sources.filter(published=True)\n sources = sources.only(\"title\", \"id\", \"siglum\")\n context[\"sources\"] = sources\n return context\n", "path": "django/cantusdb_project/main_app/views/provenance.py"}, {"content": "from django.views.generic import DetailView\nfrom main_app.models import Century, Source\nfrom typing import Any\n\n\nclass CenturyDetailView(DetailView):\n model = Century\n context_object_name = \"century\"\n template_name = \"century_detail.html\"\n\n def get_context_data(self, **kwargs: Any) -> dict[str, Any]:\n context = super().get_context_data(**kwargs)\n century = self.get_object()\n user = self.request.user\n display_unpublished = user.is_authenticated\n sources = Source.objects.filter(century=century)\n if not display_unpublished:\n sources = sources.filter(published=True)\n sources = sources.only(\"title\", \"id\", \"siglum\")\n context[\"sources\"] = sources\n return context\n", "path": "django/cantusdb_project/main_app/views/century.py"}]}
650
425
gh_patches_debug_39481
rasdani/github-patches
git_diff
alltheplaces__alltheplaces-3317
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Spider lowes is broken During the global build at 2021-06-02-14-42-40, spider **lowes** failed with **0 features** and **0 errors**. Here's [the log](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/logs/lowes.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/lowes.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/lowes.geojson)) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `locations/spiders/lowes.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 import scrapy 3 import re 4 import json 5 from locations.items import GeojsonPointItem 6 from locations.hours import OpeningHours 7 8 9 day_mapping = {'Monday': 'Mo', 'Tuesday': 'Tu', 'Wednesday': 'We', 'Thursday': 'Th', 'Friday': 'Fr', 'Saturday': 'Sa', 10 'Sunday': 'Su'} 11 12 13 class LowesSpider(scrapy.Spider): 14 """"This spider scrapes Lowes retail store locations""" 15 name = "lowes" 16 item_attributes = { 'brand': "Lowe's", 'brand_wikidata': "Q1373493" } 17 allowed_domains = ["lowes.com"] 18 start_urls = ('https://www.lowes.com/Lowes-Stores',) 19 download_delay = 0.5 20 21 custom_settings = { 22 'USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36', 23 } 24 25 def parse_hours(self, store_hours): 26 opening_hours = OpeningHours() 27 28 for weekday in store_hours: 29 day = weekday.get('day').get('day') 30 open_time = weekday.get('day').get('open') 31 hour, minute, sec = open_time.split('.') 32 open_time_formatted = hour + ':' + minute 33 34 close = weekday.get('day').get('close') 35 hour, minute, sec = close.split('.') 36 close_time_formatted = hour + ':' + minute 37 38 if close_time_formatted in {'00:00', '24:00'}: 39 close_time_formatted = "23:59" 40 41 opening_hours.add_range(day=day_mapping[day], 42 open_time=open_time_formatted, 43 close_time=close_time_formatted) 44 45 return opening_hours.as_opening_hours() 46 47 def parse_store(self, response): 48 ref = re.search(r'.+/(.+)', response.url).group(1) 49 50 script_content = response.xpath('//script[contains(text(),"storeHours")]/text()').extract_first() 51 if not script_content: 52 return 53 54 # effectively strip off leading "window.__PRELOADED_STATE__ = " where 55 # the rest is a json blob 56 script_data = script_content.split(" = ", 1)[-1] 57 json_data = json.loads(script_data) 58 store_hours = json_data.get('storeHours') 59 60 state_texts = response.xpath('//span[@itemprop="addressRegion"]/text()').extract() 61 properties = { 62 'lat': float(json_data['storeDetails']['lat']), 63 'lon': float(json_data['storeDetails']['long']), 64 'ref': ref, 65 'addr_full': response.xpath('normalize-space(//span[@itemprop="streetAddress"]/text())').extract_first(), 66 'city': response.xpath('normalize-space(//span[@itemprop="addressLocality"]/text())').extract_first(), 67 'state': " ".join(text.strip() for text in state_texts if text.strip()), 68 'postcode': response.xpath('normalize-space(//span[@itemprop="postalCode"]/text())').extract_first(), 69 'phone': response.xpath('normalize-space(//meta[@itemprop="telephone"]/@content)').extract_first(), 70 'website': response.request.url, 71 'opening_hours': self.parse_hours(store_hours), 72 'extras': { 73 'amenity:toilets': True, 74 }, 75 } 76 77 yield GeojsonPointItem(**properties) 78 79 def parse_state(self, response): 80 city_urls = response.xpath('//div[@class="v-spacing-small"]/a/@href').extract() 81 for path in city_urls: 82 yield scrapy.Request(response.urljoin(path), callback=self.parse_store) 83 84 def parse(self, response): 85 urls = response.xpath('//div[@id="mainContent"]//li[@role="listitem"]/a/@href').extract() 86 for path in urls: 87 yield scrapy.Request(response.urljoin(path), callback=self.parse_state) 88 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/locations/spiders/lowes.py b/locations/spiders/lowes.py --- a/locations/spiders/lowes.py +++ b/locations/spiders/lowes.py @@ -6,16 +6,23 @@ from locations.hours import OpeningHours -day_mapping = {'Monday': 'Mo', 'Tuesday': 'Tu', 'Wednesday': 'We', 'Thursday': 'Th', 'Friday': 'Fr', 'Saturday': 'Sa', - 'Sunday': 'Su'} +day_mapping = { + 'Monday': 'Mo', + 'Tuesday': 'Tu', + 'Wednesday': 'We', + 'Thursday': 'Th', + 'Friday': 'Fr', + 'Saturday': 'Sa', + 'Sunday': 'Su', +} class LowesSpider(scrapy.Spider): """"This spider scrapes Lowes retail store locations""" name = "lowes" - item_attributes = { 'brand': "Lowe's", 'brand_wikidata': "Q1373493" } + item_attributes = {'brand': "Lowe's", 'brand_wikidata': "Q1373493"} allowed_domains = ["lowes.com"] - start_urls = ('https://www.lowes.com/Lowes-Stores',) + start_urls = ('https://www.lowes.com/sitemap/store0.xml',) download_delay = 0.5 custom_settings = { @@ -59,14 +66,14 @@ state_texts = response.xpath('//span[@itemprop="addressRegion"]/text()').extract() properties = { - 'lat': float(json_data['storeDetails']['lat']), - 'lon': float(json_data['storeDetails']['long']), - 'ref': ref, - 'addr_full': response.xpath('normalize-space(//span[@itemprop="streetAddress"]/text())').extract_first(), - 'city': response.xpath('normalize-space(//span[@itemprop="addressLocality"]/text())').extract_first(), - 'state': " ".join(text.strip() for text in state_texts if text.strip()), - 'postcode': response.xpath('normalize-space(//span[@itemprop="postalCode"]/text())').extract_first(), - 'phone': response.xpath('normalize-space(//meta[@itemprop="telephone"]/@content)').extract_first(), + 'lat': json_data['storeDetails']['lat'], + 'lon': json_data['storeDetails']['long'], + 'ref': json_data['storeDetails']['id'], + 'addr_full': json_data['storeDetails']['address'], + 'city': json_data['storeDetails']['city'], + 'state': json_data['storeDetails']['state'], + 'postcode': json_data['storeDetails']['zip'], + 'phone': json_data['storeDetails']['phone'], 'website': response.request.url, 'opening_hours': self.parse_hours(store_hours), 'extras': { @@ -76,12 +83,9 @@ yield GeojsonPointItem(**properties) - def parse_state(self, response): - city_urls = response.xpath('//div[@class="v-spacing-small"]/a/@href').extract() - for path in city_urls: - yield scrapy.Request(response.urljoin(path), callback=self.parse_store) - def parse(self, response): - urls = response.xpath('//div[@id="mainContent"]//li[@role="listitem"]/a/@href').extract() - for path in urls: - yield scrapy.Request(response.urljoin(path), callback=self.parse_state) + response.selector.remove_namespaces() + urls = response.xpath('//url/loc/text()').extract() + + for url in urls: + yield scrapy.Request(url, callback=self.parse_store)
{"golden_diff": "diff --git a/locations/spiders/lowes.py b/locations/spiders/lowes.py\n--- a/locations/spiders/lowes.py\n+++ b/locations/spiders/lowes.py\n@@ -6,16 +6,23 @@\n from locations.hours import OpeningHours\n \n \n-day_mapping = {'Monday': 'Mo', 'Tuesday': 'Tu', 'Wednesday': 'We', 'Thursday': 'Th', 'Friday': 'Fr', 'Saturday': 'Sa',\n- 'Sunday': 'Su'}\n+day_mapping = {\n+ 'Monday': 'Mo',\n+ 'Tuesday': 'Tu',\n+ 'Wednesday': 'We',\n+ 'Thursday': 'Th',\n+ 'Friday': 'Fr',\n+ 'Saturday': 'Sa',\n+ 'Sunday': 'Su',\n+}\n \n \n class LowesSpider(scrapy.Spider):\n \"\"\"\"This spider scrapes Lowes retail store locations\"\"\"\n name = \"lowes\"\n- item_attributes = { 'brand': \"Lowe's\", 'brand_wikidata': \"Q1373493\" }\n+ item_attributes = {'brand': \"Lowe's\", 'brand_wikidata': \"Q1373493\"}\n allowed_domains = [\"lowes.com\"]\n- start_urls = ('https://www.lowes.com/Lowes-Stores',)\n+ start_urls = ('https://www.lowes.com/sitemap/store0.xml',)\n download_delay = 0.5\n \n custom_settings = {\n@@ -59,14 +66,14 @@\n \n state_texts = response.xpath('//span[@itemprop=\"addressRegion\"]/text()').extract()\n properties = {\n- 'lat': float(json_data['storeDetails']['lat']),\n- 'lon': float(json_data['storeDetails']['long']),\n- 'ref': ref,\n- 'addr_full': response.xpath('normalize-space(//span[@itemprop=\"streetAddress\"]/text())').extract_first(),\n- 'city': response.xpath('normalize-space(//span[@itemprop=\"addressLocality\"]/text())').extract_first(),\n- 'state': \" \".join(text.strip() for text in state_texts if text.strip()),\n- 'postcode': response.xpath('normalize-space(//span[@itemprop=\"postalCode\"]/text())').extract_first(),\n- 'phone': response.xpath('normalize-space(//meta[@itemprop=\"telephone\"]/@content)').extract_first(),\n+ 'lat': json_data['storeDetails']['lat'],\n+ 'lon': json_data['storeDetails']['long'],\n+ 'ref': json_data['storeDetails']['id'],\n+ 'addr_full': json_data['storeDetails']['address'],\n+ 'city': json_data['storeDetails']['city'],\n+ 'state': json_data['storeDetails']['state'],\n+ 'postcode': json_data['storeDetails']['zip'],\n+ 'phone': json_data['storeDetails']['phone'],\n 'website': response.request.url,\n 'opening_hours': self.parse_hours(store_hours),\n 'extras': {\n@@ -76,12 +83,9 @@\n \n yield GeojsonPointItem(**properties)\n \n- def parse_state(self, response):\n- city_urls = response.xpath('//div[@class=\"v-spacing-small\"]/a/@href').extract()\n- for path in city_urls:\n- yield scrapy.Request(response.urljoin(path), callback=self.parse_store)\n-\n def parse(self, response):\n- urls = response.xpath('//div[@id=\"mainContent\"]//li[@role=\"listitem\"]/a/@href').extract()\n- for path in urls:\n- yield scrapy.Request(response.urljoin(path), callback=self.parse_state)\n+ response.selector.remove_namespaces()\n+ urls = response.xpath('//url/loc/text()').extract()\n+\n+ for url in urls:\n+ yield scrapy.Request(url, callback=self.parse_store)\n", "issue": "Spider lowes is broken\nDuring the global build at 2021-06-02-14-42-40, spider **lowes** failed with **0 features** and **0 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/logs/lowes.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/lowes.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-06-02-14-42-40/output/lowes.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport re\nimport json\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nday_mapping = {'Monday': 'Mo', 'Tuesday': 'Tu', 'Wednesday': 'We', 'Thursday': 'Th', 'Friday': 'Fr', 'Saturday': 'Sa',\n 'Sunday': 'Su'}\n\n\nclass LowesSpider(scrapy.Spider):\n \"\"\"\"This spider scrapes Lowes retail store locations\"\"\"\n name = \"lowes\"\n item_attributes = { 'brand': \"Lowe's\", 'brand_wikidata': \"Q1373493\" }\n allowed_domains = [\"lowes.com\"]\n start_urls = ('https://www.lowes.com/Lowes-Stores',)\n download_delay = 0.5\n\n custom_settings = {\n 'USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',\n }\n\n def parse_hours(self, store_hours):\n opening_hours = OpeningHours()\n\n for weekday in store_hours:\n day = weekday.get('day').get('day')\n open_time = weekday.get('day').get('open')\n hour, minute, sec = open_time.split('.')\n open_time_formatted = hour + ':' + minute\n\n close = weekday.get('day').get('close')\n hour, minute, sec = close.split('.')\n close_time_formatted = hour + ':' + minute\n\n if close_time_formatted in {'00:00', '24:00'}:\n close_time_formatted = \"23:59\"\n\n opening_hours.add_range(day=day_mapping[day],\n open_time=open_time_formatted,\n close_time=close_time_formatted)\n\n return opening_hours.as_opening_hours()\n\n def parse_store(self, response):\n ref = re.search(r'.+/(.+)', response.url).group(1)\n\n script_content = response.xpath('//script[contains(text(),\"storeHours\")]/text()').extract_first()\n if not script_content:\n return\n\n # effectively strip off leading \"window.__PRELOADED_STATE__ = \" where\n # the rest is a json blob\n script_data = script_content.split(\" = \", 1)[-1]\n json_data = json.loads(script_data)\n store_hours = json_data.get('storeHours')\n\n state_texts = response.xpath('//span[@itemprop=\"addressRegion\"]/text()').extract()\n properties = {\n 'lat': float(json_data['storeDetails']['lat']),\n 'lon': float(json_data['storeDetails']['long']),\n 'ref': ref,\n 'addr_full': response.xpath('normalize-space(//span[@itemprop=\"streetAddress\"]/text())').extract_first(),\n 'city': response.xpath('normalize-space(//span[@itemprop=\"addressLocality\"]/text())').extract_first(),\n 'state': \" \".join(text.strip() for text in state_texts if text.strip()),\n 'postcode': response.xpath('normalize-space(//span[@itemprop=\"postalCode\"]/text())').extract_first(),\n 'phone': response.xpath('normalize-space(//meta[@itemprop=\"telephone\"]/@content)').extract_first(),\n 'website': response.request.url,\n 'opening_hours': self.parse_hours(store_hours),\n 'extras': {\n 'amenity:toilets': True,\n },\n }\n\n yield GeojsonPointItem(**properties)\n\n def parse_state(self, response):\n city_urls = response.xpath('//div[@class=\"v-spacing-small\"]/a/@href').extract()\n for path in city_urls:\n yield scrapy.Request(response.urljoin(path), callback=self.parse_store)\n\n def parse(self, response):\n urls = response.xpath('//div[@id=\"mainContent\"]//li[@role=\"listitem\"]/a/@href').extract()\n for path in urls:\n yield scrapy.Request(response.urljoin(path), callback=self.parse_state)\n", "path": "locations/spiders/lowes.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport re\nimport json\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nday_mapping = {\n 'Monday': 'Mo',\n 'Tuesday': 'Tu',\n 'Wednesday': 'We',\n 'Thursday': 'Th',\n 'Friday': 'Fr',\n 'Saturday': 'Sa',\n 'Sunday': 'Su',\n}\n\n\nclass LowesSpider(scrapy.Spider):\n \"\"\"\"This spider scrapes Lowes retail store locations\"\"\"\n name = \"lowes\"\n item_attributes = {'brand': \"Lowe's\", 'brand_wikidata': \"Q1373493\"}\n allowed_domains = [\"lowes.com\"]\n start_urls = ('https://www.lowes.com/sitemap/store0.xml',)\n download_delay = 0.5\n\n custom_settings = {\n 'USER_AGENT': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36',\n }\n\n def parse_hours(self, store_hours):\n opening_hours = OpeningHours()\n\n for weekday in store_hours:\n day = weekday.get('day').get('day')\n open_time = weekday.get('day').get('open')\n hour, minute, sec = open_time.split('.')\n open_time_formatted = hour + ':' + minute\n\n close = weekday.get('day').get('close')\n hour, minute, sec = close.split('.')\n close_time_formatted = hour + ':' + minute\n\n if close_time_formatted in {'00:00', '24:00'}:\n close_time_formatted = \"23:59\"\n\n opening_hours.add_range(day=day_mapping[day],\n open_time=open_time_formatted,\n close_time=close_time_formatted)\n\n return opening_hours.as_opening_hours()\n\n def parse_store(self, response):\n ref = re.search(r'.+/(.+)', response.url).group(1)\n\n script_content = response.xpath('//script[contains(text(),\"storeHours\")]/text()').extract_first()\n if not script_content:\n return\n\n # effectively strip off leading \"window.__PRELOADED_STATE__ = \" where\n # the rest is a json blob\n script_data = script_content.split(\" = \", 1)[-1]\n json_data = json.loads(script_data)\n store_hours = json_data.get('storeHours')\n\n state_texts = response.xpath('//span[@itemprop=\"addressRegion\"]/text()').extract()\n properties = {\n 'lat': json_data['storeDetails']['lat'],\n 'lon': json_data['storeDetails']['long'],\n 'ref': json_data['storeDetails']['id'],\n 'addr_full': json_data['storeDetails']['address'],\n 'city': json_data['storeDetails']['city'],\n 'state': json_data['storeDetails']['state'],\n 'postcode': json_data['storeDetails']['zip'],\n 'phone': json_data['storeDetails']['phone'],\n 'website': response.request.url,\n 'opening_hours': self.parse_hours(store_hours),\n 'extras': {\n 'amenity:toilets': True,\n },\n }\n\n yield GeojsonPointItem(**properties)\n\n def parse(self, response):\n response.selector.remove_namespaces()\n urls = response.xpath('//url/loc/text()').extract()\n\n for url in urls:\n yield scrapy.Request(url, callback=self.parse_store)\n", "path": "locations/spiders/lowes.py"}]}
1,487
830
gh_patches_debug_28882
rasdani/github-patches
git_diff
GeotrekCE__Geotrek-admin-1391
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- WYSIWYG for static pages Client-side WYSIWYG : - http://sofish.github.io/pen/ - https://github.com/mduvall/grande.js - http://imperavi.com/redactor/ - https://github.com/tholman/zenpen --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `geotrek/flatpages/views.py` Content: ``` 1 from rest_framework import viewsets 2 3 from geotrek.flatpages.serializers import FlatPageSerializer 4 from geotrek.flatpages import models as flatpages_models 5 6 7 class FlatPageViewSet(viewsets.ModelViewSet): 8 """ 9 A viewset for viewing and editing flat pages instances. 10 """ 11 serializer_class = FlatPageSerializer 12 queryset = flatpages_models.FlatPage.objects.all() 13 ``` Path: `geotrek/flatpages/admin.py` Content: ``` 1 from django.contrib import admin 2 from django.conf import settings 3 4 from modeltranslation.admin import TranslationAdmin 5 6 from geotrek.flatpages import models as flatpages_models 7 8 9 class FlatPagesAdmin(TranslationAdmin): 10 list_display = ('title', 'published', 'publication_date', 'target') 11 search_fields = ('title', 'content') 12 13 14 if settings.FLATPAGES_ENABLED: 15 admin.site.register(flatpages_models.FlatPage, FlatPagesAdmin) 16 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/geotrek/flatpages/admin.py b/geotrek/flatpages/admin.py --- a/geotrek/flatpages/admin.py +++ b/geotrek/flatpages/admin.py @@ -2,6 +2,7 @@ from django.conf import settings from modeltranslation.admin import TranslationAdmin +from tinymce.widgets import TinyMCE from geotrek.flatpages import models as flatpages_models @@ -10,6 +11,11 @@ list_display = ('title', 'published', 'publication_date', 'target') search_fields = ('title', 'content') + def formfield_for_dbfield(self, db_field, **kwargs): + if db_field.name[:7] == 'content': + return db_field.formfield(widget=TinyMCE) + return super(FlatPagesAdmin, self).formfield_for_dbfield(db_field, **kwargs) + if settings.FLATPAGES_ENABLED: admin.site.register(flatpages_models.FlatPage, FlatPagesAdmin) diff --git a/geotrek/flatpages/views.py b/geotrek/flatpages/views.py --- a/geotrek/flatpages/views.py +++ b/geotrek/flatpages/views.py @@ -1,3 +1,4 @@ +from rest_framework import permissions as rest_permissions from rest_framework import viewsets from geotrek.flatpages.serializers import FlatPageSerializer @@ -8,5 +9,9 @@ """ A viewset for viewing and editing flat pages instances. """ + model = flatpages_models.FlatPage serializer_class = FlatPageSerializer - queryset = flatpages_models.FlatPage.objects.all() + permission_classes = [rest_permissions.DjangoModelPermissionsOrAnonReadOnly] + + def get_queryset(self): + return flatpages_models.FlatPage.objects.filter(published=True)
{"golden_diff": "diff --git a/geotrek/flatpages/admin.py b/geotrek/flatpages/admin.py\n--- a/geotrek/flatpages/admin.py\n+++ b/geotrek/flatpages/admin.py\n@@ -2,6 +2,7 @@\n from django.conf import settings\n \n from modeltranslation.admin import TranslationAdmin\n+from tinymce.widgets import TinyMCE\n \n from geotrek.flatpages import models as flatpages_models\n \n@@ -10,6 +11,11 @@\n list_display = ('title', 'published', 'publication_date', 'target')\n search_fields = ('title', 'content')\n \n+ def formfield_for_dbfield(self, db_field, **kwargs):\n+ if db_field.name[:7] == 'content':\n+ return db_field.formfield(widget=TinyMCE)\n+ return super(FlatPagesAdmin, self).formfield_for_dbfield(db_field, **kwargs)\n+\n \n if settings.FLATPAGES_ENABLED:\n admin.site.register(flatpages_models.FlatPage, FlatPagesAdmin)\ndiff --git a/geotrek/flatpages/views.py b/geotrek/flatpages/views.py\n--- a/geotrek/flatpages/views.py\n+++ b/geotrek/flatpages/views.py\n@@ -1,3 +1,4 @@\n+from rest_framework import permissions as rest_permissions\n from rest_framework import viewsets\n \n from geotrek.flatpages.serializers import FlatPageSerializer\n@@ -8,5 +9,9 @@\n \"\"\"\n A viewset for viewing and editing flat pages instances.\n \"\"\"\n+ model = flatpages_models.FlatPage\n serializer_class = FlatPageSerializer\n- queryset = flatpages_models.FlatPage.objects.all()\n+ permission_classes = [rest_permissions.DjangoModelPermissionsOrAnonReadOnly]\n+\n+ def get_queryset(self):\n+ return flatpages_models.FlatPage.objects.filter(published=True)\n", "issue": "WYSIWYG for static pages\nClient-side WYSIWYG : \n- http://sofish.github.io/pen/\n- https://github.com/mduvall/grande.js\n- http://imperavi.com/redactor/\n- https://github.com/tholman/zenpen\n\n", "before_files": [{"content": "from rest_framework import viewsets\n\nfrom geotrek.flatpages.serializers import FlatPageSerializer\nfrom geotrek.flatpages import models as flatpages_models\n\n\nclass FlatPageViewSet(viewsets.ModelViewSet):\n \"\"\"\n A viewset for viewing and editing flat pages instances.\n \"\"\"\n serializer_class = FlatPageSerializer\n queryset = flatpages_models.FlatPage.objects.all()\n", "path": "geotrek/flatpages/views.py"}, {"content": "from django.contrib import admin\nfrom django.conf import settings\n\nfrom modeltranslation.admin import TranslationAdmin\n\nfrom geotrek.flatpages import models as flatpages_models\n\n\nclass FlatPagesAdmin(TranslationAdmin):\n list_display = ('title', 'published', 'publication_date', 'target')\n search_fields = ('title', 'content')\n\n\nif settings.FLATPAGES_ENABLED:\n admin.site.register(flatpages_models.FlatPage, FlatPagesAdmin)\n", "path": "geotrek/flatpages/admin.py"}], "after_files": [{"content": "from rest_framework import permissions as rest_permissions\nfrom rest_framework import viewsets\n\nfrom geotrek.flatpages.serializers import FlatPageSerializer\nfrom geotrek.flatpages import models as flatpages_models\n\n\nclass FlatPageViewSet(viewsets.ModelViewSet):\n \"\"\"\n A viewset for viewing and editing flat pages instances.\n \"\"\"\n model = flatpages_models.FlatPage\n serializer_class = FlatPageSerializer\n permission_classes = [rest_permissions.DjangoModelPermissionsOrAnonReadOnly]\n\n def get_queryset(self):\n return flatpages_models.FlatPage.objects.filter(published=True)\n", "path": "geotrek/flatpages/views.py"}, {"content": "from django.contrib import admin\nfrom django.conf import settings\n\nfrom modeltranslation.admin import TranslationAdmin\nfrom tinymce.widgets import TinyMCE\n\nfrom geotrek.flatpages import models as flatpages_models\n\n\nclass FlatPagesAdmin(TranslationAdmin):\n list_display = ('title', 'published', 'publication_date', 'target')\n search_fields = ('title', 'content')\n\n def formfield_for_dbfield(self, db_field, **kwargs):\n if db_field.name[:7] == 'content':\n return db_field.formfield(widget=TinyMCE)\n return super(FlatPagesAdmin, self).formfield_for_dbfield(db_field, **kwargs)\n\n\nif settings.FLATPAGES_ENABLED:\n admin.site.register(flatpages_models.FlatPage, FlatPagesAdmin)\n", "path": "geotrek/flatpages/admin.py"}]}
559
400
gh_patches_debug_7795
rasdani/github-patches
git_diff
cloud-custodian__cloud-custodian-4076
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- azure - unpinn EventGrid SDK version We need AdvancedFilters to be added to the stable version. https://pypi.org/project/azure-mgmt-eventgrid/ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `tools/c7n_azure/setup.py` Content: ``` 1 # Copyright 2018 Capital One Services, LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 from io import open 16 from os import path 17 from setuptools import setup, find_packages 18 19 # read the contents of your README file 20 this_directory = path.abspath(path.dirname(__file__)) 21 readme = path.join(this_directory, 'readme.md') 22 long_description = '' 23 if path.exists(readme): 24 with open(readme, encoding='utf-8') as f: 25 long_description = f.read() 26 27 setup( 28 name="c7n_azure", 29 version='0.5.3', 30 description="Cloud Custodian - Azure Support", 31 long_description=long_description, 32 long_description_content_type='text/markdown', 33 classifiers=[ 34 "Topic :: System :: Systems Administration", 35 "Topic :: System :: Distributed Computing" 36 ], 37 url="https://github.com/cloud-custodian/cloud-custodian", 38 license="Apache-2.0", 39 packages=find_packages(), 40 entry_points={ 41 "custodian.resources": [ 42 'azure = c7n_azure.entry:initialize_azure'] 43 }, 44 install_requires=["azure-mgmt-authorization", 45 "azure-mgmt-applicationinsights==0.1.1", 46 "azure-mgmt-batch", 47 "azure-mgmt-cognitiveservices", 48 "azure-mgmt-cosmosdb", 49 "azure-mgmt-compute", 50 "azure-mgmt-cdn", 51 "azure-mgmt-containerregistry", 52 "azure-mgmt-containerservice", 53 "azure-mgmt-datalake-store", 54 "azure-mgmt-datafactory", 55 "azure-mgmt-iothub", 56 "azure-mgmt-keyvault", 57 "azure-mgmt-managementgroups", 58 "azure-mgmt-network", 59 "azure-mgmt-redis", 60 "azure-mgmt-resource==2.1.0", 61 "azure-mgmt-sql", 62 "azure-mgmt-storage", 63 "azure-mgmt-web", 64 "azure-mgmt-monitor", 65 "azure-mgmt-policyinsights", 66 "azure-mgmt-eventgrid==2.0.0rc2", # RC2 supports AdvancedFilters 67 "azure-graphrbac", 68 "azure-keyvault", 69 "azure-storage-blob", 70 "azure-storage-queue", 71 "distlib", 72 "requests", 73 "PyJWT", 74 "c7n", 75 "requests", 76 "azure-cli-core", 77 "adal", 78 "backports.functools_lru_cache", 79 "futures>=3.1.1", 80 "netaddr"], 81 package_data={str(''): [str('function_binding_resources/bin/*.dll'), 82 str('function_binding_resources/*.csproj'), 83 str('function_binding_resources/bin/*.json')]} 84 ) 85 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/tools/c7n_azure/setup.py b/tools/c7n_azure/setup.py --- a/tools/c7n_azure/setup.py +++ b/tools/c7n_azure/setup.py @@ -63,7 +63,7 @@ "azure-mgmt-web", "azure-mgmt-monitor", "azure-mgmt-policyinsights", - "azure-mgmt-eventgrid==2.0.0rc2", # RC2 supports AdvancedFilters + "azure-mgmt-eventgrid", "azure-graphrbac", "azure-keyvault", "azure-storage-blob",
{"golden_diff": "diff --git a/tools/c7n_azure/setup.py b/tools/c7n_azure/setup.py\n--- a/tools/c7n_azure/setup.py\n+++ b/tools/c7n_azure/setup.py\n@@ -63,7 +63,7 @@\n \"azure-mgmt-web\",\n \"azure-mgmt-monitor\",\n \"azure-mgmt-policyinsights\",\n- \"azure-mgmt-eventgrid==2.0.0rc2\", # RC2 supports AdvancedFilters\n+ \"azure-mgmt-eventgrid\",\n \"azure-graphrbac\",\n \"azure-keyvault\",\n \"azure-storage-blob\",\n", "issue": "azure - unpinn EventGrid SDK version\nWe need AdvancedFilters to be added to the stable version.\r\n\r\nhttps://pypi.org/project/azure-mgmt-eventgrid/\n", "before_files": [{"content": "# Copyright 2018 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom io import open\nfrom os import path\nfrom setuptools import setup, find_packages\n\n# read the contents of your README file\nthis_directory = path.abspath(path.dirname(__file__))\nreadme = path.join(this_directory, 'readme.md')\nlong_description = ''\nif path.exists(readme):\n with open(readme, encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name=\"c7n_azure\",\n version='0.5.3',\n description=\"Cloud Custodian - Azure Support\",\n long_description=long_description,\n long_description_content_type='text/markdown',\n classifiers=[\n \"Topic :: System :: Systems Administration\",\n \"Topic :: System :: Distributed Computing\"\n ],\n url=\"https://github.com/cloud-custodian/cloud-custodian\",\n license=\"Apache-2.0\",\n packages=find_packages(),\n entry_points={\n \"custodian.resources\": [\n 'azure = c7n_azure.entry:initialize_azure']\n },\n install_requires=[\"azure-mgmt-authorization\",\n \"azure-mgmt-applicationinsights==0.1.1\",\n \"azure-mgmt-batch\",\n \"azure-mgmt-cognitiveservices\",\n \"azure-mgmt-cosmosdb\",\n \"azure-mgmt-compute\",\n \"azure-mgmt-cdn\",\n \"azure-mgmt-containerregistry\",\n \"azure-mgmt-containerservice\",\n \"azure-mgmt-datalake-store\",\n \"azure-mgmt-datafactory\",\n \"azure-mgmt-iothub\",\n \"azure-mgmt-keyvault\",\n \"azure-mgmt-managementgroups\",\n \"azure-mgmt-network\",\n \"azure-mgmt-redis\",\n \"azure-mgmt-resource==2.1.0\",\n \"azure-mgmt-sql\",\n \"azure-mgmt-storage\",\n \"azure-mgmt-web\",\n \"azure-mgmt-monitor\",\n \"azure-mgmt-policyinsights\",\n \"azure-mgmt-eventgrid==2.0.0rc2\", # RC2 supports AdvancedFilters\n \"azure-graphrbac\",\n \"azure-keyvault\",\n \"azure-storage-blob\",\n \"azure-storage-queue\",\n \"distlib\",\n \"requests\",\n \"PyJWT\",\n \"c7n\",\n \"requests\",\n \"azure-cli-core\",\n \"adal\",\n \"backports.functools_lru_cache\",\n \"futures>=3.1.1\",\n \"netaddr\"],\n package_data={str(''): [str('function_binding_resources/bin/*.dll'),\n str('function_binding_resources/*.csproj'),\n str('function_binding_resources/bin/*.json')]}\n)\n", "path": "tools/c7n_azure/setup.py"}], "after_files": [{"content": "# Copyright 2018 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom io import open\nfrom os import path\nfrom setuptools import setup, find_packages\n\n# read the contents of your README file\nthis_directory = path.abspath(path.dirname(__file__))\nreadme = path.join(this_directory, 'readme.md')\nlong_description = ''\nif path.exists(readme):\n with open(readme, encoding='utf-8') as f:\n long_description = f.read()\n\nsetup(\n name=\"c7n_azure\",\n version='0.5.3',\n description=\"Cloud Custodian - Azure Support\",\n long_description=long_description,\n long_description_content_type='text/markdown',\n classifiers=[\n \"Topic :: System :: Systems Administration\",\n \"Topic :: System :: Distributed Computing\"\n ],\n url=\"https://github.com/cloud-custodian/cloud-custodian\",\n license=\"Apache-2.0\",\n packages=find_packages(),\n entry_points={\n \"custodian.resources\": [\n 'azure = c7n_azure.entry:initialize_azure']\n },\n install_requires=[\"azure-mgmt-authorization\",\n \"azure-mgmt-applicationinsights==0.1.1\",\n \"azure-mgmt-batch\",\n \"azure-mgmt-cognitiveservices\",\n \"azure-mgmt-cosmosdb\",\n \"azure-mgmt-compute\",\n \"azure-mgmt-cdn\",\n \"azure-mgmt-containerregistry\",\n \"azure-mgmt-containerservice\",\n \"azure-mgmt-datalake-store\",\n \"azure-mgmt-datafactory\",\n \"azure-mgmt-iothub\",\n \"azure-mgmt-keyvault\",\n \"azure-mgmt-managementgroups\",\n \"azure-mgmt-network\",\n \"azure-mgmt-redis\",\n \"azure-mgmt-resource==2.1.0\",\n \"azure-mgmt-sql\",\n \"azure-mgmt-storage\",\n \"azure-mgmt-web\",\n \"azure-mgmt-monitor\",\n \"azure-mgmt-policyinsights\",\n \"azure-mgmt-eventgrid\",\n \"azure-graphrbac\",\n \"azure-keyvault\",\n \"azure-storage-blob\",\n \"azure-storage-queue\",\n \"distlib\",\n \"requests\",\n \"PyJWT\",\n \"c7n\",\n \"requests\",\n \"azure-cli-core\",\n \"adal\",\n \"backports.functools_lru_cache\",\n \"futures>=3.1.1\",\n \"netaddr\"],\n package_data={str(''): [str('function_binding_resources/bin/*.dll'),\n str('function_binding_resources/*.csproj'),\n str('function_binding_resources/bin/*.json')]}\n)\n", "path": "tools/c7n_azure/setup.py"}]}
1,147
133