| problem_id (stringlengths 18-22) | source (stringclasses 1 value) | task_type (stringclasses 1 value) | in_source_id (stringlengths 13-58) | prompt (stringlengths 1.1k-10.2k) | golden_diff (stringlengths 151-4.94k) | verification_info (stringlengths 582-21k) | num_tokens (int64 271-2.05k) | num_tokens_diff (int64 47-1.02k) |
---|---|---|---|---|---|---|---|---|
gh_patches_debug_10357 | rasdani/github-patches | git_diff | Parsl__parsl-2450 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
walltime app function parameter modifies task_record func_name
**Describe the bug**
When the walltime special parameter is passed to a Parsl app the `task_record['func_name']` parameter is set to "wrapper" instead of to the function's name.
**To Reproduce**
Launch the code below using parsl version 1.2.0:
```python
import parsl
print(parsl.__version__, flush = True)
from parsl.app.app import python_app, bash_app
from parsl.config import Config
from parsl.executors import HighThroughputExecutor
@python_app
def test(stdout='std.out', stderr = 'std.err', walltime = 5):
from time import sleep
sleep(1)
Fail = 1/0
return 'Hello'
def retry_handler(exception, task_record):
print(task_record['func_name'], flush = True)
return 1
if __name__ == '__main__':
config = Config(
executors = [HighThroughputExecutor()],
retries = 2,
retry_handler = retry_handler
)
print('Loading Parsl Config', flush = True)
parsl.load(config)
fut = test()
print(fut.result())
```
It will print "wrapper" if the walltime parameter is present and "test" otherwise.
**Expected behavior**
The code should print the function's name (test).
**Environment**
- OS: Linux
- Python version: 3.10.4
- Parsl version: 1.2.0
**Distributed Environment**
- Where are you running the Parsl script from ? Local
- Where do you need the workers to run ? Local
--- END ISSUE ---
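As background on the behaviour reported above, the snippet below is a minimal, self-contained illustration of the underlying Python mechanics rather than Parsl code: a decorator that returns a bare inner function replaces the wrapped function's `__name__` with `"wrapper"`, while `functools.wraps` copies the original metadata onto the wrapper. That is exactly why `task_record['func_name']` flips to "wrapper" once the walltime path wraps the app function.

```python
import functools

def plain_decorator(f):
    def wrapper(*args, **kwargs):
        return f(*args, **kwargs)
    return wrapper

def wraps_decorator(f):
    @functools.wraps(f)  # copies __name__, __doc__, __module__, etc. onto the wrapper
    def wrapper(*args, **kwargs):
        return f(*args, **kwargs)
    return wrapper

@plain_decorator
def test_plain():
    return 'Hello'

@wraps_decorator
def test_wrapped():
    return 'Hello'

print(test_plain.__name__)    # prints "wrapper": the original name is lost
print(test_wrapped.__name__)  # prints "test_wrapped": metadata is preserved
```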
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsl/app/python.py`
Content:
```
1 import logging
2
3 import tblib.pickling_support
4 tblib.pickling_support.install()
5
6 from parsl.app.app import AppBase
7 from parsl.app.errors import wrap_error
8 from parsl.dataflow.dflow import DataFlowKernelLoader
9
10
11 logger = logging.getLogger(__name__)
12
13
14 def timeout(f, seconds):
15 def wrapper(*args, **kwargs):
16 import threading
17 import ctypes
18 import parsl.app.errors
19
20 def inject_exception(thread):
21 ctypes.pythonapi.PyThreadState_SetAsyncExc(
22 ctypes.c_long(thread),
23 ctypes.py_object(parsl.app.errors.AppTimeout)
24 )
25
26 thread = threading.current_thread().ident
27 timer = threading.Timer(seconds, inject_exception, args=[thread])
28 timer.start()
29 result = f(*args, **kwargs)
30 timer.cancel()
31 return result
32 return wrapper
33
34
35 class PythonApp(AppBase):
36 """Extends AppBase to cover the Python App."""
37
38 def __init__(self, func, data_flow_kernel=None, cache=False, executors='all', ignore_for_cache=[], join=False):
39 super().__init__(
40 wrap_error(func),
41 data_flow_kernel=data_flow_kernel,
42 executors=executors,
43 cache=cache,
44 ignore_for_cache=ignore_for_cache
45 )
46 self.join = join
47
48 def __call__(self, *args, **kwargs):
49 """This is where the call to a python app is handled.
50
51 Args:
52 - Arbitrary
53 Kwargs:
54 - Arbitrary
55
56 Returns:
57 App_fut
58
59 """
60 invocation_kwargs = {}
61 invocation_kwargs.update(self.kwargs)
62 invocation_kwargs.update(kwargs)
63
64 if self.data_flow_kernel is None:
65 dfk = DataFlowKernelLoader.dfk()
66 else:
67 dfk = self.data_flow_kernel
68
69 walltime = invocation_kwargs.get('walltime')
70 if walltime is not None:
71 func = timeout(self.func, walltime)
72 else:
73 func = self.func
74
75 app_fut = dfk.submit(func, app_args=args,
76 executors=self.executors,
77 cache=self.cache,
78 ignore_for_cache=self.ignore_for_cache,
79 app_kwargs=invocation_kwargs,
80 join=self.join)
81
82 return app_fut
83
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/parsl/app/python.py b/parsl/app/python.py
--- a/parsl/app/python.py
+++ b/parsl/app/python.py
@@ -3,6 +3,8 @@
import tblib.pickling_support
tblib.pickling_support.install()
+from functools import wraps
+
from parsl.app.app import AppBase
from parsl.app.errors import wrap_error
from parsl.dataflow.dflow import DataFlowKernelLoader
@@ -12,6 +14,7 @@
def timeout(f, seconds):
+ @wraps(f)
def wrapper(*args, **kwargs):
import threading
import ctypes
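To see why this one-line change is enough, the sketch below re-creates the patched `timeout` decorator in isolation (the exception-injection machinery is stubbed out, so this is an illustration rather than the real module) and checks that the callable handed on to the DataFlowKernel still reports the original name.

```python
from functools import wraps
import threading

def timeout(f, seconds):
    @wraps(f)  # keeps f.__name__ and the rest of its metadata on the wrapper
    def wrapper(*args, **kwargs):
        timer = threading.Timer(seconds, lambda: None)  # stand-in for the real exception injection
        timer.start()
        try:
            return f(*args, **kwargs)
        finally:
            timer.cancel()
    return wrapper

def test():
    return 'Hello'

wrapped = timeout(test, 5)
assert wrapped.__name__ == 'test'  # without @wraps this attribute is 'wrapper'
print(wrapped(), wrapped.__name__)
```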
| {"golden_diff": "diff --git a/parsl/app/python.py b/parsl/app/python.py\n--- a/parsl/app/python.py\n+++ b/parsl/app/python.py\n@@ -3,6 +3,8 @@\n import tblib.pickling_support\n tblib.pickling_support.install()\n \n+from functools import wraps\n+\n from parsl.app.app import AppBase\n from parsl.app.errors import wrap_error\n from parsl.dataflow.dflow import DataFlowKernelLoader\n@@ -12,6 +14,7 @@\n \n \n def timeout(f, seconds):\n+ @wraps(f)\n def wrapper(*args, **kwargs):\n import threading\n import ctypes\n", "issue": "walltime app function parameter modifies task_record func_name\n**Describe the bug**\r\nWhen the walltime special parameter is passed to a Parsl app the `task_record['func_name']` parameter is set to \"wrapper\" instead of to the function's name. \r\n\r\n**To Reproduce**\r\nLaunch the code below using parsl version 1.2.0:\r\n```\r\nimport parsl\r\nprint(parsl.__version__, flush = True)\r\nfrom parsl.app.app import python_app, bash_app\r\nfrom parsl.config import Config\r\nfrom parsl.executors import HighThroughputExecutor\r\n\r\n\r\n@python_app\r\ndef test(stdout='std.out', stderr = 'std.err', walltime = 5):\r\n from time import sleep\r\n sleep(1)\r\n Fail = 1/0\r\n return 'Hello'\r\n\r\ndef retry_handler(exception, task_record):\r\n print(task_record['func_name'], flush = True)\r\n return 1\r\n\r\nif __name__ == '__main__':\r\n\r\n config = Config(\r\n executors = [HighThroughputExecutor()],\r\n retries = 2,\r\n retry_handler = retry_handler\r\n )\r\n print('Loading Parsl Config', flush = True)\r\n parsl.load(config)\r\n\r\n fut = test()\r\n\r\n print(fut.result())\r\n\r\n```\r\n\r\nIt will print \"wrapper\" if the walltime parameter is present and test otherwise. \r\n\r\n**Expected behavior**\r\nThe code should print the function's name (test).\r\n\r\n**Environment**\r\n- OS: Linux\r\n- Python version: 3.10.4\r\n- Parsl version: 1.2.0\r\n\r\n\r\n**Distributed Environment**\r\n- Where are you running the Parsl script from ? Local\r\n- Where do you need the workers to run ? 
Local\r\n\n", "before_files": [{"content": "import logging\n\nimport tblib.pickling_support\ntblib.pickling_support.install()\n\nfrom parsl.app.app import AppBase\nfrom parsl.app.errors import wrap_error\nfrom parsl.dataflow.dflow import DataFlowKernelLoader\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef timeout(f, seconds):\n def wrapper(*args, **kwargs):\n import threading\n import ctypes\n import parsl.app.errors\n\n def inject_exception(thread):\n ctypes.pythonapi.PyThreadState_SetAsyncExc(\n ctypes.c_long(thread),\n ctypes.py_object(parsl.app.errors.AppTimeout)\n )\n\n thread = threading.current_thread().ident\n timer = threading.Timer(seconds, inject_exception, args=[thread])\n timer.start()\n result = f(*args, **kwargs)\n timer.cancel()\n return result\n return wrapper\n\n\nclass PythonApp(AppBase):\n \"\"\"Extends AppBase to cover the Python App.\"\"\"\n\n def __init__(self, func, data_flow_kernel=None, cache=False, executors='all', ignore_for_cache=[], join=False):\n super().__init__(\n wrap_error(func),\n data_flow_kernel=data_flow_kernel,\n executors=executors,\n cache=cache,\n ignore_for_cache=ignore_for_cache\n )\n self.join = join\n\n def __call__(self, *args, **kwargs):\n \"\"\"This is where the call to a python app is handled.\n\n Args:\n - Arbitrary\n Kwargs:\n - Arbitrary\n\n Returns:\n App_fut\n\n \"\"\"\n invocation_kwargs = {}\n invocation_kwargs.update(self.kwargs)\n invocation_kwargs.update(kwargs)\n\n if self.data_flow_kernel is None:\n dfk = DataFlowKernelLoader.dfk()\n else:\n dfk = self.data_flow_kernel\n\n walltime = invocation_kwargs.get('walltime')\n if walltime is not None:\n func = timeout(self.func, walltime)\n else:\n func = self.func\n\n app_fut = dfk.submit(func, app_args=args,\n executors=self.executors,\n cache=self.cache,\n ignore_for_cache=self.ignore_for_cache,\n app_kwargs=invocation_kwargs,\n join=self.join)\n\n return app_fut\n", "path": "parsl/app/python.py"}], "after_files": [{"content": "import logging\n\nimport tblib.pickling_support\ntblib.pickling_support.install()\n\nfrom functools import wraps\n\nfrom parsl.app.app import AppBase\nfrom parsl.app.errors import wrap_error\nfrom parsl.dataflow.dflow import DataFlowKernelLoader\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef timeout(f, seconds):\n @wraps(f)\n def wrapper(*args, **kwargs):\n import threading\n import ctypes\n import parsl.app.errors\n\n def inject_exception(thread):\n ctypes.pythonapi.PyThreadState_SetAsyncExc(\n ctypes.c_long(thread),\n ctypes.py_object(parsl.app.errors.AppTimeout)\n )\n\n thread = threading.current_thread().ident\n timer = threading.Timer(seconds, inject_exception, args=[thread])\n timer.start()\n result = f(*args, **kwargs)\n timer.cancel()\n return result\n return wrapper\n\n\nclass PythonApp(AppBase):\n \"\"\"Extends AppBase to cover the Python App.\"\"\"\n\n def __init__(self, func, data_flow_kernel=None, cache=False, executors='all', ignore_for_cache=[], join=False):\n super().__init__(\n wrap_error(func),\n data_flow_kernel=data_flow_kernel,\n executors=executors,\n cache=cache,\n ignore_for_cache=ignore_for_cache\n )\n self.join = join\n\n def __call__(self, *args, **kwargs):\n \"\"\"This is where the call to a python app is handled.\n\n Args:\n - Arbitrary\n Kwargs:\n - Arbitrary\n\n Returns:\n App_fut\n\n \"\"\"\n invocation_kwargs = {}\n invocation_kwargs.update(self.kwargs)\n invocation_kwargs.update(kwargs)\n\n if self.data_flow_kernel is None:\n dfk = DataFlowKernelLoader.dfk()\n else:\n dfk = self.data_flow_kernel\n\n walltime = 
invocation_kwargs.get('walltime')\n if walltime is not None:\n func = timeout(self.func, walltime)\n else:\n func = self.func\n\n app_fut = dfk.submit(func, app_args=args,\n executors=self.executors,\n cache=self.cache,\n ignore_for_cache=self.ignore_for_cache,\n app_kwargs=invocation_kwargs,\n join=self.join)\n\n return app_fut\n", "path": "parsl/app/python.py"}]} | 1,257 | 142 |
gh_patches_debug_24727 | rasdani/github-patches | git_diff | sublimelsp__LSP-1852 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unable to save modified file when dragged to a new window
Description
Editing typescript project, dragged a tab out to a separate window then modified some lines and tried to save. Sublime won't save unless I drag the tab back into the open project. (see sublime issue - https://github.com/sublimehq/sublime_text/issues/4623)
Steps to reproduce
Start Sublime Text, open a directory containing typescript files. (make sure to have the LSP plugin installed)
open multiple files in tabs
drag one of the tabs out to a separate window
modify the tab that's been dragged out, then try to save
Expected behavior
Expect the separate window/modified file to save.
Actual behavior

The separate window doesn't save with cmd+s; the modified indicator (circle icon/symbol) is still visible on top.
Environment
Sublime Build: 4112
Operating system and version: macOS 11.4,
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugin/save_command.py`
Content:
```
1 from .core.registry import LspTextCommand
2 from .core.settings import userprefs
3 from .core.typing import Callable, List, Type
4 from abc import ABCMeta, abstractmethod
5 import sublime
6 import sublime_plugin
7
8
9 class SaveTask(metaclass=ABCMeta):
10 """
11 Base class for tasks that run on save.
12
13 Note: The whole task runs on the async thread.
14 """
15
16 @classmethod
17 @abstractmethod
18 def is_applicable(cls, view: sublime.View) -> bool:
19 pass
20
21 def __init__(self, task_runner: LspTextCommand, on_done: Callable[[], None]):
22 self._task_runner = task_runner
23 self._on_done = on_done
24 self._completed = False
25 self._cancelled = False
26 self._status_key = 'lsp_save_task_timeout'
27
28 def run_async(self) -> None:
29 self._erase_view_status()
30 sublime.set_timeout_async(self._on_timeout, userprefs().on_save_task_timeout_ms)
31
32 def _on_timeout(self) -> None:
33 if not self._completed and not self._cancelled:
34 self._set_view_status('LSP: Timeout processing {}'.format(self.__class__.__name__))
35 self._cancelled = True
36 self._on_done()
37
38 def cancel(self) -> None:
39 self._cancelled = True
40
41 def _set_view_status(self, text: str) -> None:
42 self._task_runner.view.set_status(self._status_key, text)
43 sublime.set_timeout_async(self._erase_view_status, 5000)
44
45 def _erase_view_status(self) -> None:
46 self._task_runner.view.erase_status(self._status_key)
47
48 def _on_complete(self) -> None:
49 assert not self._completed
50 self._completed = True
51 if not self._cancelled:
52 self._on_done()
53
54 def _purge_changes_async(self) -> None:
55 # Supermassive hack that will go away later.
56 listeners = sublime_plugin.view_event_listeners.get(self._task_runner.view.id(), [])
57 for listener in listeners:
58 if listener.__class__.__name__ == 'DocumentSyncListener':
59 listener.purge_changes_async() # type: ignore
60 break
61
62
63 class LspSaveCommand(LspTextCommand):
64 """
65 A command used as a substitute for native save command. Runs code actions and document
66 formatting before triggering the native save command.
67 """
68 _tasks = [] # type: List[Type[SaveTask]]
69
70 @classmethod
71 def register_task(cls, task: Type[SaveTask]) -> None:
72 assert task not in cls._tasks
73 cls._tasks.append(task)
74
75 def __init__(self, view: sublime.View) -> None:
76 super().__init__(view)
77 self._pending_tasks = [] # type: List[SaveTask]
78
79 def run(self, edit: sublime.Edit) -> None:
80 if self._pending_tasks:
81 for task in self._pending_tasks:
82 task.cancel()
83 self._pending_tasks = []
84 sublime.set_timeout_async(self._trigger_on_pre_save_async)
85 for Task in self._tasks:
86 if Task.is_applicable(self.view):
87 self._pending_tasks.append(Task(self, self._on_task_completed_async))
88 if self._pending_tasks:
89 sublime.set_timeout_async(self._run_next_task_async)
90 else:
91 self._trigger_native_save()
92
93 def _trigger_on_pre_save_async(self) -> None:
94 # Supermassive hack that will go away later.
95 listeners = sublime_plugin.view_event_listeners.get(self.view.id(), [])
96 for listener in listeners:
97 if listener.__class__.__name__ == 'DocumentSyncListener':
98 listener.trigger_on_pre_save_async() # type: ignore
99 break
100
101 def _run_next_task_async(self) -> None:
102 current_task = self._pending_tasks[0]
103 current_task.run_async()
104
105 def _on_task_completed_async(self) -> None:
106 self._pending_tasks.pop(0)
107 if self._pending_tasks:
108 self._run_next_task_async()
109 else:
110 self._trigger_native_save()
111
112 def _trigger_native_save(self) -> None:
113 # Triggered from set_timeout to preserve original semantics of on_pre_save handling
114 sublime.set_timeout(lambda: self.view.run_command('save', {"async": True}))
115
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plugin/save_command.py b/plugin/save_command.py
--- a/plugin/save_command.py
+++ b/plugin/save_command.py
@@ -1,6 +1,6 @@
from .core.registry import LspTextCommand
from .core.settings import userprefs
-from .core.typing import Callable, List, Type
+from .core.typing import Callable, List, Optional, Type
from abc import ABCMeta, abstractmethod
import sublime
import sublime_plugin
@@ -90,6 +90,15 @@
else:
self._trigger_native_save()
+ def is_enabled(self, event: Optional[dict] = None, point: Optional[int] = None) -> bool:
+ # Workaround to ensure that the command will run, even if a view was dragged out to a new window,
+ # see https://github.com/sublimelsp/LSP/issues/1791.
+ # The check to determine whether the keybinding for lsp_save is applicable already happens in
+ # DocumentSyncListener.on_query_context and should not be required here, if lsp_save is only used for the
+ # keybinding. A proper fix should ensure that LspTextCommand.is_enabled returns the correct value even for
+ # dragged out views and that LSP keeps working as expected.
+ return True
+
def _trigger_on_pre_save_async(self) -> None:
# Supermassive hack that will go away later.
listeners = sublime_plugin.view_event_listeners.get(self.view.id(), [])
| {"golden_diff": "diff --git a/plugin/save_command.py b/plugin/save_command.py\n--- a/plugin/save_command.py\n+++ b/plugin/save_command.py\n@@ -1,6 +1,6 @@\n from .core.registry import LspTextCommand\n from .core.settings import userprefs\n-from .core.typing import Callable, List, Type\n+from .core.typing import Callable, List, Optional, Type\n from abc import ABCMeta, abstractmethod\n import sublime\n import sublime_plugin\n@@ -90,6 +90,15 @@\n else:\n self._trigger_native_save()\n \n+ def is_enabled(self, event: Optional[dict] = None, point: Optional[int] = None) -> bool:\n+ # Workaround to ensure that the command will run, even if a view was dragged out to a new window,\n+ # see https://github.com/sublimelsp/LSP/issues/1791.\n+ # The check to determine whether the keybinding for lsp_save is applicable already happens in\n+ # DocumentSyncListener.on_query_context and should not be required here, if lsp_save is only used for the\n+ # keybinding. A proper fix should ensure that LspTextCommand.is_enabled returns the correct value even for\n+ # dragged out views and that LSP keeps working as expected.\n+ return True\n+\n def _trigger_on_pre_save_async(self) -> None:\n # Supermassive hack that will go away later.\n listeners = sublime_plugin.view_event_listeners.get(self.view.id(), [])\n", "issue": "Unable to save modified file when dragged to a new window\nDescription\r\nEditing typescript project, dragged a tab out to a separate window then modified some lines and tried to save. Sublime won't save unless I drag the tab back into the open project. (see sublime issue - https://github.com/sublimehq/sublime_text/issues/4623)\r\n\r\nSteps to reproduce\r\nStart Sublime Text, open a directory containing typescript files. (make sure to have the LSP plugin installed)\r\nopen multiple files in tabs\r\ndrag one of the tabs out to a separate window\r\nmodify the tab that's been dragged out, then try to save\r\nExpected behavior\r\nExpect the separate window/modified file to save.\r\n\r\nActual behavior\r\n\r\n\r\n\r\nThe separate window doesn't save with cmd+s (can still see the modified indication on top (circle icon/symbol)\r\n\r\nEnvironment\r\nSublime Build: 4112\r\nOperating system and version: macOS 11.4,\n", "before_files": [{"content": "from .core.registry import LspTextCommand\nfrom .core.settings import userprefs\nfrom .core.typing import Callable, List, Type\nfrom abc import ABCMeta, abstractmethod\nimport sublime\nimport sublime_plugin\n\n\nclass SaveTask(metaclass=ABCMeta):\n \"\"\"\n Base class for tasks that run on save.\n\n Note: The whole task runs on the async thread.\n \"\"\"\n\n @classmethod\n @abstractmethod\n def is_applicable(cls, view: sublime.View) -> bool:\n pass\n\n def __init__(self, task_runner: LspTextCommand, on_done: Callable[[], None]):\n self._task_runner = task_runner\n self._on_done = on_done\n self._completed = False\n self._cancelled = False\n self._status_key = 'lsp_save_task_timeout'\n\n def run_async(self) -> None:\n self._erase_view_status()\n sublime.set_timeout_async(self._on_timeout, userprefs().on_save_task_timeout_ms)\n\n def _on_timeout(self) -> None:\n if not self._completed and not self._cancelled:\n self._set_view_status('LSP: Timeout processing {}'.format(self.__class__.__name__))\n self._cancelled = True\n self._on_done()\n\n def cancel(self) -> None:\n self._cancelled = True\n\n def _set_view_status(self, text: str) -> None:\n self._task_runner.view.set_status(self._status_key, text)\n sublime.set_timeout_async(self._erase_view_status, 5000)\n\n 
def _erase_view_status(self) -> None:\n self._task_runner.view.erase_status(self._status_key)\n\n def _on_complete(self) -> None:\n assert not self._completed\n self._completed = True\n if not self._cancelled:\n self._on_done()\n\n def _purge_changes_async(self) -> None:\n # Supermassive hack that will go away later.\n listeners = sublime_plugin.view_event_listeners.get(self._task_runner.view.id(), [])\n for listener in listeners:\n if listener.__class__.__name__ == 'DocumentSyncListener':\n listener.purge_changes_async() # type: ignore\n break\n\n\nclass LspSaveCommand(LspTextCommand):\n \"\"\"\n A command used as a substitute for native save command. Runs code actions and document\n formatting before triggering the native save command.\n \"\"\"\n _tasks = [] # type: List[Type[SaveTask]]\n\n @classmethod\n def register_task(cls, task: Type[SaveTask]) -> None:\n assert task not in cls._tasks\n cls._tasks.append(task)\n\n def __init__(self, view: sublime.View) -> None:\n super().__init__(view)\n self._pending_tasks = [] # type: List[SaveTask]\n\n def run(self, edit: sublime.Edit) -> None:\n if self._pending_tasks:\n for task in self._pending_tasks:\n task.cancel()\n self._pending_tasks = []\n sublime.set_timeout_async(self._trigger_on_pre_save_async)\n for Task in self._tasks:\n if Task.is_applicable(self.view):\n self._pending_tasks.append(Task(self, self._on_task_completed_async))\n if self._pending_tasks:\n sublime.set_timeout_async(self._run_next_task_async)\n else:\n self._trigger_native_save()\n\n def _trigger_on_pre_save_async(self) -> None:\n # Supermassive hack that will go away later.\n listeners = sublime_plugin.view_event_listeners.get(self.view.id(), [])\n for listener in listeners:\n if listener.__class__.__name__ == 'DocumentSyncListener':\n listener.trigger_on_pre_save_async() # type: ignore\n break\n\n def _run_next_task_async(self) -> None:\n current_task = self._pending_tasks[0]\n current_task.run_async()\n\n def _on_task_completed_async(self) -> None:\n self._pending_tasks.pop(0)\n if self._pending_tasks:\n self._run_next_task_async()\n else:\n self._trigger_native_save()\n\n def _trigger_native_save(self) -> None:\n # Triggered from set_timeout to preserve original semantics of on_pre_save handling\n sublime.set_timeout(lambda: self.view.run_command('save', {\"async\": True}))\n", "path": "plugin/save_command.py"}], "after_files": [{"content": "from .core.registry import LspTextCommand\nfrom .core.settings import userprefs\nfrom .core.typing import Callable, List, Optional, Type\nfrom abc import ABCMeta, abstractmethod\nimport sublime\nimport sublime_plugin\n\n\nclass SaveTask(metaclass=ABCMeta):\n \"\"\"\n Base class for tasks that run on save.\n\n Note: The whole task runs on the async thread.\n \"\"\"\n\n @classmethod\n @abstractmethod\n def is_applicable(cls, view: sublime.View) -> bool:\n pass\n\n def __init__(self, task_runner: LspTextCommand, on_done: Callable[[], None]):\n self._task_runner = task_runner\n self._on_done = on_done\n self._completed = False\n self._cancelled = False\n self._status_key = 'lsp_save_task_timeout'\n\n def run_async(self) -> None:\n self._erase_view_status()\n sublime.set_timeout_async(self._on_timeout, userprefs().on_save_task_timeout_ms)\n\n def _on_timeout(self) -> None:\n if not self._completed and not self._cancelled:\n self._set_view_status('LSP: Timeout processing {}'.format(self.__class__.__name__))\n self._cancelled = True\n self._on_done()\n\n def cancel(self) -> None:\n self._cancelled = True\n\n def _set_view_status(self, 
text: str) -> None:\n self._task_runner.view.set_status(self._status_key, text)\n sublime.set_timeout_async(self._erase_view_status, 5000)\n\n def _erase_view_status(self) -> None:\n self._task_runner.view.erase_status(self._status_key)\n\n def _on_complete(self) -> None:\n assert not self._completed\n self._completed = True\n if not self._cancelled:\n self._on_done()\n\n def _purge_changes_async(self) -> None:\n # Supermassive hack that will go away later.\n listeners = sublime_plugin.view_event_listeners.get(self._task_runner.view.id(), [])\n for listener in listeners:\n if listener.__class__.__name__ == 'DocumentSyncListener':\n listener.purge_changes_async() # type: ignore\n break\n\n\nclass LspSaveCommand(LspTextCommand):\n \"\"\"\n A command used as a substitute for native save command. Runs code actions and document\n formatting before triggering the native save command.\n \"\"\"\n _tasks = [] # type: List[Type[SaveTask]]\n\n @classmethod\n def register_task(cls, task: Type[SaveTask]) -> None:\n assert task not in cls._tasks\n cls._tasks.append(task)\n\n def __init__(self, view: sublime.View) -> None:\n super().__init__(view)\n self._pending_tasks = [] # type: List[SaveTask]\n\n def run(self, edit: sublime.Edit) -> None:\n if self._pending_tasks:\n for task in self._pending_tasks:\n task.cancel()\n self._pending_tasks = []\n sublime.set_timeout_async(self._trigger_on_pre_save_async)\n for Task in self._tasks:\n if Task.is_applicable(self.view):\n self._pending_tasks.append(Task(self, self._on_task_completed_async))\n if self._pending_tasks:\n sublime.set_timeout_async(self._run_next_task_async)\n else:\n self._trigger_native_save()\n\n def is_enabled(self, event: Optional[dict] = None, point: Optional[int] = None) -> bool:\n # Workaround to ensure that the command will run, even if a view was dragged out to a new window,\n # see https://github.com/sublimelsp/LSP/issues/1791.\n # The check to determine whether the keybinding for lsp_save is applicable already happens in\n # DocumentSyncListener.on_query_context and should not be required here, if lsp_save is only used for the\n # keybinding. A proper fix should ensure that LspTextCommand.is_enabled returns the correct value even for\n # dragged out views and that LSP keeps working as expected.\n return True\n\n def _trigger_on_pre_save_async(self) -> None:\n # Supermassive hack that will go away later.\n listeners = sublime_plugin.view_event_listeners.get(self.view.id(), [])\n for listener in listeners:\n if listener.__class__.__name__ == 'DocumentSyncListener':\n listener.trigger_on_pre_save_async() # type: ignore\n break\n\n def _run_next_task_async(self) -> None:\n current_task = self._pending_tasks[0]\n current_task.run_async()\n\n def _on_task_completed_async(self) -> None:\n self._pending_tasks.pop(0)\n if self._pending_tasks:\n self._run_next_task_async()\n else:\n self._trigger_native_save()\n\n def _trigger_native_save(self) -> None:\n # Triggered from set_timeout to preserve original semantics of on_pre_save handling\n sublime.set_timeout(lambda: self.view.run_command('save', {\"async\": True}))\n", "path": "plugin/save_command.py"}]} | 1,729 | 327 |
gh_patches_debug_10398 | rasdani/github-patches | git_diff | hpcaitech__ColossalAI-4526 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[tensor] fix some unittests
[tensor] fix some unittests
[tensor] fix some unittests
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `colossalai/pipeline/schedule/_utils.py`
Content:
```
1 from typing import Any, List, Optional
2
3 import torch
4 import torch.cuda
5 from torch.nn import Module
6 from torch.utils._pytree import tree_flatten, tree_map, tree_unflatten
7
8
9 def to_device(x: Any, device: Optional[torch.device] = None) -> Any:
10 """Move object to device if it is a tensor.
11
12 Args:
13 x (Any): Object to be moved.
14 device (Optional[torch.device], optional): Target device. Defaults to None.
15
16 Returns:
17 Any: Moved object.
18 """
19 if isinstance(x, torch.Tensor):
20 return x.to(device)
21 return x
22
23
24 def get_batch_size(batch: Any) -> int:
25 """Get the batch size (size of dimension-0) of the first tensor in the batch.
26
27 Args:
28 batch (Any): Batch to be inspected.
29
30 Raises:
31 RuntimeError: If no tensor is found in the batch.
32
33 Returns:
34 int: Batch size.
35 """
36 data_list, _ = tree_flatten(batch)
37 for data in data_list:
38 if isinstance(data, torch.Tensor):
39 return data.size(0)
40 raise RuntimeError('No tensor found in the batch')
41
42
43 def get_micro_batch(batch: Any, start: int, micro_batch_size: int) -> Any:
44 """Get a micro batch of the original batch.
45
46 Args:
47 batch (Any): Batch to be sliced.
48 start (int): Start index of the micro batch.
49 micro_batch_size (int): Size of the micro batch.
50
51 Returns:
52 Any: Target micro batch.
53 """
54
55 def _get_tensor_slice(x: Any):
56 if isinstance(x, torch.Tensor):
57 return x[start:start + micro_batch_size]
58 return x
59
60 return tree_map(_get_tensor_slice, batch)
61
62
63 def model_forward(model: Module, data: Any, internal_inputs: Optional[dict]) -> Any:
64 """Call model forward function with data and internal inputs.
65
66 Args:
67 model (Module): Model to be called.
68 data (Any): Data loaded from data iterator.
69 internal_inputs (Optional[dict]): Data from previous stage. It must be a dict or None if it's the first stage.
70
71 Returns:
72 Any: Outputs of the model.
73 """
74 if internal_inputs is None:
75 internal_inputs = {}
76 if isinstance(data, (list, tuple)):
77 return model(*data, **internal_inputs)
78 elif isinstance(data, dict):
79 return model(**data, **internal_inputs)
80 return model(data, **internal_inputs)
81
82
83 def retain_grad(x: Any) -> None:
84 """Call retain_grad() on a tensor.
85
86 Args:
87 x (Any): Object to be called.
88 """
89 if isinstance(x, torch.Tensor) and x.requires_grad:
90 x.retain_grad()
91
92
93 def detach(x: Any) -> Any:
94 """Call detach() on a tensor.
95
96 Args:
97 x (Any): Object to be called.
98
99 Returns:
100 Any: The detached object.
101 """
102 if isinstance(x, torch.Tensor):
103 return x.detach()
104 return x
105
106
107 def merge_batch(data: List[Any]) -> Any:
108 """Merge micro batches into a batch.
109
110 Args:
111 data (List[Any]): A list of micro batches.
112
113 Returns:
114 Any: Merge batch.
115 """
116 if len(data) == 0:
117 return
118 flattened_data = []
119 tree_spec = None
120 for d in data:
121 elems, tree_spec = tree_flatten(d)
122 flattened_data.append(elems)
123 merged_data = []
124 for elem_batch in zip(*flattened_data):
125 if isinstance(elem_batch[0], torch.Tensor):
126 merged_data.append(torch.cat(elem_batch, dim=0))
127 else:
128 merged_data.append(list(elem_batch))
129 return tree_unflatten(merged_data, tree_spec)
130
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/colossalai/pipeline/schedule/_utils.py b/colossalai/pipeline/schedule/_utils.py
--- a/colossalai/pipeline/schedule/_utils.py
+++ b/colossalai/pipeline/schedule/_utils.py
@@ -123,7 +123,10 @@
merged_data = []
for elem_batch in zip(*flattened_data):
if isinstance(elem_batch[0], torch.Tensor):
- merged_data.append(torch.cat(elem_batch, dim=0))
+ if len(elem_batch[0].shape) == 0: # set loss to None in pipeline outputs
+ merged_data.append(None)
+ else:
+ merged_data.append(torch.cat(elem_batch, dim=0))
else:
merged_data.append(list(elem_batch))
return tree_unflatten(merged_data, tree_spec)
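The new branch matters because `torch.cat` refuses zero-dimensional tensors, and the scalar loss produced per micro-batch is exactly that. A short demonstration of the failure and of the patched behaviour, assuming PyTorch is installed:

```python
import torch

losses = [torch.tensor(0.25), torch.tensor(0.30)]  # 0-dim scalars, e.g. per-micro-batch losses
outputs = [torch.ones(2, 4), torch.ones(2, 4)]      # ordinary batched outputs

try:
    torch.cat(losses, dim=0)
except RuntimeError as err:
    print('cat on scalars fails:', err)  # zero-dimensional tensors cannot be concatenated

def merge(elem_batch):
    # Mirrors the patched merge_batch branch: scalars become None, real tensors are concatenated.
    if len(elem_batch[0].shape) == 0:
        return None
    return torch.cat(elem_batch, dim=0)

print(merge(losses))         # None
print(merge(outputs).shape)  # torch.Size([4, 4])
```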
| {"golden_diff": "diff --git a/colossalai/pipeline/schedule/_utils.py b/colossalai/pipeline/schedule/_utils.py\n--- a/colossalai/pipeline/schedule/_utils.py\n+++ b/colossalai/pipeline/schedule/_utils.py\n@@ -123,7 +123,10 @@\n merged_data = []\n for elem_batch in zip(*flattened_data):\n if isinstance(elem_batch[0], torch.Tensor):\n- merged_data.append(torch.cat(elem_batch, dim=0))\n+ if len(elem_batch[0].shape) == 0: # set loss to None in pipeline outputs\n+ merged_data.append(None)\n+ else:\n+ merged_data.append(torch.cat(elem_batch, dim=0))\n else:\n merged_data.append(list(elem_batch))\n return tree_unflatten(merged_data, tree_spec)\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "from typing import Any, List, Optional\n\nimport torch\nimport torch.cuda\nfrom torch.nn import Module\nfrom torch.utils._pytree import tree_flatten, tree_map, tree_unflatten\n\n\ndef to_device(x: Any, device: Optional[torch.device] = None) -> Any:\n \"\"\"Move object to device if it is a tensor.\n\n Args:\n x (Any): Object to be moved.\n device (Optional[torch.device], optional): Target device. Defaults to None.\n\n Returns:\n Any: Moved object.\n \"\"\"\n if isinstance(x, torch.Tensor):\n return x.to(device)\n return x\n\n\ndef get_batch_size(batch: Any) -> int:\n \"\"\"Get the batch size (size of dimension-0) of the first tensor in the batch.\n\n Args:\n batch (Any): Batch to be inspected.\n\n Raises:\n RuntimeError: If no tensor is found in the batch.\n\n Returns:\n int: Batch size.\n \"\"\"\n data_list, _ = tree_flatten(batch)\n for data in data_list:\n if isinstance(data, torch.Tensor):\n return data.size(0)\n raise RuntimeError('No tensor found in the batch')\n\n\ndef get_micro_batch(batch: Any, start: int, micro_batch_size: int) -> Any:\n \"\"\"Get a micro batch of the original batch.\n\n Args:\n batch (Any): Batch to be sliced.\n start (int): Start index of the micro batch.\n micro_batch_size (int): Size of the micro batch.\n\n Returns:\n Any: Target micro batch.\n \"\"\"\n\n def _get_tensor_slice(x: Any):\n if isinstance(x, torch.Tensor):\n return x[start:start + micro_batch_size]\n return x\n\n return tree_map(_get_tensor_slice, batch)\n\n\ndef model_forward(model: Module, data: Any, internal_inputs: Optional[dict]) -> Any:\n \"\"\"Call model forward function with data and internal inputs.\n\n Args:\n model (Module): Model to be called.\n data (Any): Data loaded from data iterator.\n internal_inputs (Optional[dict]): Data from previous stage. 
It must be a dict or None if it's the first stage.\n\n Returns:\n Any: Outputs of the model.\n \"\"\"\n if internal_inputs is None:\n internal_inputs = {}\n if isinstance(data, (list, tuple)):\n return model(*data, **internal_inputs)\n elif isinstance(data, dict):\n return model(**data, **internal_inputs)\n return model(data, **internal_inputs)\n\n\ndef retain_grad(x: Any) -> None:\n \"\"\"Call retain_grad() on a tensor.\n\n Args:\n x (Any): Object to be called.\n \"\"\"\n if isinstance(x, torch.Tensor) and x.requires_grad:\n x.retain_grad()\n\n\ndef detach(x: Any) -> Any:\n \"\"\"Call detach() on a tensor.\n\n Args:\n x (Any): Object to be called.\n\n Returns:\n Any: The detached object.\n \"\"\"\n if isinstance(x, torch.Tensor):\n return x.detach()\n return x\n\n\ndef merge_batch(data: List[Any]) -> Any:\n \"\"\"Merge micro batches into a batch.\n\n Args:\n data (List[Any]): A list of micro batches.\n\n Returns:\n Any: Merge batch.\n \"\"\"\n if len(data) == 0:\n return\n flattened_data = []\n tree_spec = None\n for d in data:\n elems, tree_spec = tree_flatten(d)\n flattened_data.append(elems)\n merged_data = []\n for elem_batch in zip(*flattened_data):\n if isinstance(elem_batch[0], torch.Tensor):\n merged_data.append(torch.cat(elem_batch, dim=0))\n else:\n merged_data.append(list(elem_batch))\n return tree_unflatten(merged_data, tree_spec)\n", "path": "colossalai/pipeline/schedule/_utils.py"}], "after_files": [{"content": "from typing import Any, List, Optional\n\nimport torch\nimport torch.cuda\nfrom torch.nn import Module\nfrom torch.utils._pytree import tree_flatten, tree_map, tree_unflatten\n\n\ndef to_device(x: Any, device: Optional[torch.device] = None) -> Any:\n \"\"\"Move object to device if it is a tensor.\n\n Args:\n x (Any): Object to be moved.\n device (Optional[torch.device], optional): Target device. Defaults to None.\n\n Returns:\n Any: Moved object.\n \"\"\"\n if isinstance(x, torch.Tensor):\n return x.to(device)\n return x\n\n\ndef get_batch_size(batch: Any) -> int:\n \"\"\"Get the batch size (size of dimension-0) of the first tensor in the batch.\n\n Args:\n batch (Any): Batch to be inspected.\n\n Raises:\n RuntimeError: If no tensor is found in the batch.\n\n Returns:\n int: Batch size.\n \"\"\"\n data_list, _ = tree_flatten(batch)\n for data in data_list:\n if isinstance(data, torch.Tensor):\n return data.size(0)\n raise RuntimeError('No tensor found in the batch')\n\n\ndef get_micro_batch(batch: Any, start: int, micro_batch_size: int) -> Any:\n \"\"\"Get a micro batch of the original batch.\n\n Args:\n batch (Any): Batch to be sliced.\n start (int): Start index of the micro batch.\n micro_batch_size (int): Size of the micro batch.\n\n Returns:\n Any: Target micro batch.\n \"\"\"\n\n def _get_tensor_slice(x: Any):\n if isinstance(x, torch.Tensor):\n return x[start:start + micro_batch_size]\n return x\n\n return tree_map(_get_tensor_slice, batch)\n\n\ndef model_forward(model: Module, data: Any, internal_inputs: Optional[dict]) -> Any:\n \"\"\"Call model forward function with data and internal inputs.\n\n Args:\n model (Module): Model to be called.\n data (Any): Data loaded from data iterator.\n internal_inputs (Optional[dict]): Data from previous stage. 
It must be a dict or None if it's the first stage.\n\n Returns:\n Any: Outputs of the model.\n \"\"\"\n if internal_inputs is None:\n internal_inputs = {}\n if isinstance(data, (list, tuple)):\n return model(*data, **internal_inputs)\n elif isinstance(data, dict):\n return model(**data, **internal_inputs)\n return model(data, **internal_inputs)\n\n\ndef retain_grad(x: Any) -> None:\n \"\"\"Call retain_grad() on a tensor.\n\n Args:\n x (Any): Object to be called.\n \"\"\"\n if isinstance(x, torch.Tensor) and x.requires_grad:\n x.retain_grad()\n\n\ndef detach(x: Any) -> Any:\n \"\"\"Call detach() on a tensor.\n\n Args:\n x (Any): Object to be called.\n\n Returns:\n Any: The detached object.\n \"\"\"\n if isinstance(x, torch.Tensor):\n return x.detach()\n return x\n\n\ndef merge_batch(data: List[Any]) -> Any:\n \"\"\"Merge micro batches into a batch.\n\n Args:\n data (List[Any]): A list of micro batches.\n\n Returns:\n Any: Merge batch.\n \"\"\"\n if len(data) == 0:\n return\n flattened_data = []\n tree_spec = None\n for d in data:\n elems, tree_spec = tree_flatten(d)\n flattened_data.append(elems)\n merged_data = []\n for elem_batch in zip(*flattened_data):\n if isinstance(elem_batch[0], torch.Tensor):\n if len(elem_batch[0].shape) == 0: # set loss to None in pipeline outputs\n merged_data.append(None)\n else:\n merged_data.append(torch.cat(elem_batch, dim=0))\n else:\n merged_data.append(list(elem_batch))\n return tree_unflatten(merged_data, tree_spec)\n", "path": "colossalai/pipeline/schedule/_utils.py"}]} | 1,398 | 186 |
gh_patches_debug_27404 | rasdani/github-patches | git_diff | onnx__onnx-tensorflow-762 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Upsample TypeError: unsupported operand type(s) for *: 'NoneType' and 'float'
Hi,
I have a problem with the upsample op when trying to prepare an onnx model converted from keras. Any idea of the solution ? Thx
>
> File "C:\Users\vthibault\anaconda3\envs\tensorflow\lib\site-packages\onnx_tf\backend.py", line 56, in prepare
> return cls.onnx_model_to_tensorflow_rep(model, strict)
> File "C:\Users\vthibault\anaconda3\envs\tensorflow\lib\site-packages\onnx_tf\backend.py", line 76, in onnx_model_to_tensorflow_rep
> return cls._onnx_graph_to_tensorflow_rep(model.graph, opset_import, strict)
> File "C:\Users\vthibault\anaconda3\envs\tensorflow\lib\site-packages\onnx_tf\backend.py", line 133, in _onnx_graph_to_tensorflow_rep
> onnx_node, tensor_dict, handlers, opset=opset, strict=strict)
> File "C:\Users\vthibault\anaconda3\envs\tensorflow\lib\site-packages\onnx_tf\backend.py", line 228, in _onnx_node_to_tensorflow_op
> return handler.handle(node, tensor_dict=tensor_dict, strict=strict)
> File "C:\Users\vthibault\anaconda3\envs\tensorflow\lib\site-packages\onnx_tf\handlers\handler.py", line 59, in handle
> return ver_handle(node, **kwargs)
> File "C:\Users\vthibault\anaconda3\envs\tensorflow\lib\site-packages\onnx_tf\handlers\backend\upsample.py", line 33, in version_7
> new_height = np.floor(x_shape[2] * scales[2])
> TypeError: unsupported operand type(s) for *: 'NoneType' and 'float'
--- END ISSUE ---
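The traceback boils down to the handler reading the static shape: for an input whose spatial dimensions are not known at graph-construction time, `x.get_shape().as_list()` contains `None`, and `None * scale` raises exactly this `TypeError`. The sketch below (assuming TensorFlow 2.x is installed) contrasts the static shape with `tf.shape`, which is what the `version_9` handler and the eventual fix rely on via `tf_shape`:

```python
import tensorflow as tf

@tf.function(input_signature=[tf.TensorSpec([None, 3, None, None], tf.float32)])
def new_spatial_size(x):
    static = x.get_shape().as_list()  # [None, 3, None, None]; "None * 2.0" is the reported TypeError
    dynamic = tf.shape(x)             # an int32 tensor that works even for unknown dimensions
    h_w = tf.cast(dynamic[2:], tf.float32)
    return tf.cast(h_w * tf.constant([2.0, 2.0]), tf.int32)

print(new_spatial_size(tf.zeros([1, 3, 10, 20])))  # [20 40]
```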
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `onnx_tf/handlers/backend/upsample.py`
Content:
```
1 import copy
2
3 import numpy as np
4 import tensorflow as tf
5
6 from onnx_tf.common import exception
7 from onnx_tf.handlers.backend_handler import BackendHandler
8 from onnx_tf.handlers.handler import onnx_op
9 from onnx_tf.handlers.handler import partial_support
10 from onnx_tf.handlers.handler import ps_description
11 from onnx_tf.handlers.handler import tf_func
12 from onnx_tf.common.tf_helper import tf_shape
13
14
15 @onnx_op("Upsample")
16 @tf_func(tf.image.resize)
17 @partial_support(True)
18 @ps_description("Upsample required 4D input in Tensorflow.")
19 class Upsample(BackendHandler):
20
21 @classmethod
22 def args_check(cls, node, **kwargs):
23 x = kwargs["tensor_dict"][node.inputs[0]]
24 x_shape = x.get_shape().as_list()
25 if len(x_shape) != 4:
26 exception.OP_UNSUPPORTED_EXCEPT("Upsample without 4D input", "Tensorflow")
27
28 if node.attrs.get(
29 "mode", "nearest").lower() not in ["nearest", "bilinear", "linear"]:
30 exception.OP_UNSUPPORTED_EXCEPT("Upsample without nearest or bilinear",
31 "Tensorflow")
32
33 @classmethod
34 def version_7(cls, node, **kwargs):
35 x = kwargs["tensor_dict"][node.inputs[0]]
36 x_shape = x.get_shape().as_list()
37 attrs = copy.deepcopy(node.attrs)
38 scales = attrs["scales"]
39 new_height = np.floor(x_shape[2] * scales[2])
40 new_weight = np.floor(x_shape[3] * scales[3])
41
42 mode = attrs.get("mode", "nearest")
43 if mode.lower() == "bilinear" or mode.lower() == "linear":
44 mode = tf.image.ResizeMethod.BILINEAR
45 else:
46 mode = tf.image.ResizeMethod.NEAREST_NEIGHBOR
47
48 attrs["size"] = np.array((new_height, new_weight), dtype=np.int32)
49 attrs["method"] = mode
50
51 return [
52 cls.make_tensor_from_onnx_node(
53 node, attrs=attrs, c_last_only=True, **kwargs)
54 ]
55
56 @classmethod
57 def version_9(cls, node, **kwargs):
58 x = kwargs["tensor_dict"][node.inputs[0]]
59 x_shape = tf_shape(x)
60 attrs = copy.deepcopy(node.attrs)
61 scales = kwargs["tensor_dict"][node.inputs[1]]
62
63 assert_n_c_scale_is_one = tf.Assert(
64 tf.logical_and(tf.equal(scales[0], 1), tf.equal(scales[1], 1)),
65 [scales])
66
67 with tf.control_dependencies([assert_n_c_scale_is_one]):
68 h_w_scale = scales[2:]
69 h_w_shape = x_shape[2:]
70 new_h_w_shape = tf.cast(h_w_scale * tf.cast(h_w_shape, scales.dtype),
71 tf.int32)
72
73 mode = attrs.get("mode", "nearest")
74 if mode.lower() == "bilinear" or mode.lower() == "linear":
75 mode = tf.image.ResizeMethod.BILINEAR
76 else:
77 mode = tf.image.ResizeMethod.NEAREST_NEIGHBOR
78
79 attrs["size"] = new_h_w_shape
80 attrs["method"] = mode
81
82 # Remove scale.
83 upsample_node = copy.deepcopy(node)
84 del upsample_node.inputs[1]
85 return [
86 cls.make_tensor_from_onnx_node(
87 upsample_node, attrs=attrs, c_last_only=True, **kwargs)
88 ]
89
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/onnx_tf/handlers/backend/upsample.py b/onnx_tf/handlers/backend/upsample.py
--- a/onnx_tf/handlers/backend/upsample.py
+++ b/onnx_tf/handlers/backend/upsample.py
@@ -1,6 +1,5 @@
import copy
-import numpy as np
import tensorflow as tf
from onnx_tf.common import exception
@@ -33,20 +32,28 @@
@classmethod
def version_7(cls, node, **kwargs):
x = kwargs["tensor_dict"][node.inputs[0]]
- x_shape = x.get_shape().as_list()
+ x_shape = tf_shape(x)
attrs = copy.deepcopy(node.attrs)
scales = attrs["scales"]
- new_height = np.floor(x_shape[2] * scales[2])
- new_weight = np.floor(x_shape[3] * scales[3])
- mode = attrs.get("mode", "nearest")
- if mode.lower() == "bilinear" or mode.lower() == "linear":
- mode = tf.image.ResizeMethod.BILINEAR
- else:
- mode = tf.image.ResizeMethod.NEAREST_NEIGHBOR
+ assert_n_c_scale_is_one = tf.Assert(
+ tf.logical_and(tf.equal(scales[0], 1), tf.equal(scales[1], 1)),
+ [scales])
+
+ with tf.control_dependencies([assert_n_c_scale_is_one]):
+ h_w_scale = scales[2:]
+ h_w_shape = x_shape[2:]
+ new_h_w_shape = tf.cast(h_w_scale * tf.cast(h_w_shape, type(h_w_scale[0])),
+ tf.int32)
+
+ mode = attrs.get("mode", "nearest")
+ if mode.lower() == "bilinear" or mode.lower() == "linear":
+ mode = tf.image.ResizeMethod.BILINEAR
+ else:
+ mode = tf.image.ResizeMethod.NEAREST_NEIGHBOR
- attrs["size"] = np.array((new_height, new_weight), dtype=np.int32)
- attrs["method"] = mode
+ attrs["size"] = new_h_w_shape
+ attrs["method"] = mode
return [
cls.make_tensor_from_onnx_node(
| {"golden_diff": "diff --git a/onnx_tf/handlers/backend/upsample.py b/onnx_tf/handlers/backend/upsample.py\n--- a/onnx_tf/handlers/backend/upsample.py\n+++ b/onnx_tf/handlers/backend/upsample.py\n@@ -1,6 +1,5 @@\n import copy\n \n-import numpy as np\n import tensorflow as tf\n \n from onnx_tf.common import exception\n@@ -33,20 +32,28 @@\n @classmethod\n def version_7(cls, node, **kwargs):\n x = kwargs[\"tensor_dict\"][node.inputs[0]]\n- x_shape = x.get_shape().as_list()\n+ x_shape = tf_shape(x)\n attrs = copy.deepcopy(node.attrs)\n scales = attrs[\"scales\"]\n- new_height = np.floor(x_shape[2] * scales[2])\n- new_weight = np.floor(x_shape[3] * scales[3])\n \n- mode = attrs.get(\"mode\", \"nearest\")\n- if mode.lower() == \"bilinear\" or mode.lower() == \"linear\":\n- mode = tf.image.ResizeMethod.BILINEAR\n- else:\n- mode = tf.image.ResizeMethod.NEAREST_NEIGHBOR\n+ assert_n_c_scale_is_one = tf.Assert(\n+ tf.logical_and(tf.equal(scales[0], 1), tf.equal(scales[1], 1)),\n+ [scales])\n+\n+ with tf.control_dependencies([assert_n_c_scale_is_one]):\n+ h_w_scale = scales[2:]\n+ h_w_shape = x_shape[2:]\n+ new_h_w_shape = tf.cast(h_w_scale * tf.cast(h_w_shape, type(h_w_scale[0])),\n+ tf.int32)\n+\n+ mode = attrs.get(\"mode\", \"nearest\")\n+ if mode.lower() == \"bilinear\" or mode.lower() == \"linear\":\n+ mode = tf.image.ResizeMethod.BILINEAR\n+ else:\n+ mode = tf.image.ResizeMethod.NEAREST_NEIGHBOR\n \n- attrs[\"size\"] = np.array((new_height, new_weight), dtype=np.int32)\n- attrs[\"method\"] = mode\n+ attrs[\"size\"] = new_h_w_shape\n+ attrs[\"method\"] = mode\n \n return [\n cls.make_tensor_from_onnx_node(\n", "issue": "Upsample TypeError: unsupported operand type(s) for *: 'NoneType' and 'float'\nHi, \r\nI have a problem with the upsample op when trying to prepare an onnx model converted from keras. Any idea of the solution ? 
Thx\r\n\r\n> \r\n> File \"C:\\Users\\vthibault\\anaconda3\\envs\\tensorflow\\lib\\site-packages\\onnx_tf\\backend.py\", line 56, in prepare\r\n> return cls.onnx_model_to_tensorflow_rep(model, strict)\r\n> File \"C:\\Users\\vthibault\\anaconda3\\envs\\tensorflow\\lib\\site-packages\\onnx_tf\\backend.py\", line 76, in onnx_model_to_tensorflow_rep\r\n> return cls._onnx_graph_to_tensorflow_rep(model.graph, opset_import, strict)\r\n> File \"C:\\Users\\vthibault\\anaconda3\\envs\\tensorflow\\lib\\site-packages\\onnx_tf\\backend.py\", line 133, in _onnx_graph_to_tensorflow_rep\r\n> onnx_node, tensor_dict, handlers, opset=opset, strict=strict)\r\n> File \"C:\\Users\\vthibault\\anaconda3\\envs\\tensorflow\\lib\\site-packages\\onnx_tf\\backend.py\", line 228, in _onnx_node_to_tensorflow_op\r\n> return handler.handle(node, tensor_dict=tensor_dict, strict=strict)\r\n> File \"C:\\Users\\vthibault\\anaconda3\\envs\\tensorflow\\lib\\site-packages\\onnx_tf\\handlers\\handler.py\", line 59, in handle\r\n> return ver_handle(node, **kwargs)\r\n> File \"C:\\Users\\vthibault\\anaconda3\\envs\\tensorflow\\lib\\site-packages\\onnx_tf\\handlers\\backend\\upsample.py\", line 33, in version_7\r\n> new_height = np.floor(x_shape[2] * scales[2])\r\n> TypeError: unsupported operand type(s) for *: 'NoneType' and 'float'\n", "before_files": [{"content": "import copy\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom onnx_tf.common import exception\nfrom onnx_tf.handlers.backend_handler import BackendHandler\nfrom onnx_tf.handlers.handler import onnx_op\nfrom onnx_tf.handlers.handler import partial_support\nfrom onnx_tf.handlers.handler import ps_description\nfrom onnx_tf.handlers.handler import tf_func\nfrom onnx_tf.common.tf_helper import tf_shape\n\n\n@onnx_op(\"Upsample\")\n@tf_func(tf.image.resize)\n@partial_support(True)\n@ps_description(\"Upsample required 4D input in Tensorflow.\")\nclass Upsample(BackendHandler):\n\n @classmethod\n def args_check(cls, node, **kwargs):\n x = kwargs[\"tensor_dict\"][node.inputs[0]]\n x_shape = x.get_shape().as_list()\n if len(x_shape) != 4:\n exception.OP_UNSUPPORTED_EXCEPT(\"Upsample without 4D input\", \"Tensorflow\")\n\n if node.attrs.get(\n \"mode\", \"nearest\").lower() not in [\"nearest\", \"bilinear\", \"linear\"]:\n exception.OP_UNSUPPORTED_EXCEPT(\"Upsample without nearest or bilinear\",\n \"Tensorflow\")\n\n @classmethod\n def version_7(cls, node, **kwargs):\n x = kwargs[\"tensor_dict\"][node.inputs[0]]\n x_shape = x.get_shape().as_list()\n attrs = copy.deepcopy(node.attrs)\n scales = attrs[\"scales\"]\n new_height = np.floor(x_shape[2] * scales[2])\n new_weight = np.floor(x_shape[3] * scales[3])\n\n mode = attrs.get(\"mode\", \"nearest\")\n if mode.lower() == \"bilinear\" or mode.lower() == \"linear\":\n mode = tf.image.ResizeMethod.BILINEAR\n else:\n mode = tf.image.ResizeMethod.NEAREST_NEIGHBOR\n\n attrs[\"size\"] = np.array((new_height, new_weight), dtype=np.int32)\n attrs[\"method\"] = mode\n\n return [\n cls.make_tensor_from_onnx_node(\n node, attrs=attrs, c_last_only=True, **kwargs)\n ]\n\n @classmethod\n def version_9(cls, node, **kwargs):\n x = kwargs[\"tensor_dict\"][node.inputs[0]]\n x_shape = tf_shape(x)\n attrs = copy.deepcopy(node.attrs)\n scales = kwargs[\"tensor_dict\"][node.inputs[1]]\n\n assert_n_c_scale_is_one = tf.Assert(\n tf.logical_and(tf.equal(scales[0], 1), tf.equal(scales[1], 1)),\n [scales])\n\n with tf.control_dependencies([assert_n_c_scale_is_one]):\n h_w_scale = scales[2:]\n h_w_shape = x_shape[2:]\n new_h_w_shape = tf.cast(h_w_scale * 
tf.cast(h_w_shape, scales.dtype),\n tf.int32)\n\n mode = attrs.get(\"mode\", \"nearest\")\n if mode.lower() == \"bilinear\" or mode.lower() == \"linear\":\n mode = tf.image.ResizeMethod.BILINEAR\n else:\n mode = tf.image.ResizeMethod.NEAREST_NEIGHBOR\n\n attrs[\"size\"] = new_h_w_shape\n attrs[\"method\"] = mode\n\n # Remove scale.\n upsample_node = copy.deepcopy(node)\n del upsample_node.inputs[1]\n return [\n cls.make_tensor_from_onnx_node(\n upsample_node, attrs=attrs, c_last_only=True, **kwargs)\n ]\n", "path": "onnx_tf/handlers/backend/upsample.py"}], "after_files": [{"content": "import copy\n\nimport tensorflow as tf\n\nfrom onnx_tf.common import exception\nfrom onnx_tf.handlers.backend_handler import BackendHandler\nfrom onnx_tf.handlers.handler import onnx_op\nfrom onnx_tf.handlers.handler import partial_support\nfrom onnx_tf.handlers.handler import ps_description\nfrom onnx_tf.handlers.handler import tf_func\nfrom onnx_tf.common.tf_helper import tf_shape\n\n\n@onnx_op(\"Upsample\")\n@tf_func(tf.image.resize)\n@partial_support(True)\n@ps_description(\"Upsample required 4D input in Tensorflow.\")\nclass Upsample(BackendHandler):\n\n @classmethod\n def args_check(cls, node, **kwargs):\n x = kwargs[\"tensor_dict\"][node.inputs[0]]\n x_shape = x.get_shape().as_list()\n if len(x_shape) != 4:\n exception.OP_UNSUPPORTED_EXCEPT(\"Upsample without 4D input\", \"Tensorflow\")\n\n if node.attrs.get(\n \"mode\", \"nearest\").lower() not in [\"nearest\", \"bilinear\", \"linear\"]:\n exception.OP_UNSUPPORTED_EXCEPT(\"Upsample without nearest or bilinear\",\n \"Tensorflow\")\n\n @classmethod\n def version_7(cls, node, **kwargs):\n x = kwargs[\"tensor_dict\"][node.inputs[0]]\n x_shape = tf_shape(x)\n attrs = copy.deepcopy(node.attrs)\n scales = attrs[\"scales\"]\n\n assert_n_c_scale_is_one = tf.Assert(\n tf.logical_and(tf.equal(scales[0], 1), tf.equal(scales[1], 1)),\n [scales])\n\n with tf.control_dependencies([assert_n_c_scale_is_one]):\n h_w_scale = scales[2:]\n h_w_shape = x_shape[2:]\n new_h_w_shape = tf.cast(h_w_scale * tf.cast(h_w_shape, type(h_w_scale[0])),\n tf.int32)\n\n mode = attrs.get(\"mode\", \"nearest\")\n if mode.lower() == \"bilinear\" or mode.lower() == \"linear\":\n mode = tf.image.ResizeMethod.BILINEAR\n else:\n mode = tf.image.ResizeMethod.NEAREST_NEIGHBOR\n\n attrs[\"size\"] = new_h_w_shape\n attrs[\"method\"] = mode\n\n return [\n cls.make_tensor_from_onnx_node(\n node, attrs=attrs, c_last_only=True, **kwargs)\n ]\n\n @classmethod\n def version_9(cls, node, **kwargs):\n x = kwargs[\"tensor_dict\"][node.inputs[0]]\n x_shape = tf_shape(x)\n attrs = copy.deepcopy(node.attrs)\n scales = kwargs[\"tensor_dict\"][node.inputs[1]]\n\n assert_n_c_scale_is_one = tf.Assert(\n tf.logical_and(tf.equal(scales[0], 1), tf.equal(scales[1], 1)),\n [scales])\n\n with tf.control_dependencies([assert_n_c_scale_is_one]):\n h_w_scale = scales[2:]\n h_w_shape = x_shape[2:]\n new_h_w_shape = tf.cast(h_w_scale * tf.cast(h_w_shape, scales.dtype),\n tf.int32)\n\n mode = attrs.get(\"mode\", \"nearest\")\n if mode.lower() == \"bilinear\" or mode.lower() == \"linear\":\n mode = tf.image.ResizeMethod.BILINEAR\n else:\n mode = tf.image.ResizeMethod.NEAREST_NEIGHBOR\n\n attrs[\"size\"] = new_h_w_shape\n attrs[\"method\"] = mode\n\n # Remove scale.\n upsample_node = copy.deepcopy(node)\n del upsample_node.inputs[1]\n return [\n cls.make_tensor_from_onnx_node(\n upsample_node, attrs=attrs, c_last_only=True, **kwargs)\n ]\n", "path": "onnx_tf/handlers/backend/upsample.py"}]} | 1,639 | 503 |
gh_patches_debug_17503 | rasdani/github-patches | git_diff | voxel51__fiftyone-563 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] numpy.array sample fields trigger server error when serialized
### System information
- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Ubuntu 20.04
- **FiftyOne installed from (pip or source)**: source
- **FiftyOne version (run `fiftyone --version`)**: v0.5.6 (e86c3570) - does not occur in v0.5.5
- **Python version**: 3.6
### Commands to reproduce
1. Start server with `python fiftyone/server/main.py`
2. Start app with `yarn dev`
3. Run the code below
### Describe the problem
The server fails to serialize the sample (see traceback) and the sample does not display in the app.
### Code to reproduce issue
```python
import fiftyone as fo
import numpy as np
dataset = fo.Dataset()
dataset.add_sample(fo.Sample('/path/to/image', field=np.array([1,2,3])))
session = fo.launch_app(remote=True, dataset=dataset)
```
### Other info / logs
Probably introduced in #543, since that changed JSON encoding. Previously, this field was serialized as:
```
"field": {
"$binary": "eJyb7BfqGxDJyFDGUK2eklqcXKRupaBuk2mhrqOgnpZfVFKUmBefX5SSChJ3S8wpTgWKF2ckFqQC+RrGOpo6CrUKFAAuRgYIYILSzFAaAOdAG2c=",
"$type": "00"
}
```
Server traceback:
```
File "/usr/lib/python3.6/threading.py", line 916, in _bootstrap_inner
self.run()
File "/usr/lib/python3.6/threading.py", line 864, in run
self._target(*self._args, **self._kwargs)
File ".../lib/python3.6/site-packages/socketio/server.py", line 696, in _handle_event_internal
binary=binary))
File ".../lib/python3.6/site-packages/socketio/server.py", line 607, in _send_packet
encoded_packet = pkt.encode()
File ".../lib/python3.6/site-packages/socketio/packet.py", line 71, in encode
encoded_packet += self.json.dumps(data, separators=(',', ':'))
File "/home/alan/code/fiftyone/fiftyone/server/json_util.py", line 47, in dumps
json_util.dumps(*args, **kwargs), parse_constant=lambda c: c
File ".../lib/python3.6/site-packages/bson/json_util.py", line 383, in dumps
return json.dumps(_json_convert(obj, json_options), *args, **kwargs)
File "/usr/lib/python3.6/json/__init__.py", line 238, in dumps
**kw).encode(obj)
File ".../lib/python3.6/site-packages/simplejson/encoder.py", line 275, in encode
chunks = self.iterencode(o, _one_shot=True)
File ".../lib/python3.6/site-packages/simplejson/encoder.py", line 357, in iterencode
return _iterencode(o, 0)
UnicodeDecodeError: 'utf-8' codec can't decode byte 0x9c in position 1: invalid start byte
```
### What areas of FiftyOne does this bug affect?
- [ ] `App`: FiftyOne application issue
- [ ] `Core`: Core `fiftyone` Python library issue
- [x] `Server`: Fiftyone server issue
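For context, a minimal standard-library sketch (independent of FiftyOne, with made-up byte values) of why a raw `bytes` value in the sample's backing dict breaks JSON encoding:
```python
import json

# Illustrative document: one field holds raw bytes, as a serialized numpy array would.
doc = {"field": b"\x9c\x00\x01"}

try:
    json.dumps(doc)
except TypeError as err:
    # The stdlib json module rejects bytes outright; simplejson instead tries to
    # decode them as UTF-8, which is the UnicodeDecodeError shown in the traceback above.
    print(f"cannot serialize: {err}")
```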
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `fiftyone/server/json_util.py`
Content:
```
1 """
2 FiftyOne server json utilies.
3
4 | Copyright 2017-2020, Voxel51, Inc.
5 | `voxel51.com <https://voxel51.com/>`_
6 |
7 """
8 from bson import ObjectId, json_util
9 from flask.json import JSONEncoder
10
11 from fiftyone.core.sample import Sample, SampleView
12 from fiftyone.core.stages import ViewStage
13
14
15 class FiftyOneJSONEncoder(JSONEncoder):
16 """JSON encoder for the FiftyOne server.
17
18 Any classes with non-standard serialization methods should
19 be accounted for in the `default()` method.
20 """
21
22 def default(self, o): # pylint: disable=E0202
23 """Returns the serialized representation of the objects
24
25 Args:
26 o: the object
27
28 Returns:
29 str
30 """
31 if isinstance(o, (Sample, SampleView)):
32 return o.to_mongo_dict()
33 if issubclass(type(o), ViewStage):
34 return o._serialize()
35 if isinstance(o, ObjectId):
36 return str(o)
37 if isinstance(o, float):
38 return json_util.dumps(o)
39 return super().default(o)
40
41 @staticmethod
42 def dumps(*args, **kwargs):
43 """Defined for overriding the default SocketIO `json` interface"""
44 kwargs["cls"] = FiftyOneJSONEncoder
45 return json_util.dumps(
46 json_util.loads(
47 json_util.dumps(*args, **kwargs), parse_constant=lambda c: c
48 ),
49 **kwargs
50 )
51
52 @staticmethod
53 def loads(*args, **kwargs):
54 """Defined for overriding the default SocketIO `json` interface"""
55 return json_util.loads(*args, **kwargs)
56
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/fiftyone/server/json_util.py b/fiftyone/server/json_util.py
--- a/fiftyone/server/json_util.py
+++ b/fiftyone/server/json_util.py
@@ -10,6 +10,16 @@
from fiftyone.core.sample import Sample, SampleView
from fiftyone.core.stages import ViewStage
+import fiftyone.core.utils as fou
+
+
+def _handle_bytes(o):
+ for k, v in o.items():
+ if isinstance(v, bytes):
+ o[k] = str(fou.deserialize_numpy_array(v).shape)
+ if isinstance(v, dict):
+ o[k] = _handle_bytes(v)
+ return o
class FiftyOneJSONEncoder(JSONEncoder):
@@ -29,7 +39,7 @@
str
"""
if isinstance(o, (Sample, SampleView)):
- return o.to_mongo_dict()
+ return _handle_bytes(o.to_mongo_dict())
if issubclass(type(o), ViewStage):
return o._serialize()
if isinstance(o, ObjectId):
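A rough usage sketch of the patched helper (assuming `serialize_numpy_array` is the counterpart of the `deserialize_numpy_array` call used in the hunk above):
```python
import numpy as np

import fiftyone.core.utils as fou
from fiftyone.server.json_util import _handle_bytes

# Sketch only: after the patch, a bytes-valued field is replaced by the shape string
# of the deserialized array, leaving a JSON-safe document.
payload = {"field": fou.serialize_numpy_array(np.array([1, 2, 3]))}
print(_handle_bytes(payload))  # expected to print something like {'field': '(3,)'}
```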
| {"golden_diff": "diff --git a/fiftyone/server/json_util.py b/fiftyone/server/json_util.py\n--- a/fiftyone/server/json_util.py\n+++ b/fiftyone/server/json_util.py\n@@ -10,6 +10,16 @@\n \n from fiftyone.core.sample import Sample, SampleView\n from fiftyone.core.stages import ViewStage\n+import fiftyone.core.utils as fou\n+\n+\n+def _handle_bytes(o):\n+ for k, v in o.items():\n+ if isinstance(v, bytes):\n+ o[k] = str(fou.deserialize_numpy_array(v).shape)\n+ if isinstance(v, dict):\n+ o[k] = _handle_bytes(v)\n+ return o\n \n \n class FiftyOneJSONEncoder(JSONEncoder):\n@@ -29,7 +39,7 @@\n str\n \"\"\"\n if isinstance(o, (Sample, SampleView)):\n- return o.to_mongo_dict()\n+ return _handle_bytes(o.to_mongo_dict())\n if issubclass(type(o), ViewStage):\n return o._serialize()\n if isinstance(o, ObjectId):\n", "issue": "[BUG] numpy.array sample fields trigger server error when serialized\n### System information\r\n\r\n- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Ubuntu 20.04\r\n- **FiftyOne installed from (pip or source)**: source\r\n- **FiftyOne version (run `fiftyone --version`)**: v0.5.6 (e86c3570) - does not occur in v0.5.5\r\n- **Python version**: 3.6\r\n\r\n### Commands to reproduce\r\n\r\n1. Start server with `python fiftyone/server/main.py`\r\n2. Start app with `yarn dev`\r\n3. Run the code below\r\n\r\n### Describe the problem\r\n\r\nThe server fails to serialize the sample (see traceback) and the sample does not display in the app.\r\n\r\n\r\n### Code to reproduce issue\r\n```python\r\nimport fiftyone as fo\r\nimport numpy as np\r\ndataset = fo.Dataset()\r\ndataset.add_sample(fo.Sample('/path/to/image', field=np.array([1,2,3])))\r\nsession = fo.launch_app(remote=True, dataset=dataset)\r\n```\r\n\r\n### Other info / logs\r\n\r\nProbably introduced in #543, since that changed JSON encoding. 
Previously, this field was serialized as:\r\n```\r\n \"field\": {\r\n \"$binary\": \"eJyb7BfqGxDJyFDGUK2eklqcXKRupaBuk2mhrqOgnpZfVFKUmBefX5SSChJ3S8wpTgWKF2ckFqQC+RrGOpo6CrUKFAAuRgYIYILSzFAaAOdAG2c=\",\r\n \"$type\": \"00\"\r\n }\r\n```\r\n\r\nServer traceback:\r\n```\r\n File \"/usr/lib/python3.6/threading.py\", line 916, in _bootstrap_inner\r\n self.run()\r\n File \"/usr/lib/python3.6/threading.py\", line 864, in run\r\n self._target(*self._args, **self._kwargs)\r\n File \".../lib/python3.6/site-packages/socketio/server.py\", line 696, in _handle_event_internal\r\n binary=binary))\r\n File \".../lib/python3.6/site-packages/socketio/server.py\", line 607, in _send_packet\r\n encoded_packet = pkt.encode()\r\n File \".../lib/python3.6/site-packages/socketio/packet.py\", line 71, in encode\r\n encoded_packet += self.json.dumps(data, separators=(',', ':'))\r\n File \"/home/alan/code/fiftyone/fiftyone/server/json_util.py\", line 47, in dumps\r\n json_util.dumps(*args, **kwargs), parse_constant=lambda c: c\r\n File \".../lib/python3.6/site-packages/bson/json_util.py\", line 383, in dumps\r\n return json.dumps(_json_convert(obj, json_options), *args, **kwargs)\r\n File \"/usr/lib/python3.6/json/__init__.py\", line 238, in dumps\r\n **kw).encode(obj)\r\n File \".../lib/python3.6/site-packages/simplejson/encoder.py\", line 275, in encode\r\n chunks = self.iterencode(o, _one_shot=True)\r\n File \".../lib/python3.6/site-packages/simplejson/encoder.py\", line 357, in iterencode\r\n return _iterencode(o, 0)\r\nUnicodeDecodeError: 'utf-8' codec can't decode byte 0x9c in position 1: invalid start byte\r\n```\r\n\r\n### What areas of FiftyOne does this bug affect?\r\n\r\n- [ ] `App`: FiftyOne application issue\r\n- [ ] `Core`: Core `fiftyone` Python library issue\r\n- [x] `Server`: Fiftyone server issue\r\n\n", "before_files": [{"content": "\"\"\"\nFiftyOne server json utilies.\n\n| Copyright 2017-2020, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\nfrom bson import ObjectId, json_util\nfrom flask.json import JSONEncoder\n\nfrom fiftyone.core.sample import Sample, SampleView\nfrom fiftyone.core.stages import ViewStage\n\n\nclass FiftyOneJSONEncoder(JSONEncoder):\n \"\"\"JSON encoder for the FiftyOne server.\n\n Any classes with non-standard serialization methods should\n be accounted for in the `default()` method.\n \"\"\"\n\n def default(self, o): # pylint: disable=E0202\n \"\"\"Returns the serialized representation of the objects\n\n Args:\n o: the object\n\n Returns:\n str\n \"\"\"\n if isinstance(o, (Sample, SampleView)):\n return o.to_mongo_dict()\n if issubclass(type(o), ViewStage):\n return o._serialize()\n if isinstance(o, ObjectId):\n return str(o)\n if isinstance(o, float):\n return json_util.dumps(o)\n return super().default(o)\n\n @staticmethod\n def dumps(*args, **kwargs):\n \"\"\"Defined for overriding the default SocketIO `json` interface\"\"\"\n kwargs[\"cls\"] = FiftyOneJSONEncoder\n return json_util.dumps(\n json_util.loads(\n json_util.dumps(*args, **kwargs), parse_constant=lambda c: c\n ),\n **kwargs\n )\n\n @staticmethod\n def loads(*args, **kwargs):\n \"\"\"Defined for overriding the default SocketIO `json` interface\"\"\"\n return json_util.loads(*args, **kwargs)\n", "path": "fiftyone/server/json_util.py"}], "after_files": [{"content": "\"\"\"\nFiftyOne server json utilies.\n\n| Copyright 2017-2020, Voxel51, Inc.\n| `voxel51.com <https://voxel51.com/>`_\n|\n\"\"\"\nfrom bson import ObjectId, json_util\nfrom flask.json import JSONEncoder\n\nfrom 
fiftyone.core.sample import Sample, SampleView\nfrom fiftyone.core.stages import ViewStage\nimport fiftyone.core.utils as fou\n\n\ndef _handle_bytes(o):\n for k, v in o.items():\n if isinstance(v, bytes):\n o[k] = str(fou.deserialize_numpy_array(v).shape)\n if isinstance(v, dict):\n o[k] = _handle_bytes(v)\n return o\n\n\nclass FiftyOneJSONEncoder(JSONEncoder):\n \"\"\"JSON encoder for the FiftyOne server.\n\n Any classes with non-standard serialization methods should\n be accounted for in the `default()` method.\n \"\"\"\n\n def default(self, o): # pylint: disable=E0202\n \"\"\"Returns the serialized representation of the objects\n\n Args:\n o: the object\n\n Returns:\n str\n \"\"\"\n if isinstance(o, (Sample, SampleView)):\n return _handle_bytes(o.to_mongo_dict())\n if issubclass(type(o), ViewStage):\n return o._serialize()\n if isinstance(o, ObjectId):\n return str(o)\n if isinstance(o, float):\n return json_util.dumps(o)\n return super().default(o)\n\n @staticmethod\n def dumps(*args, **kwargs):\n \"\"\"Defined for overriding the default SocketIO `json` interface\"\"\"\n kwargs[\"cls\"] = FiftyOneJSONEncoder\n return json_util.dumps(\n json_util.loads(\n json_util.dumps(*args, **kwargs), parse_constant=lambda c: c\n ),\n **kwargs\n )\n\n @staticmethod\n def loads(*args, **kwargs):\n \"\"\"Defined for overriding the default SocketIO `json` interface\"\"\"\n return json_util.loads(*args, **kwargs)\n", "path": "fiftyone/server/json_util.py"}]} | 1,552 | 232 |
gh_patches_debug_13692 | rasdani/github-patches | git_diff | pyca__cryptography-1992 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
1.0 release
http://semver.org
> Major version zero (0.y.z) is for initial development. Anything may change at any time. The public API should not be considered stable.
Should we bump our version number all the way to 1.0 for the next release?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cryptography/__about__.py`
Content:
```
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7 __all__ = [
8 "__title__", "__summary__", "__uri__", "__version__", "__author__",
9 "__email__", "__license__", "__copyright__",
10 ]
11
12 __title__ = "cryptography"
13 __summary__ = ("cryptography is a package which provides cryptographic recipes"
14 " and primitives to Python developers.")
15 __uri__ = "https://github.com/pyca/cryptography"
16
17 __version__ = "0.10.dev1"
18
19 __author__ = "The cryptography developers"
20 __email__ = "[email protected]"
21
22 __license__ = "BSD or Apache License, Version 2.0"
23 __copyright__ = "Copyright 2013-2015 {0}".format(__author__)
24
```
Path: `vectors/cryptography_vectors/__about__.py`
Content:
```
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7 __all__ = [
8 "__title__", "__summary__", "__uri__", "__version__", "__author__",
9 "__email__", "__license__", "__copyright__",
10 ]
11
12 __title__ = "cryptography_vectors"
13 __summary__ = "Test vectors for the cryptography package."
14
15 __uri__ = "https://github.com/pyca/cryptography"
16
17 __version__ = "0.10.dev1"
18
19 __author__ = "The cryptography developers"
20 __email__ = "[email protected]"
21
22 __license__ = "BSD or Apache License, Version 2.0"
23 __copyright__ = "Copyright 2013-2015 %s" % __author__
24
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cryptography/__about__.py b/src/cryptography/__about__.py
--- a/src/cryptography/__about__.py
+++ b/src/cryptography/__about__.py
@@ -14,7 +14,7 @@
" and primitives to Python developers.")
__uri__ = "https://github.com/pyca/cryptography"
-__version__ = "0.10.dev1"
+__version__ = "1.0.dev1"
__author__ = "The cryptography developers"
__email__ = "[email protected]"
diff --git a/vectors/cryptography_vectors/__about__.py b/vectors/cryptography_vectors/__about__.py
--- a/vectors/cryptography_vectors/__about__.py
+++ b/vectors/cryptography_vectors/__about__.py
@@ -14,7 +14,7 @@
__uri__ = "https://github.com/pyca/cryptography"
-__version__ = "0.10.dev1"
+__version__ = "1.0.dev1"
__author__ = "The cryptography developers"
__email__ = "[email protected]"
| {"golden_diff": "diff --git a/src/cryptography/__about__.py b/src/cryptography/__about__.py\n--- a/src/cryptography/__about__.py\n+++ b/src/cryptography/__about__.py\n@@ -14,7 +14,7 @@\n \" and primitives to Python developers.\")\n __uri__ = \"https://github.com/pyca/cryptography\"\n \n-__version__ = \"0.10.dev1\"\n+__version__ = \"1.0.dev1\"\n \n __author__ = \"The cryptography developers\"\n __email__ = \"[email protected]\"\ndiff --git a/vectors/cryptography_vectors/__about__.py b/vectors/cryptography_vectors/__about__.py\n--- a/vectors/cryptography_vectors/__about__.py\n+++ b/vectors/cryptography_vectors/__about__.py\n@@ -14,7 +14,7 @@\n \n __uri__ = \"https://github.com/pyca/cryptography\"\n \n-__version__ = \"0.10.dev1\"\n+__version__ = \"1.0.dev1\"\n \n __author__ = \"The cryptography developers\"\n __email__ = \"[email protected]\"\n", "issue": "1.0 release\nhttp://semver.org\n\n> Major version zero (0.y.z) is for initial development. Anything may change at any time. The public API should not be considered stable.\n\nShould we bump our version number all the way to 1.0 for the next release?\n\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\n__all__ = [\n \"__title__\", \"__summary__\", \"__uri__\", \"__version__\", \"__author__\",\n \"__email__\", \"__license__\", \"__copyright__\",\n]\n\n__title__ = \"cryptography\"\n__summary__ = (\"cryptography is a package which provides cryptographic recipes\"\n \" and primitives to Python developers.\")\n__uri__ = \"https://github.com/pyca/cryptography\"\n\n__version__ = \"0.10.dev1\"\n\n__author__ = \"The cryptography developers\"\n__email__ = \"[email protected]\"\n\n__license__ = \"BSD or Apache License, Version 2.0\"\n__copyright__ = \"Copyright 2013-2015 {0}\".format(__author__)\n", "path": "src/cryptography/__about__.py"}, {"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\n__all__ = [\n \"__title__\", \"__summary__\", \"__uri__\", \"__version__\", \"__author__\",\n \"__email__\", \"__license__\", \"__copyright__\",\n]\n\n__title__ = \"cryptography_vectors\"\n__summary__ = \"Test vectors for the cryptography package.\"\n\n__uri__ = \"https://github.com/pyca/cryptography\"\n\n__version__ = \"0.10.dev1\"\n\n__author__ = \"The cryptography developers\"\n__email__ = \"[email protected]\"\n\n__license__ = \"BSD or Apache License, Version 2.0\"\n__copyright__ = \"Copyright 2013-2015 %s\" % __author__\n", "path": "vectors/cryptography_vectors/__about__.py"}], "after_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\n__all__ = [\n \"__title__\", \"__summary__\", \"__uri__\", \"__version__\", \"__author__\",\n \"__email__\", \"__license__\", \"__copyright__\",\n]\n\n__title__ = \"cryptography\"\n__summary__ = (\"cryptography is a package which provides cryptographic recipes\"\n \" and primitives to Python developers.\")\n__uri__ = \"https://github.com/pyca/cryptography\"\n\n__version__ = \"1.0.dev1\"\n\n__author__ = \"The cryptography developers\"\n__email__ = \"[email protected]\"\n\n__license__ = \"BSD or Apache License, Version 2.0\"\n__copyright__ = \"Copyright 2013-2015 {0}\".format(__author__)\n", "path": "src/cryptography/__about__.py"}, {"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\n__all__ = [\n \"__title__\", \"__summary__\", \"__uri__\", \"__version__\", \"__author__\",\n \"__email__\", \"__license__\", \"__copyright__\",\n]\n\n__title__ = \"cryptography_vectors\"\n__summary__ = \"Test vectors for the cryptography package.\"\n\n__uri__ = \"https://github.com/pyca/cryptography\"\n\n__version__ = \"1.0.dev1\"\n\n__author__ = \"The cryptography developers\"\n__email__ = \"[email protected]\"\n\n__license__ = \"BSD or Apache License, Version 2.0\"\n__copyright__ = \"Copyright 2013-2015 %s\" % __author__\n", "path": "vectors/cryptography_vectors/__about__.py"}]} | 848 | 245 |
gh_patches_debug_20125 | rasdani/github-patches | git_diff | rucio__rucio-1084 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
remove unused file from common
Motivation
----------
lib/rucio/common/client.py is probably unused and can be removed
Modification
------------
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lib/rucio/common/client.py`
Content:
```
1 # Copyright European Organization for Nuclear Research (CERN)
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # You may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Authors:
8 # - Vincent Garonne, <[email protected]>, 2012
9
10 import httplib
11
12
13 class BaseClient(object):
14
15 """A base client class"""
16
17 DEFAULT_PORT = 80
18
19 OK_RESPONSE_CODES = (
20 httplib.OK,
21 httplib.CREATED,
22 httplib.ACCEPTED,
23 httplib.NO_CONTENT,
24 )
25
26 def __init__(self, host, port=None, use_ssl=False, auth_tok=None, creds=None):
27 """
28 Creates a new client to some service.
29
30 :param host: The host where service resides
31 :param port: The port where service resides
32 :param use_ssl: Should we use HTTPS?
33 :param auth_tok: The auth token to pass to the server
34 :param creds: The credentials to pass to the auth plugin
35 """
36 self.host = host
37 self.port = port or self.DEFAULT_PORT
38 self.use_ssl = use_ssl
39 self.auth_tok = auth_tok
40 self.creds = creds or {}
41 self.connection = None
42
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lib/rucio/common/client.py b/lib/rucio/common/client.py
deleted file mode 100644
--- a/lib/rucio/common/client.py
+++ /dev/null
@@ -1,41 +0,0 @@
-# Copyright European Organization for Nuclear Research (CERN)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# You may not use this file except in compliance with the License.
-# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
-#
-# Authors:
-# - Vincent Garonne, <[email protected]>, 2012
-
-import httplib
-
-
-class BaseClient(object):
-
- """A base client class"""
-
- DEFAULT_PORT = 80
-
- OK_RESPONSE_CODES = (
- httplib.OK,
- httplib.CREATED,
- httplib.ACCEPTED,
- httplib.NO_CONTENT,
- )
-
- def __init__(self, host, port=None, use_ssl=False, auth_tok=None, creds=None):
- """
- Creates a new client to some service.
-
- :param host: The host where service resides
- :param port: The port where service resides
- :param use_ssl: Should we use HTTPS?
- :param auth_tok: The auth token to pass to the server
- :param creds: The credentials to pass to the auth plugin
- """
- self.host = host
- self.port = port or self.DEFAULT_PORT
- self.use_ssl = use_ssl
- self.auth_tok = auth_tok
- self.creds = creds or {}
- self.connection = None
| {"golden_diff": "diff --git a/lib/rucio/common/client.py b/lib/rucio/common/client.py\ndeleted file mode 100644\n--- a/lib/rucio/common/client.py\n+++ /dev/null\n@@ -1,41 +0,0 @@\n-# Copyright European Organization for Nuclear Research (CERN)\n-#\n-# Licensed under the Apache License, Version 2.0 (the \"License\");\n-# You may not use this file except in compliance with the License.\n-# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0\n-#\n-# Authors:\n-# - Vincent Garonne, <[email protected]>, 2012\n-\n-import httplib\n-\n-\n-class BaseClient(object):\n-\n- \"\"\"A base client class\"\"\"\n-\n- DEFAULT_PORT = 80\n-\n- OK_RESPONSE_CODES = (\n- httplib.OK,\n- httplib.CREATED,\n- httplib.ACCEPTED,\n- httplib.NO_CONTENT,\n- )\n-\n- def __init__(self, host, port=None, use_ssl=False, auth_tok=None, creds=None):\n- \"\"\"\n- Creates a new client to some service.\n-\n- :param host: The host where service resides\n- :param port: The port where service resides\n- :param use_ssl: Should we use HTTPS?\n- :param auth_tok: The auth token to pass to the server\n- :param creds: The credentials to pass to the auth plugin\n- \"\"\"\n- self.host = host\n- self.port = port or self.DEFAULT_PORT\n- self.use_ssl = use_ssl\n- self.auth_tok = auth_tok\n- self.creds = creds or {}\n- self.connection = None\n", "issue": "remove unused file from common\nMotivation\r\n----------\r\nlib/rucio/common/client.py is probably unused and can be removed\r\n\r\nModification\r\n------------\r\n\n", "before_files": [{"content": "# Copyright European Organization for Nuclear Research (CERN)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# You may not use this file except in compliance with the License.\n# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0\n#\n# Authors:\n# - Vincent Garonne, <[email protected]>, 2012\n\nimport httplib\n\n\nclass BaseClient(object):\n\n \"\"\"A base client class\"\"\"\n\n DEFAULT_PORT = 80\n\n OK_RESPONSE_CODES = (\n httplib.OK,\n httplib.CREATED,\n httplib.ACCEPTED,\n httplib.NO_CONTENT,\n )\n\n def __init__(self, host, port=None, use_ssl=False, auth_tok=None, creds=None):\n \"\"\"\n Creates a new client to some service.\n\n :param host: The host where service resides\n :param port: The port where service resides\n :param use_ssl: Should we use HTTPS?\n :param auth_tok: The auth token to pass to the server\n :param creds: The credentials to pass to the auth plugin\n \"\"\"\n self.host = host\n self.port = port or self.DEFAULT_PORT\n self.use_ssl = use_ssl\n self.auth_tok = auth_tok\n self.creds = creds or {}\n self.connection = None\n", "path": "lib/rucio/common/client.py"}], "after_files": [{"content": null, "path": "lib/rucio/common/client.py"}]} | 664 | 386 |
gh_patches_debug_22868 | rasdani/github-patches | git_diff | microsoft__Qcodes-1110 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Creation of db files when import qcodes
It's not a bug I guess, but I think it's still unwanted.
### Steps to reproduce
Set any directory to the current working directory
Execute a python script/program that just imports the qcodes module
### Expected behaviour
No side-effects on the file system
### Actual behaviour
An experiment.db file is created in the current working directory.
I think it's better if measurement databases are only created when we actually call a function that initializes an experiment.
I use the qcodes module to load my experimental data from a lot of different working directories, like folders where I work on papers, during the analyses, etc...
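A short sketch of the behaviour being requested here, using the `initialise_database` helper from the files below - importing should be side-effect free and the file should only appear on an explicit call:
```python
import qcodes  # this import alone should not create any database file

from qcodes.dataset.database import initialise_database

# Only an explicit call like this should create the database at the configured location.
initialise_database()
```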
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `qcodes/dataset/database.py`
Content:
```
1 # high-level interface to the database
2
3 from qcodes.dataset.sqlite_base import connect as _connect
4 from qcodes.dataset.sqlite_base import init_db as _init_db
5 import qcodes.config
6
7
8 def get_DB_location() -> str:
9 return qcodes.config["core"]["db_location"]
10
11
12 def get_DB_debug() -> bool:
13 return bool(qcodes.config["core"]["db_debug"])
14
15
16 def initialise_database() -> None:
17 """
18 Initialise a database in the location specified by the config object
19 If the database already exists, nothing happens
20
21 Args:
22 config: An instance of the config object
23 """
24 conn = _connect(get_DB_location(), get_DB_debug())
25 # init is actually idempotent so it's safe to always call!
26 _init_db(conn)
27 conn.close()
28 del conn
29
```
Path: `qcodes/__init__.py`
Content:
```
1 """Set up the main qcodes namespace."""
2
3 # flake8: noqa (we don't need the "<...> imported but unused" error)
4
5 # config
6
7 from qcodes.config import Config
8 from qcodes.utils.helpers import add_to_spyder_UMR_excludelist
9
10 # we dont want spyder to reload qcodes as this will overwrite the default station
11 # instrument list and running monitor
12 add_to_spyder_UMR_excludelist('qcodes')
13 config = Config() # type: Config
14
15 from qcodes.version import __version__
16
17 plotlib = config.gui.plotlib
18 if plotlib in {'QT', 'all'}:
19 try:
20 from qcodes.plots.pyqtgraph import QtPlot
21 except Exception:
22 print('pyqtgraph plotting not supported, '
23 'try "from qcodes.plots.pyqtgraph import QtPlot" '
24 'to see the full error')
25
26 if plotlib in {'matplotlib', 'all'}:
27 try:
28 from qcodes.plots.qcmatplotlib import MatPlot
29 except Exception:
30 print('matplotlib plotting not supported, '
31 'try "from qcodes.plots.qcmatplotlib import MatPlot" '
32 'to see the full error')
33
34
35 from qcodes.station import Station
36 from qcodes.loops import Loop, active_loop, active_data_set
37 from qcodes.measure import Measure
38 from qcodes.actions import Task, Wait, BreakIf
39 haswebsockets = True
40 try:
41 import websockets
42 except ImportError:
43 haswebsockets = False
44 if haswebsockets:
45 from qcodes.monitor.monitor import Monitor
46
47 from qcodes.data.data_set import DataSet, new_data, load_data
48 from qcodes.data.location import FormatLocation
49 from qcodes.data.data_array import DataArray
50 from qcodes.data.format import Formatter
51 from qcodes.data.gnuplot_format import GNUPlotFormat
52 from qcodes.data.hdf5_format import HDF5Format
53 from qcodes.data.io import DiskIO
54
55 from qcodes.instrument.base import Instrument
56 from qcodes.instrument.ip import IPInstrument
57 from qcodes.instrument.visa import VisaInstrument
58 from qcodes.instrument.channel import InstrumentChannel, ChannelList
59
60 from qcodes.instrument.function import Function
61 from qcodes.instrument.parameter import (
62 Parameter,
63 ArrayParameter,
64 MultiParameter,
65 StandardParameter,
66 ManualParameter,
67 combine,
68 CombinedParameter)
69 from qcodes.instrument.sweep_values import SweepFixedValues, SweepValues
70
71 from qcodes.utils import validators
72 from qcodes.utils.zmq_helpers import Publisher
73 from qcodes.instrument_drivers.test import test_instruments, test_instrument
74
75 from qcodes.dataset.data_set import new_data_set, load_by_counter, load_by_id
76 from qcodes.dataset.experiment_container import new_experiment, load_experiment, load_experiment_by_name, \
77 load_last_experiment, experiments
78 from qcodes.dataset.sqlite_settings import SQLiteSettings
79 from qcodes.dataset.param_spec import ParamSpec
80 # TODO: do we want this?
81 from qcodes.dataset.sqlite_base import connect as _connect
82 from qcodes.dataset.sqlite_base import init_db as _init_db
83
84 _c = _connect(config["core"]["db_location"], config["core"]["db_debug"])
85 # init is actually idempotent so it's safe to always call!
86 _init_db(_c)
87 _c.close()
88 del _c
89
90 try:
91 get_ipython() # type: ignore # Check if we are in iPython
92 from qcodes.utils.magic import register_magic_class
93 _register_magic = config.core.get('register_magic', False)
94 if _register_magic is not False:
95 register_magic_class(magic_commands=_register_magic)
96 except NameError:
97 pass
98 except RuntimeError as e:
99 print(e)
100
101 # ensure to close all instruments when interpreter is closed
102 import atexit
103 atexit.register(Instrument.close_all)
104
105 def test(**kwargs):
106 """
107 Run QCoDeS tests. This requires the test requirements given
108 in test_requirements.txt to be installed.
109 All arguments are forwarded to pytest.main
110 """
111 try:
112 import pytest
113 except ImportError:
114 print("Need pytest to run tests")
115 return
116 args = ['--pyargs', 'qcodes.tests']
117 retcode = pytest.main(args, **kwargs)
118 return retcode
119
120
121 test.__test__ = False # type: ignore # Don't try to run this method as a test
122
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/qcodes/__init__.py b/qcodes/__init__.py
--- a/qcodes/__init__.py
+++ b/qcodes/__init__.py
@@ -77,15 +77,6 @@
load_last_experiment, experiments
from qcodes.dataset.sqlite_settings import SQLiteSettings
from qcodes.dataset.param_spec import ParamSpec
-# TODO: do we want this?
-from qcodes.dataset.sqlite_base import connect as _connect
-from qcodes.dataset.sqlite_base import init_db as _init_db
-
-_c = _connect(config["core"]["db_location"], config["core"]["db_debug"])
-# init is actually idempotent so it's safe to always call!
-_init_db(_c)
-_c.close()
-del _c
try:
get_ipython() # type: ignore # Check if we are in iPython
diff --git a/qcodes/dataset/database.py b/qcodes/dataset/database.py
--- a/qcodes/dataset/database.py
+++ b/qcodes/dataset/database.py
@@ -1,12 +1,14 @@
# high-level interface to the database
+from os.path import expanduser
+
from qcodes.dataset.sqlite_base import connect as _connect
from qcodes.dataset.sqlite_base import init_db as _init_db
import qcodes.config
def get_DB_location() -> str:
- return qcodes.config["core"]["db_location"]
+ return expanduser(qcodes.config["core"]["db_location"])
def get_DB_debug() -> bool:
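The `expanduser` change in the second hunk can be sanity-checked in isolation (the path shown is only an example):
```python
from os.path import expanduser

# expanduser turns "~/experiments.db" into an absolute path under the user's home
# directory, which is what the patched get_DB_location() now returns.
print(expanduser("~/experiments.db"))
```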
| {"golden_diff": "diff --git a/qcodes/__init__.py b/qcodes/__init__.py\n--- a/qcodes/__init__.py\n+++ b/qcodes/__init__.py\n@@ -77,15 +77,6 @@\n load_last_experiment, experiments\n from qcodes.dataset.sqlite_settings import SQLiteSettings\n from qcodes.dataset.param_spec import ParamSpec\n-# TODO: do we want this?\n-from qcodes.dataset.sqlite_base import connect as _connect\n-from qcodes.dataset.sqlite_base import init_db as _init_db\n-\n-_c = _connect(config[\"core\"][\"db_location\"], config[\"core\"][\"db_debug\"])\n-# init is actually idempotent so it's safe to always call!\n-_init_db(_c)\n-_c.close()\n-del _c\n \n try:\n get_ipython() # type: ignore # Check if we are in iPython\ndiff --git a/qcodes/dataset/database.py b/qcodes/dataset/database.py\n--- a/qcodes/dataset/database.py\n+++ b/qcodes/dataset/database.py\n@@ -1,12 +1,14 @@\n # high-level interface to the database\n \n+from os.path import expanduser\n+\n from qcodes.dataset.sqlite_base import connect as _connect\n from qcodes.dataset.sqlite_base import init_db as _init_db\n import qcodes.config\n \n \n def get_DB_location() -> str:\n- return qcodes.config[\"core\"][\"db_location\"]\n+ return expanduser(qcodes.config[\"core\"][\"db_location\"])\n \n \n def get_DB_debug() -> bool:\n", "issue": "Creation of db files when import qcodes\nIt's not a bug I guess, but I think its still unwanted. \r\n\r\n### Steps to reproduce\r\nSet any directory to the current working directory\r\nExecute a python script/program that just imports the qcodes module\r\n\r\n### Expected behaviour\r\nNo side-effects of the file system\r\n\r\n### Actual behaviour\r\nAn experiment.db file is created in the current working directory. \r\n\r\nI think it's better if measurement databases are only created when we actually call a function that initialized an experiment. \r\nI use the qcodes module to load my experimental data from a lot of different working directories, like folders where I work on papers, during the analyses, etc... 
\n", "before_files": [{"content": "# high-level interface to the database\n\nfrom qcodes.dataset.sqlite_base import connect as _connect\nfrom qcodes.dataset.sqlite_base import init_db as _init_db\nimport qcodes.config\n\n\ndef get_DB_location() -> str:\n return qcodes.config[\"core\"][\"db_location\"]\n\n\ndef get_DB_debug() -> bool:\n return bool(qcodes.config[\"core\"][\"db_debug\"])\n\n\ndef initialise_database() -> None:\n \"\"\"\n Initialise a database in the location specified by the config object\n If the database already exists, nothing happens\n\n Args:\n config: An instance of the config object\n \"\"\"\n conn = _connect(get_DB_location(), get_DB_debug())\n # init is actually idempotent so it's safe to always call!\n _init_db(conn)\n conn.close()\n del conn\n", "path": "qcodes/dataset/database.py"}, {"content": "\"\"\"Set up the main qcodes namespace.\"\"\"\n\n# flake8: noqa (we don't need the \"<...> imported but unused\" error)\n\n# config\n\nfrom qcodes.config import Config\nfrom qcodes.utils.helpers import add_to_spyder_UMR_excludelist\n\n# we dont want spyder to reload qcodes as this will overwrite the default station\n# instrument list and running monitor\nadd_to_spyder_UMR_excludelist('qcodes')\nconfig = Config() # type: Config\n\nfrom qcodes.version import __version__\n\nplotlib = config.gui.plotlib\nif plotlib in {'QT', 'all'}:\n try:\n from qcodes.plots.pyqtgraph import QtPlot\n except Exception:\n print('pyqtgraph plotting not supported, '\n 'try \"from qcodes.plots.pyqtgraph import QtPlot\" '\n 'to see the full error')\n\nif plotlib in {'matplotlib', 'all'}:\n try:\n from qcodes.plots.qcmatplotlib import MatPlot\n except Exception:\n print('matplotlib plotting not supported, '\n 'try \"from qcodes.plots.qcmatplotlib import MatPlot\" '\n 'to see the full error')\n\n\nfrom qcodes.station import Station\nfrom qcodes.loops import Loop, active_loop, active_data_set\nfrom qcodes.measure import Measure\nfrom qcodes.actions import Task, Wait, BreakIf\nhaswebsockets = True\ntry:\n import websockets\nexcept ImportError:\n haswebsockets = False\nif haswebsockets:\n from qcodes.monitor.monitor import Monitor\n\nfrom qcodes.data.data_set import DataSet, new_data, load_data\nfrom qcodes.data.location import FormatLocation\nfrom qcodes.data.data_array import DataArray\nfrom qcodes.data.format import Formatter\nfrom qcodes.data.gnuplot_format import GNUPlotFormat\nfrom qcodes.data.hdf5_format import HDF5Format\nfrom qcodes.data.io import DiskIO\n\nfrom qcodes.instrument.base import Instrument\nfrom qcodes.instrument.ip import IPInstrument\nfrom qcodes.instrument.visa import VisaInstrument\nfrom qcodes.instrument.channel import InstrumentChannel, ChannelList\n\nfrom qcodes.instrument.function import Function\nfrom qcodes.instrument.parameter import (\n Parameter,\n ArrayParameter,\n MultiParameter,\n StandardParameter,\n ManualParameter,\n combine,\n CombinedParameter)\nfrom qcodes.instrument.sweep_values import SweepFixedValues, SweepValues\n\nfrom qcodes.utils import validators\nfrom qcodes.utils.zmq_helpers import Publisher\nfrom qcodes.instrument_drivers.test import test_instruments, test_instrument\n\nfrom qcodes.dataset.data_set import new_data_set, load_by_counter, load_by_id\nfrom qcodes.dataset.experiment_container import new_experiment, load_experiment, load_experiment_by_name, \\\n load_last_experiment, experiments\nfrom qcodes.dataset.sqlite_settings import SQLiteSettings\nfrom qcodes.dataset.param_spec import ParamSpec\n# TODO: do we want this?\nfrom 
qcodes.dataset.sqlite_base import connect as _connect\nfrom qcodes.dataset.sqlite_base import init_db as _init_db\n\n_c = _connect(config[\"core\"][\"db_location\"], config[\"core\"][\"db_debug\"])\n# init is actually idempotent so it's safe to always call!\n_init_db(_c)\n_c.close()\ndel _c\n\ntry:\n get_ipython() # type: ignore # Check if we are in iPython\n from qcodes.utils.magic import register_magic_class\n _register_magic = config.core.get('register_magic', False)\n if _register_magic is not False:\n register_magic_class(magic_commands=_register_magic)\nexcept NameError:\n pass\nexcept RuntimeError as e:\n print(e)\n\n# ensure to close all instruments when interpreter is closed\nimport atexit\natexit.register(Instrument.close_all)\n\ndef test(**kwargs):\n \"\"\"\n Run QCoDeS tests. This requires the test requirements given\n in test_requirements.txt to be installed.\n All arguments are forwarded to pytest.main\n \"\"\"\n try:\n import pytest\n except ImportError:\n print(\"Need pytest to run tests\")\n return\n args = ['--pyargs', 'qcodes.tests']\n retcode = pytest.main(args, **kwargs)\n return retcode\n\n\ntest.__test__ = False # type: ignore # Don't try to run this method as a test\n", "path": "qcodes/__init__.py"}], "after_files": [{"content": "# high-level interface to the database\n\nfrom os.path import expanduser\n\nfrom qcodes.dataset.sqlite_base import connect as _connect\nfrom qcodes.dataset.sqlite_base import init_db as _init_db\nimport qcodes.config\n\n\ndef get_DB_location() -> str:\n return expanduser(qcodes.config[\"core\"][\"db_location\"])\n\n\ndef get_DB_debug() -> bool:\n return bool(qcodes.config[\"core\"][\"db_debug\"])\n\n\ndef initialise_database() -> None:\n \"\"\"\n Initialise a database in the location specified by the config object\n If the database already exists, nothing happens\n\n Args:\n config: An instance of the config object\n \"\"\"\n conn = _connect(get_DB_location(), get_DB_debug())\n # init is actually idempotent so it's safe to always call!\n _init_db(conn)\n conn.close()\n del conn\n", "path": "qcodes/dataset/database.py"}, {"content": "\"\"\"Set up the main qcodes namespace.\"\"\"\n\n# flake8: noqa (we don't need the \"<...> imported but unused\" error)\n\n# config\n\nfrom qcodes.config import Config\nfrom qcodes.utils.helpers import add_to_spyder_UMR_excludelist\n\n# we dont want spyder to reload qcodes as this will overwrite the default station\n# instrument list and running monitor\nadd_to_spyder_UMR_excludelist('qcodes')\nconfig = Config() # type: Config\n\nfrom qcodes.version import __version__\n\nplotlib = config.gui.plotlib\nif plotlib in {'QT', 'all'}:\n try:\n from qcodes.plots.pyqtgraph import QtPlot\n except Exception:\n print('pyqtgraph plotting not supported, '\n 'try \"from qcodes.plots.pyqtgraph import QtPlot\" '\n 'to see the full error')\n\nif plotlib in {'matplotlib', 'all'}:\n try:\n from qcodes.plots.qcmatplotlib import MatPlot\n except Exception:\n print('matplotlib plotting not supported, '\n 'try \"from qcodes.plots.qcmatplotlib import MatPlot\" '\n 'to see the full error')\n\n\nfrom qcodes.station import Station\nfrom qcodes.loops import Loop, active_loop, active_data_set\nfrom qcodes.measure import Measure\nfrom qcodes.actions import Task, Wait, BreakIf\nhaswebsockets = True\ntry:\n import websockets\nexcept ImportError:\n haswebsockets = False\nif haswebsockets:\n from qcodes.monitor.monitor import Monitor\n\nfrom qcodes.data.data_set import DataSet, new_data, load_data\nfrom qcodes.data.location import 
FormatLocation\nfrom qcodes.data.data_array import DataArray\nfrom qcodes.data.format import Formatter\nfrom qcodes.data.gnuplot_format import GNUPlotFormat\nfrom qcodes.data.hdf5_format import HDF5Format\nfrom qcodes.data.io import DiskIO\n\nfrom qcodes.instrument.base import Instrument\nfrom qcodes.instrument.ip import IPInstrument\nfrom qcodes.instrument.visa import VisaInstrument\nfrom qcodes.instrument.channel import InstrumentChannel, ChannelList\n\nfrom qcodes.instrument.function import Function\nfrom qcodes.instrument.parameter import (\n Parameter,\n ArrayParameter,\n MultiParameter,\n StandardParameter,\n ManualParameter,\n combine,\n CombinedParameter)\nfrom qcodes.instrument.sweep_values import SweepFixedValues, SweepValues\n\nfrom qcodes.utils import validators\nfrom qcodes.utils.zmq_helpers import Publisher\nfrom qcodes.instrument_drivers.test import test_instruments, test_instrument\n\nfrom qcodes.dataset.data_set import new_data_set, load_by_counter, load_by_id\nfrom qcodes.dataset.experiment_container import new_experiment, load_experiment, load_experiment_by_name, \\\n load_last_experiment, experiments\nfrom qcodes.dataset.sqlite_settings import SQLiteSettings\nfrom qcodes.dataset.param_spec import ParamSpec\n\ntry:\n get_ipython() # type: ignore # Check if we are in iPython\n from qcodes.utils.magic import register_magic_class\n _register_magic = config.core.get('register_magic', False)\n if _register_magic is not False:\n register_magic_class(magic_commands=_register_magic)\nexcept NameError:\n pass\nexcept RuntimeError as e:\n print(e)\n\n# ensure to close all instruments when interpreter is closed\nimport atexit\natexit.register(Instrument.close_all)\n\ndef test(**kwargs):\n \"\"\"\n Run QCoDeS tests. This requires the test requirements given\n in test_requirements.txt to be installed.\n All arguments are forwarded to pytest.main\n \"\"\"\n try:\n import pytest\n except ImportError:\n print(\"Need pytest to run tests\")\n return\n args = ['--pyargs', 'qcodes.tests']\n retcode = pytest.main(args, **kwargs)\n return retcode\n\n\ntest.__test__ = False # type: ignore # Don't try to run this method as a test\n", "path": "qcodes/__init__.py"}]} | 1,828 | 321 |
gh_patches_debug_29675 | rasdani/github-patches | git_diff | opensearch-project__opensearch-build-147 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Script to build each component from manifest and assemble bundle.
This script should read a manifest and output all artifacts ready for upload.
Example.
/build/opensearch-dashboards-min-1.0.0-SNAPSHOT-linux-arm64-198.tar.gz <- min bundle
/build/opensearch-dashboards-1.0.0-SNAPSHOT-linux-arm64-198.tar.gz <- full bundle
/build/opensearch-sql-1.0.0.0-rc1.zip
/build/org/opensearch... <- maven artifacts
Input to the script should be a manifest file location - format defined in #111
Output: all required artifacts are written to ./build
- [x] Clone each component repository defined in the manifest
- [x] Build each component. This includes plugin zips and maven artifacts, placed under a new folder with the build id. Note: We need to know if the version of a particular component is already published to maven central. If this is the case we do not need to rebuild and include that artifact.
- [ ] Assemble the bundle itself and add to the /build directory. This is dependent on being able to write manifests - #134
To make assembling maven artifacts easier, each repo can be published to maven local and copied from that location into /build. All artifacts will be under org/opensearch.
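As a rough illustration only (the manifest schema from #111 and the import path are assumptions, not confirmed), the end-to-end flow could be driven like this:
```python
import yaml

from lib.component import Component  # the Component class shown in the files below

# Hypothetical driver: read the manifest, then clone, build and export every component.
with open("manifest.yml") as manifest_file:
    manifest = yaml.safe_load(manifest_file)

build_version = manifest["build"]["version"]    # assumed manifest layout
build_arch = manifest["build"]["architecture"]  # assumed manifest layout

for entry in manifest["components"]:
    component = Component(entry)
    component.checkout()
    component.build(build_version, build_arch)
    component.export("./build")
```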
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tools/bundle-build/lib/component.py`
Content:
```
1 import os
2 import tempfile
3 import subprocess
4 from lib.git import GitRepository
5
6 class Component:
7 def __init__(self, data):
8 self._name = data['name']
9 self._repository = data['repository']
10 self._ref = data['ref']
11
12 def name(self):
13 return self._name
14
15 def repository(self):
16 return self._repository
17
18 def git_repository(self):
19 return self._git_repository
20
21 def ref(self):
22 return self._ref
23
24 def checkout(self):
25 self._git_repository = GitRepository(self.repository(), self.ref())
26
27 # script overridden in this repo
28 def custom_component_script_path(self):
29 dirname = os.path.dirname(os.path.abspath(__file__))
30 return os.path.realpath(os.path.join(dirname, '../../../scripts/bundle-build/components', self.name(), 'build.sh'))
31
32 # script inside the component repo
33 def component_script_path(self):
34 dirname = self.git_repository().dir()
35 return os.path.realpath(os.path.join(dirname, 'scripts/build.sh'))
36
37 # default gradle script
38 def default_script_path(self):
39 dirname = os.path.dirname(os.path.abspath(__file__))
40 return os.path.realpath(os.path.join(dirname, '../../../scripts/bundle-build/standard-gradle-build/build.sh'))
41
42 def build_script(self):
43 paths = [self.component_script_path(), self.custom_component_script_path(), self.default_script_path()]
44 return next(filter(lambda path: os.path.exists(path), paths), None)
45
46 def build(self, version, arch):
47 build_script = f'{self.build_script()} {version} {arch}'
48 print(f'Running {build_script} ...')
49 self.git_repository().execute(build_script)
50
51 def artifacts_path(self):
52 dirname = self.git_repository().dir()
53 return os.path.realpath(os.path.join(dirname, 'artifacts'))
54
55 def export(self, dest):
56 artifacts_path = self.artifacts_path()
57 if os.path.exists(artifacts_path):
58 print(f'Publishing artifacts from {artifacts_path} into {dest} ...')
59 self.git_repository().execute(f'cp -r "{artifacts_path}/"* "{dest}"')
60 else:
61 print(f'No artifacts found in {artifacts_path}, skipping.')
62
63 def dict(self):
64 return {
65 'name': self.name(),
66 'repository': self.repository(),
67 'ref': self.ref(),
68 'sha': self.git_repository().sha()
69 }
70
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/tools/bundle-build/lib/component.py b/tools/bundle-build/lib/component.py
--- a/tools/bundle-build/lib/component.py
+++ b/tools/bundle-build/lib/component.py
@@ -21,6 +21,9 @@
def ref(self):
return self._ref
+ def artifacts(self):
+ return self._artifacts
+
def checkout(self):
self._git_repository = GitRepository(self.repository(), self.ref())
@@ -57,13 +60,29 @@
if os.path.exists(artifacts_path):
print(f'Publishing artifacts from {artifacts_path} into {dest} ...')
self.git_repository().execute(f'cp -r "{artifacts_path}/"* "{dest}"')
+ self.set_artifacts()
else:
print(f'No artifacts found in {artifacts_path}, skipping.')
+ def set_artifacts(self):
+ self._artifacts = {key: self.file_paths(key) for key in ["maven", "plugins", "bundle", "libs"] if self.file_paths(key)}
+
+ def file_paths(self, dir_name):
+ artifacts_path = self.artifacts_path()
+ sub_dir = os.path.join(artifacts_path, dir_name)
+ file_paths = []
+ if os.path.exists(sub_dir):
+ for dir, dirs, files in os.walk(sub_dir):
+ for file_name in files:
+ path = os.path.relpath(os.path.join(dir, file_name), artifacts_path)
+ file_paths.append(path)
+ return file_paths
+
def dict(self):
return {
'name': self.name(),
'repository': self.repository(),
'ref': self.ref(),
- 'sha': self.git_repository().sha()
+ 'sha': self.git_repository().sha(),
+ 'artifacts': self.artifacts()
}
| {"golden_diff": "diff --git a/tools/bundle-build/lib/component.py b/tools/bundle-build/lib/component.py\n--- a/tools/bundle-build/lib/component.py\n+++ b/tools/bundle-build/lib/component.py\n@@ -21,6 +21,9 @@\n def ref(self):\n return self._ref\n \n+ def artifacts(self):\n+ return self._artifacts\n+\n def checkout(self):\n self._git_repository = GitRepository(self.repository(), self.ref())\n \n@@ -57,13 +60,29 @@\n if os.path.exists(artifacts_path):\n print(f'Publishing artifacts from {artifacts_path} into {dest} ...')\n self.git_repository().execute(f'cp -r \"{artifacts_path}/\"* \"{dest}\"')\n+ self.set_artifacts()\n else:\n print(f'No artifacts found in {artifacts_path}, skipping.')\n \n+ def set_artifacts(self):\n+ self._artifacts = {key: self.file_paths(key) for key in [\"maven\", \"plugins\", \"bundle\", \"libs\"] if self.file_paths(key)}\n+\n+ def file_paths(self, dir_name):\n+ artifacts_path = self.artifacts_path()\n+ sub_dir = os.path.join(artifacts_path, dir_name)\n+ file_paths = []\n+ if os.path.exists(sub_dir):\n+ for dir, dirs, files in os.walk(sub_dir):\n+ for file_name in files:\n+ path = os.path.relpath(os.path.join(dir, file_name), artifacts_path)\n+ file_paths.append(path)\n+ return file_paths\n+\n def dict(self):\n return {\n 'name': self.name(),\n 'repository': self.repository(),\n 'ref': self.ref(),\n- 'sha': self.git_repository().sha()\n+ 'sha': self.git_repository().sha(),\n+ 'artifacts': self.artifacts()\n }\n", "issue": "Script to build each component from manifest and assemble bundle.\nThis script should read a manifest and output all artifacts ready for upload.\r\nExample.\r\n/build/opensearch-dashboards-min-1.0.0-SNAPSHOT-linux-arm64-198.tar.gz <- min bundle\r\n/build/opensearch-dashboards-1.0.0-SNAPSHOT-linux-arm64-198.tar.gz. <-- full bundle\r\n/build/opensearch-sql-1.0.0.0-rc1.zip\r\n/build/org/opensearch... <- maven artifacts\r\n\r\nInput: to the script should be a manifest file location - format defined here #111 \r\nOutput: all required artifacts are written to ./build\r\n\r\n- [x] Clone each component repository defined in the manifest\r\n- [x] Build each component. This includes plugin zips and maven artifacts and place under a new folder with build id. Note: We need to know if the version of a particular component is already published to maven central. If this is the case we do not need to rebuild and include that artifact.\r\n- [ ] Assemble the bundle itself and add to the /build directory. This is dependent on being able to write manifests - #134 \r\n\r\nTo make assembling maven artifacts easier, each repo can be published to maven local and copied from that location into /build. 
All artifacts will be under org/opensearch.\r\n\n", "before_files": [{"content": "import os\nimport tempfile\nimport subprocess\nfrom lib.git import GitRepository\n\nclass Component:\n def __init__(self, data):\n self._name = data['name']\n self._repository = data['repository']\n self._ref = data['ref']\n\n def name(self):\n return self._name\n\n def repository(self):\n return self._repository\n\n def git_repository(self):\n return self._git_repository\n\n def ref(self):\n return self._ref\n\n def checkout(self):\n self._git_repository = GitRepository(self.repository(), self.ref())\n\n # script overridden in this repo\n def custom_component_script_path(self):\n dirname = os.path.dirname(os.path.abspath(__file__)) \n return os.path.realpath(os.path.join(dirname, '../../../scripts/bundle-build/components', self.name(), 'build.sh'))\n\n # script inside the component repo\n def component_script_path(self):\n dirname = self.git_repository().dir() \n return os.path.realpath(os.path.join(dirname, 'scripts/build.sh'))\n\n # default gradle script\n def default_script_path(self):\n dirname = os.path.dirname(os.path.abspath(__file__)) \n return os.path.realpath(os.path.join(dirname, '../../../scripts/bundle-build/standard-gradle-build/build.sh'))\n\n def build_script(self):\n paths = [self.component_script_path(), self.custom_component_script_path(), self.default_script_path()]\n return next(filter(lambda path: os.path.exists(path), paths), None)\n\n def build(self, version, arch):\n build_script = f'{self.build_script()} {version} {arch}' \n print(f'Running {build_script} ...')\n self.git_repository().execute(build_script)\n\n def artifacts_path(self):\n dirname = self.git_repository().dir()\n return os.path.realpath(os.path.join(dirname, 'artifacts'))\n\n def export(self, dest):\n artifacts_path = self.artifacts_path()\n if os.path.exists(artifacts_path):\n print(f'Publishing artifacts from {artifacts_path} into {dest} ...')\n self.git_repository().execute(f'cp -r \"{artifacts_path}/\"* \"{dest}\"')\n else:\n print(f'No artifacts found in {artifacts_path}, skipping.')\n\n def dict(self):\n return {\n 'name': self.name(),\n 'repository': self.repository(),\n 'ref': self.ref(),\n 'sha': self.git_repository().sha()\n }\n", "path": "tools/bundle-build/lib/component.py"}], "after_files": [{"content": "import os\nimport tempfile\nimport subprocess\nfrom lib.git import GitRepository\n\nclass Component:\n def __init__(self, data):\n self._name = data['name']\n self._repository = data['repository']\n self._ref = data['ref']\n\n def name(self):\n return self._name\n\n def repository(self):\n return self._repository\n\n def git_repository(self):\n return self._git_repository\n\n def ref(self):\n return self._ref\n\n def artifacts(self):\n return self._artifacts\n\n def checkout(self):\n self._git_repository = GitRepository(self.repository(), self.ref())\n\n # script overridden in this repo\n def custom_component_script_path(self):\n dirname = os.path.dirname(os.path.abspath(__file__)) \n return os.path.realpath(os.path.join(dirname, '../../../scripts/bundle-build/components', self.name(), 'build.sh'))\n\n # script inside the component repo\n def component_script_path(self):\n dirname = self.git_repository().dir() \n return os.path.realpath(os.path.join(dirname, 'scripts/build.sh'))\n\n # default gradle script\n def default_script_path(self):\n dirname = os.path.dirname(os.path.abspath(__file__)) \n return os.path.realpath(os.path.join(dirname, 
'../../../scripts/bundle-build/standard-gradle-build/build.sh'))\n\n def build_script(self):\n paths = [self.component_script_path(), self.custom_component_script_path(), self.default_script_path()]\n return next(filter(lambda path: os.path.exists(path), paths), None)\n\n def build(self, version, arch):\n build_script = f'{self.build_script()} {version} {arch}' \n print(f'Running {build_script} ...')\n self.git_repository().execute(build_script)\n\n def artifacts_path(self):\n dirname = self.git_repository().dir()\n return os.path.realpath(os.path.join(dirname, 'artifacts'))\n\n def export(self, dest):\n artifacts_path = self.artifacts_path()\n if os.path.exists(artifacts_path):\n print(f'Publishing artifacts from {artifacts_path} into {dest} ...')\n self.git_repository().execute(f'cp -r \"{artifacts_path}/\"* \"{dest}\"')\n self.set_artifacts()\n else:\n print(f'No artifacts found in {artifacts_path}, skipping.')\n\n def set_artifacts(self):\n self._artifacts = {key: self.file_paths(key) for key in [\"maven\", \"plugins\", \"bundle\", \"libs\"] if self.file_paths(key)}\n\n def file_paths(self, dir_name):\n artifacts_path = self.artifacts_path()\n sub_dir = os.path.join(artifacts_path, dir_name)\n file_paths = []\n if os.path.exists(sub_dir):\n for dir, dirs, files in os.walk(sub_dir):\n for file_name in files:\n path = os.path.relpath(os.path.join(dir, file_name), artifacts_path)\n file_paths.append(path)\n return file_paths\n\n def dict(self):\n return {\n 'name': self.name(),\n 'repository': self.repository(),\n 'ref': self.ref(),\n 'sha': self.git_repository().sha(),\n 'artifacts': self.artifacts()\n }\n", "path": "tools/bundle-build/lib/component.py"}]} | 1,211 | 403 |
gh_patches_debug_7646 | rasdani/github-patches | git_diff | lnbits__lnbits-194 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
LNURLp links give errors on WalletofSatoshi and BlueWallet
Using this LNURLp link: https://lnbits.com/lnurlp/212
BlueWallet Error: "Alert: Bad response from server"
Wallet of Satoshi Error: "Error: Could not complete payment, please try again."
--- END ISSUE ---
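The golden diff at the end of this record adds a guard that a pay link's `success_url` starts with `https://`, which suggests the wallet errors were traced back to links configured with an insecure success URL (the views file shown below already warns that LNURLs themselves must be delivered over a publicly accessible `https` domain or Tor). A minimal, hypothetical sketch of that validation, with the field name taken from the patch rather than from any wider lnbits API:

```python
# Hypothetical sketch: reject a pay link whose success_url is not HTTPS.
def validate_success_url(data: dict) -> None:
    success_url = data.get("success_url", "")
    if success_url and not success_url.startswith("https://"):
        raise ValueError("Success URL must be secure https://...")
```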
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lnbits/extensions/lnurlp/views_api.py`
Content:
```
1 from quart import g, jsonify, request
2 from http import HTTPStatus
3 from lnurl.exceptions import InvalidUrl as LnurlInvalidUrl # type: ignore
4
5 from lnbits.core.crud import get_user
6 from lnbits.decorators import api_check_wallet_key, api_validate_post_request
7 from lnbits.utils.exchange_rates import currencies, get_fiat_rate_satoshis
8
9 from . import lnurlp_ext
10 from .crud import (
11 create_pay_link,
12 get_pay_link,
13 get_pay_links,
14 update_pay_link,
15 delete_pay_link,
16 )
17
18
19 @lnurlp_ext.route("/api/v1/currencies", methods=["GET"])
20 async def api_list_currencies_available():
21 return jsonify(list(currencies.keys()))
22
23
24 @lnurlp_ext.route("/api/v1/links", methods=["GET"])
25 @api_check_wallet_key("invoice")
26 async def api_links():
27 wallet_ids = [g.wallet.id]
28
29 if "all_wallets" in request.args:
30 wallet_ids = (await get_user(g.wallet.user)).wallet_ids
31
32 try:
33 return (
34 jsonify(
35 [
36 {**link._asdict(), **{"lnurl": link.lnurl}}
37 for link in await get_pay_links(wallet_ids)
38 ]
39 ),
40 HTTPStatus.OK,
41 )
42 except LnurlInvalidUrl:
43 return (
44 jsonify(
45 {
46 "message": "LNURLs need to be delivered over a publically accessible `https` domain or Tor."
47 }
48 ),
49 HTTPStatus.UPGRADE_REQUIRED,
50 )
51
52
53 @lnurlp_ext.route("/api/v1/links/<link_id>", methods=["GET"])
54 @api_check_wallet_key("invoice")
55 async def api_link_retrieve(link_id):
56 link = await get_pay_link(link_id)
57
58 if not link:
59 return jsonify({"message": "Pay link does not exist."}), HTTPStatus.NOT_FOUND
60
61 if link.wallet != g.wallet.id:
62 return jsonify({"message": "Not your pay link."}), HTTPStatus.FORBIDDEN
63
64 return jsonify({**link._asdict(), **{"lnurl": link.lnurl}}), HTTPStatus.OK
65
66
67 @lnurlp_ext.route("/api/v1/links", methods=["POST"])
68 @lnurlp_ext.route("/api/v1/links/<link_id>", methods=["PUT"])
69 @api_check_wallet_key("invoice")
70 @api_validate_post_request(
71 schema={
72 "description": {"type": "string", "empty": False, "required": True},
73 "min": {"type": "number", "min": 0.01, "required": True},
74 "max": {"type": "number", "min": 0.01, "required": True},
75 "currency": {"type": "string", "nullable": True, "required": False},
76 "comment_chars": {"type": "integer", "required": True, "min": 0, "max": 800},
77 "webhook_url": {"type": "string", "required": False},
78 "success_text": {"type": "string", "required": False},
79 "success_url": {"type": "string", "required": False},
80 }
81 )
82 async def api_link_create_or_update(link_id=None):
83 if g.data["min"] > g.data["max"]:
84 return jsonify({"message": "Min is greater than max."}), HTTPStatus.BAD_REQUEST
85
86 if g.data.get("currency") == None and (
87 round(g.data["min"]) != g.data["min"] or round(g.data["max"]) != g.data["max"]
88 ):
89 return jsonify({"message": "Must use full satoshis."}), HTTPStatus.BAD_REQUEST
90
91 if link_id:
92 link = await get_pay_link(link_id)
93
94 if not link:
95 return (
96 jsonify({"message": "Pay link does not exist."}),
97 HTTPStatus.NOT_FOUND,
98 )
99
100 if link.wallet != g.wallet.id:
101 return jsonify({"message": "Not your pay link."}), HTTPStatus.FORBIDDEN
102
103 link = await update_pay_link(link_id, **g.data)
104 else:
105 link = await create_pay_link(wallet_id=g.wallet.id, **g.data)
106
107 return (
108 jsonify({**link._asdict(), **{"lnurl": link.lnurl}}),
109 HTTPStatus.OK if link_id else HTTPStatus.CREATED,
110 )
111
112
113 @lnurlp_ext.route("/api/v1/links/<link_id>", methods=["DELETE"])
114 @api_check_wallet_key("invoice")
115 async def api_link_delete(link_id):
116 link = await get_pay_link(link_id)
117
118 if not link:
119 return jsonify({"message": "Pay link does not exist."}), HTTPStatus.NOT_FOUND
120
121 if link.wallet != g.wallet.id:
122 return jsonify({"message": "Not your pay link."}), HTTPStatus.FORBIDDEN
123
124 await delete_pay_link(link_id)
125
126 return "", HTTPStatus.NO_CONTENT
127
128
129 @lnurlp_ext.route("/api/v1/rate/<currency>", methods=["GET"])
130 async def api_check_fiat_rate(currency):
131 try:
132 rate = await get_fiat_rate_satoshis(currency)
133 except AssertionError:
134 rate = None
135
136 return jsonify({"rate": rate}), HTTPStatus.OK
137
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lnbits/extensions/lnurlp/views_api.py b/lnbits/extensions/lnurlp/views_api.py
--- a/lnbits/extensions/lnurlp/views_api.py
+++ b/lnbits/extensions/lnurlp/views_api.py
@@ -87,6 +87,9 @@
round(g.data["min"]) != g.data["min"] or round(g.data["max"]) != g.data["max"]
):
return jsonify({"message": "Must use full satoshis."}), HTTPStatus.BAD_REQUEST
+
+ if g.data["success_url"][:8] != "https://":
+ return jsonify({"message": "Success URL must be secure https://..."}), HTTPStatus.BAD_REQUEST
if link_id:
link = await get_pay_link(link_id)
| {"golden_diff": "diff --git a/lnbits/extensions/lnurlp/views_api.py b/lnbits/extensions/lnurlp/views_api.py\n--- a/lnbits/extensions/lnurlp/views_api.py\n+++ b/lnbits/extensions/lnurlp/views_api.py\n@@ -87,6 +87,9 @@\n round(g.data[\"min\"]) != g.data[\"min\"] or round(g.data[\"max\"]) != g.data[\"max\"]\n ):\n return jsonify({\"message\": \"Must use full satoshis.\"}), HTTPStatus.BAD_REQUEST\n+ \n+ if g.data[\"success_url\"][:8] != \"https://\":\n+ return jsonify({\"message\": \"Success URL must be secure https://...\"}), HTTPStatus.BAD_REQUEST\n \n if link_id:\n link = await get_pay_link(link_id)\n", "issue": "LNURLp links give errors on WalletofSatoshi and BlueWallet\nUsing this LNURLp link: https://lnbits.com/lnurlp/212\r\n\r\nBlueWallet Error: \"Alert: Bad response from server\"\r\nWallet of Satoshi Error: \"Error: Could not complete payment, please try again.\"\n", "before_files": [{"content": "from quart import g, jsonify, request\nfrom http import HTTPStatus\nfrom lnurl.exceptions import InvalidUrl as LnurlInvalidUrl # type: ignore\n\nfrom lnbits.core.crud import get_user\nfrom lnbits.decorators import api_check_wallet_key, api_validate_post_request\nfrom lnbits.utils.exchange_rates import currencies, get_fiat_rate_satoshis\n\nfrom . import lnurlp_ext\nfrom .crud import (\n create_pay_link,\n get_pay_link,\n get_pay_links,\n update_pay_link,\n delete_pay_link,\n)\n\n\n@lnurlp_ext.route(\"/api/v1/currencies\", methods=[\"GET\"])\nasync def api_list_currencies_available():\n return jsonify(list(currencies.keys()))\n\n\n@lnurlp_ext.route(\"/api/v1/links\", methods=[\"GET\"])\n@api_check_wallet_key(\"invoice\")\nasync def api_links():\n wallet_ids = [g.wallet.id]\n\n if \"all_wallets\" in request.args:\n wallet_ids = (await get_user(g.wallet.user)).wallet_ids\n\n try:\n return (\n jsonify(\n [\n {**link._asdict(), **{\"lnurl\": link.lnurl}}\n for link in await get_pay_links(wallet_ids)\n ]\n ),\n HTTPStatus.OK,\n )\n except LnurlInvalidUrl:\n return (\n jsonify(\n {\n \"message\": \"LNURLs need to be delivered over a publically accessible `https` domain or Tor.\"\n }\n ),\n HTTPStatus.UPGRADE_REQUIRED,\n )\n\n\n@lnurlp_ext.route(\"/api/v1/links/<link_id>\", methods=[\"GET\"])\n@api_check_wallet_key(\"invoice\")\nasync def api_link_retrieve(link_id):\n link = await get_pay_link(link_id)\n\n if not link:\n return jsonify({\"message\": \"Pay link does not exist.\"}), HTTPStatus.NOT_FOUND\n\n if link.wallet != g.wallet.id:\n return jsonify({\"message\": \"Not your pay link.\"}), HTTPStatus.FORBIDDEN\n\n return jsonify({**link._asdict(), **{\"lnurl\": link.lnurl}}), HTTPStatus.OK\n\n\n@lnurlp_ext.route(\"/api/v1/links\", methods=[\"POST\"])\n@lnurlp_ext.route(\"/api/v1/links/<link_id>\", methods=[\"PUT\"])\n@api_check_wallet_key(\"invoice\")\n@api_validate_post_request(\n schema={\n \"description\": {\"type\": \"string\", \"empty\": False, \"required\": True},\n \"min\": {\"type\": \"number\", \"min\": 0.01, \"required\": True},\n \"max\": {\"type\": \"number\", \"min\": 0.01, \"required\": True},\n \"currency\": {\"type\": \"string\", \"nullable\": True, \"required\": False},\n \"comment_chars\": {\"type\": \"integer\", \"required\": True, \"min\": 0, \"max\": 800},\n \"webhook_url\": {\"type\": \"string\", \"required\": False},\n \"success_text\": {\"type\": \"string\", \"required\": False},\n \"success_url\": {\"type\": \"string\", \"required\": False},\n }\n)\nasync def api_link_create_or_update(link_id=None):\n if g.data[\"min\"] > g.data[\"max\"]:\n return jsonify({\"message\": 
\"Min is greater than max.\"}), HTTPStatus.BAD_REQUEST\n\n if g.data.get(\"currency\") == None and (\n round(g.data[\"min\"]) != g.data[\"min\"] or round(g.data[\"max\"]) != g.data[\"max\"]\n ):\n return jsonify({\"message\": \"Must use full satoshis.\"}), HTTPStatus.BAD_REQUEST\n\n if link_id:\n link = await get_pay_link(link_id)\n\n if not link:\n return (\n jsonify({\"message\": \"Pay link does not exist.\"}),\n HTTPStatus.NOT_FOUND,\n )\n\n if link.wallet != g.wallet.id:\n return jsonify({\"message\": \"Not your pay link.\"}), HTTPStatus.FORBIDDEN\n\n link = await update_pay_link(link_id, **g.data)\n else:\n link = await create_pay_link(wallet_id=g.wallet.id, **g.data)\n\n return (\n jsonify({**link._asdict(), **{\"lnurl\": link.lnurl}}),\n HTTPStatus.OK if link_id else HTTPStatus.CREATED,\n )\n\n\n@lnurlp_ext.route(\"/api/v1/links/<link_id>\", methods=[\"DELETE\"])\n@api_check_wallet_key(\"invoice\")\nasync def api_link_delete(link_id):\n link = await get_pay_link(link_id)\n\n if not link:\n return jsonify({\"message\": \"Pay link does not exist.\"}), HTTPStatus.NOT_FOUND\n\n if link.wallet != g.wallet.id:\n return jsonify({\"message\": \"Not your pay link.\"}), HTTPStatus.FORBIDDEN\n\n await delete_pay_link(link_id)\n\n return \"\", HTTPStatus.NO_CONTENT\n\n\n@lnurlp_ext.route(\"/api/v1/rate/<currency>\", methods=[\"GET\"])\nasync def api_check_fiat_rate(currency):\n try:\n rate = await get_fiat_rate_satoshis(currency)\n except AssertionError:\n rate = None\n\n return jsonify({\"rate\": rate}), HTTPStatus.OK\n", "path": "lnbits/extensions/lnurlp/views_api.py"}], "after_files": [{"content": "from quart import g, jsonify, request\nfrom http import HTTPStatus\nfrom lnurl.exceptions import InvalidUrl as LnurlInvalidUrl # type: ignore\n\nfrom lnbits.core.crud import get_user\nfrom lnbits.decorators import api_check_wallet_key, api_validate_post_request\nfrom lnbits.utils.exchange_rates import currencies, get_fiat_rate_satoshis\n\nfrom . 
import lnurlp_ext\nfrom .crud import (\n create_pay_link,\n get_pay_link,\n get_pay_links,\n update_pay_link,\n delete_pay_link,\n)\n\n\n@lnurlp_ext.route(\"/api/v1/currencies\", methods=[\"GET\"])\nasync def api_list_currencies_available():\n return jsonify(list(currencies.keys()))\n\n\n@lnurlp_ext.route(\"/api/v1/links\", methods=[\"GET\"])\n@api_check_wallet_key(\"invoice\")\nasync def api_links():\n wallet_ids = [g.wallet.id]\n\n if \"all_wallets\" in request.args:\n wallet_ids = (await get_user(g.wallet.user)).wallet_ids\n\n try:\n return (\n jsonify(\n [\n {**link._asdict(), **{\"lnurl\": link.lnurl}}\n for link in await get_pay_links(wallet_ids)\n ]\n ),\n HTTPStatus.OK,\n )\n except LnurlInvalidUrl:\n return (\n jsonify(\n {\n \"message\": \"LNURLs need to be delivered over a publically accessible `https` domain or Tor.\"\n }\n ),\n HTTPStatus.UPGRADE_REQUIRED,\n )\n\n\n@lnurlp_ext.route(\"/api/v1/links/<link_id>\", methods=[\"GET\"])\n@api_check_wallet_key(\"invoice\")\nasync def api_link_retrieve(link_id):\n link = await get_pay_link(link_id)\n\n if not link:\n return jsonify({\"message\": \"Pay link does not exist.\"}), HTTPStatus.NOT_FOUND\n\n if link.wallet != g.wallet.id:\n return jsonify({\"message\": \"Not your pay link.\"}), HTTPStatus.FORBIDDEN\n\n return jsonify({**link._asdict(), **{\"lnurl\": link.lnurl}}), HTTPStatus.OK\n\n\n@lnurlp_ext.route(\"/api/v1/links\", methods=[\"POST\"])\n@lnurlp_ext.route(\"/api/v1/links/<link_id>\", methods=[\"PUT\"])\n@api_check_wallet_key(\"invoice\")\n@api_validate_post_request(\n schema={\n \"description\": {\"type\": \"string\", \"empty\": False, \"required\": True},\n \"min\": {\"type\": \"number\", \"min\": 0.01, \"required\": True},\n \"max\": {\"type\": \"number\", \"min\": 0.01, \"required\": True},\n \"currency\": {\"type\": \"string\", \"nullable\": True, \"required\": False},\n \"comment_chars\": {\"type\": \"integer\", \"required\": True, \"min\": 0, \"max\": 800},\n \"webhook_url\": {\"type\": \"string\", \"required\": False},\n \"success_text\": {\"type\": \"string\", \"required\": False},\n \"success_url\": {\"type\": \"string\", \"required\": False},\n }\n)\nasync def api_link_create_or_update(link_id=None):\n if g.data[\"min\"] > g.data[\"max\"]:\n return jsonify({\"message\": \"Min is greater than max.\"}), HTTPStatus.BAD_REQUEST\n\n if g.data.get(\"currency\") == None and (\n round(g.data[\"min\"]) != g.data[\"min\"] or round(g.data[\"max\"]) != g.data[\"max\"]\n ):\n return jsonify({\"message\": \"Must use full satoshis.\"}), HTTPStatus.BAD_REQUEST\n \n if g.data[\"success_url\"][:8] != \"https://\":\n return jsonify({\"message\": \"Success URL must be secure https://...\"}), HTTPStatus.BAD_REQUEST\n\n if link_id:\n link = await get_pay_link(link_id)\n\n if not link:\n return (\n jsonify({\"message\": \"Pay link does not exist.\"}),\n HTTPStatus.NOT_FOUND,\n )\n\n if link.wallet != g.wallet.id:\n return jsonify({\"message\": \"Not your pay link.\"}), HTTPStatus.FORBIDDEN\n\n link = await update_pay_link(link_id, **g.data)\n else:\n link = await create_pay_link(wallet_id=g.wallet.id, **g.data)\n\n return (\n jsonify({**link._asdict(), **{\"lnurl\": link.lnurl}}),\n HTTPStatus.OK if link_id else HTTPStatus.CREATED,\n )\n\n\n@lnurlp_ext.route(\"/api/v1/links/<link_id>\", methods=[\"DELETE\"])\n@api_check_wallet_key(\"invoice\")\nasync def api_link_delete(link_id):\n link = await get_pay_link(link_id)\n\n if not link:\n return jsonify({\"message\": \"Pay link does not exist.\"}), HTTPStatus.NOT_FOUND\n\n if link.wallet != 
g.wallet.id:\n return jsonify({\"message\": \"Not your pay link.\"}), HTTPStatus.FORBIDDEN\n\n await delete_pay_link(link_id)\n\n return \"\", HTTPStatus.NO_CONTENT\n\n\n@lnurlp_ext.route(\"/api/v1/rate/<currency>\", methods=[\"GET\"])\nasync def api_check_fiat_rate(currency):\n try:\n rate = await get_fiat_rate_satoshis(currency)\n except AssertionError:\n rate = None\n\n return jsonify({\"rate\": rate}), HTTPStatus.OK\n", "path": "lnbits/extensions/lnurlp/views_api.py"}]} | 1,753 | 173 |
gh_patches_debug_20567 | rasdani/github-patches | git_diff | pantsbuild__pants-13467 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pants package does not build missing docker images if previous build was cached.
**Describe the bug**
Pants' caching of build targets does not take into consideration whether the final target still exists.
Take this example: https://www.pantsbuild.org/v2.8/docs/docker#example
```
$ ./pants package src/docker/hw/Dockerfile
[...]
18:07:29.66 [INFO] Completed: Building src.python.hw/bin.pex
18:07:31.83 [INFO] Completed: Building docker image helloworld:latest
18:07:31.83 [INFO] Built docker image: helloworld:latest
$ docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
helloworld latest abcdefabcdef 6 seconds ago 420MB
$ docker rmi helloworld:latest
$ docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
$ ./pants package src/docker/hw/Dockerfile
19:07:31.83 [INFO] Built docker image: helloworld:latest
$ docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
```
If you did the equivalent commands for the `helloworld.pex` files, `pants package` would replace the missing file in the `dist/` folder.
**Pants version**
2.8rc1
**OS**
Linux
--- END ISSUE ---
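The golden diff further down in this record addresses this by marking the docker build and push processes as cacheable only for the current session, so a later `./pants package` re-runs them even though their inputs are unchanged. A rough sketch of the idea, with the `Process` arguments modelled on the file shown below and the `cache_scope` value taken from the record's diff (treat the exact signature as an assumption, not verified against the Pants plugin API docs):

```python
# Sketch: opt the docker build Process out of the persistent cache.
from pants.engine.process import Process, ProcessCacheScope

process = Process(
    argv=("docker", "build", "-t", "helloworld:latest", "."),
    description="Building docker image helloworld:latest",
    cache_scope=ProcessCacheScope.PER_SESSION,  # re-run in every new Pants session
)
```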
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/python/pants/backend/docker/util_rules/docker_binary.py`
Content:
```
1 # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 from __future__ import annotations
5
6 from dataclasses import dataclass
7 from typing import Mapping
8
9 from pants.backend.docker.util_rules.docker_build_args import DockerBuildArgs
10 from pants.engine.fs import Digest
11 from pants.engine.process import (
12 BinaryNotFoundError,
13 BinaryPath,
14 BinaryPathRequest,
15 BinaryPaths,
16 BinaryPathTest,
17 Process,
18 SearchPath,
19 )
20 from pants.engine.rules import Get, collect_rules, rule
21 from pants.util.logging import LogLevel
22 from pants.util.strutil import pluralize
23
24
25 class DockerBinary(BinaryPath):
26 """The `docker` binary."""
27
28 DEFAULT_SEARCH_PATH = SearchPath(("/usr/bin", "/bin", "/usr/local/bin"))
29
30 def build_image(
31 self,
32 tags: tuple[str, ...],
33 digest: Digest,
34 dockerfile: str | None = None,
35 build_args: DockerBuildArgs | None = None,
36 env: Mapping[str, str] | None = None,
37 ) -> Process:
38 args = [self.path, "build"]
39
40 for tag in tags:
41 args.extend(["-t", tag])
42
43 if build_args:
44 for build_arg in build_args:
45 args.extend(["--build-arg", build_arg])
46
47 if dockerfile:
48 args.extend(["-f", dockerfile])
49
50 # Add build context root.
51 args.append(".")
52
53 return Process(
54 argv=tuple(args),
55 description=(
56 f"Building docker image {tags[0]}"
57 + (f" +{pluralize(len(tags)-1, 'additional tag')}." if len(tags) > 1 else ".")
58 ),
59 env=env,
60 input_digest=digest,
61 )
62
63 def push_image(self, tags: tuple[str, ...]) -> Process | None:
64 if not tags:
65 return None
66
67 return Process(
68 argv=(self.path, "push", *tags), description="Pushing docker image {tags[0]}"
69 )
70
71
72 @dataclass(frozen=True)
73 class DockerBinaryRequest:
74 search_path: SearchPath = DockerBinary.DEFAULT_SEARCH_PATH
75
76
77 @rule(desc="Finding the `docker` binary", level=LogLevel.DEBUG)
78 async def find_docker(docker_request: DockerBinaryRequest) -> DockerBinary:
79 request = BinaryPathRequest(
80 binary_name="docker",
81 search_path=docker_request.search_path,
82 test=BinaryPathTest(args=["-v"]),
83 )
84 paths = await Get(BinaryPaths, BinaryPathRequest, request)
85 first_path = paths.first_path
86 if not first_path:
87 raise BinaryNotFoundError.from_request(request, rationale="interact with the docker daemon")
88 return DockerBinary(first_path.path, first_path.fingerprint)
89
90
91 @rule
92 async def get_docker() -> DockerBinary:
93 return await Get(DockerBinary, DockerBinaryRequest())
94
95
96 def rules():
97 return collect_rules()
98
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/python/pants/backend/docker/util_rules/docker_binary.py b/src/python/pants/backend/docker/util_rules/docker_binary.py
--- a/src/python/pants/backend/docker/util_rules/docker_binary.py
+++ b/src/python/pants/backend/docker/util_rules/docker_binary.py
@@ -15,6 +15,7 @@
BinaryPaths,
BinaryPathTest,
Process,
+ ProcessCacheScope,
SearchPath,
)
from pants.engine.rules import Get, collect_rules, rule
@@ -58,6 +59,7 @@
),
env=env,
input_digest=digest,
+ cache_scope=ProcessCacheScope.PER_SESSION,
)
def push_image(self, tags: tuple[str, ...]) -> Process | None:
@@ -65,7 +67,9 @@
return None
return Process(
- argv=(self.path, "push", *tags), description="Pushing docker image {tags[0]}"
+ argv=(self.path, "push", *tags),
+ cache_scope=ProcessCacheScope.PER_SESSION,
+ description=f"Pushing docker image {tags[0]}",
)
| {"golden_diff": "diff --git a/src/python/pants/backend/docker/util_rules/docker_binary.py b/src/python/pants/backend/docker/util_rules/docker_binary.py\n--- a/src/python/pants/backend/docker/util_rules/docker_binary.py\n+++ b/src/python/pants/backend/docker/util_rules/docker_binary.py\n@@ -15,6 +15,7 @@\n BinaryPaths,\n BinaryPathTest,\n Process,\n+ ProcessCacheScope,\n SearchPath,\n )\n from pants.engine.rules import Get, collect_rules, rule\n@@ -58,6 +59,7 @@\n ),\n env=env,\n input_digest=digest,\n+ cache_scope=ProcessCacheScope.PER_SESSION,\n )\n \n def push_image(self, tags: tuple[str, ...]) -> Process | None:\n@@ -65,7 +67,9 @@\n return None\n \n return Process(\n- argv=(self.path, \"push\", *tags), description=\"Pushing docker image {tags[0]}\"\n+ argv=(self.path, \"push\", *tags),\n+ cache_scope=ProcessCacheScope.PER_SESSION,\n+ description=f\"Pushing docker image {tags[0]}\",\n )\n", "issue": "pants package does not build missing docker images if previous build was cached.\n**Describe the bug**\r\nPant's caching of build targets does not take into consideration that the final target does not exist.\r\n\r\nTake this example: https://www.pantsbuild.org/v2.8/docs/docker#example\r\n\r\n```\r\n$ ./pants package src/docker/hw/Dockerfile\r\n[...]\r\n18:07:29.66 [INFO] Completed: Building src.python.hw/bin.pex\r\n18:07:31.83 [INFO] Completed: Building docker image helloworld:latest\r\n18:07:31.83 [INFO] Built docker image: helloworld:latest\r\n\r\n$ docker images\r\nREPOSITORY TAG IMAGE ID CREATED SIZE\r\nhelloworld latest abcdefabcdef 6 seconds ago 420MB\r\n\r\n$ docker rmi helloworld:latest\r\n\r\n$ docker images\r\nREPOSITORY TAG IMAGE ID CREATED SIZE\r\n\r\n$ ./pants package src/docker/hw/Dockerfile\r\n19:07:31.83 [INFO] Built docker image: helloworld:latest\r\n\r\n$ docker images\r\nREPOSITORY TAG IMAGE ID CREATED SIZE\r\n```\r\nIf you did the equivalent commands for the `helloworld.pex` files, `pants package` would replace the missing file in the `dist/` folder.\r\n\r\n**Pants version**\r\n2.8rc1\r\n\r\n**OS**\r\nLinux\r\n\n", "before_files": [{"content": "# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom typing import Mapping\n\nfrom pants.backend.docker.util_rules.docker_build_args import DockerBuildArgs\nfrom pants.engine.fs import Digest\nfrom pants.engine.process import (\n BinaryNotFoundError,\n BinaryPath,\n BinaryPathRequest,\n BinaryPaths,\n BinaryPathTest,\n Process,\n SearchPath,\n)\nfrom pants.engine.rules import Get, collect_rules, rule\nfrom pants.util.logging import LogLevel\nfrom pants.util.strutil import pluralize\n\n\nclass DockerBinary(BinaryPath):\n \"\"\"The `docker` binary.\"\"\"\n\n DEFAULT_SEARCH_PATH = SearchPath((\"/usr/bin\", \"/bin\", \"/usr/local/bin\"))\n\n def build_image(\n self,\n tags: tuple[str, ...],\n digest: Digest,\n dockerfile: str | None = None,\n build_args: DockerBuildArgs | None = None,\n env: Mapping[str, str] | None = None,\n ) -> Process:\n args = [self.path, \"build\"]\n\n for tag in tags:\n args.extend([\"-t\", tag])\n\n if build_args:\n for build_arg in build_args:\n args.extend([\"--build-arg\", build_arg])\n\n if dockerfile:\n args.extend([\"-f\", dockerfile])\n\n # Add build context root.\n args.append(\".\")\n\n return Process(\n argv=tuple(args),\n description=(\n f\"Building docker image {tags[0]}\"\n + (f\" +{pluralize(len(tags)-1, 'additional tag')}.\" if 
len(tags) > 1 else \".\")\n ),\n env=env,\n input_digest=digest,\n )\n\n def push_image(self, tags: tuple[str, ...]) -> Process | None:\n if not tags:\n return None\n\n return Process(\n argv=(self.path, \"push\", *tags), description=\"Pushing docker image {tags[0]}\"\n )\n\n\n@dataclass(frozen=True)\nclass DockerBinaryRequest:\n search_path: SearchPath = DockerBinary.DEFAULT_SEARCH_PATH\n\n\n@rule(desc=\"Finding the `docker` binary\", level=LogLevel.DEBUG)\nasync def find_docker(docker_request: DockerBinaryRequest) -> DockerBinary:\n request = BinaryPathRequest(\n binary_name=\"docker\",\n search_path=docker_request.search_path,\n test=BinaryPathTest(args=[\"-v\"]),\n )\n paths = await Get(BinaryPaths, BinaryPathRequest, request)\n first_path = paths.first_path\n if not first_path:\n raise BinaryNotFoundError.from_request(request, rationale=\"interact with the docker daemon\")\n return DockerBinary(first_path.path, first_path.fingerprint)\n\n\n@rule\nasync def get_docker() -> DockerBinary:\n return await Get(DockerBinary, DockerBinaryRequest())\n\n\ndef rules():\n return collect_rules()\n", "path": "src/python/pants/backend/docker/util_rules/docker_binary.py"}], "after_files": [{"content": "# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\nfrom typing import Mapping\n\nfrom pants.backend.docker.util_rules.docker_build_args import DockerBuildArgs\nfrom pants.engine.fs import Digest\nfrom pants.engine.process import (\n BinaryNotFoundError,\n BinaryPath,\n BinaryPathRequest,\n BinaryPaths,\n BinaryPathTest,\n Process,\n ProcessCacheScope,\n SearchPath,\n)\nfrom pants.engine.rules import Get, collect_rules, rule\nfrom pants.util.logging import LogLevel\nfrom pants.util.strutil import pluralize\n\n\nclass DockerBinary(BinaryPath):\n \"\"\"The `docker` binary.\"\"\"\n\n DEFAULT_SEARCH_PATH = SearchPath((\"/usr/bin\", \"/bin\", \"/usr/local/bin\"))\n\n def build_image(\n self,\n tags: tuple[str, ...],\n digest: Digest,\n dockerfile: str | None = None,\n build_args: DockerBuildArgs | None = None,\n env: Mapping[str, str] | None = None,\n ) -> Process:\n args = [self.path, \"build\"]\n\n for tag in tags:\n args.extend([\"-t\", tag])\n\n if build_args:\n for build_arg in build_args:\n args.extend([\"--build-arg\", build_arg])\n\n if dockerfile:\n args.extend([\"-f\", dockerfile])\n\n # Add build context root.\n args.append(\".\")\n\n return Process(\n argv=tuple(args),\n description=(\n f\"Building docker image {tags[0]}\"\n + (f\" +{pluralize(len(tags)-1, 'additional tag')}.\" if len(tags) > 1 else \".\")\n ),\n env=env,\n input_digest=digest,\n cache_scope=ProcessCacheScope.PER_SESSION,\n )\n\n def push_image(self, tags: tuple[str, ...]) -> Process | None:\n if not tags:\n return None\n\n return Process(\n argv=(self.path, \"push\", *tags),\n cache_scope=ProcessCacheScope.PER_SESSION,\n description=f\"Pushing docker image {tags[0]}\",\n )\n\n\n@dataclass(frozen=True)\nclass DockerBinaryRequest:\n search_path: SearchPath = DockerBinary.DEFAULT_SEARCH_PATH\n\n\n@rule(desc=\"Finding the `docker` binary\", level=LogLevel.DEBUG)\nasync def find_docker(docker_request: DockerBinaryRequest) -> DockerBinary:\n request = BinaryPathRequest(\n binary_name=\"docker\",\n search_path=docker_request.search_path,\n test=BinaryPathTest(args=[\"-v\"]),\n )\n paths = await Get(BinaryPaths, BinaryPathRequest, request)\n first_path = paths.first_path\n if not 
first_path:\n raise BinaryNotFoundError.from_request(request, rationale=\"interact with the docker daemon\")\n return DockerBinary(first_path.path, first_path.fingerprint)\n\n\n@rule\nasync def get_docker() -> DockerBinary:\n return await Get(DockerBinary, DockerBinaryRequest())\n\n\ndef rules():\n return collect_rules()\n", "path": "src/python/pants/backend/docker/util_rules/docker_binary.py"}]} | 1,400 | 245 |
gh_patches_debug_23656 | rasdani/github-patches | git_diff | OpenMined__PySyft-4991 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Does the framework support IPv6 networks?
Is this framework suitable for an IPv6 network environment?
--- END ISSUE ---
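The example node below is a plain Flask application, so IPv6 support largely comes down to which address the server binds to: `"0.0.0.0"` only accepts IPv4, while `"::"` is the IPv6 wildcard. A hedged illustration of the difference (the `IP_MODE` variable is the convention introduced by this record's patch, not an existing PySyft setting):

```python
# Sketch: choose the wildcard bind address based on an IP_MODE setting.
import os
from flask import Flask

app = Flask(__name__)

host = "::" if os.getenv("IP_MODE", "IPV4") == "IPV6" else "0.0.0.0"
app.run(host=host, port=int(os.getenv("PORT", 5000)))
```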
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/syft/grid/example_nodes/network.py`
Content:
```
1 """
2 The purpose of this application is to allow us to dev and test PySyft
3 functionality on an actual local network. This is NOT meant to be run in
4 production (that's the *actual* grid's job).
5
6 For example:
7 $ python src/syft/grid/example_nodes/network.py
8
9 """
10 # stdlib
11 import os
12
13 # third party
14 import flask
15 from flask import Flask
16 from flask import Response
17 from nacl.encoding import HexEncoder
18
19 # syft absolute
20 from syft.core.common.message import SignedImmediateSyftMessageWithReply
21 from syft.core.common.message import SignedImmediateSyftMessageWithoutReply
22 from syft.core.common.serde.deserialize import _deserialize
23 from syft.core.node.network.network import Network
24 from syft.grid.services.signaling_service import PullSignalingService
25 from syft.grid.services.signaling_service import PushSignalingService
26 from syft.grid.services.signaling_service import RegisterDuetPeerService
27
28 app = Flask(__name__)
29
30 network = Network(name="om-net")
31
32 network.immediate_services_without_reply.append(PushSignalingService)
33 network.immediate_services_with_reply.append(PullSignalingService)
34 network.immediate_services_with_reply.append(RegisterDuetPeerService)
35 network._register_services() # re-register all services including SignalingService
36
37
38 @app.route("/metadata")
39 def get_metadata() -> flask.Response:
40 metadata = network.get_metadata_for_client()
41 metadata_proto = metadata.serialize()
42 r = Response(
43 response=metadata_proto.SerializeToString(),
44 status=200,
45 )
46 r.headers["Content-Type"] = "application/octet-stream"
47 return r
48
49
50 @app.route("/", methods=["POST"])
51 def process_network_msgs() -> flask.Response:
52 data = flask.request.get_data()
53 obj_msg = _deserialize(blob=data, from_bytes=True)
54 if isinstance(obj_msg, SignedImmediateSyftMessageWithReply):
55 print(
56 f"Signaling server SignedImmediateSyftMessageWithReply: {obj_msg.message} watch"
57 )
58 reply = network.recv_immediate_msg_with_reply(msg=obj_msg)
59 r = Response(response=reply.serialize(to_bytes=True), status=200)
60 r.headers["Content-Type"] = "application/octet-stream"
61 return r
62 elif isinstance(obj_msg, SignedImmediateSyftMessageWithoutReply):
63 print(
64 f"Signaling server SignedImmediateSyftMessageWithoutReply: {obj_msg.message} watch"
65 )
66 network.recv_immediate_msg_without_reply(msg=obj_msg)
67 r = Response(status=200)
68 return r
69 else:
70 print(
71 f"Signaling server SignedImmediateSyftMessageWithoutReply: {obj_msg.message} watch"
72 )
73 network.recv_eventual_msg_without_reply(msg=obj_msg)
74 r = Response(status=200)
75 return r
76
77
78 def run() -> None:
79 global network
80 print("====================================")
81 print("========== NODE ROOT KEY ===========")
82 print("====================================")
83 # this signing_key is to aid in local development and is not used in the real
84 # PyGrid implementation
85 PORT = os.getenv("PORT", 5000)
86 print(f"Starting Node on PORT: {PORT}")
87 print(network.signing_key.encode(encoder=HexEncoder).decode("utf-8"), "\n")
88 app.run(host="0.0.0.0", port=int(PORT)) # nosec
89
90
91 run()
92
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/syft/grid/example_nodes/network.py b/src/syft/grid/example_nodes/network.py
--- a/src/syft/grid/example_nodes/network.py
+++ b/src/syft/grid/example_nodes/network.py
@@ -9,6 +9,7 @@
"""
# stdlib
import os
+import sys
# third party
import flask
@@ -77,15 +78,25 @@
def run() -> None:
global network
- print("====================================")
- print("========== NODE ROOT KEY ===========")
- print("====================================")
+
+ IP_MODE = os.getenv("IP_MODE", "IPV4") # default to ipv4
+ if len(sys.argv) > 1:
+ IP_MODE = sys.argv[1]
+
+ IP_MODE = "IPV6" if IP_MODE == "IPV6" else "IPV4"
# this signing_key is to aid in local development and is not used in the real
# PyGrid implementation
+ HOST = "0.0.0.0" if IP_MODE == "IPV4" else "::" # nosec
PORT = os.getenv("PORT", 5000)
- print(f"Starting Node on PORT: {PORT}")
+
+ print("====================================")
+ print("========== NODE ROOT KEY ===========")
+ print("====================================")
print(network.signing_key.encode(encoder=HexEncoder).decode("utf-8"), "\n")
- app.run(host="0.0.0.0", port=int(PORT)) # nosec
+
+ print(f"Using {IP_MODE} and listening on port {PORT}")
+
+ app.run(host=HOST, port=int(PORT))
run()
| {"golden_diff": "diff --git a/src/syft/grid/example_nodes/network.py b/src/syft/grid/example_nodes/network.py\n--- a/src/syft/grid/example_nodes/network.py\n+++ b/src/syft/grid/example_nodes/network.py\n@@ -9,6 +9,7 @@\n \"\"\"\n # stdlib\n import os\n+import sys\n \n # third party\n import flask\n@@ -77,15 +78,25 @@\n \n def run() -> None:\n global network\n- print(\"====================================\")\n- print(\"========== NODE ROOT KEY ===========\")\n- print(\"====================================\")\n+\n+ IP_MODE = os.getenv(\"IP_MODE\", \"IPV4\") # default to ipv4\n+ if len(sys.argv) > 1:\n+ IP_MODE = sys.argv[1]\n+\n+ IP_MODE = \"IPV6\" if IP_MODE == \"IPV6\" else \"IPV4\"\n # this signing_key is to aid in local development and is not used in the real\n # PyGrid implementation\n+ HOST = \"0.0.0.0\" if IP_MODE == \"IPV4\" else \"::\" # nosec\n PORT = os.getenv(\"PORT\", 5000)\n- print(f\"Starting Node on PORT: {PORT}\")\n+\n+ print(\"====================================\")\n+ print(\"========== NODE ROOT KEY ===========\")\n+ print(\"====================================\")\n print(network.signing_key.encode(encoder=HexEncoder).decode(\"utf-8\"), \"\\n\")\n- app.run(host=\"0.0.0.0\", port=int(PORT)) # nosec\n+\n+ print(f\"Using {IP_MODE} and listening on port {PORT}\")\n+\n+ app.run(host=HOST, port=int(PORT))\n \n \n run()\n", "issue": "Does the framework support IPv6 networks?\nIs this framework suitable for IPv6 network environment? \n", "before_files": [{"content": "\"\"\"\nThe purpose of this application is to allow us to dev and test PySyft\nfunctionality on an actual local network. This is NOT meant to be run in\nproduction (that's the *actual* grid's job).\n\nFor example:\n$ python src/syft/grid/example_nodes/network.py\n\n\"\"\"\n# stdlib\nimport os\n\n# third party\nimport flask\nfrom flask import Flask\nfrom flask import Response\nfrom nacl.encoding import HexEncoder\n\n# syft absolute\nfrom syft.core.common.message import SignedImmediateSyftMessageWithReply\nfrom syft.core.common.message import SignedImmediateSyftMessageWithoutReply\nfrom syft.core.common.serde.deserialize import _deserialize\nfrom syft.core.node.network.network import Network\nfrom syft.grid.services.signaling_service import PullSignalingService\nfrom syft.grid.services.signaling_service import PushSignalingService\nfrom syft.grid.services.signaling_service import RegisterDuetPeerService\n\napp = Flask(__name__)\n\nnetwork = Network(name=\"om-net\")\n\nnetwork.immediate_services_without_reply.append(PushSignalingService)\nnetwork.immediate_services_with_reply.append(PullSignalingService)\nnetwork.immediate_services_with_reply.append(RegisterDuetPeerService)\nnetwork._register_services() # re-register all services including SignalingService\n\n\[email protected](\"/metadata\")\ndef get_metadata() -> flask.Response:\n metadata = network.get_metadata_for_client()\n metadata_proto = metadata.serialize()\n r = Response(\n response=metadata_proto.SerializeToString(),\n status=200,\n )\n r.headers[\"Content-Type\"] = \"application/octet-stream\"\n return r\n\n\[email protected](\"/\", methods=[\"POST\"])\ndef process_network_msgs() -> flask.Response:\n data = flask.request.get_data()\n obj_msg = _deserialize(blob=data, from_bytes=True)\n if isinstance(obj_msg, SignedImmediateSyftMessageWithReply):\n print(\n f\"Signaling server SignedImmediateSyftMessageWithReply: {obj_msg.message} watch\"\n )\n reply = network.recv_immediate_msg_with_reply(msg=obj_msg)\n r = Response(response=reply.serialize(to_bytes=True), 
status=200)\n r.headers[\"Content-Type\"] = \"application/octet-stream\"\n return r\n elif isinstance(obj_msg, SignedImmediateSyftMessageWithoutReply):\n print(\n f\"Signaling server SignedImmediateSyftMessageWithoutReply: {obj_msg.message} watch\"\n )\n network.recv_immediate_msg_without_reply(msg=obj_msg)\n r = Response(status=200)\n return r\n else:\n print(\n f\"Signaling server SignedImmediateSyftMessageWithoutReply: {obj_msg.message} watch\"\n )\n network.recv_eventual_msg_without_reply(msg=obj_msg)\n r = Response(status=200)\n return r\n\n\ndef run() -> None:\n global network\n print(\"====================================\")\n print(\"========== NODE ROOT KEY ===========\")\n print(\"====================================\")\n # this signing_key is to aid in local development and is not used in the real\n # PyGrid implementation\n PORT = os.getenv(\"PORT\", 5000)\n print(f\"Starting Node on PORT: {PORT}\")\n print(network.signing_key.encode(encoder=HexEncoder).decode(\"utf-8\"), \"\\n\")\n app.run(host=\"0.0.0.0\", port=int(PORT)) # nosec\n\n\nrun()\n", "path": "src/syft/grid/example_nodes/network.py"}], "after_files": [{"content": "\"\"\"\nThe purpose of this application is to allow us to dev and test PySyft\nfunctionality on an actual local network. This is NOT meant to be run in\nproduction (that's the *actual* grid's job).\n\nFor example:\n$ python src/syft/grid/example_nodes/network.py\n\n\"\"\"\n# stdlib\nimport os\nimport sys\n\n# third party\nimport flask\nfrom flask import Flask\nfrom flask import Response\nfrom nacl.encoding import HexEncoder\n\n# syft absolute\nfrom syft.core.common.message import SignedImmediateSyftMessageWithReply\nfrom syft.core.common.message import SignedImmediateSyftMessageWithoutReply\nfrom syft.core.common.serde.deserialize import _deserialize\nfrom syft.core.node.network.network import Network\nfrom syft.grid.services.signaling_service import PullSignalingService\nfrom syft.grid.services.signaling_service import PushSignalingService\nfrom syft.grid.services.signaling_service import RegisterDuetPeerService\n\napp = Flask(__name__)\n\nnetwork = Network(name=\"om-net\")\n\nnetwork.immediate_services_without_reply.append(PushSignalingService)\nnetwork.immediate_services_with_reply.append(PullSignalingService)\nnetwork.immediate_services_with_reply.append(RegisterDuetPeerService)\nnetwork._register_services() # re-register all services including SignalingService\n\n\[email protected](\"/metadata\")\ndef get_metadata() -> flask.Response:\n metadata = network.get_metadata_for_client()\n metadata_proto = metadata.serialize()\n r = Response(\n response=metadata_proto.SerializeToString(),\n status=200,\n )\n r.headers[\"Content-Type\"] = \"application/octet-stream\"\n return r\n\n\[email protected](\"/\", methods=[\"POST\"])\ndef process_network_msgs() -> flask.Response:\n data = flask.request.get_data()\n obj_msg = _deserialize(blob=data, from_bytes=True)\n if isinstance(obj_msg, SignedImmediateSyftMessageWithReply):\n print(\n f\"Signaling server SignedImmediateSyftMessageWithReply: {obj_msg.message} watch\"\n )\n reply = network.recv_immediate_msg_with_reply(msg=obj_msg)\n r = Response(response=reply.serialize(to_bytes=True), status=200)\n r.headers[\"Content-Type\"] = \"application/octet-stream\"\n return r\n elif isinstance(obj_msg, SignedImmediateSyftMessageWithoutReply):\n print(\n f\"Signaling server SignedImmediateSyftMessageWithoutReply: {obj_msg.message} watch\"\n )\n network.recv_immediate_msg_without_reply(msg=obj_msg)\n r = 
Response(status=200)\n return r\n else:\n print(\n f\"Signaling server SignedImmediateSyftMessageWithoutReply: {obj_msg.message} watch\"\n )\n network.recv_eventual_msg_without_reply(msg=obj_msg)\n r = Response(status=200)\n return r\n\n\ndef run() -> None:\n global network\n\n IP_MODE = os.getenv(\"IP_MODE\", \"IPV4\") # default to ipv4\n if len(sys.argv) > 1:\n IP_MODE = sys.argv[1]\n\n IP_MODE = \"IPV6\" if IP_MODE == \"IPV6\" else \"IPV4\"\n # this signing_key is to aid in local development and is not used in the real\n # PyGrid implementation\n HOST = \"0.0.0.0\" if IP_MODE == \"IPV4\" else \"::\" # nosec\n PORT = os.getenv(\"PORT\", 5000)\n\n print(\"====================================\")\n print(\"========== NODE ROOT KEY ===========\")\n print(\"====================================\")\n print(network.signing_key.encode(encoder=HexEncoder).decode(\"utf-8\"), \"\\n\")\n\n print(f\"Using {IP_MODE} and listening on port {PORT}\")\n\n app.run(host=HOST, port=int(PORT))\n\n\nrun()\n", "path": "src/syft/grid/example_nodes/network.py"}]} | 1,180 | 384 |
gh_patches_debug_19544 | rasdani/github-patches | git_diff | mabel-dev__opteryx-1688 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
🪲 [CI] MyPy test failure
### Thank you for taking the time to report a problem with Opteryx.
_To help us to respond to your request we ask that you try to provide the below detail about the bug._
**Describe the bug** _A clear and specific description of what the bug is. What the error, incorrect or unexpected behaviour was._
**Expected behaviour** _A clear and concise description of what you expected to happen._
**Sample Code/Statement** _If you can, please submit the SQL statement or Python code snippet, or a representative example using the sample datasets._
~~~sql
~~~
**Additional context** _Add any other context about the problem here, for example what you have done to try to diagnose or workaround the problem._
--- END ISSUE ---
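The issue body is an unfilled template, but the golden diff at the end of the record gives a hint about what the type checker flagged: `view_as_plan` in `opteryx/planner/views/__init__.py` is annotated as returning a `Graph` while the logical planning phase produces a `LogicalPlan`. A minimal sketch of the corrected signature, inferred from that diff rather than from running MyPy:

```python
# Sketch of the annotation change suggested by the golden diff below.
from opteryx.planner.logical_planner import LogicalPlan

def view_as_plan(view_name: str) -> LogicalPlan:
    ...  # build and return the logical plan for the stored view, as in the file below
```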
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opteryx/planner/views/__init__.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 import orjson
14
15 from opteryx.managers.expression import NodeType
16 from opteryx.third_party.travers import Graph
17
18
19 def _load_views():
20 try:
21 with open("views.json", "rb") as defs:
22 return orjson.loads(defs.read())
23 except Exception as err:
24 print(f"[OPTERYX] Unable to open views definition file. {err}")
25 return {}
26
27
28 VIEWS = _load_views()
29
30
31 def is_view(view_name: str) -> bool:
32 return view_name in VIEWS
33
34
35 def view_as_plan(view_name: str) -> Graph:
36 from opteryx.planner.logical_planner import do_logical_planning_phase
37 from opteryx.third_party import sqloxide
38 from opteryx.utils.sql import clean_statement
39 from opteryx.utils.sql import remove_comments
40
41 operation = VIEWS.get(view_name)["statement"]
42
43 clean_sql = clean_statement(remove_comments(operation))
44 parsed_statements = sqloxide.parse_sql(clean_sql, dialect="mysql")
45 logical_plan, _, _ = next(do_logical_planning_phase(parsed_statements))
46
47 return logical_plan
48
```
Path: `opteryx/__version__.py`
Content:
```
1 __build__ = 522
2
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """
16 Store the version here so:
17 1) we don't load dependencies by storing it in __init__.py
18 2) we can import it in setup.py for the same reason
19 """
20 from enum import Enum # isort: skip
21
22
23 class VersionStatus(Enum):
24 ALPHA = "alpha"
25 BETA = "beta"
26 RELEASE = "release"
27
28
29 _major = 0
30 _minor = 16
31 _revision = 0
32 _status = VersionStatus.ALPHA
33
34 __author__ = "@joocer"
35 __version__ = f"{_major}.{_minor}.{_revision}" + (
36 f"-{_status.value}.{__build__}" if _status != VersionStatus.RELEASE else ""
37 )
38
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/opteryx/__version__.py b/opteryx/__version__.py
--- a/opteryx/__version__.py
+++ b/opteryx/__version__.py
@@ -1,4 +1,4 @@
-__build__ = 522
+__build__ = 523
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
diff --git a/opteryx/planner/views/__init__.py b/opteryx/planner/views/__init__.py
--- a/opteryx/planner/views/__init__.py
+++ b/opteryx/planner/views/__init__.py
@@ -12,8 +12,7 @@
import orjson
-from opteryx.managers.expression import NodeType
-from opteryx.third_party.travers import Graph
+from opteryx.planner.logical_planner import LogicalPlan
def _load_views():
@@ -32,7 +31,7 @@
return view_name in VIEWS
-def view_as_plan(view_name: str) -> Graph:
+def view_as_plan(view_name: str) -> LogicalPlan:
from opteryx.planner.logical_planner import do_logical_planning_phase
from opteryx.third_party import sqloxide
from opteryx.utils.sql import clean_statement
| {"golden_diff": "diff --git a/opteryx/__version__.py b/opteryx/__version__.py\n--- a/opteryx/__version__.py\n+++ b/opteryx/__version__.py\n@@ -1,4 +1,4 @@\n-__build__ = 522\n+__build__ = 523\n \n # Licensed under the Apache License, Version 2.0 (the \"License\");\n # you may not use this file except in compliance with the License.\ndiff --git a/opteryx/planner/views/__init__.py b/opteryx/planner/views/__init__.py\n--- a/opteryx/planner/views/__init__.py\n+++ b/opteryx/planner/views/__init__.py\n@@ -12,8 +12,7 @@\n \n import orjson\n \n-from opteryx.managers.expression import NodeType\n-from opteryx.third_party.travers import Graph\n+from opteryx.planner.logical_planner import LogicalPlan\n \n \n def _load_views():\n@@ -32,7 +31,7 @@\n return view_name in VIEWS\n \n \n-def view_as_plan(view_name: str) -> Graph:\n+def view_as_plan(view_name: str) -> LogicalPlan:\n from opteryx.planner.logical_planner import do_logical_planning_phase\n from opteryx.third_party import sqloxide\n from opteryx.utils.sql import clean_statement\n", "issue": "\ud83e\udeb2 [CI] MyPy test failure\n### Thank you for taking the time to report a problem with Opteryx.\r\n_To help us to respond to your request we ask that you try to provide the below detail about the bug._\r\n\r\n**Describe the bug** _A clear and specific description of what the bug is. What the error, incorrect or unexpected behaviour was._\r\n\r\n\r\n**Expected behaviour** _A clear and concise description of what you expected to happen._\r\n\r\n\r\n**Sample Code/Statement** _If you can, please submit the SQL statement or Python code snippet, or a representative example using the sample datasets._\r\n\r\n~~~sql\r\n\r\n~~~\r\n\r\n**Additional context** _Add any other context about the problem here, for example what you have done to try to diagnose or workaround the problem._\r\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport orjson\n\nfrom opteryx.managers.expression import NodeType\nfrom opteryx.third_party.travers import Graph\n\n\ndef _load_views():\n try:\n with open(\"views.json\", \"rb\") as defs:\n return orjson.loads(defs.read())\n except Exception as err:\n print(f\"[OPTERYX] Unable to open views definition file. 
{err}\")\n return {}\n\n\nVIEWS = _load_views()\n\n\ndef is_view(view_name: str) -> bool:\n return view_name in VIEWS\n\n\ndef view_as_plan(view_name: str) -> Graph:\n from opteryx.planner.logical_planner import do_logical_planning_phase\n from opteryx.third_party import sqloxide\n from opteryx.utils.sql import clean_statement\n from opteryx.utils.sql import remove_comments\n\n operation = VIEWS.get(view_name)[\"statement\"]\n\n clean_sql = clean_statement(remove_comments(operation))\n parsed_statements = sqloxide.parse_sql(clean_sql, dialect=\"mysql\")\n logical_plan, _, _ = next(do_logical_planning_phase(parsed_statements))\n\n return logical_plan\n", "path": "opteryx/planner/views/__init__.py"}, {"content": "__build__ = 522\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nStore the version here so:\n1) we don't load dependencies by storing it in __init__.py\n2) we can import it in setup.py for the same reason\n\"\"\"\nfrom enum import Enum # isort: skip\n\n\nclass VersionStatus(Enum):\n ALPHA = \"alpha\"\n BETA = \"beta\"\n RELEASE = \"release\"\n\n\n_major = 0\n_minor = 16\n_revision = 0\n_status = VersionStatus.ALPHA\n\n__author__ = \"@joocer\"\n__version__ = f\"{_major}.{_minor}.{_revision}\" + (\n f\"-{_status.value}.{__build__}\" if _status != VersionStatus.RELEASE else \"\"\n)\n", "path": "opteryx/__version__.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport orjson\n\nfrom opteryx.planner.logical_planner import LogicalPlan\n\n\ndef _load_views():\n try:\n with open(\"views.json\", \"rb\") as defs:\n return orjson.loads(defs.read())\n except Exception as err:\n print(f\"[OPTERYX] Unable to open views definition file. 
{err}\")\n return {}\n\n\nVIEWS = _load_views()\n\n\ndef is_view(view_name: str) -> bool:\n return view_name in VIEWS\n\n\ndef view_as_plan(view_name: str) -> LogicalPlan:\n from opteryx.planner.logical_planner import do_logical_planning_phase\n from opteryx.third_party import sqloxide\n from opteryx.utils.sql import clean_statement\n from opteryx.utils.sql import remove_comments\n\n operation = VIEWS.get(view_name)[\"statement\"]\n\n clean_sql = clean_statement(remove_comments(operation))\n parsed_statements = sqloxide.parse_sql(clean_sql, dialect=\"mysql\")\n logical_plan, _, _ = next(do_logical_planning_phase(parsed_statements))\n\n return logical_plan\n", "path": "opteryx/planner/views/__init__.py"}, {"content": "__build__ = 523\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nStore the version here so:\n1) we don't load dependencies by storing it in __init__.py\n2) we can import it in setup.py for the same reason\n\"\"\"\nfrom enum import Enum # isort: skip\n\n\nclass VersionStatus(Enum):\n ALPHA = \"alpha\"\n BETA = \"beta\"\n RELEASE = \"release\"\n\n\n_major = 0\n_minor = 16\n_revision = 0\n_status = VersionStatus.ALPHA\n\n__author__ = \"@joocer\"\n__version__ = f\"{_major}.{_minor}.{_revision}\" + (\n f\"-{_status.value}.{__build__}\" if _status != VersionStatus.RELEASE else \"\"\n)\n", "path": "opteryx/__version__.py"}]} | 1,251 | 300 |
gh_patches_debug_37940 | rasdani/github-patches | git_diff | deepset-ai__haystack-6753 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
feat: Add split by `page` to `DocumentSplitter`
**Is your feature request related to a problem? Please describe.**
There are some cases where we would like to be able to split the contents of a PDF by page, either to keep all text from a single page as one document to help preserve context, or to perform two passes of chunking (i.e. split by page, followed by split by sentence). I would not say this is a common setup, but I believe we can straightforwardly extend the `DocumentSplitter` to support this flexibility.
**Describe the solution you'd like**
Add a new `split_by` value of `page` that would split on `"\f"`.
**Describe alternatives you've considered**
Split up the source file (e.g. a PDF) into individual pages before feeding it into the Haystack pipeline. Definitely doable, but less elegant than having the `DocumentSplitter` handle this directly.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `haystack/components/preprocessors/document_splitter.py`
Content:
```
1 from copy import deepcopy
2 from typing import List, Literal
3
4 from more_itertools import windowed
5
6 from haystack import component, Document
7
8
9 @component
10 class DocumentSplitter:
11 """
12 Splits a list of text documents into a list of text documents with shorter texts.
13 This is useful for splitting documents with long texts that otherwise would not fit into the maximum text length of language models.
14 """
15
16 def __init__(
17 self, split_by: Literal["word", "sentence", "passage"] = "word", split_length: int = 200, split_overlap: int = 0
18 ):
19 """
20 :param split_by: The unit by which the document should be split. Choose from "word" for splitting by " ",
21 "sentence" for splitting by ".", or "passage" for splitting by "\\n\\n".
22 :param split_length: The maximum number of units in each split.
23 :param split_overlap: The number of units that each split should overlap.
24 """
25
26 self.split_by = split_by
27 if split_by not in ["word", "sentence", "passage"]:
28 raise ValueError("split_by must be one of 'word', 'sentence' or 'passage'.")
29 if split_length <= 0:
30 raise ValueError("split_length must be greater than 0.")
31 self.split_length = split_length
32 if split_overlap < 0:
33 raise ValueError("split_overlap must be greater than or equal to 0.")
34 self.split_overlap = split_overlap
35
36 @component.output_types(documents=List[Document])
37 def run(self, documents: List[Document]):
38 """
39 Splits the documents by split_by after split_length units with an overlap of split_overlap units.
40 Returns a list of documents with the split texts.
41 A metadata field "source_id" is added to each document to keep track of the original document that was split.
42 Other metadata are copied from the original document.
43 :param documents: The documents to split.
44 :return: A list of documents with the split texts.
45 """
46
47 if not isinstance(documents, list) or (documents and not isinstance(documents[0], Document)):
48 raise TypeError("DocumentSplitter expects a List of Documents as input.")
49
50 split_docs = []
51 for doc in documents:
52 if doc.content is None:
53 raise ValueError(
54 f"DocumentSplitter only works with text documents but document.content for document ID {doc.id} is None."
55 )
56 units = self._split_into_units(doc.content, self.split_by)
57 text_splits = self._concatenate_units(units, self.split_length, self.split_overlap)
58 metadata = deepcopy(doc.meta)
59 metadata["source_id"] = doc.id
60 split_docs += [Document(content=txt, meta=metadata) for txt in text_splits]
61 return {"documents": split_docs}
62
63 def _split_into_units(self, text: str, split_by: Literal["word", "sentence", "passage"]) -> List[str]:
64 if split_by == "passage":
65 split_at = "\n\n"
66 elif split_by == "sentence":
67 split_at = "."
68 elif split_by == "word":
69 split_at = " "
70 else:
71 raise NotImplementedError(
72 "DocumentSplitter only supports 'passage', 'sentence' or 'word' split_by options."
73 )
74 units = text.split(split_at)
75 # Add the delimiter back to all units except the last one
76 for i in range(len(units) - 1):
77 units[i] += split_at
78 return units
79
80 def _concatenate_units(self, elements: List[str], split_length: int, split_overlap: int) -> List[str]:
81 """
82 Concatenates the elements into parts of split_length units.
83 """
84 text_splits = []
85 segments = windowed(elements, n=split_length, step=split_length - split_overlap)
86 for seg in segments:
87 current_units = [unit for unit in seg if unit is not None]
88 txt = "".join(current_units)
89 if len(txt) > 0:
90 text_splits.append(txt)
91 return text_splits
92
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/haystack/components/preprocessors/document_splitter.py b/haystack/components/preprocessors/document_splitter.py
--- a/haystack/components/preprocessors/document_splitter.py
+++ b/haystack/components/preprocessors/document_splitter.py
@@ -14,18 +14,21 @@
"""
def __init__(
- self, split_by: Literal["word", "sentence", "passage"] = "word", split_length: int = 200, split_overlap: int = 0
+ self,
+ split_by: Literal["word", "sentence", "page", "passage"] = "word",
+ split_length: int = 200,
+ split_overlap: int = 0,
):
"""
:param split_by: The unit by which the document should be split. Choose from "word" for splitting by " ",
- "sentence" for splitting by ".", or "passage" for splitting by "\\n\\n".
+ "sentence" for splitting by ".", "page" for splitting by "\f" or "passage" for splitting by "\\n\\n".
:param split_length: The maximum number of units in each split.
:param split_overlap: The number of units that each split should overlap.
"""
self.split_by = split_by
- if split_by not in ["word", "sentence", "passage"]:
- raise ValueError("split_by must be one of 'word', 'sentence' or 'passage'.")
+ if split_by not in ["word", "sentence", "page", "passage"]:
+ raise ValueError("split_by must be one of 'word', 'sentence', 'page' or 'passage'.")
if split_length <= 0:
raise ValueError("split_length must be greater than 0.")
self.split_length = split_length
@@ -60,8 +63,10 @@
split_docs += [Document(content=txt, meta=metadata) for txt in text_splits]
return {"documents": split_docs}
- def _split_into_units(self, text: str, split_by: Literal["word", "sentence", "passage"]) -> List[str]:
- if split_by == "passage":
+ def _split_into_units(self, text: str, split_by: Literal["word", "sentence", "passage", "page"]) -> List[str]:
+ if split_by == "page":
+ split_at = "\f"
+ elif split_by == "passage":
split_at = "\n\n"
elif split_by == "sentence":
split_at = "."
@@ -69,7 +74,7 @@
split_at = " "
else:
raise NotImplementedError(
- "DocumentSplitter only supports 'passage', 'sentence' or 'word' split_by options."
+ "DocumentSplitter only supports 'word', 'sentence', 'page' or 'passage' split_by options."
)
units = text.split(split_at)
# Add the delimiter back to all units except the last one
| {"golden_diff": "diff --git a/haystack/components/preprocessors/document_splitter.py b/haystack/components/preprocessors/document_splitter.py\n--- a/haystack/components/preprocessors/document_splitter.py\n+++ b/haystack/components/preprocessors/document_splitter.py\n@@ -14,18 +14,21 @@\n \"\"\"\n \n def __init__(\n- self, split_by: Literal[\"word\", \"sentence\", \"passage\"] = \"word\", split_length: int = 200, split_overlap: int = 0\n+ self,\n+ split_by: Literal[\"word\", \"sentence\", \"page\", \"passage\"] = \"word\",\n+ split_length: int = 200,\n+ split_overlap: int = 0,\n ):\n \"\"\"\n :param split_by: The unit by which the document should be split. Choose from \"word\" for splitting by \" \",\n- \"sentence\" for splitting by \".\", or \"passage\" for splitting by \"\\\\n\\\\n\".\n+ \"sentence\" for splitting by \".\", \"page\" for splitting by \"\\f\" or \"passage\" for splitting by \"\\\\n\\\\n\".\n :param split_length: The maximum number of units in each split.\n :param split_overlap: The number of units that each split should overlap.\n \"\"\"\n \n self.split_by = split_by\n- if split_by not in [\"word\", \"sentence\", \"passage\"]:\n- raise ValueError(\"split_by must be one of 'word', 'sentence' or 'passage'.\")\n+ if split_by not in [\"word\", \"sentence\", \"page\", \"passage\"]:\n+ raise ValueError(\"split_by must be one of 'word', 'sentence', 'page' or 'passage'.\")\n if split_length <= 0:\n raise ValueError(\"split_length must be greater than 0.\")\n self.split_length = split_length\n@@ -60,8 +63,10 @@\n split_docs += [Document(content=txt, meta=metadata) for txt in text_splits]\n return {\"documents\": split_docs}\n \n- def _split_into_units(self, text: str, split_by: Literal[\"word\", \"sentence\", \"passage\"]) -> List[str]:\n- if split_by == \"passage\":\n+ def _split_into_units(self, text: str, split_by: Literal[\"word\", \"sentence\", \"passage\", \"page\"]) -> List[str]:\n+ if split_by == \"page\":\n+ split_at = \"\\f\"\n+ elif split_by == \"passage\":\n split_at = \"\\n\\n\"\n elif split_by == \"sentence\":\n split_at = \".\"\n@@ -69,7 +74,7 @@\n split_at = \" \"\n else:\n raise NotImplementedError(\n- \"DocumentSplitter only supports 'passage', 'sentence' or 'word' split_by options.\"\n+ \"DocumentSplitter only supports 'word', 'sentence', 'page' or 'passage' split_by options.\"\n )\n units = text.split(split_at)\n # Add the delimiter back to all units except the last one\n", "issue": "feat: Add split by `page` to `DocumentSplitter`\n**Is your feature request related to a problem? Please describe.**\r\nThere are some cases where we would like to be able to split the contents of a PDF by page. Either to keep all text from a single page as a document to help preserve context or to be able to perform two sets of chunking (i.e. split by page, followed up by split by sentence). I would not say this is a common set up, but I believe we can straightforwardly extend the `DocumentSplitter` to have this flexibility. \r\n\r\n**Describe the solution you'd like**\r\nAdd a new `split_by` value of `page` that would split on `\"\\f\"`.\r\n\r\n**Describe alternatives you've considered**\r\nSplit up the source file (e.g. a PDF) into individual pages before feeding it into the Haystack pipeline. Definitely doable, but less elegant than having the `DocumentSplitter` being able to handle this. 
\r\n\n", "before_files": [{"content": "from copy import deepcopy\nfrom typing import List, Literal\n\nfrom more_itertools import windowed\n\nfrom haystack import component, Document\n\n\n@component\nclass DocumentSplitter:\n \"\"\"\n Splits a list of text documents into a list of text documents with shorter texts.\n This is useful for splitting documents with long texts that otherwise would not fit into the maximum text length of language models.\n \"\"\"\n\n def __init__(\n self, split_by: Literal[\"word\", \"sentence\", \"passage\"] = \"word\", split_length: int = 200, split_overlap: int = 0\n ):\n \"\"\"\n :param split_by: The unit by which the document should be split. Choose from \"word\" for splitting by \" \",\n \"sentence\" for splitting by \".\", or \"passage\" for splitting by \"\\\\n\\\\n\".\n :param split_length: The maximum number of units in each split.\n :param split_overlap: The number of units that each split should overlap.\n \"\"\"\n\n self.split_by = split_by\n if split_by not in [\"word\", \"sentence\", \"passage\"]:\n raise ValueError(\"split_by must be one of 'word', 'sentence' or 'passage'.\")\n if split_length <= 0:\n raise ValueError(\"split_length must be greater than 0.\")\n self.split_length = split_length\n if split_overlap < 0:\n raise ValueError(\"split_overlap must be greater than or equal to 0.\")\n self.split_overlap = split_overlap\n\n @component.output_types(documents=List[Document])\n def run(self, documents: List[Document]):\n \"\"\"\n Splits the documents by split_by after split_length units with an overlap of split_overlap units.\n Returns a list of documents with the split texts.\n A metadata field \"source_id\" is added to each document to keep track of the original document that was split.\n Other metadata are copied from the original document.\n :param documents: The documents to split.\n :return: A list of documents with the split texts.\n \"\"\"\n\n if not isinstance(documents, list) or (documents and not isinstance(documents[0], Document)):\n raise TypeError(\"DocumentSplitter expects a List of Documents as input.\")\n\n split_docs = []\n for doc in documents:\n if doc.content is None:\n raise ValueError(\n f\"DocumentSplitter only works with text documents but document.content for document ID {doc.id} is None.\"\n )\n units = self._split_into_units(doc.content, self.split_by)\n text_splits = self._concatenate_units(units, self.split_length, self.split_overlap)\n metadata = deepcopy(doc.meta)\n metadata[\"source_id\"] = doc.id\n split_docs += [Document(content=txt, meta=metadata) for txt in text_splits]\n return {\"documents\": split_docs}\n\n def _split_into_units(self, text: str, split_by: Literal[\"word\", \"sentence\", \"passage\"]) -> List[str]:\n if split_by == \"passage\":\n split_at = \"\\n\\n\"\n elif split_by == \"sentence\":\n split_at = \".\"\n elif split_by == \"word\":\n split_at = \" \"\n else:\n raise NotImplementedError(\n \"DocumentSplitter only supports 'passage', 'sentence' or 'word' split_by options.\"\n )\n units = text.split(split_at)\n # Add the delimiter back to all units except the last one\n for i in range(len(units) - 1):\n units[i] += split_at\n return units\n\n def _concatenate_units(self, elements: List[str], split_length: int, split_overlap: int) -> List[str]:\n \"\"\"\n Concatenates the elements into parts of split_length units.\n \"\"\"\n text_splits = []\n segments = windowed(elements, n=split_length, step=split_length - split_overlap)\n for seg in segments:\n current_units = [unit for unit in seg if unit is 
not None]\n txt = \"\".join(current_units)\n if len(txt) > 0:\n text_splits.append(txt)\n return text_splits\n", "path": "haystack/components/preprocessors/document_splitter.py"}], "after_files": [{"content": "from copy import deepcopy\nfrom typing import List, Literal\n\nfrom more_itertools import windowed\n\nfrom haystack import component, Document\n\n\n@component\nclass DocumentSplitter:\n \"\"\"\n Splits a list of text documents into a list of text documents with shorter texts.\n This is useful for splitting documents with long texts that otherwise would not fit into the maximum text length of language models.\n \"\"\"\n\n def __init__(\n self,\n split_by: Literal[\"word\", \"sentence\", \"page\", \"passage\"] = \"word\",\n split_length: int = 200,\n split_overlap: int = 0,\n ):\n \"\"\"\n :param split_by: The unit by which the document should be split. Choose from \"word\" for splitting by \" \",\n \"sentence\" for splitting by \".\", \"page\" for splitting by \"\\f\" or \"passage\" for splitting by \"\\\\n\\\\n\".\n :param split_length: The maximum number of units in each split.\n :param split_overlap: The number of units that each split should overlap.\n \"\"\"\n\n self.split_by = split_by\n if split_by not in [\"word\", \"sentence\", \"page\", \"passage\"]:\n raise ValueError(\"split_by must be one of 'word', 'sentence', 'page' or 'passage'.\")\n if split_length <= 0:\n raise ValueError(\"split_length must be greater than 0.\")\n self.split_length = split_length\n if split_overlap < 0:\n raise ValueError(\"split_overlap must be greater than or equal to 0.\")\n self.split_overlap = split_overlap\n\n @component.output_types(documents=List[Document])\n def run(self, documents: List[Document]):\n \"\"\"\n Splits the documents by split_by after split_length units with an overlap of split_overlap units.\n Returns a list of documents with the split texts.\n A metadata field \"source_id\" is added to each document to keep track of the original document that was split.\n Other metadata are copied from the original document.\n :param documents: The documents to split.\n :return: A list of documents with the split texts.\n \"\"\"\n\n if not isinstance(documents, list) or (documents and not isinstance(documents[0], Document)):\n raise TypeError(\"DocumentSplitter expects a List of Documents as input.\")\n\n split_docs = []\n for doc in documents:\n if doc.content is None:\n raise ValueError(\n f\"DocumentSplitter only works with text documents but document.content for document ID {doc.id} is None.\"\n )\n units = self._split_into_units(doc.content, self.split_by)\n text_splits = self._concatenate_units(units, self.split_length, self.split_overlap)\n metadata = deepcopy(doc.meta)\n metadata[\"source_id\"] = doc.id\n split_docs += [Document(content=txt, meta=metadata) for txt in text_splits]\n return {\"documents\": split_docs}\n\n def _split_into_units(self, text: str, split_by: Literal[\"word\", \"sentence\", \"passage\", \"page\"]) -> List[str]:\n if split_by == \"page\":\n split_at = \"\\f\"\n elif split_by == \"passage\":\n split_at = \"\\n\\n\"\n elif split_by == \"sentence\":\n split_at = \".\"\n elif split_by == \"word\":\n split_at = \" \"\n else:\n raise NotImplementedError(\n \"DocumentSplitter only supports 'word', 'sentence', 'page' or 'passage' split_by options.\"\n )\n units = text.split(split_at)\n # Add the delimiter back to all units except the last one\n for i in range(len(units) - 1):\n units[i] += split_at\n return units\n\n def _concatenate_units(self, elements: List[str], 
split_length: int, split_overlap: int) -> List[str]:\n \"\"\"\n Concatenates the elements into parts of split_length units.\n \"\"\"\n text_splits = []\n segments = windowed(elements, n=split_length, step=split_length - split_overlap)\n for seg in segments:\n current_units = [unit for unit in seg if unit is not None]\n txt = \"\".join(current_units)\n if len(txt) > 0:\n text_splits.append(txt)\n return text_splits\n", "path": "haystack/components/preprocessors/document_splitter.py"}]} | 1,521 | 667 |
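
The record above adds a `page` option that splits on the form-feed character `"\f"`. Below is a minimal usage sketch, assuming the patch has been applied; the import path mirrors the file shown in the record, and the sample text and variable names are invented for illustration:

```python
from haystack import Document
from haystack.components.preprocessors.document_splitter import DocumentSplitter

# "\f" (form feed) is the delimiter the new "page" option splits on.
doc = Document(content="Page one text.\fPage two text.\fPage three text.")

# split_length=1 keeps each page as its own Document; overlap stays at 0.
splitter = DocumentSplitter(split_by="page", split_length=1, split_overlap=0)
result = splitter.run(documents=[doc])

for piece in result["documents"]:
    # The delimiter stays attached to every unit except the last one,
    # matching the behavior of the existing word/sentence/passage modes.
    print(repr(piece.content))
```

A second `DocumentSplitter` with `split_by="sentence"` could then be run over these page-level documents to get the two-pass chunking described in the issue.
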
gh_patches_debug_1503 | rasdani/github-patches | git_diff | readthedocs__readthedocs.org-11075 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Build: support Ruby under `build.tools`
We should add support for Ruby on `build.tools`. It will be useful for doctools like Jekyll.
Work required:
- [x] Update the documentation
- [x] Install asdf-ruby (https://github.com/asdf-vm/asdf-ruby) on Docker images
- [x] Compile latest Ruby version and upload it S3 (happening at https://app.circleci.com/pipelines/github/readthedocs/readthedocs-docker-images/289/workflows/f1bc7c62-02d8-4353-ac94-972eb58b0675/jobs/503)
- [x] Update `settings.py` to add this tool and version
- [x] Update config v2 to accept this value
- [x] Create a branch on `test-builds` for this use case
> **Note**: we had a support request for this at https://github.com/readthedocs/readthedocs.org/issues/9599#issuecomment-1560011462
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `readthedocs/builds/constants_docker.py`
Content:
```
1 """
2 Define constants here to allow import them without any external dependency.
3
4 There are situations where we want to have access to these values without Django installed
5 (e.g. common/dockerfiles/tasks.py)
6
7 Note these constants where previously defined as Django settings in ``readthedocs/settings/base.py``.
8 """
9
10 DOCKER_DEFAULT_IMAGE = "readthedocs/build"
11
12 # Adding a new tool/version to this setting requires:
13 #
14 # - a mapping between the expected version in the config file, to the full
15 # version installed via asdf (found via ``asdf list all <tool>``)
16 #
17 # - running the script ``./scripts/compile_version_upload.sh`` in
18 # development and production environments to compile and cache the new
19 # tool/version
20 #
21 # Note that when updating this options, you should also update the file:
22 # readthedocs/rtd_tests/fixtures/spec/v2/schema.json
23 RTD_DOCKER_BUILD_SETTINGS = {
24 # Mapping of build.os options to docker image.
25 "os": {
26 "ubuntu-20.04": f"{DOCKER_DEFAULT_IMAGE}:ubuntu-20.04",
27 "ubuntu-22.04": f"{DOCKER_DEFAULT_IMAGE}:ubuntu-22.04",
28 },
29 # Mapping of build.tools options to specific versions.
30 "tools": {
31 "python": {
32 "2.7": "2.7.18",
33 "3.6": "3.6.15",
34 "3.7": "3.7.17",
35 "3.8": "3.8.18",
36 "3.9": "3.9.18",
37 "3.10": "3.10.13",
38 "3.11": "3.11.6",
39 "3.12": "3.12.0",
40 # Always point to the latest stable release.
41 "3": "3.12.0",
42 "miniconda3-4.7": "miniconda3-4.7.12",
43 "mambaforge-4.10": "mambaforge-4.10.3-10",
44 "mambaforge-22.9": "mambaforge-22.9.0-3",
45 },
46 "nodejs": {
47 "14": "14.20.1",
48 "16": "16.18.1",
49 "18": "18.16.1", # LTS
50 "19": "19.0.1",
51 "20": "20.3.1",
52 },
53 "rust": {
54 "1.55": "1.55.0",
55 "1.61": "1.61.0",
56 "1.64": "1.64.0",
57 "1.70": "1.70.0",
58 },
59 "golang": {
60 "1.17": "1.17.13",
61 "1.18": "1.18.10",
62 "1.19": "1.19.10",
63 "1.20": "1.20.5",
64 },
65 },
66 }
67
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/readthedocs/builds/constants_docker.py b/readthedocs/builds/constants_docker.py
--- a/readthedocs/builds/constants_docker.py
+++ b/readthedocs/builds/constants_docker.py
@@ -50,6 +50,9 @@
"19": "19.0.1",
"20": "20.3.1",
},
+ "ruby": {
+ "3.3": "3.3.0",
+ },
"rust": {
"1.55": "1.55.0",
"1.61": "1.61.0",
| {"golden_diff": "diff --git a/readthedocs/builds/constants_docker.py b/readthedocs/builds/constants_docker.py\n--- a/readthedocs/builds/constants_docker.py\n+++ b/readthedocs/builds/constants_docker.py\n@@ -50,6 +50,9 @@\n \"19\": \"19.0.1\",\n \"20\": \"20.3.1\",\n },\n+ \"ruby\": {\n+ \"3.3\": \"3.3.0\",\n+ },\n \"rust\": {\n \"1.55\": \"1.55.0\",\n \"1.61\": \"1.61.0\",\n", "issue": "Build: support Ruby under `build.tools` \nWe should add support for Ruby on `build.tools`. It will be useful for doctools like Jekyll.\r\n\r\nWork required:\r\n\r\n- [x] Update the documentation\r\n- [x] Install asdf-ruby (https://github.com/asdf-vm/asdf-ruby) on Docker images\r\n- [x] Compile latest Ruby version and upload it S3 (happening at https://app.circleci.com/pipelines/github/readthedocs/readthedocs-docker-images/289/workflows/f1bc7c62-02d8-4353-ac94-972eb58b0675/jobs/503)\r\n- [x] Update `settings.py` to add this tool and version\r\n- [x] Update config v2 to accept this value\r\n- [x] Create a branch on `test-builds` for this use case\r\n\r\n\r\n> **Note**: we had a support request for this at https://github.com/readthedocs/readthedocs.org/issues/9599#issuecomment-1560011462\n", "before_files": [{"content": "\"\"\"\nDefine constants here to allow import them without any external dependency.\n\nThere are situations where we want to have access to these values without Django installed\n(e.g. common/dockerfiles/tasks.py)\n\nNote these constants where previously defined as Django settings in ``readthedocs/settings/base.py``.\n\"\"\"\n\nDOCKER_DEFAULT_IMAGE = \"readthedocs/build\"\n\n# Adding a new tool/version to this setting requires:\n#\n# - a mapping between the expected version in the config file, to the full\n# version installed via asdf (found via ``asdf list all <tool>``)\n#\n# - running the script ``./scripts/compile_version_upload.sh`` in\n# development and production environments to compile and cache the new\n# tool/version\n#\n# Note that when updating this options, you should also update the file:\n# readthedocs/rtd_tests/fixtures/spec/v2/schema.json\nRTD_DOCKER_BUILD_SETTINGS = {\n # Mapping of build.os options to docker image.\n \"os\": {\n \"ubuntu-20.04\": f\"{DOCKER_DEFAULT_IMAGE}:ubuntu-20.04\",\n \"ubuntu-22.04\": f\"{DOCKER_DEFAULT_IMAGE}:ubuntu-22.04\",\n },\n # Mapping of build.tools options to specific versions.\n \"tools\": {\n \"python\": {\n \"2.7\": \"2.7.18\",\n \"3.6\": \"3.6.15\",\n \"3.7\": \"3.7.17\",\n \"3.8\": \"3.8.18\",\n \"3.9\": \"3.9.18\",\n \"3.10\": \"3.10.13\",\n \"3.11\": \"3.11.6\",\n \"3.12\": \"3.12.0\",\n # Always point to the latest stable release.\n \"3\": \"3.12.0\",\n \"miniconda3-4.7\": \"miniconda3-4.7.12\",\n \"mambaforge-4.10\": \"mambaforge-4.10.3-10\",\n \"mambaforge-22.9\": \"mambaforge-22.9.0-3\",\n },\n \"nodejs\": {\n \"14\": \"14.20.1\",\n \"16\": \"16.18.1\",\n \"18\": \"18.16.1\", # LTS\n \"19\": \"19.0.1\",\n \"20\": \"20.3.1\",\n },\n \"rust\": {\n \"1.55\": \"1.55.0\",\n \"1.61\": \"1.61.0\",\n \"1.64\": \"1.64.0\",\n \"1.70\": \"1.70.0\",\n },\n \"golang\": {\n \"1.17\": \"1.17.13\",\n \"1.18\": \"1.18.10\",\n \"1.19\": \"1.19.10\",\n \"1.20\": \"1.20.5\",\n },\n },\n}\n", "path": "readthedocs/builds/constants_docker.py"}], "after_files": [{"content": "\"\"\"\nDefine constants here to allow import them without any external dependency.\n\nThere are situations where we want to have access to these values without Django installed\n(e.g. 
common/dockerfiles/tasks.py)\n\nNote these constants where previously defined as Django settings in ``readthedocs/settings/base.py``.\n\"\"\"\n\nDOCKER_DEFAULT_IMAGE = \"readthedocs/build\"\n\n# Adding a new tool/version to this setting requires:\n#\n# - a mapping between the expected version in the config file, to the full\n# version installed via asdf (found via ``asdf list all <tool>``)\n#\n# - running the script ``./scripts/compile_version_upload.sh`` in\n# development and production environments to compile and cache the new\n# tool/version\n#\n# Note that when updating this options, you should also update the file:\n# readthedocs/rtd_tests/fixtures/spec/v2/schema.json\nRTD_DOCKER_BUILD_SETTINGS = {\n # Mapping of build.os options to docker image.\n \"os\": {\n \"ubuntu-20.04\": f\"{DOCKER_DEFAULT_IMAGE}:ubuntu-20.04\",\n \"ubuntu-22.04\": f\"{DOCKER_DEFAULT_IMAGE}:ubuntu-22.04\",\n },\n # Mapping of build.tools options to specific versions.\n \"tools\": {\n \"python\": {\n \"2.7\": \"2.7.18\",\n \"3.6\": \"3.6.15\",\n \"3.7\": \"3.7.17\",\n \"3.8\": \"3.8.18\",\n \"3.9\": \"3.9.18\",\n \"3.10\": \"3.10.13\",\n \"3.11\": \"3.11.6\",\n \"3.12\": \"3.12.0\",\n # Always point to the latest stable release.\n \"3\": \"3.12.0\",\n \"miniconda3-4.7\": \"miniconda3-4.7.12\",\n \"mambaforge-4.10\": \"mambaforge-4.10.3-10\",\n \"mambaforge-22.9\": \"mambaforge-22.9.0-3\",\n },\n \"nodejs\": {\n \"14\": \"14.20.1\",\n \"16\": \"16.18.1\",\n \"18\": \"18.16.1\", # LTS\n \"19\": \"19.0.1\",\n \"20\": \"20.3.1\",\n },\n \"ruby\": {\n \"3.3\": \"3.3.0\",\n },\n \"rust\": {\n \"1.55\": \"1.55.0\",\n \"1.61\": \"1.61.0\",\n \"1.64\": \"1.64.0\",\n \"1.70\": \"1.70.0\",\n },\n \"golang\": {\n \"1.17\": \"1.17.13\",\n \"1.18\": \"1.18.10\",\n \"1.19\": \"1.19.10\",\n \"1.20\": \"1.20.5\",\n },\n },\n}\n", "path": "readthedocs/builds/constants_docker.py"}]} | 1,347 | 146 |
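
The patch above only adds a `ruby` entry to the version mapping; the comment block in the file explains that each short version a user requests must map to a full asdf version that has been pre-compiled and uploaded. A simplified illustration of that lookup follows (not Read the Docs' actual resolution code, and the requested values are invented):

```python
# Subset of the patched mapping, reduced to the newly added tool.
RTD_DOCKER_BUILD_SETTINGS = {
    "tools": {
        "ruby": {"3.3": "3.3.0"},  # entry added by the patch above
    },
}

# What a user would request under build.tools in .readthedocs.yaml.
requested_tool, requested_version = "ruby", "3.3"

# The short version resolves to the full version asdf installs in the build image.
full_version = RTD_DOCKER_BUILD_SETTINGS["tools"][requested_tool][requested_version]
print(full_version)  # -> "3.3.0"
```
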
gh_patches_debug_58664 | rasdani/github-patches | git_diff | jazzband__pip-tools-12 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support Python versions lower than 2.7, too
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 """
2 pip-tools keeps your pinned dependencies fresh.
3 """
4 import sys
5 from setuptools import setup
6
7
8 setup(
9 name='pip-tools',
10 version='0.2',
11 url='https://github.com/nvie/pip-tools/',
12 license='BSD',
13 author='Vincent Driessen',
14 author_email='[email protected]',
15 description=__doc__,
16 #packages=[],
17 scripts=['bin/pip-review', 'bin/pip-dump'],
18 #include_package_data=True,
19 zip_safe=False,
20 platforms='any',
21 #install_requires=[],
22 classifiers=[
23 # As from http://pypi.python.org/pypi?%3Aaction=list_classifiers
24 #'Development Status :: 1 - Planning',
25 #'Development Status :: 2 - Pre-Alpha',
26 #'Development Status :: 3 - Alpha',
27 'Development Status :: 4 - Beta',
28 #'Development Status :: 5 - Production/Stable',
29 #'Development Status :: 6 - Mature',
30 #'Development Status :: 7 - Inactive',
31 'Intended Audience :: Developers',
32 'Intended Audience :: System Administrators',
33 'License :: OSI Approved :: BSD License',
34 'Operating System :: OS Independent',
35 'Topic :: System :: Systems Administration',
36 ]
37 )
38
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -18,7 +18,7 @@
#include_package_data=True,
zip_safe=False,
platforms='any',
- #install_requires=[],
+ install_requires=['argparse==1.2.1'], # needed for python 2.6
classifiers=[
# As from http://pypi.python.org/pypi?%3Aaction=list_classifiers
#'Development Status :: 1 - Planning',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -18,7 +18,7 @@\n #include_package_data=True,\n zip_safe=False,\n platforms='any',\n- #install_requires=[],\n+ install_requires=['argparse==1.2.1'], # needed for python 2.6\n classifiers=[\n # As from http://pypi.python.org/pypi?%3Aaction=list_classifiers\n #'Development Status :: 1 - Planning',\n", "issue": "Support Python versions lower than 2.7, too\n\n", "before_files": [{"content": "\"\"\"\npip-tools keeps your pinned dependencies fresh.\n\"\"\"\nimport sys\nfrom setuptools import setup\n\n\nsetup(\n name='pip-tools',\n version='0.2',\n url='https://github.com/nvie/pip-tools/',\n license='BSD',\n author='Vincent Driessen',\n author_email='[email protected]',\n description=__doc__,\n #packages=[],\n scripts=['bin/pip-review', 'bin/pip-dump'],\n #include_package_data=True,\n zip_safe=False,\n platforms='any',\n #install_requires=[],\n classifiers=[\n # As from http://pypi.python.org/pypi?%3Aaction=list_classifiers\n #'Development Status :: 1 - Planning',\n #'Development Status :: 2 - Pre-Alpha',\n #'Development Status :: 3 - Alpha',\n 'Development Status :: 4 - Beta',\n #'Development Status :: 5 - Production/Stable',\n #'Development Status :: 6 - Mature',\n #'Development Status :: 7 - Inactive',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Topic :: System :: Systems Administration',\n ]\n)\n", "path": "setup.py"}], "after_files": [{"content": "\"\"\"\npip-tools keeps your pinned dependencies fresh.\n\"\"\"\nimport sys\nfrom setuptools import setup\n\n\nsetup(\n name='pip-tools',\n version='0.2',\n url='https://github.com/nvie/pip-tools/',\n license='BSD',\n author='Vincent Driessen',\n author_email='[email protected]',\n description=__doc__,\n #packages=[],\n scripts=['bin/pip-review', 'bin/pip-dump'],\n #include_package_data=True,\n zip_safe=False,\n platforms='any',\n install_requires=['argparse==1.2.1'], # needed for python 2.6\n classifiers=[\n # As from http://pypi.python.org/pypi?%3Aaction=list_classifiers\n #'Development Status :: 1 - Planning',\n #'Development Status :: 2 - Pre-Alpha',\n #'Development Status :: 3 - Alpha',\n 'Development Status :: 4 - Beta',\n #'Development Status :: 5 - Production/Stable',\n #'Development Status :: 6 - Mature',\n #'Development Status :: 7 - Inactive',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: BSD License',\n 'Operating System :: OS Independent',\n 'Topic :: System :: Systems Administration',\n ]\n)\n", "path": "setup.py"}]} | 604 | 114 |
gh_patches_debug_8563 | rasdani/github-patches | git_diff | google__osv.dev-1044 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
"withdrawn" entries not getting exported correctly
Identified by @andrewpollock :
https://github.com/google/osv.dev/blob/26050deb42785bc5a4dc7d802eac8e7f95135509/docker/exporter/exporter.py#L94
withdrawn entries are marked as status = INVALID in our DB, so they're not included.
They should be included when we export.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docker/exporter/exporter.py`
Content:
```
1 #!/usr/bin/env python3
2 # Copyright 2021 Google LLC
3 #
4 # Licensed under the Apache License, Version 2.0 (the "License");
5 # you may not use this file except in compliance with the License.
6 # You may obtain a copy of the License at
7 #
8 # http://www.apache.org/licenses/LICENSE-2.0
9 #
10 # Unless required by applicable law or agreed to in writing, software
11 # distributed under the License is distributed on an "AS IS" BASIS,
12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """OSV Exporter."""
16 import argparse
17 import concurrent.futures
18 import logging
19 import os
20 import tempfile
21 import zipfile
22 from typing import List
23
24 from google.cloud import ndb
25 from google.cloud import storage
26 from google.cloud import logging as google_logging
27
28 import osv
29
30 DEFAULT_WORK_DIR = '/work'
31
32 DEFAULT_EXPORT_BUCKET = 'osv-vulnerabilities'
33 _EXPORT_WORKERS = 32
34 ECOSYSTEMS_FILE = 'ecosystems.txt'
35
36
37 class Exporter:
38 """Exporter."""
39
40 def __init__(self, work_dir, export_bucket):
41 self._work_dir = work_dir
42 self._export_bucket = export_bucket
43
44 def run(self):
45 """Run exporter."""
46 query = osv.Bug.query(projection=[osv.Bug.ecosystem], distinct=True)
47 ecosystems = [bug.ecosystem[0] for bug in query if bug.ecosystem]
48
49 for ecosystem in ecosystems:
50 with tempfile.TemporaryDirectory() as tmp_dir:
51 self._export_ecosystem_to_bucket(ecosystem, tmp_dir)
52
53 with tempfile.TemporaryDirectory() as tmp_dir:
54 self._export_ecosystem_list_to_bucket(ecosystems, tmp_dir)
55
56 def upload_single(self, bucket, source_path, target_path):
57 """Upload a single file to a bucket."""
58 logging.info('Uploading %s', target_path)
59 try:
60 blob = bucket.blob(target_path)
61 blob.upload_from_filename(source_path)
62 except Exception as e:
63 logging.error('Failed to export: %s', e)
64
65 def _export_ecosystem_list_to_bucket(self, ecosystems: List[str],
66 tmp_dir: str):
67 """Export an ecosystems.txt file with all of the ecosystem names.
68
69 See https://github.com/google/osv.dev/issues/619
70
71 Args:
72 ecosystems: the list of ecosystem names
73 tmp_dir: temporary directory for scratch
74 """
75
76 logging.info('Exporting ecosystem list to %s', ECOSYSTEMS_FILE)
77 storage_client = storage.Client()
78 bucket = storage_client.get_bucket(self._export_bucket)
79 ecosystems_file_path = os.path.join(tmp_dir, ECOSYSTEMS_FILE)
80 with open(ecosystems_file_path, "w") as ecosystems_file:
81 ecosystems_file.writelines([e + "\n" for e in ecosystems])
82
83 self.upload_single(bucket, ecosystems_file_path, ECOSYSTEMS_FILE)
84
85 def _export_ecosystem_to_bucket(self, ecosystem, tmp_dir):
86 """Export ecosystem vulns to bucket."""
87 logging.info('Exporting vulnerabilities for ecosystem %s', ecosystem)
88 storage_client = storage.Client()
89 bucket = storage_client.get_bucket(self._export_bucket)
90
91 zip_path = os.path.join(tmp_dir, 'all.zip')
92 with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zip_file:
93 for bug in osv.Bug.query(osv.Bug.ecosystem == ecosystem):
94 if not bug.public or not bug.status == osv.BugStatus.PROCESSED:
95 continue
96
97 file_path = os.path.join(tmp_dir, bug.id() + '.json')
98 osv.write_vulnerability(
99 bug.to_vulnerability(include_source=True), file_path)
100 zip_file.write(file_path, os.path.basename(file_path))
101
102 with concurrent.futures.ThreadPoolExecutor(
103 max_workers=_EXPORT_WORKERS) as executor:
104 for filename in os.listdir(tmp_dir):
105 executor.submit(self.upload_single, bucket,
106 os.path.join(tmp_dir, filename),
107 f'{ecosystem}/{filename}')
108
109
110 def main():
111 logging.getLogger().setLevel(logging.INFO)
112 parser = argparse.ArgumentParser(description='Exporter')
113 parser.add_argument(
114 '--work_dir', help='Working directory', default=DEFAULT_WORK_DIR)
115 parser.add_argument(
116 '--bucket',
117 help='Bucket name to export to',
118 default=DEFAULT_EXPORT_BUCKET)
119 args = parser.parse_args()
120
121 tmp_dir = os.path.join(args.work_dir, 'tmp')
122 os.makedirs(tmp_dir, exist_ok=True)
123 os.environ['TMPDIR'] = tmp_dir
124
125 exporter = Exporter(args.work_dir, args.bucket)
126 exporter.run()
127
128
129 if __name__ == '__main__':
130 _ndb_client = ndb.Client()
131 logging_client = google_logging.Client()
132 logging_client.setup_logging()
133 with _ndb_client.context():
134 main()
135
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docker/exporter/exporter.py b/docker/exporter/exporter.py
--- a/docker/exporter/exporter.py
+++ b/docker/exporter/exporter.py
@@ -91,7 +91,7 @@
zip_path = os.path.join(tmp_dir, 'all.zip')
with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zip_file:
for bug in osv.Bug.query(osv.Bug.ecosystem == ecosystem):
- if not bug.public or not bug.status == osv.BugStatus.PROCESSED:
+ if not bug.public or bug.status == osv.BugStatus.UNPROCESSED:
continue
file_path = os.path.join(tmp_dir, bug.id() + '.json')
| {"golden_diff": "diff --git a/docker/exporter/exporter.py b/docker/exporter/exporter.py\n--- a/docker/exporter/exporter.py\n+++ b/docker/exporter/exporter.py\n@@ -91,7 +91,7 @@\n zip_path = os.path.join(tmp_dir, 'all.zip')\n with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zip_file:\n for bug in osv.Bug.query(osv.Bug.ecosystem == ecosystem):\n- if not bug.public or not bug.status == osv.BugStatus.PROCESSED:\n+ if not bug.public or bug.status == osv.BugStatus.UNPROCESSED:\n continue\n \n file_path = os.path.join(tmp_dir, bug.id() + '.json')\n", "issue": "\"withdrawn\" entries not getting exported correctly\nIdentified by @andrewpollock : \r\n\r\nhttps://github.com/google/osv.dev/blob/26050deb42785bc5a4dc7d802eac8e7f95135509/docker/exporter/exporter.py#L94\r\n\r\nwithdrawn entries are marked as status = INVALID in our DB, so they're not included.\r\n\r\nThey should be included when we export. \n", "before_files": [{"content": "#!/usr/bin/env python3\n# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"OSV Exporter.\"\"\"\nimport argparse\nimport concurrent.futures\nimport logging\nimport os\nimport tempfile\nimport zipfile\nfrom typing import List\n\nfrom google.cloud import ndb\nfrom google.cloud import storage\nfrom google.cloud import logging as google_logging\n\nimport osv\n\nDEFAULT_WORK_DIR = '/work'\n\nDEFAULT_EXPORT_BUCKET = 'osv-vulnerabilities'\n_EXPORT_WORKERS = 32\nECOSYSTEMS_FILE = 'ecosystems.txt'\n\n\nclass Exporter:\n \"\"\"Exporter.\"\"\"\n\n def __init__(self, work_dir, export_bucket):\n self._work_dir = work_dir\n self._export_bucket = export_bucket\n\n def run(self):\n \"\"\"Run exporter.\"\"\"\n query = osv.Bug.query(projection=[osv.Bug.ecosystem], distinct=True)\n ecosystems = [bug.ecosystem[0] for bug in query if bug.ecosystem]\n\n for ecosystem in ecosystems:\n with tempfile.TemporaryDirectory() as tmp_dir:\n self._export_ecosystem_to_bucket(ecosystem, tmp_dir)\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n self._export_ecosystem_list_to_bucket(ecosystems, tmp_dir)\n\n def upload_single(self, bucket, source_path, target_path):\n \"\"\"Upload a single file to a bucket.\"\"\"\n logging.info('Uploading %s', target_path)\n try:\n blob = bucket.blob(target_path)\n blob.upload_from_filename(source_path)\n except Exception as e:\n logging.error('Failed to export: %s', e)\n\n def _export_ecosystem_list_to_bucket(self, ecosystems: List[str],\n tmp_dir: str):\n \"\"\"Export an ecosystems.txt file with all of the ecosystem names.\n\n See https://github.com/google/osv.dev/issues/619\n\n Args:\n ecosystems: the list of ecosystem names\n tmp_dir: temporary directory for scratch\n \"\"\"\n\n logging.info('Exporting ecosystem list to %s', ECOSYSTEMS_FILE)\n storage_client = storage.Client()\n bucket = storage_client.get_bucket(self._export_bucket)\n ecosystems_file_path = os.path.join(tmp_dir, ECOSYSTEMS_FILE)\n with open(ecosystems_file_path, \"w\") as ecosystems_file:\n ecosystems_file.writelines([e + \"\\n\" for e in 
ecosystems])\n\n self.upload_single(bucket, ecosystems_file_path, ECOSYSTEMS_FILE)\n\n def _export_ecosystem_to_bucket(self, ecosystem, tmp_dir):\n \"\"\"Export ecosystem vulns to bucket.\"\"\"\n logging.info('Exporting vulnerabilities for ecosystem %s', ecosystem)\n storage_client = storage.Client()\n bucket = storage_client.get_bucket(self._export_bucket)\n\n zip_path = os.path.join(tmp_dir, 'all.zip')\n with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zip_file:\n for bug in osv.Bug.query(osv.Bug.ecosystem == ecosystem):\n if not bug.public or not bug.status == osv.BugStatus.PROCESSED:\n continue\n\n file_path = os.path.join(tmp_dir, bug.id() + '.json')\n osv.write_vulnerability(\n bug.to_vulnerability(include_source=True), file_path)\n zip_file.write(file_path, os.path.basename(file_path))\n\n with concurrent.futures.ThreadPoolExecutor(\n max_workers=_EXPORT_WORKERS) as executor:\n for filename in os.listdir(tmp_dir):\n executor.submit(self.upload_single, bucket,\n os.path.join(tmp_dir, filename),\n f'{ecosystem}/{filename}')\n\n\ndef main():\n logging.getLogger().setLevel(logging.INFO)\n parser = argparse.ArgumentParser(description='Exporter')\n parser.add_argument(\n '--work_dir', help='Working directory', default=DEFAULT_WORK_DIR)\n parser.add_argument(\n '--bucket',\n help='Bucket name to export to',\n default=DEFAULT_EXPORT_BUCKET)\n args = parser.parse_args()\n\n tmp_dir = os.path.join(args.work_dir, 'tmp')\n os.makedirs(tmp_dir, exist_ok=True)\n os.environ['TMPDIR'] = tmp_dir\n\n exporter = Exporter(args.work_dir, args.bucket)\n exporter.run()\n\n\nif __name__ == '__main__':\n _ndb_client = ndb.Client()\n logging_client = google_logging.Client()\n logging_client.setup_logging()\n with _ndb_client.context():\n main()\n", "path": "docker/exporter/exporter.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"OSV Exporter.\"\"\"\nimport argparse\nimport concurrent.futures\nimport logging\nimport os\nimport tempfile\nimport zipfile\nfrom typing import List\n\nfrom google.cloud import ndb\nfrom google.cloud import storage\nfrom google.cloud import logging as google_logging\n\nimport osv\n\nDEFAULT_WORK_DIR = '/work'\n\nDEFAULT_EXPORT_BUCKET = 'osv-vulnerabilities'\n_EXPORT_WORKERS = 32\nECOSYSTEMS_FILE = 'ecosystems.txt'\n\n\nclass Exporter:\n \"\"\"Exporter.\"\"\"\n\n def __init__(self, work_dir, export_bucket):\n self._work_dir = work_dir\n self._export_bucket = export_bucket\n\n def run(self):\n \"\"\"Run exporter.\"\"\"\n query = osv.Bug.query(projection=[osv.Bug.ecosystem], distinct=True)\n ecosystems = [bug.ecosystem[0] for bug in query if bug.ecosystem]\n\n for ecosystem in ecosystems:\n with tempfile.TemporaryDirectory() as tmp_dir:\n self._export_ecosystem_to_bucket(ecosystem, tmp_dir)\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n self._export_ecosystem_list_to_bucket(ecosystems, tmp_dir)\n\n def upload_single(self, bucket, source_path, target_path):\n \"\"\"Upload a 
single file to a bucket.\"\"\"\n logging.info('Uploading %s', target_path)\n try:\n blob = bucket.blob(target_path)\n blob.upload_from_filename(source_path)\n except Exception as e:\n logging.error('Failed to export: %s', e)\n\n def _export_ecosystem_list_to_bucket(self, ecosystems: List[str],\n tmp_dir: str):\n \"\"\"Export an ecosystems.txt file with all of the ecosystem names.\n\n See https://github.com/google/osv.dev/issues/619\n\n Args:\n ecosystems: the list of ecosystem names\n tmp_dir: temporary directory for scratch\n \"\"\"\n\n logging.info('Exporting ecosystem list to %s', ECOSYSTEMS_FILE)\n storage_client = storage.Client()\n bucket = storage_client.get_bucket(self._export_bucket)\n ecosystems_file_path = os.path.join(tmp_dir, ECOSYSTEMS_FILE)\n with open(ecosystems_file_path, \"w\") as ecosystems_file:\n ecosystems_file.writelines([e + \"\\n\" for e in ecosystems])\n\n self.upload_single(bucket, ecosystems_file_path, ECOSYSTEMS_FILE)\n\n def _export_ecosystem_to_bucket(self, ecosystem, tmp_dir):\n \"\"\"Export ecosystem vulns to bucket.\"\"\"\n logging.info('Exporting vulnerabilities for ecosystem %s', ecosystem)\n storage_client = storage.Client()\n bucket = storage_client.get_bucket(self._export_bucket)\n\n zip_path = os.path.join(tmp_dir, 'all.zip')\n with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zip_file:\n for bug in osv.Bug.query(osv.Bug.ecosystem == ecosystem):\n if not bug.public or bug.status == osv.BugStatus.UNPROCESSED:\n continue\n\n file_path = os.path.join(tmp_dir, bug.id() + '.json')\n osv.write_vulnerability(\n bug.to_vulnerability(include_source=True), file_path)\n zip_file.write(file_path, os.path.basename(file_path))\n\n with concurrent.futures.ThreadPoolExecutor(\n max_workers=_EXPORT_WORKERS) as executor:\n for filename in os.listdir(tmp_dir):\n executor.submit(self.upload_single, bucket,\n os.path.join(tmp_dir, filename),\n f'{ecosystem}/{filename}')\n\n\ndef main():\n logging.getLogger().setLevel(logging.INFO)\n parser = argparse.ArgumentParser(description='Exporter')\n parser.add_argument(\n '--work_dir', help='Working directory', default=DEFAULT_WORK_DIR)\n parser.add_argument(\n '--bucket',\n help='Bucket name to export to',\n default=DEFAULT_EXPORT_BUCKET)\n args = parser.parse_args()\n\n tmp_dir = os.path.join(args.work_dir, 'tmp')\n os.makedirs(tmp_dir, exist_ok=True)\n os.environ['TMPDIR'] = tmp_dir\n\n exporter = Exporter(args.work_dir, args.bucket)\n exporter.run()\n\n\nif __name__ == '__main__':\n _ndb_client = ndb.Client()\n logging_client = google_logging.Client()\n logging_client.setup_logging()\n with _ndb_client.context():\n main()\n", "path": "docker/exporter/exporter.py"}]} | 1,736 | 163 |
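
The semantics of the one-line filter change above are easy to miss: withdrawn entries are stored as `INVALID`, so excluding everything that is not `PROCESSED` also excluded them. Here is a self-contained sketch that isolates just the status check, using a stand-in enum whose member values are illustrative rather than osv's real ones:

```python
from enum import Enum


class BugStatus(Enum):  # stand-in for osv.BugStatus
    UNPROCESSED = 1
    PROCESSED = 2
    INVALID = 3  # how withdrawn entries are stored, per the issue


statuses = [BugStatus.PROCESSED, BugStatus.INVALID, BugStatus.UNPROCESSED]

# Old filter: keep only PROCESSED, silently dropping withdrawn (INVALID) entries.
exported_before = [s.name for s in statuses if s == BugStatus.PROCESSED]

# New filter: skip only UNPROCESSED, so withdrawn entries are exported again.
exported_after = [s.name for s in statuses if s != BugStatus.UNPROCESSED]

print(exported_before)  # ['PROCESSED']
print(exported_after)   # ['PROCESSED', 'INVALID']
```

The real exporter additionally requires `bug.public`, which the patch leaves unchanged.
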
gh_patches_debug_6226 | rasdani/github-patches | git_diff | DataDog__dd-trace-py-1847 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
KeyError: 'split_by_domain'
```
File "/Users/viperfx/.pyenv/versions/scrumgenius-env-3.6/lib/python3.6/site-packages/requests/sessions.py", line 581, in post
return self.request('POST', url, data=data, json=json, **kwargs)
File "/Users/viperfx/.pyenv/versions/scrumgenius-env-3.6/lib/python3.6/site-packages/requests/sessions.py", line 533, in request
resp = self.send(prep, **send_kwargs)
File "/Users/viperfx/.pyenv/versions/scrumgenius-env-3.6/lib/python3.6/site-packages/ddtrace/contrib/requests/connection.py", line 73, in _wrap_send
span.service = _extract_service_name(instance, span, hostname=hostname)
File "/Users/viperfx/.pyenv/versions/scrumgenius-env-3.6/lib/python3.6/site-packages/ddtrace/contrib/requests/connection.py", line 30, in _extract_service_name
if cfg['split_by_domain'] and hostname:
KeyError: 'split_by_domain'
```
Happens on python 3.6 and 3.7
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ddtrace/contrib/requests/session.py`
Content:
```
1 import requests
2
3 from ddtrace.vendor.wrapt import wrap_function_wrapper as _w
4
5 from .connection import _wrap_send
6
7
8 class TracedSession(requests.Session):
9 """TracedSession is a requests' Session that is already traced.
10 You can use it if you want a finer grained control for your
11 HTTP clients.
12 """
13
14 pass
15
16
17 # always patch our `TracedSession` when imported
18 _w(TracedSession, "send", _wrap_send)
19
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ddtrace/contrib/requests/session.py b/ddtrace/contrib/requests/session.py
--- a/ddtrace/contrib/requests/session.py
+++ b/ddtrace/contrib/requests/session.py
@@ -2,6 +2,8 @@
from ddtrace.vendor.wrapt import wrap_function_wrapper as _w
+from ddtrace import config, Pin
+
from .connection import _wrap_send
@@ -16,3 +18,4 @@
# always patch our `TracedSession` when imported
_w(TracedSession, "send", _wrap_send)
+Pin(_config=config.requests).onto(TracedSession)
| {"golden_diff": "diff --git a/ddtrace/contrib/requests/session.py b/ddtrace/contrib/requests/session.py\n--- a/ddtrace/contrib/requests/session.py\n+++ b/ddtrace/contrib/requests/session.py\n@@ -2,6 +2,8 @@\n \n from ddtrace.vendor.wrapt import wrap_function_wrapper as _w\n \n+from ddtrace import config, Pin\n+\n from .connection import _wrap_send\n \n \n@@ -16,3 +18,4 @@\n \n # always patch our `TracedSession` when imported\n _w(TracedSession, \"send\", _wrap_send)\n+Pin(_config=config.requests).onto(TracedSession)\n", "issue": "KeyError: 'split_by_domain' \n```\r\n File \"/Users/viperfx/.pyenv/versions/scrumgenius-env-3.6/lib/python3.6/site-packages/requests/sessions.py\", line 581, in post\r\n return self.request('POST', url, data=data, json=json, **kwargs)\r\n File \"/Users/viperfx/.pyenv/versions/scrumgenius-env-3.6/lib/python3.6/site-packages/requests/sessions.py\", line 533, in request\r\n resp = self.send(prep, **send_kwargs)\r\n File \"/Users/viperfx/.pyenv/versions/scrumgenius-env-3.6/lib/python3.6/site-packages/ddtrace/contrib/requests/connection.py\", line 73, in _wrap_send\r\n span.service = _extract_service_name(instance, span, hostname=hostname)\r\n File \"/Users/viperfx/.pyenv/versions/scrumgenius-env-3.6/lib/python3.6/site-packages/ddtrace/contrib/requests/connection.py\", line 30, in _extract_service_name\r\n if cfg['split_by_domain'] and hostname:\r\nKeyError: 'split_by_domain'\r\n```\r\n\r\nHappens on python 3.6 and 3.7\n", "before_files": [{"content": "import requests\n\nfrom ddtrace.vendor.wrapt import wrap_function_wrapper as _w\n\nfrom .connection import _wrap_send\n\n\nclass TracedSession(requests.Session):\n \"\"\"TracedSession is a requests' Session that is already traced.\n You can use it if you want a finer grained control for your\n HTTP clients.\n \"\"\"\n\n pass\n\n\n# always patch our `TracedSession` when imported\n_w(TracedSession, \"send\", _wrap_send)\n", "path": "ddtrace/contrib/requests/session.py"}], "after_files": [{"content": "import requests\n\nfrom ddtrace.vendor.wrapt import wrap_function_wrapper as _w\n\nfrom ddtrace import config, Pin\n\nfrom .connection import _wrap_send\n\n\nclass TracedSession(requests.Session):\n \"\"\"TracedSession is a requests' Session that is already traced.\n You can use it if you want a finer grained control for your\n HTTP clients.\n \"\"\"\n\n pass\n\n\n# always patch our `TracedSession` when imported\n_w(TracedSession, \"send\", _wrap_send)\nPin(_config=config.requests).onto(TracedSession)\n", "path": "ddtrace/contrib/requests/session.py"}]} | 671 | 140 |
gh_patches_debug_21677 | rasdani/github-patches | git_diff | plotly__plotly.py-4562 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
With newer versions of orjson, users need to specify the json engine explicitly (bug?)
Hey
I found out I get an
`AttributeError: partially initialized module 'orjson' has no attribute 'OPT_NON_STR_KEYS'`
if I don't specify this
`plotly.io.json.config.default_engine = 'orjson'`
when using orjson v3.6.6 (latest as of 25jan2022)
Also, a note for whoever might have this issue: you don't need to uninstall orjson if you don't want to use it. Just set the engine to 'json' explicitly.

I'm using orjson because of the performance claims, although I ran some tests switching between the two engines and they seem to yield the same results: using go.Candlestick with 10000 candlesticks and some 4-5 indicators, I get ~0.8 sec in each case for creating the plot. My purpose is to improve the Dash server performance, but it seems to make no difference (the web page still renders slower than the ticker even with 600 candles).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `packages/python/plotly/_plotly_utils/optional_imports.py`
Content:
```
1 """
2 Stand-alone module to provide information about whether optional deps exist.
3
4 """
5 from importlib import import_module
6 import logging
7 import sys
8
9 logger = logging.getLogger(__name__)
10 _not_importable = set()
11
12
13 def get_module(name, should_load=True):
14 """
15 Return module or None. Absolute import is required.
16
17 :param (str) name: Dot-separated module path. E.g., 'scipy.stats'.
18 :raise: (ImportError) Only when exc_msg is defined.
19 :return: (module|None) If import succeeds, the module will be returned.
20
21 """
22 if name in sys.modules:
23 return sys.modules[name]
24 if not should_load:
25 return None
26 if name not in _not_importable:
27 try:
28 return import_module(name)
29 except ImportError:
30 _not_importable.add(name)
31 except Exception:
32 _not_importable.add(name)
33 msg = f"Error importing optional module {name}"
34 logger.exception(msg)
35
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/packages/python/plotly/_plotly_utils/optional_imports.py b/packages/python/plotly/_plotly_utils/optional_imports.py
--- a/packages/python/plotly/_plotly_utils/optional_imports.py
+++ b/packages/python/plotly/_plotly_utils/optional_imports.py
@@ -2,6 +2,7 @@
Stand-alone module to provide information about whether optional deps exist.
"""
+
from importlib import import_module
import logging
import sys
@@ -19,10 +20,9 @@
:return: (module|None) If import succeeds, the module will be returned.
"""
- if name in sys.modules:
- return sys.modules[name]
if not should_load:
- return None
+ return sys.modules.get(name, None)
+
if name not in _not_importable:
try:
return import_module(name)
@@ -32,3 +32,5 @@
_not_importable.add(name)
msg = f"Error importing optional module {name}"
logger.exception(msg)
+
+ return None
| {"golden_diff": "diff --git a/packages/python/plotly/_plotly_utils/optional_imports.py b/packages/python/plotly/_plotly_utils/optional_imports.py\n--- a/packages/python/plotly/_plotly_utils/optional_imports.py\n+++ b/packages/python/plotly/_plotly_utils/optional_imports.py\n@@ -2,6 +2,7 @@\n Stand-alone module to provide information about whether optional deps exist.\n \n \"\"\"\n+\n from importlib import import_module\n import logging\n import sys\n@@ -19,10 +20,9 @@\n :return: (module|None) If import succeeds, the module will be returned.\n \n \"\"\"\n- if name in sys.modules:\n- return sys.modules[name]\n if not should_load:\n- return None\n+ return sys.modules.get(name, None)\n+\n if name not in _not_importable:\n try:\n return import_module(name)\n@@ -32,3 +32,5 @@\n _not_importable.add(name)\n msg = f\"Error importing optional module {name}\"\n logger.exception(msg)\n+\n+ return None\n", "issue": "With newer versions of orjson, users need to specify the json engine explicitly (bug?)\nHey\r\n\r\nI found out I get an\r\n`AttributeError: partially initialized module 'orjson' has no attribute 'OPT_NON_STR_KEYS'`\r\nif I don't specify this\r\n`plotly.io.json.config.default_engine = 'orjson'`\r\nwhen using orjson v3.6.6 (latest as of 25jan2022)\r\n\r\nAlso, additional note for whoever might have this issue: you don't need to uninstall orjson if you don't want to use it. just set the engine to 'json' explicitly. \r\n\r\nI'm using orjson because of the performance claims, although I ran some tests switching between the 2 engines and they seem to yield the same results: using go.Candlestick with 10000 candlesticks and some 4-5 indicators, getting ~0.8sec in each case for creating the plot. My purpose is to improve the dash server performace, but it seems it makes no difference (the web page still renders slower than the ticker even with 600 candles)\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nStand-alone module to provide information about whether optional deps exist.\n\n\"\"\"\nfrom importlib import import_module\nimport logging\nimport sys\n\nlogger = logging.getLogger(__name__)\n_not_importable = set()\n\n\ndef get_module(name, should_load=True):\n \"\"\"\n Return module or None. Absolute import is required.\n\n :param (str) name: Dot-separated module path. E.g., 'scipy.stats'.\n :raise: (ImportError) Only when exc_msg is defined.\n :return: (module|None) If import succeeds, the module will be returned.\n\n \"\"\"\n if name in sys.modules:\n return sys.modules[name]\n if not should_load:\n return None\n if name not in _not_importable:\n try:\n return import_module(name)\n except ImportError:\n _not_importable.add(name)\n except Exception:\n _not_importable.add(name)\n msg = f\"Error importing optional module {name}\"\n logger.exception(msg)\n", "path": "packages/python/plotly/_plotly_utils/optional_imports.py"}], "after_files": [{"content": "\"\"\"\nStand-alone module to provide information about whether optional deps exist.\n\n\"\"\"\n\nfrom importlib import import_module\nimport logging\nimport sys\n\nlogger = logging.getLogger(__name__)\n_not_importable = set()\n\n\ndef get_module(name, should_load=True):\n \"\"\"\n Return module or None. Absolute import is required.\n\n :param (str) name: Dot-separated module path. 
E.g., 'scipy.stats'.\n :raise: (ImportError) Only when exc_msg is defined.\n :return: (module|None) If import succeeds, the module will be returned.\n\n \"\"\"\n if not should_load:\n return sys.modules.get(name, None)\n\n if name not in _not_importable:\n try:\n return import_module(name)\n except ImportError:\n _not_importable.add(name)\n except Exception:\n _not_importable.add(name)\n msg = f\"Error importing optional module {name}\"\n logger.exception(msg)\n\n return None\n", "path": "packages/python/plotly/_plotly_utils/optional_imports.py"}]} | 774 | 240 |
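For the plotly record above, the patched `get_module` can be exercised on its own. The function below mirrors the `after_files` version; only the two demo calls at the bottom (and the module names used in them) are added for illustration.

```python
from importlib import import_module
import logging
import sys

logger = logging.getLogger(__name__)
_not_importable = set()


def get_module(name, should_load=True):
    """Return the module or None; with should_load=False, only report already-imported modules."""
    if not should_load:
        return sys.modules.get(name, None)   # no import side effects, no stale pre-registered module

    if name not in _not_importable:
        try:
            return import_module(name)
        except ImportError:
            _not_importable.add(name)
        except Exception:
            _not_importable.add(name)
            msg = f"Error importing optional module {name}"
            logger.exception(msg)

    return None


print(get_module("json").__name__)               # stdlib import succeeds -> json
print(get_module("orjson", should_load=False))   # not imported yet and loading disabled -> None
```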
gh_patches_debug_49037 | rasdani/github-patches | git_diff | facebookresearch__hydra-2677 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug] Fix failing tests
Several tests are broken on main
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/tutorials/basic/running_your_hydra_app/3_working_directory/my_app.py`
Content:
```
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 import os
3
4 from omegaconf import DictConfig
5
6 import hydra
7
8
9 @hydra.main(version_base=None)
10 def my_app(_cfg: DictConfig) -> None:
11 print(f"Working directory : {os.getcwd()}")
12 print(f"Output directory : {hydra.core.hydra_config.HydraConfig.get().runtime.output_dir}")
13
14
15 if __name__ == "__main__":
16 my_app()
17
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/tutorials/basic/running_your_hydra_app/3_working_directory/my_app.py b/examples/tutorials/basic/running_your_hydra_app/3_working_directory/my_app.py
--- a/examples/tutorials/basic/running_your_hydra_app/3_working_directory/my_app.py
+++ b/examples/tutorials/basic/running_your_hydra_app/3_working_directory/my_app.py
@@ -9,7 +9,9 @@
@hydra.main(version_base=None)
def my_app(_cfg: DictConfig) -> None:
print(f"Working directory : {os.getcwd()}")
- print(f"Output directory : {hydra.core.hydra_config.HydraConfig.get().runtime.output_dir}")
+ print(
+ f"Output directory : {hydra.core.hydra_config.HydraConfig.get().runtime.output_dir}"
+ )
if __name__ == "__main__":
| {"golden_diff": "diff --git a/examples/tutorials/basic/running_your_hydra_app/3_working_directory/my_app.py b/examples/tutorials/basic/running_your_hydra_app/3_working_directory/my_app.py\n--- a/examples/tutorials/basic/running_your_hydra_app/3_working_directory/my_app.py\n+++ b/examples/tutorials/basic/running_your_hydra_app/3_working_directory/my_app.py\n@@ -9,7 +9,9 @@\n @hydra.main(version_base=None)\n def my_app(_cfg: DictConfig) -> None:\n print(f\"Working directory : {os.getcwd()}\")\n- print(f\"Output directory : {hydra.core.hydra_config.HydraConfig.get().runtime.output_dir}\")\n+ print(\n+ f\"Output directory : {hydra.core.hydra_config.HydraConfig.get().runtime.output_dir}\"\n+ )\n \n \n if __name__ == \"__main__\":\n", "issue": "[Bug] Fix failing tests\nSeveral tests are broken on main\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport os\n\nfrom omegaconf import DictConfig\n\nimport hydra\n\n\[email protected](version_base=None)\ndef my_app(_cfg: DictConfig) -> None:\n print(f\"Working directory : {os.getcwd()}\")\n print(f\"Output directory : {hydra.core.hydra_config.HydraConfig.get().runtime.output_dir}\")\n\n\nif __name__ == \"__main__\":\n my_app()\n", "path": "examples/tutorials/basic/running_your_hydra_app/3_working_directory/my_app.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport os\n\nfrom omegaconf import DictConfig\n\nimport hydra\n\n\[email protected](version_base=None)\ndef my_app(_cfg: DictConfig) -> None:\n print(f\"Working directory : {os.getcwd()}\")\n print(\n f\"Output directory : {hydra.core.hydra_config.HydraConfig.get().runtime.output_dir}\"\n )\n\n\nif __name__ == \"__main__\":\n my_app()\n", "path": "examples/tutorials/basic/running_your_hydra_app/3_working_directory/my_app.py"}]} | 418 | 197 |
gh_patches_debug_66238 | rasdani/github-patches | git_diff | deepchecks__deepchecks-728 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] resources/suite_output.html file is missing when installing not via git
**Describe the bug**
can't use save_as_html because suite_output.html file is missing
**To Reproduce**
pip install deepchecks
suite_result.save_as_html()
**Expected behavior**
save as html
**Environment (please complete the following information):**
- OS: linux
- Python Version: 3.7
- Deepchecks Version: 0.3.1
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # ----------------------------------------------------------------------------
2 # Copyright (C) 2021 Deepchecks (https://www.deepchecks.com)
3 #
4 # This file is part of Deepchecks.
5 # Deepchecks is distributed under the terms of the GNU Affero General
6 # Public License (version 3 or later).
7 # You should have received a copy of the GNU Affero General Public License
8 # along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.
9 # ----------------------------------------------------------------------------
10 #
11 """
12
13 |build| |Documentation Status| |pkgVersion| |pyVersions|
14 |Maintainability| |Coverage Status|
15
16 .. image:: https://raw.githubusercontent.com/deepchecks/deepchecks/main/docs/images/deepchecks-logo-with-white-wide-back.png
17 :target: https://github.com/deepchecks/deepchecks
18
19 Deepchecks is a Python package for comprehensively validating your machine learning models and data with minimal effort.
20 This includes checks related to various types of issues, such as model performance, data integrity,
21 distribution mismatches, and more.
22
23 What Do You Need in Order to Start Validating?
24 ----------------------------------------------
25
26 Depending on your phase and what you wise to validate, you'll need a
27 subset of the following:
28
29 - Raw data (before pre-processing such as OHE, string processing,
30 etc.), with optional labels
31
32 - The model's training data with labels
33
34 - Test data (which the model isn't exposed to) with labels
35
36 - A model compatible with scikit-learn API that you wish to validate
37 (e.g. RandomForest, XGBoost)
38
39 Deepchecks validation accompanies you from the initial phase when you
40 have only raw data, through the data splits, and to the final stage of
41 having a trained model that you wish to evaluate. Accordingly, each
42 phase requires different assets for the validation. See more about
43 typical usage scenarios and the built-in suites in the
44 `docs <https://docs.deepchecks.com/?utm_source=pypi.org&utm_medium=referral&utm_campaign=readme>`__.
45
46 Installation
47 ------------
48
49 Using pip
50 ~~~~~~~~~
51
52 .. code:: bash
53
54 pip install deepchecks #--upgrade --user
55
56 Using conda
57 ~~~~~~~~~~~
58
59 .. code:: bash
60
61 conda install -c deepchecks deepchecks
62
63 .. |build| image:: https://github.com/deepchecks/deepchecks/actions/workflows/build.yml/badge.svg
64 .. |Documentation Status| image:: https://readthedocs.org/projects/deepchecks/badge/?version=latest
65 :target: https://docs.deepchecks.com/en/latest/?badge=latest
66 .. |pkgVersion| image:: https://img.shields.io/pypi/v/deepchecks
67 .. |pyVersions| image:: https://img.shields.io/pypi/pyversions/deepchecks
68 .. |Maintainability| image:: https://api.codeclimate.com/v1/badges/970b11794144139975fa/maintainability
69 :target: https://codeclimate.com/github/deepchecks/deepchecks/maintainability
70 .. |Coverage Status| image:: https://coveralls.io/repos/github/deepchecks/deepchecks/badge.svg?branch=main
71 :target: https://coveralls.io/github/deepchecks/deepchecks?branch=main
72
73 """
74
75 import setuptools
76 from setuptools import setup
77 from distutils.util import convert_path
78 import os
79
80 main_ns = {}
81 DOCLINES = (__doc__ or '').split("\n")
82
83 with open(os.path.join('./', 'VERSION')) as version_file:
84 VER = version_file.read().strip()
85
86 requirementPath = os.path.dirname(os.path.realpath(__file__)) + '/requirements.txt'
87 install_requires = []
88 if os.path.isfile(requirementPath):
89 with open(requirementPath) as f:
90 install_requires = f.read().splitlines()
91
92
93
94
95 setup(
96 name='deepchecks',
97 version=VER,
98 packages=setuptools.find_packages(),
99 install_requires=install_requires,
100 license_files = ('LICENSE', ),
101 description = DOCLINES[0],
102 long_description="\n".join(DOCLINES[2:]),
103 author = 'deepchecks',
104 author_email = '[email protected]',
105 url = 'https://github.com/deepchecks/deepchecks',
106 download_url = "https://github.com/deepchecks/deepchecks/releases/download/{0}/deepchecks-{0}.tar.gz".format(VER),
107 keywords = ['Software Development', 'Machine Learning'],
108 include_package_data=True,
109 classifiers = [
110 'Intended Audience :: Developers',
111 'Intended Audience :: Science/Research',
112 'Topic :: Software Development',
113 'Topic :: Scientific/Engineering',
114 'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',
115 'Programming Language :: Python :: 3',
116 'Programming Language :: Python :: 3.6',
117 'Programming Language :: Python :: 3.7',
118 'Programming Language :: Python :: 3.8',
119 'Programming Language :: Python :: 3.9',
120 'Programming Language :: Python :: 3.10',
121 ],
122 )
123
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -74,7 +74,6 @@
import setuptools
from setuptools import setup
-from distutils.util import convert_path
import os
main_ns = {}
@@ -89,9 +88,6 @@
with open(requirementPath) as f:
install_requires = f.read().splitlines()
-
-
-
setup(
name='deepchecks',
version=VER,
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -74,7 +74,6 @@\n \n import setuptools\n from setuptools import setup\n-from distutils.util import convert_path\n import os\n \n main_ns = {}\n@@ -89,9 +88,6 @@\n with open(requirementPath) as f:\n install_requires = f.read().splitlines()\n \n-\n-\n-\n setup(\n name='deepchecks',\n version=VER,\n", "issue": "[BUG] resources/suite_output.html file is missing when installing not via git\n**Describe the bug**\r\ncan't use save_as_html because suite_output.html file is missing\r\n\r\n**To Reproduce**\r\npip install deepchecks\r\nsuite_result.save_as_html()\r\n\r\n**Expected behavior**\r\nsave as html\r\n\r\n**Environment (please complete the following information):**\r\n - OS: linux\r\n - Python Version: 3.7\r\n - Deepchecks Version: 0.3.1\r\n\n", "before_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"\n\n|build| |Documentation Status| |pkgVersion| |pyVersions|\n|Maintainability| |Coverage Status|\n\n.. image:: https://raw.githubusercontent.com/deepchecks/deepchecks/main/docs/images/deepchecks-logo-with-white-wide-back.png\n :target: https://github.com/deepchecks/deepchecks\n\nDeepchecks is a Python package for comprehensively validating your machine learning models and data with minimal effort.\nThis includes checks related to various types of issues, such as model performance, data integrity,\ndistribution mismatches, and more.\n\nWhat Do You Need in Order to Start Validating?\n----------------------------------------------\n\nDepending on your phase and what you wise to validate, you'll need a\nsubset of the following:\n\n- Raw data (before pre-processing such as OHE, string processing,\n etc.), with optional labels\n\n- The model's training data with labels\n\n- Test data (which the model isn't exposed to) with labels\n\n- A model compatible with scikit-learn API that you wish to validate\n (e.g. RandomForest, XGBoost)\n\nDeepchecks validation accompanies you from the initial phase when you\nhave only raw data, through the data splits, and to the final stage of\nhaving a trained model that you wish to evaluate. Accordingly, each\nphase requires different assets for the validation. See more about\ntypical usage scenarios and the built-in suites in the\n`docs <https://docs.deepchecks.com/?utm_source=pypi.org&utm_medium=referral&utm_campaign=readme>`__.\n\nInstallation\n------------\n\nUsing pip\n~~~~~~~~~\n\n.. code:: bash\n\n pip install deepchecks #--upgrade --user\n\nUsing conda\n~~~~~~~~~~~\n\n.. code:: bash\n\n conda install -c deepchecks deepchecks\n\n.. |build| image:: https://github.com/deepchecks/deepchecks/actions/workflows/build.yml/badge.svg\n.. |Documentation Status| image:: https://readthedocs.org/projects/deepchecks/badge/?version=latest\n :target: https://docs.deepchecks.com/en/latest/?badge=latest\n.. |pkgVersion| image:: https://img.shields.io/pypi/v/deepchecks\n.. |pyVersions| image:: https://img.shields.io/pypi/pyversions/deepchecks\n.. 
|Maintainability| image:: https://api.codeclimate.com/v1/badges/970b11794144139975fa/maintainability\n :target: https://codeclimate.com/github/deepchecks/deepchecks/maintainability\n.. |Coverage Status| image:: https://coveralls.io/repos/github/deepchecks/deepchecks/badge.svg?branch=main\n :target: https://coveralls.io/github/deepchecks/deepchecks?branch=main\n\n\"\"\"\n\nimport setuptools\nfrom setuptools import setup\nfrom distutils.util import convert_path\nimport os\n\nmain_ns = {}\nDOCLINES = (__doc__ or '').split(\"\\n\")\n\nwith open(os.path.join('./', 'VERSION')) as version_file:\n VER = version_file.read().strip()\n\nrequirementPath = os.path.dirname(os.path.realpath(__file__)) + '/requirements.txt'\ninstall_requires = []\nif os.path.isfile(requirementPath):\n with open(requirementPath) as f:\n install_requires = f.read().splitlines()\n\n\n\n\nsetup(\n name='deepchecks',\n version=VER,\n packages=setuptools.find_packages(),\n install_requires=install_requires,\n license_files = ('LICENSE', ),\n description = DOCLINES[0],\n long_description=\"\\n\".join(DOCLINES[2:]),\n author = 'deepchecks', \n author_email = '[email protected]', \n url = 'https://github.com/deepchecks/deepchecks',\n download_url = \"https://github.com/deepchecks/deepchecks/releases/download/{0}/deepchecks-{0}.tar.gz\".format(VER),\n keywords = ['Software Development', 'Machine Learning'],\n include_package_data=True,\n classifiers = [\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Topic :: Software Development',\n 'Topic :: Scientific/Engineering',\n 'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"\n\n|build| |Documentation Status| |pkgVersion| |pyVersions|\n|Maintainability| |Coverage Status|\n\n.. image:: https://raw.githubusercontent.com/deepchecks/deepchecks/main/docs/images/deepchecks-logo-with-white-wide-back.png\n :target: https://github.com/deepchecks/deepchecks\n\nDeepchecks is a Python package for comprehensively validating your machine learning models and data with minimal effort.\nThis includes checks related to various types of issues, such as model performance, data integrity,\ndistribution mismatches, and more.\n\nWhat Do You Need in Order to Start Validating?\n----------------------------------------------\n\nDepending on your phase and what you wise to validate, you'll need a\nsubset of the following:\n\n- Raw data (before pre-processing such as OHE, string processing,\n etc.), with optional labels\n\n- The model's training data with labels\n\n- Test data (which the model isn't exposed to) with labels\n\n- A model compatible with scikit-learn API that you wish to validate\n (e.g. 
RandomForest, XGBoost)\n\nDeepchecks validation accompanies you from the initial phase when you\nhave only raw data, through the data splits, and to the final stage of\nhaving a trained model that you wish to evaluate. Accordingly, each\nphase requires different assets for the validation. See more about\ntypical usage scenarios and the built-in suites in the\n`docs <https://docs.deepchecks.com/?utm_source=pypi.org&utm_medium=referral&utm_campaign=readme>`__.\n\nInstallation\n------------\n\nUsing pip\n~~~~~~~~~\n\n.. code:: bash\n\n pip install deepchecks #--upgrade --user\n\nUsing conda\n~~~~~~~~~~~\n\n.. code:: bash\n\n conda install -c deepchecks deepchecks\n\n.. |build| image:: https://github.com/deepchecks/deepchecks/actions/workflows/build.yml/badge.svg\n.. |Documentation Status| image:: https://readthedocs.org/projects/deepchecks/badge/?version=latest\n :target: https://docs.deepchecks.com/en/latest/?badge=latest\n.. |pkgVersion| image:: https://img.shields.io/pypi/v/deepchecks\n.. |pyVersions| image:: https://img.shields.io/pypi/pyversions/deepchecks\n.. |Maintainability| image:: https://api.codeclimate.com/v1/badges/970b11794144139975fa/maintainability\n :target: https://codeclimate.com/github/deepchecks/deepchecks/maintainability\n.. |Coverage Status| image:: https://coveralls.io/repos/github/deepchecks/deepchecks/badge.svg?branch=main\n :target: https://coveralls.io/github/deepchecks/deepchecks?branch=main\n\n\"\"\"\n\nimport setuptools\nfrom setuptools import setup\nimport os\n\nmain_ns = {}\nDOCLINES = (__doc__ or '').split(\"\\n\")\n\nwith open(os.path.join('./', 'VERSION')) as version_file:\n VER = version_file.read().strip()\n\nrequirementPath = os.path.dirname(os.path.realpath(__file__)) + '/requirements.txt'\ninstall_requires = []\nif os.path.isfile(requirementPath):\n with open(requirementPath) as f:\n install_requires = f.read().splitlines()\n\nsetup(\n name='deepchecks',\n version=VER,\n packages=setuptools.find_packages(),\n install_requires=install_requires,\n license_files = ('LICENSE', ),\n description = DOCLINES[0],\n long_description=\"\\n\".join(DOCLINES[2:]),\n author = 'deepchecks', \n author_email = '[email protected]', \n url = 'https://github.com/deepchecks/deepchecks',\n download_url = \"https://github.com/deepchecks/deepchecks/releases/download/{0}/deepchecks-{0}.tar.gz\".format(VER),\n keywords = ['Software Development', 'Machine Learning'],\n include_package_data=True,\n classifiers = [\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Topic :: Software Development',\n 'Topic :: Scientific/Engineering',\n 'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n ],\n)\n", "path": "setup.py"}]} | 1,708 | 105 |
gh_patches_debug_4106 | rasdani/github-patches | git_diff | hylang__hy-1955 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make hy-history location configurable
How about an environment variable like `HY_HISTORY` that allows the user to change the location of `~/.hy-history`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `hy/completer.py`
Content:
```
1 # Copyright 2021 the authors.
2 # This file is part of Hy, which is free software licensed under the Expat
3 # license. See the LICENSE.
4
5 import contextlib
6 import os
7 import re
8 import sys
9 import builtins
10
11 import hy.macros
12 import hy.compiler
13
14
15 docomplete = True
16
17 try:
18 import readline
19 except ImportError:
20 try:
21 import pyreadline.rlmain
22 import pyreadline.unicode_helper # NOQA
23 import readline
24 except ImportError:
25 docomplete = False
26
27 if docomplete:
28 if sys.platform == 'darwin' and 'libedit' in readline.__doc__:
29 readline_bind = "bind ^I rl_complete"
30 else:
31 readline_bind = "tab: complete"
32
33
34 class Completer(object):
35
36 def __init__(self, namespace={}):
37 if not isinstance(namespace, dict):
38 raise TypeError('namespace must be a dictionary')
39 self.namespace = namespace
40 self.path = [hy.compiler._special_form_compilers,
41 builtins.__dict__,
42 namespace]
43
44 self.tag_path = []
45
46 namespace.setdefault('__macros__', {})
47 namespace.setdefault('__tags__', {})
48
49 self.path.append(namespace['__macros__'])
50 self.tag_path.append(namespace['__tags__'])
51
52 def attr_matches(self, text):
53 # Borrowed from IPython's completer
54 m = re.match(r"(\S+(\.[\w-]+)*)\.([\w-]*)$", text)
55
56 if m:
57 expr, attr = m.group(1, 3)
58 attr = attr.replace("-", "_")
59 expr = expr.replace("-", "_")
60 else:
61 return []
62
63 try:
64 obj = eval(expr, self.namespace)
65 words = dir(obj)
66 except Exception:
67 return []
68
69 n = len(attr)
70 matches = []
71 for w in words:
72 if w[:n] == attr:
73 matches.append("{}.{}".format(
74 expr.replace("_", "-"), w.replace("_", "-")))
75 return matches
76
77 def global_matches(self, text):
78 matches = []
79 for p in self.path:
80 for k in p.keys():
81 if isinstance(k, str):
82 k = k.replace("_", "-")
83 if k.startswith(text):
84 matches.append(k)
85 return matches
86
87 def tag_matches(self, text):
88 text = text[1:]
89 matches = []
90 for p in self.tag_path:
91 for k in p.keys():
92 if isinstance(k, str):
93 if k.startswith(text):
94 matches.append("#{}".format(k))
95 return matches
96
97 def complete(self, text, state):
98 if text.startswith("#"):
99 matches = self.tag_matches(text)
100 elif "." in text:
101 matches = self.attr_matches(text)
102 else:
103 matches = self.global_matches(text)
104 try:
105 return matches[state]
106 except IndexError:
107 return None
108
109
110 @contextlib.contextmanager
111 def completion(completer=None):
112 delims = "()[]{} "
113 if not completer:
114 completer = Completer()
115
116 if docomplete:
117 readline.set_completer(completer.complete)
118 readline.set_completer_delims(delims)
119
120 history = os.path.expanduser("~/.hy-history")
121 readline.parse_and_bind("set blink-matching-paren on")
122
123 try:
124 readline.read_history_file(history)
125 except IOError:
126 pass
127
128 readline.parse_and_bind(readline_bind)
129
130 try:
131 yield
132 finally:
133 if docomplete:
134 try:
135 readline.write_history_file(history)
136 except IOError:
137 pass
138
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/hy/completer.py b/hy/completer.py
--- a/hy/completer.py
+++ b/hy/completer.py
@@ -117,7 +117,8 @@
readline.set_completer(completer.complete)
readline.set_completer_delims(delims)
- history = os.path.expanduser("~/.hy-history")
+ history = os.environ.get(
+ "HY_HISTORY", os.path.expanduser("~/.hy-history"))
readline.parse_and_bind("set blink-matching-paren on")
try:
| {"golden_diff": "diff --git a/hy/completer.py b/hy/completer.py\n--- a/hy/completer.py\n+++ b/hy/completer.py\n@@ -117,7 +117,8 @@\n readline.set_completer(completer.complete)\n readline.set_completer_delims(delims)\n \n- history = os.path.expanduser(\"~/.hy-history\")\n+ history = os.environ.get(\n+ \"HY_HISTORY\", os.path.expanduser(\"~/.hy-history\"))\n readline.parse_and_bind(\"set blink-matching-paren on\")\n \n try:\n", "issue": "Make hy-history location configurable\nHow about an environment variable like `HY_HISTORY` that allows the user to change the location of `~/.hy-history`.\n", "before_files": [{"content": "# Copyright 2021 the authors.\n# This file is part of Hy, which is free software licensed under the Expat\n# license. See the LICENSE.\n\nimport contextlib\nimport os\nimport re\nimport sys\nimport builtins\n\nimport hy.macros\nimport hy.compiler\n\n\ndocomplete = True\n\ntry:\n import readline\nexcept ImportError:\n try:\n import pyreadline.rlmain\n import pyreadline.unicode_helper # NOQA\n import readline\n except ImportError:\n docomplete = False\n\nif docomplete:\n if sys.platform == 'darwin' and 'libedit' in readline.__doc__:\n readline_bind = \"bind ^I rl_complete\"\n else:\n readline_bind = \"tab: complete\"\n\n\nclass Completer(object):\n\n def __init__(self, namespace={}):\n if not isinstance(namespace, dict):\n raise TypeError('namespace must be a dictionary')\n self.namespace = namespace\n self.path = [hy.compiler._special_form_compilers,\n builtins.__dict__,\n namespace]\n\n self.tag_path = []\n\n namespace.setdefault('__macros__', {})\n namespace.setdefault('__tags__', {})\n\n self.path.append(namespace['__macros__'])\n self.tag_path.append(namespace['__tags__'])\n\n def attr_matches(self, text):\n # Borrowed from IPython's completer\n m = re.match(r\"(\\S+(\\.[\\w-]+)*)\\.([\\w-]*)$\", text)\n\n if m:\n expr, attr = m.group(1, 3)\n attr = attr.replace(\"-\", \"_\")\n expr = expr.replace(\"-\", \"_\")\n else:\n return []\n\n try:\n obj = eval(expr, self.namespace)\n words = dir(obj)\n except Exception:\n return []\n\n n = len(attr)\n matches = []\n for w in words:\n if w[:n] == attr:\n matches.append(\"{}.{}\".format(\n expr.replace(\"_\", \"-\"), w.replace(\"_\", \"-\")))\n return matches\n\n def global_matches(self, text):\n matches = []\n for p in self.path:\n for k in p.keys():\n if isinstance(k, str):\n k = k.replace(\"_\", \"-\")\n if k.startswith(text):\n matches.append(k)\n return matches\n\n def tag_matches(self, text):\n text = text[1:]\n matches = []\n for p in self.tag_path:\n for k in p.keys():\n if isinstance(k, str):\n if k.startswith(text):\n matches.append(\"#{}\".format(k))\n return matches\n\n def complete(self, text, state):\n if text.startswith(\"#\"):\n matches = self.tag_matches(text)\n elif \".\" in text:\n matches = self.attr_matches(text)\n else:\n matches = self.global_matches(text)\n try:\n return matches[state]\n except IndexError:\n return None\n\n\[email protected]\ndef completion(completer=None):\n delims = \"()[]{} \"\n if not completer:\n completer = Completer()\n\n if docomplete:\n readline.set_completer(completer.complete)\n readline.set_completer_delims(delims)\n\n history = os.path.expanduser(\"~/.hy-history\")\n readline.parse_and_bind(\"set blink-matching-paren on\")\n\n try:\n readline.read_history_file(history)\n except IOError:\n pass\n\n readline.parse_and_bind(readline_bind)\n\n try:\n yield\n finally:\n if docomplete:\n try:\n readline.write_history_file(history)\n except IOError:\n pass\n", "path": 
"hy/completer.py"}], "after_files": [{"content": "# Copyright 2021 the authors.\n# This file is part of Hy, which is free software licensed under the Expat\n# license. See the LICENSE.\n\nimport contextlib\nimport os\nimport re\nimport sys\nimport builtins\n\nimport hy.macros\nimport hy.compiler\n\n\ndocomplete = True\n\ntry:\n import readline\nexcept ImportError:\n try:\n import pyreadline.rlmain\n import pyreadline.unicode_helper # NOQA\n import readline\n except ImportError:\n docomplete = False\n\nif docomplete:\n if sys.platform == 'darwin' and 'libedit' in readline.__doc__:\n readline_bind = \"bind ^I rl_complete\"\n else:\n readline_bind = \"tab: complete\"\n\n\nclass Completer(object):\n\n def __init__(self, namespace={}):\n if not isinstance(namespace, dict):\n raise TypeError('namespace must be a dictionary')\n self.namespace = namespace\n self.path = [hy.compiler._special_form_compilers,\n builtins.__dict__,\n namespace]\n\n self.tag_path = []\n\n namespace.setdefault('__macros__', {})\n namespace.setdefault('__tags__', {})\n\n self.path.append(namespace['__macros__'])\n self.tag_path.append(namespace['__tags__'])\n\n def attr_matches(self, text):\n # Borrowed from IPython's completer\n m = re.match(r\"(\\S+(\\.[\\w-]+)*)\\.([\\w-]*)$\", text)\n\n if m:\n expr, attr = m.group(1, 3)\n attr = attr.replace(\"-\", \"_\")\n expr = expr.replace(\"-\", \"_\")\n else:\n return []\n\n try:\n obj = eval(expr, self.namespace)\n words = dir(obj)\n except Exception:\n return []\n\n n = len(attr)\n matches = []\n for w in words:\n if w[:n] == attr:\n matches.append(\"{}.{}\".format(\n expr.replace(\"_\", \"-\"), w.replace(\"_\", \"-\")))\n return matches\n\n def global_matches(self, text):\n matches = []\n for p in self.path:\n for k in p.keys():\n if isinstance(k, str):\n k = k.replace(\"_\", \"-\")\n if k.startswith(text):\n matches.append(k)\n return matches\n\n def tag_matches(self, text):\n text = text[1:]\n matches = []\n for p in self.tag_path:\n for k in p.keys():\n if isinstance(k, str):\n if k.startswith(text):\n matches.append(\"#{}\".format(k))\n return matches\n\n def complete(self, text, state):\n if text.startswith(\"#\"):\n matches = self.tag_matches(text)\n elif \".\" in text:\n matches = self.attr_matches(text)\n else:\n matches = self.global_matches(text)\n try:\n return matches[state]\n except IndexError:\n return None\n\n\[email protected]\ndef completion(completer=None):\n delims = \"()[]{} \"\n if not completer:\n completer = Completer()\n\n if docomplete:\n readline.set_completer(completer.complete)\n readline.set_completer_delims(delims)\n\n history = os.environ.get(\n \"HY_HISTORY\", os.path.expanduser(\"~/.hy-history\"))\n readline.parse_and_bind(\"set blink-matching-paren on\")\n\n try:\n readline.read_history_file(history)\n except IOError:\n pass\n\n readline.parse_and_bind(readline_bind)\n\n try:\n yield\n finally:\n if docomplete:\n try:\n readline.write_history_file(history)\n except IOError:\n pass\n", "path": "hy/completer.py"}]} | 1,368 | 127 |
gh_patches_debug_58411 | rasdani/github-patches | git_diff | web2py__web2py-1871 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
contrib/pg8000 is old and causes weird postgres errors
Please update the contrib/pg8000 driver to the current version.
Otherwise errors like Broken Pipe, Operationalerror,.. occur,
- at least for postgres 9.6,
- especially for long running task (ie. scheduler, where they are not properly handled (scheduler workers will restart and earlier run rmains as RUNNING).
related links:
https://github.com/mfenniak/pg8000/issues/73
https://groups.google.com/forum/#!topic/web2py/HAeJevtGtdU
..in copy into issues: web2py/web2py, web2py/pydal
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 from setuptools import setup
4 from gluon.fileutils import tar, untar, read_file, write_file
5 import tarfile
6 import sys
7
8
9 def tar(file, filelist, expression='^.+$'):
10 """
11 tars dir/files into file, only tars file that match expression
12 """
13
14 tar = tarfile.TarFile(file, 'w')
15 try:
16 for element in filelist:
17 try:
18 for file in listdir(element, expression, add_dirs=True):
19 tar.add(os.path.join(element, file), file, False)
20 except:
21 tar.add(element)
22 finally:
23 tar.close()
24
25
26 def start():
27 if 'sdist' in sys.argv:
28 tar('gluon/env.tar', ['applications', 'VERSION',
29 'extras/icons/splashlogo.gif'])
30
31 setup(name='web2py',
32 version=read_file("VERSION").split()[1],
33 description="""full-stack framework for rapid development and prototyping
34 of secure database-driven web-based applications, written and
35 programmable in Python.""",
36 long_description="""
37 Everything in one package with no dependencies. Development, deployment,
38 debugging, testing, database administration and maintenance of applications can
39 be done via the provided web interface. web2py has no configuration files,
40 requires no installation, can run off a USB drive. web2py uses Python for the
41 Model, the Views and the Controllers, has a built-in ticketing system to manage
42 errors, an internationalization engine, works with SQLite, PostgreSQL, MySQL,
43 MSSQL, FireBird, Oracle, IBM DB2, Informix, Ingres, sybase and Google App Engine via a
44 Database Abstraction Layer. web2py includes libraries to handle
45 HTML/XML, RSS, ATOM, CSV, RTF, JSON, AJAX, XMLRPC, WIKI markup. Production
46 ready, capable of upload/download streaming of very large files, and always
47 backward compatible.
48 """,
49 author='Massimo Di Pierro',
50 author_email='[email protected]',
51 license='http://web2py.com/examples/default/license',
52 classifiers=["Development Status :: 5 - Production/Stable"],
53 url='http://web2py.com',
54 platforms='Windows, Linux, Mac, Unix,Windows Mobile',
55 packages=['gluon',
56 'gluon/contrib',
57 'gluon/contrib/gateways',
58 'gluon/contrib/login_methods',
59 'gluon/contrib/markdown',
60 'gluon/contrib/markmin',
61 'gluon/contrib/memcache',
62 'gluon/contrib/fpdf',
63 'gluon/contrib/pymysql',
64 'gluon/contrib/pyrtf',
65 'gluon/contrib/pysimplesoap',
66 'gluon/contrib/pg8000',
67 'gluon/contrib/plural_rules',
68 'gluon/contrib/minify',
69 'gluon/contrib/pyaes',
70 'gluon/contrib/pyuca',
71 'gluon/tests',
72 ],
73 package_data={'gluon': ['env.tar']},
74 # scripts=['w2p_apps', 'w2p_run', 'w2p_clone'],
75 )
76
77 if __name__ == '__main__':
78 #print "web2py does not require installation and"
79 #print "you should just start it with:"
80 #print
81 #print "$ python web2py.py"
82 #print
83 #print "are you sure you want to install it anyway (y/n)?"
84 #s = raw_input('>')
85 #if s.lower()[:1]=='y':
86 start()
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -63,7 +63,6 @@
'gluon/contrib/pymysql',
'gluon/contrib/pyrtf',
'gluon/contrib/pysimplesoap',
- 'gluon/contrib/pg8000',
'gluon/contrib/plural_rules',
'gluon/contrib/minify',
'gluon/contrib/pyaes',
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -63,7 +63,6 @@\n 'gluon/contrib/pymysql',\n 'gluon/contrib/pyrtf',\n 'gluon/contrib/pysimplesoap',\n- 'gluon/contrib/pg8000',\n 'gluon/contrib/plural_rules',\n 'gluon/contrib/minify',\n 'gluon/contrib/pyaes',\n", "issue": "contrib/pg8000 is old and causes weird postgres errors\nPlease update the contrib/pg8000 driver to the current version.\r\nOtherwise errors like Broken Pipe, Operationalerror,.. occur,\r\n- at least for postgres 9.6,\r\n- especially for long running task (ie. scheduler, where they are not properly handled (scheduler workers will restart and earlier run rmains as RUNNING).\r\n\r\nrelated links:\r\nhttps://github.com/mfenniak/pg8000/issues/73\r\nhttps://groups.google.com/forum/#!topic/web2py/HAeJevtGtdU\r\n\r\n..in copy into issues: web2py/web2py, web2py/pydal\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup\nfrom gluon.fileutils import tar, untar, read_file, write_file\nimport tarfile\nimport sys\n\n\ndef tar(file, filelist, expression='^.+$'):\n \"\"\"\n tars dir/files into file, only tars file that match expression\n \"\"\"\n\n tar = tarfile.TarFile(file, 'w')\n try:\n for element in filelist:\n try:\n for file in listdir(element, expression, add_dirs=True):\n tar.add(os.path.join(element, file), file, False)\n except:\n tar.add(element)\n finally:\n tar.close()\n\n\ndef start():\n if 'sdist' in sys.argv:\n tar('gluon/env.tar', ['applications', 'VERSION',\n 'extras/icons/splashlogo.gif'])\n\n setup(name='web2py',\n version=read_file(\"VERSION\").split()[1],\n description=\"\"\"full-stack framework for rapid development and prototyping\n of secure database-driven web-based applications, written and\n programmable in Python.\"\"\",\n long_description=\"\"\"\n Everything in one package with no dependencies. Development, deployment,\n debugging, testing, database administration and maintenance of applications can\n be done via the provided web interface. web2py has no configuration files,\n requires no installation, can run off a USB drive. web2py uses Python for the\n Model, the Views and the Controllers, has a built-in ticketing system to manage\n errors, an internationalization engine, works with SQLite, PostgreSQL, MySQL,\n MSSQL, FireBird, Oracle, IBM DB2, Informix, Ingres, sybase and Google App Engine via a\n Database Abstraction Layer. web2py includes libraries to handle\n HTML/XML, RSS, ATOM, CSV, RTF, JSON, AJAX, XMLRPC, WIKI markup. 
Production\n ready, capable of upload/download streaming of very large files, and always\n backward compatible.\n \"\"\",\n author='Massimo Di Pierro',\n author_email='[email protected]',\n license='http://web2py.com/examples/default/license',\n classifiers=[\"Development Status :: 5 - Production/Stable\"],\n url='http://web2py.com',\n platforms='Windows, Linux, Mac, Unix,Windows Mobile',\n packages=['gluon',\n 'gluon/contrib',\n 'gluon/contrib/gateways',\n 'gluon/contrib/login_methods',\n 'gluon/contrib/markdown',\n 'gluon/contrib/markmin',\n 'gluon/contrib/memcache',\n 'gluon/contrib/fpdf',\n 'gluon/contrib/pymysql',\n 'gluon/contrib/pyrtf',\n 'gluon/contrib/pysimplesoap',\n 'gluon/contrib/pg8000',\n 'gluon/contrib/plural_rules',\n 'gluon/contrib/minify',\n 'gluon/contrib/pyaes',\n 'gluon/contrib/pyuca',\n 'gluon/tests',\n ],\n package_data={'gluon': ['env.tar']},\n# scripts=['w2p_apps', 'w2p_run', 'w2p_clone'],\n )\n\nif __name__ == '__main__':\n #print \"web2py does not require installation and\"\n #print \"you should just start it with:\"\n #print\n #print \"$ python web2py.py\"\n #print\n #print \"are you sure you want to install it anyway (y/n)?\"\n #s = raw_input('>')\n #if s.lower()[:1]=='y':\n start()\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup\nfrom gluon.fileutils import tar, untar, read_file, write_file\nimport tarfile\nimport sys\n\n\ndef tar(file, filelist, expression='^.+$'):\n \"\"\"\n tars dir/files into file, only tars file that match expression\n \"\"\"\n\n tar = tarfile.TarFile(file, 'w')\n try:\n for element in filelist:\n try:\n for file in listdir(element, expression, add_dirs=True):\n tar.add(os.path.join(element, file), file, False)\n except:\n tar.add(element)\n finally:\n tar.close()\n\n\ndef start():\n if 'sdist' in sys.argv:\n tar('gluon/env.tar', ['applications', 'VERSION',\n 'extras/icons/splashlogo.gif'])\n\n setup(name='web2py',\n version=read_file(\"VERSION\").split()[1],\n description=\"\"\"full-stack framework for rapid development and prototyping\n of secure database-driven web-based applications, written and\n programmable in Python.\"\"\",\n long_description=\"\"\"\n Everything in one package with no dependencies. Development, deployment,\n debugging, testing, database administration and maintenance of applications can\n be done via the provided web interface. web2py has no configuration files,\n requires no installation, can run off a USB drive. web2py uses Python for the\n Model, the Views and the Controllers, has a built-in ticketing system to manage\n errors, an internationalization engine, works with SQLite, PostgreSQL, MySQL,\n MSSQL, FireBird, Oracle, IBM DB2, Informix, Ingres, sybase and Google App Engine via a\n Database Abstraction Layer. web2py includes libraries to handle\n HTML/XML, RSS, ATOM, CSV, RTF, JSON, AJAX, XMLRPC, WIKI markup. 
Production\n ready, capable of upload/download streaming of very large files, and always\n backward compatible.\n \"\"\",\n author='Massimo Di Pierro',\n author_email='[email protected]',\n license='http://web2py.com/examples/default/license',\n classifiers=[\"Development Status :: 5 - Production/Stable\"],\n url='http://web2py.com',\n platforms='Windows, Linux, Mac, Unix,Windows Mobile',\n packages=['gluon',\n 'gluon/contrib',\n 'gluon/contrib/gateways',\n 'gluon/contrib/login_methods',\n 'gluon/contrib/markdown',\n 'gluon/contrib/markmin',\n 'gluon/contrib/memcache',\n 'gluon/contrib/fpdf',\n 'gluon/contrib/pymysql',\n 'gluon/contrib/pyrtf',\n 'gluon/contrib/pysimplesoap',\n 'gluon/contrib/plural_rules',\n 'gluon/contrib/minify',\n 'gluon/contrib/pyaes',\n 'gluon/contrib/pyuca',\n 'gluon/tests',\n ],\n package_data={'gluon': ['env.tar']},\n# scripts=['w2p_apps', 'w2p_run', 'w2p_clone'],\n )\n\nif __name__ == '__main__':\n #print \"web2py does not require installation and\"\n #print \"you should just start it with:\"\n #print\n #print \"$ python web2py.py\"\n #print\n #print \"are you sure you want to install it anyway (y/n)?\"\n #s = raw_input('>')\n #if s.lower()[:1]=='y':\n start()\n", "path": "setup.py"}]} | 1,370 | 113 |
gh_patches_debug_18335 | rasdani/github-patches | git_diff | searx__searx-1301 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Gigablast crash
Greetings,
I have been experimenting with SearX lately and have been seeing this message:
"
Engines cannot retrieve results:
gigablast (unexpected crash: No JSON object could be decoded)
"
Seems like something is wrong with the Gigablast driver but I am not sure how to fix it.
I'm using: searx - 0.14.0
Thanks
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `searx/engines/gigablast.py`
Content:
```
1 """
2 Gigablast (Web)
3
4 @website https://gigablast.com
5 @provide-api yes (https://gigablast.com/api.html)
6
7 @using-api yes
8 @results XML
9 @stable yes
10 @parse url, title, content
11 """
12
13 import random
14 from json import loads
15 from time import time
16 from lxml.html import fromstring
17 from searx.url_utils import urlencode
18
19 # engine dependent config
20 categories = ['general']
21 paging = True
22 number_of_results = 10
23 language_support = True
24 safesearch = True
25
26 # search-url
27 base_url = 'https://gigablast.com/'
28 search_string = 'search?{query}'\
29 '&n={number_of_results}'\
30 '&c=main'\
31 '&s={offset}'\
32 '&format=json'\
33 '&qh=0'\
34 '&qlang={lang}'\
35 '&ff={safesearch}'\
36 '&rxiec={rxieu}'\
37 '&rand={rxikd}' # current unix timestamp
38
39 # specific xpath variables
40 results_xpath = '//response//result'
41 url_xpath = './/url'
42 title_xpath = './/title'
43 content_xpath = './/sum'
44
45 supported_languages_url = 'https://gigablast.com/search?&rxikd=1'
46
47
48 # do search-request
49 def request(query, params):
50 offset = (params['pageno'] - 1) * number_of_results
51
52 language = params['language'].replace('-', '_').lower()
53 if language.split('-')[0] != 'zh':
54 language = language.split('-')[0]
55
56 if params['safesearch'] >= 1:
57 safesearch = 1
58 else:
59 safesearch = 0
60
61 # rxieu is some kind of hash from the search query, but accepts random atm
62 search_path = search_string.format(query=urlencode({'q': query}),
63 offset=offset,
64 number_of_results=number_of_results,
65 rxikd=int(time() * 1000),
66 rxieu=random.randint(1000000000, 9999999999),
67 lang=language,
68 safesearch=safesearch)
69
70 params['url'] = base_url + search_path
71
72 return params
73
74
75 # get response from search-request
76 def response(resp):
77 results = []
78
79 # parse results
80 response_json = loads(resp.text)
81
82 for result in response_json['results']:
83 # append result
84 results.append({'url': result['url'],
85 'title': result['title'],
86 'content': result['sum']})
87
88 # return results
89 return results
90
91
92 # get supported languages from their site
93 def _fetch_supported_languages(resp):
94 supported_languages = []
95 dom = fromstring(resp.text)
96 links = dom.xpath('//span[@id="menu2"]/a')
97 for link in links:
98 href = link.xpath('./@href')[0].split('lang%3A')
99 if len(href) == 2:
100 code = href[1].split('_')
101 if len(code) == 2:
102 code = code[0] + '-' + code[1].upper()
103 else:
104 code = code[0]
105 supported_languages.append(code)
106
107 return supported_languages
108
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/searx/engines/gigablast.py b/searx/engines/gigablast.py
--- a/searx/engines/gigablast.py
+++ b/searx/engines/gigablast.py
@@ -34,6 +34,7 @@
'&qlang={lang}'\
'&ff={safesearch}'\
'&rxiec={rxieu}'\
+ '&ulse={ulse}'\
'&rand={rxikd}' # current unix timestamp
# specific xpath variables
@@ -64,6 +65,7 @@
number_of_results=number_of_results,
rxikd=int(time() * 1000),
rxieu=random.randint(1000000000, 9999999999),
+ ulse=random.randint(100000000, 999999999),
lang=language,
safesearch=safesearch)
| {"golden_diff": "diff --git a/searx/engines/gigablast.py b/searx/engines/gigablast.py\n--- a/searx/engines/gigablast.py\n+++ b/searx/engines/gigablast.py\n@@ -34,6 +34,7 @@\n '&qlang={lang}'\\\n '&ff={safesearch}'\\\n '&rxiec={rxieu}'\\\n+ '&ulse={ulse}'\\\n '&rand={rxikd}' # current unix timestamp\n \n # specific xpath variables\n@@ -64,6 +65,7 @@\n number_of_results=number_of_results,\n rxikd=int(time() * 1000),\n rxieu=random.randint(1000000000, 9999999999),\n+ ulse=random.randint(100000000, 999999999),\n lang=language,\n safesearch=safesearch)\n", "issue": "Gigablast crash\nGreetings,\r\n\r\nI have been experimenting with SearX lately and have been seeing this message:\r\n\r\n\"\r\nEngines cannot retrieve results:\r\n\r\ngigablast (unexpected crash: No JSON object could be decoded)\r\n\"\r\n\r\nSeems like something is wrong with the Gigablast driver but I am not sure how to fix it.\r\n\r\nI'm using: searx - 0.14.0 \r\n\r\nThanks\n", "before_files": [{"content": "\"\"\"\n Gigablast (Web)\n\n @website https://gigablast.com\n @provide-api yes (https://gigablast.com/api.html)\n\n @using-api yes\n @results XML\n @stable yes\n @parse url, title, content\n\"\"\"\n\nimport random\nfrom json import loads\nfrom time import time\nfrom lxml.html import fromstring\nfrom searx.url_utils import urlencode\n\n# engine dependent config\ncategories = ['general']\npaging = True\nnumber_of_results = 10\nlanguage_support = True\nsafesearch = True\n\n# search-url\nbase_url = 'https://gigablast.com/'\nsearch_string = 'search?{query}'\\\n '&n={number_of_results}'\\\n '&c=main'\\\n '&s={offset}'\\\n '&format=json'\\\n '&qh=0'\\\n '&qlang={lang}'\\\n '&ff={safesearch}'\\\n '&rxiec={rxieu}'\\\n '&rand={rxikd}' # current unix timestamp\n\n# specific xpath variables\nresults_xpath = '//response//result'\nurl_xpath = './/url'\ntitle_xpath = './/title'\ncontent_xpath = './/sum'\n\nsupported_languages_url = 'https://gigablast.com/search?&rxikd=1'\n\n\n# do search-request\ndef request(query, params):\n offset = (params['pageno'] - 1) * number_of_results\n\n language = params['language'].replace('-', '_').lower()\n if language.split('-')[0] != 'zh':\n language = language.split('-')[0]\n\n if params['safesearch'] >= 1:\n safesearch = 1\n else:\n safesearch = 0\n\n # rxieu is some kind of hash from the search query, but accepts random atm\n search_path = search_string.format(query=urlencode({'q': query}),\n offset=offset,\n number_of_results=number_of_results,\n rxikd=int(time() * 1000),\n rxieu=random.randint(1000000000, 9999999999),\n lang=language,\n safesearch=safesearch)\n\n params['url'] = base_url + search_path\n\n return params\n\n\n# get response from search-request\ndef response(resp):\n results = []\n\n # parse results\n response_json = loads(resp.text)\n\n for result in response_json['results']:\n # append result\n results.append({'url': result['url'],\n 'title': result['title'],\n 'content': result['sum']})\n\n # return results\n return results\n\n\n# get supported languages from their site\ndef _fetch_supported_languages(resp):\n supported_languages = []\n dom = fromstring(resp.text)\n links = dom.xpath('//span[@id=\"menu2\"]/a')\n for link in links:\n href = link.xpath('./@href')[0].split('lang%3A')\n if len(href) == 2:\n code = href[1].split('_')\n if len(code) == 2:\n code = code[0] + '-' + code[1].upper()\n else:\n code = code[0]\n supported_languages.append(code)\n\n return supported_languages\n", "path": "searx/engines/gigablast.py"}], "after_files": [{"content": "\"\"\"\n Gigablast (Web)\n\n 
@website https://gigablast.com\n @provide-api yes (https://gigablast.com/api.html)\n\n @using-api yes\n @results XML\n @stable yes\n @parse url, title, content\n\"\"\"\n\nimport random\nfrom json import loads\nfrom time import time\nfrom lxml.html import fromstring\nfrom searx.url_utils import urlencode\n\n# engine dependent config\ncategories = ['general']\npaging = True\nnumber_of_results = 10\nlanguage_support = True\nsafesearch = True\n\n# search-url\nbase_url = 'https://gigablast.com/'\nsearch_string = 'search?{query}'\\\n '&n={number_of_results}'\\\n '&c=main'\\\n '&s={offset}'\\\n '&format=json'\\\n '&qh=0'\\\n '&qlang={lang}'\\\n '&ff={safesearch}'\\\n '&rxiec={rxieu}'\\\n '&ulse={ulse}'\\\n '&rand={rxikd}' # current unix timestamp\n\n# specific xpath variables\nresults_xpath = '//response//result'\nurl_xpath = './/url'\ntitle_xpath = './/title'\ncontent_xpath = './/sum'\n\nsupported_languages_url = 'https://gigablast.com/search?&rxikd=1'\n\n\n# do search-request\ndef request(query, params):\n offset = (params['pageno'] - 1) * number_of_results\n\n language = params['language'].replace('-', '_').lower()\n if language.split('-')[0] != 'zh':\n language = language.split('-')[0]\n\n if params['safesearch'] >= 1:\n safesearch = 1\n else:\n safesearch = 0\n\n # rxieu is some kind of hash from the search query, but accepts random atm\n search_path = search_string.format(query=urlencode({'q': query}),\n offset=offset,\n number_of_results=number_of_results,\n rxikd=int(time() * 1000),\n rxieu=random.randint(1000000000, 9999999999),\n ulse=random.randint(100000000, 999999999),\n lang=language,\n safesearch=safesearch)\n\n params['url'] = base_url + search_path\n\n return params\n\n\n# get response from search-request\ndef response(resp):\n results = []\n\n # parse results\n response_json = loads(resp.text)\n\n for result in response_json['results']:\n # append result\n results.append({'url': result['url'],\n 'title': result['title'],\n 'content': result['sum']})\n\n # return results\n return results\n\n\n# get supported languages from their site\ndef _fetch_supported_languages(resp):\n supported_languages = []\n dom = fromstring(resp.text)\n links = dom.xpath('//span[@id=\"menu2\"]/a')\n for link in links:\n href = link.xpath('./@href')[0].split('lang%3A')\n if len(href) == 2:\n code = href[1].split('_')\n if len(code) == 2:\n code = code[0] + '-' + code[1].upper()\n else:\n code = code[0]\n supported_languages.append(code)\n\n return supported_languages\n", "path": "searx/engines/gigablast.py"}]} | 1,303 | 228 |
gh_patches_debug_17435 | rasdani/github-patches | git_diff | freedomofpress__securedrop-2756 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Failure to invalidate session when user resets their own password
## Description
When a user resets their own password, their session is not invalidated.
## Steps to Reproduce
1. User logs in
2. User resets password
## Expected Behavior
User is logged out and is asked to use their new password to log in
## Actual Behavior
User can continue to browse without having to enter their new password again
## Comments
Related tickets: #2300, #880
--- END ISSUE ---
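For illustration, a minimal Flask sketch of the expected post-reset behaviour (the route and session-key names here are assumptions for the sketch, not necessarily the exact SecureDrop ones):

```python
from flask import Flask, redirect, session, url_for

app = Flask(__name__)
app.secret_key = "dev"  # placeholder secret, only for this sketch


@app.route("/login")
def login():
    return "please log in with your new password"


@app.route("/new-password", methods=["POST"])
def new_password():
    # ... validate the user and store the new password here ...
    # Invalidate the current session so the old login can no longer be used.
    session.pop("uid", None)
    session.pop("expires", None)
    return redirect(url_for("login"))
```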
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `securedrop/journalist_app/account.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 from flask import (Blueprint, render_template, request, g, redirect, url_for,
4 flash)
5 from flask_babel import gettext
6
7 from db import db_session
8 from journalist_app.utils import (make_password, set_diceware_password,
9 validate_user)
10
11
12 def make_blueprint(config):
13 view = Blueprint('account', __name__)
14
15 @view.route('/account', methods=('GET',))
16 def edit():
17 password = make_password(config)
18 return render_template('edit_account.html',
19 password=password)
20
21 @view.route('/new-password', methods=('POST',))
22 def new_password():
23 user = g.user
24 current_password = request.form.get('current_password')
25 token = request.form.get('token')
26 error_message = gettext('Incorrect password or two-factor code.')
27 # If the user is validated, change their password
28 if validate_user(user.username, current_password, token,
29 error_message):
30 password = request.form.get('password')
31 set_diceware_password(user, password)
32 return redirect(url_for('account.edit'))
33
34 @view.route('/2fa', methods=('GET', 'POST'))
35 def new_two_factor():
36 if request.method == 'POST':
37 token = request.form['token']
38 if g.user.verify_token(token):
39 flash(gettext("Token in two-factor authentication verified."),
40 "notification")
41 return redirect(url_for('account.edit'))
42 else:
43 flash(gettext(
44 "Could not verify token in two-factor authentication."),
45 "error")
46
47 return render_template('account_new_two_factor.html', user=g.user)
48
49 @view.route('/reset-2fa-totp', methods=['POST'])
50 def reset_two_factor_totp():
51 g.user.is_totp = True
52 g.user.regenerate_totp_shared_secret()
53 db_session.commit()
54 return redirect(url_for('account.new_two_factor'))
55
56 @view.route('/reset-2fa-hotp', methods=['POST'])
57 def reset_two_factor_hotp():
58 otp_secret = request.form.get('otp_secret', None)
59 if otp_secret:
60 g.user.set_hotp_secret(otp_secret)
61 db_session.commit()
62 return redirect(url_for('account.new_two_factor'))
63 else:
64 return render_template('account_edit_hotp_secret.html')
65
66 return view
67
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/securedrop/journalist_app/account.py b/securedrop/journalist_app/account.py
--- a/securedrop/journalist_app/account.py
+++ b/securedrop/journalist_app/account.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
from flask import (Blueprint, render_template, request, g, redirect, url_for,
- flash)
+ flash, session)
from flask_babel import gettext
from db import db_session
@@ -29,6 +29,9 @@
error_message):
password = request.form.get('password')
set_diceware_password(user, password)
+ session.pop('uid', None)
+ session.pop('expires', None)
+ return redirect(url_for('main.login'))
return redirect(url_for('account.edit'))
@view.route('/2fa', methods=('GET', 'POST'))
| {"golden_diff": "diff --git a/securedrop/journalist_app/account.py b/securedrop/journalist_app/account.py\n--- a/securedrop/journalist_app/account.py\n+++ b/securedrop/journalist_app/account.py\n@@ -1,7 +1,7 @@\n # -*- coding: utf-8 -*-\n \n from flask import (Blueprint, render_template, request, g, redirect, url_for,\n- flash)\n+ flash, session)\n from flask_babel import gettext\n \n from db import db_session\n@@ -29,6 +29,9 @@\n error_message):\n password = request.form.get('password')\n set_diceware_password(user, password)\n+ session.pop('uid', None)\n+ session.pop('expires', None)\n+ return redirect(url_for('main.login'))\n return redirect(url_for('account.edit'))\n \n @view.route('/2fa', methods=('GET', 'POST'))\n", "issue": "Failure to invalidate session when user resets their own password\n## Description\r\n\r\nWhen a user resets their own password, their session is not invalidated. \r\n\r\n## Steps to Reproduce\r\n\r\n1. User logs in\r\n2. User resets password\r\n\r\n## Expected Behavior\r\n\r\nUser is logged out and is requested to use their new password to login\r\n\r\n## Actual Behavior\r\n\r\nUser can continue to browse without having to enter their new password again\r\n\r\n## Comments\r\n\r\nRelated tickets: #2300, #880\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom flask import (Blueprint, render_template, request, g, redirect, url_for,\n flash)\nfrom flask_babel import gettext\n\nfrom db import db_session\nfrom journalist_app.utils import (make_password, set_diceware_password,\n validate_user)\n\n\ndef make_blueprint(config):\n view = Blueprint('account', __name__)\n\n @view.route('/account', methods=('GET',))\n def edit():\n password = make_password(config)\n return render_template('edit_account.html',\n password=password)\n\n @view.route('/new-password', methods=('POST',))\n def new_password():\n user = g.user\n current_password = request.form.get('current_password')\n token = request.form.get('token')\n error_message = gettext('Incorrect password or two-factor code.')\n # If the user is validated, change their password\n if validate_user(user.username, current_password, token,\n error_message):\n password = request.form.get('password')\n set_diceware_password(user, password)\n return redirect(url_for('account.edit'))\n\n @view.route('/2fa', methods=('GET', 'POST'))\n def new_two_factor():\n if request.method == 'POST':\n token = request.form['token']\n if g.user.verify_token(token):\n flash(gettext(\"Token in two-factor authentication verified.\"),\n \"notification\")\n return redirect(url_for('account.edit'))\n else:\n flash(gettext(\n \"Could not verify token in two-factor authentication.\"),\n \"error\")\n\n return render_template('account_new_two_factor.html', user=g.user)\n\n @view.route('/reset-2fa-totp', methods=['POST'])\n def reset_two_factor_totp():\n g.user.is_totp = True\n g.user.regenerate_totp_shared_secret()\n db_session.commit()\n return redirect(url_for('account.new_two_factor'))\n\n @view.route('/reset-2fa-hotp', methods=['POST'])\n def reset_two_factor_hotp():\n otp_secret = request.form.get('otp_secret', None)\n if otp_secret:\n g.user.set_hotp_secret(otp_secret)\n db_session.commit()\n return redirect(url_for('account.new_two_factor'))\n else:\n return render_template('account_edit_hotp_secret.html')\n\n return view\n", "path": "securedrop/journalist_app/account.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom flask import (Blueprint, render_template, request, g, redirect, url_for,\n flash, session)\nfrom 
flask_babel import gettext\n\nfrom db import db_session\nfrom journalist_app.utils import (make_password, set_diceware_password,\n validate_user)\n\n\ndef make_blueprint(config):\n view = Blueprint('account', __name__)\n\n @view.route('/account', methods=('GET',))\n def edit():\n password = make_password(config)\n return render_template('edit_account.html',\n password=password)\n\n @view.route('/new-password', methods=('POST',))\n def new_password():\n user = g.user\n current_password = request.form.get('current_password')\n token = request.form.get('token')\n error_message = gettext('Incorrect password or two-factor code.')\n # If the user is validated, change their password\n if validate_user(user.username, current_password, token,\n error_message):\n password = request.form.get('password')\n set_diceware_password(user, password)\n session.pop('uid', None)\n session.pop('expires', None)\n return redirect(url_for('main.login'))\n return redirect(url_for('account.edit'))\n\n @view.route('/2fa', methods=('GET', 'POST'))\n def new_two_factor():\n if request.method == 'POST':\n token = request.form['token']\n if g.user.verify_token(token):\n flash(gettext(\"Token in two-factor authentication verified.\"),\n \"notification\")\n return redirect(url_for('account.edit'))\n else:\n flash(gettext(\n \"Could not verify token in two-factor authentication.\"),\n \"error\")\n\n return render_template('account_new_two_factor.html', user=g.user)\n\n @view.route('/reset-2fa-totp', methods=['POST'])\n def reset_two_factor_totp():\n g.user.is_totp = True\n g.user.regenerate_totp_shared_secret()\n db_session.commit()\n return redirect(url_for('account.new_two_factor'))\n\n @view.route('/reset-2fa-hotp', methods=['POST'])\n def reset_two_factor_hotp():\n otp_secret = request.form.get('otp_secret', None)\n if otp_secret:\n g.user.set_hotp_secret(otp_secret)\n db_session.commit()\n return redirect(url_for('account.new_two_factor'))\n else:\n return render_template('account_edit_hotp_secret.html')\n\n return view\n", "path": "securedrop/journalist_app/account.py"}]} | 977 | 195 |
gh_patches_debug_22132 | rasdani/github-patches | git_diff | open-mmlab__mmcv-823 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
imshow_bboxes does not show bboxes if img is incontiguous
As [DKDKDDK](https://stackoverflow.com/questions/57586449/why-cv2-rectangle-sometimes-return-np-ndarray-while-sometimes-cv2-umat) asked, cv2.rectangle seems unable to draw in place on non-contiguous arrays. When calling `mmcv.imshow_bboxes` or `mmcv.imshow_det_bboxes`, the contiguity of the `img` argument is whatever the user passed in. Would it be convenient to add `img = np.ascontiguousarray(img)` inside `mmcv.imshow_bboxes` and `mmcv.imshow_det_bboxes`?
--- END ISSUE ---
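For illustration, a minimal standalone NumPy/OpenCV sketch of why contiguity matters here (not mmcv code; assumes only `numpy` and `opencv-python` are installed):

```python
import cv2
import numpy as np

img = np.zeros((100, 100, 3), dtype=np.uint8)
flipped = img[:, ::-1, :]              # a sliced/flipped view is not C-contiguous
print(flipped.flags["C_CONTIGUOUS"])   # False -> cv2.rectangle may not draw in place on it

# Making a contiguous copy first gives cv2.rectangle a buffer it can safely draw into.
safe = np.ascontiguousarray(flipped)
cv2.rectangle(safe, (10, 10), (50, 50), (0, 255, 0), thickness=1)
print(safe.flags["C_CONTIGUOUS"])      # True, and the rectangle is really there
print(safe[10, 10].tolist())           # [0, 255, 0] -- the drawn border corner
```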
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mmcv/visualization/image.py`
Content:
```
1 # Copyright (c) Open-MMLab. All rights reserved.
2 import cv2
3 import numpy as np
4
5 from mmcv.image import imread, imwrite
6 from .color import color_val
7
8
9 def imshow(img, win_name='', wait_time=0):
10 """Show an image.
11
12 Args:
13 img (str or ndarray): The image to be displayed.
14 win_name (str): The window name.
15 wait_time (int): Value of waitKey param.
16 """
17 cv2.imshow(win_name, imread(img))
18 if wait_time == 0: # prevent from hangning if windows was closed
19 while True:
20 ret = cv2.waitKey(1)
21
22 closed = cv2.getWindowProperty(win_name, cv2.WND_PROP_VISIBLE) < 1
23 # if user closed window or if some key pressed
24 if closed or ret != -1:
25 break
26 else:
27 ret = cv2.waitKey(wait_time)
28
29
30 def imshow_bboxes(img,
31 bboxes,
32 colors='green',
33 top_k=-1,
34 thickness=1,
35 show=True,
36 win_name='',
37 wait_time=0,
38 out_file=None):
39 """Draw bboxes on an image.
40
41 Args:
42 img (str or ndarray): The image to be displayed.
43 bboxes (list or ndarray): A list of ndarray of shape (k, 4).
44 colors (list[str or tuple or Color]): A list of colors.
45 top_k (int): Plot the first k bboxes only if set positive.
46 thickness (int): Thickness of lines.
47 show (bool): Whether to show the image.
48 win_name (str): The window name.
49 wait_time (int): Value of waitKey param.
50 out_file (str, optional): The filename to write the image.
51
52 Returns:
53 ndarray: The image with bboxes drawn on it.
54 """
55 img = imread(img)
56
57 if isinstance(bboxes, np.ndarray):
58 bboxes = [bboxes]
59 if not isinstance(colors, list):
60 colors = [colors for _ in range(len(bboxes))]
61 colors = [color_val(c) for c in colors]
62 assert len(bboxes) == len(colors)
63
64 for i, _bboxes in enumerate(bboxes):
65 _bboxes = _bboxes.astype(np.int32)
66 if top_k <= 0:
67 _top_k = _bboxes.shape[0]
68 else:
69 _top_k = min(top_k, _bboxes.shape[0])
70 for j in range(_top_k):
71 left_top = (_bboxes[j, 0], _bboxes[j, 1])
72 right_bottom = (_bboxes[j, 2], _bboxes[j, 3])
73 cv2.rectangle(
74 img, left_top, right_bottom, colors[i], thickness=thickness)
75
76 if show:
77 imshow(img, win_name, wait_time)
78 if out_file is not None:
79 imwrite(img, out_file)
80 return img
81
82
83 def imshow_det_bboxes(img,
84 bboxes,
85 labels,
86 class_names=None,
87 score_thr=0,
88 bbox_color='green',
89 text_color='green',
90 thickness=1,
91 font_scale=0.5,
92 show=True,
93 win_name='',
94 wait_time=0,
95 out_file=None):
96 """Draw bboxes and class labels (with scores) on an image.
97
98 Args:
99 img (str or ndarray): The image to be displayed.
100 bboxes (ndarray): Bounding boxes (with scores), shaped (n, 4) or
101 (n, 5).
102 labels (ndarray): Labels of bboxes.
103 class_names (list[str]): Names of each classes.
104 score_thr (float): Minimum score of bboxes to be shown.
105 bbox_color (str or tuple or :obj:`Color`): Color of bbox lines.
106 text_color (str or tuple or :obj:`Color`): Color of texts.
107 thickness (int): Thickness of lines.
108 font_scale (float): Font scales of texts.
109 show (bool): Whether to show the image.
110 win_name (str): The window name.
111 wait_time (int): Value of waitKey param.
112 out_file (str or None): The filename to write the image.
113
114 Returns:
115 ndarray: The image with bboxes drawn on it.
116 """
117 assert bboxes.ndim == 2
118 assert labels.ndim == 1
119 assert bboxes.shape[0] == labels.shape[0]
120 assert bboxes.shape[1] == 4 or bboxes.shape[1] == 5
121 img = imread(img)
122
123 if score_thr > 0:
124 assert bboxes.shape[1] == 5
125 scores = bboxes[:, -1]
126 inds = scores > score_thr
127 bboxes = bboxes[inds, :]
128 labels = labels[inds]
129
130 bbox_color = color_val(bbox_color)
131 text_color = color_val(text_color)
132 img = np.ascontiguousarray(img)
133 for bbox, label in zip(bboxes, labels):
134 bbox_int = bbox.astype(np.int32)
135 left_top = (bbox_int[0], bbox_int[1])
136 right_bottom = (bbox_int[2], bbox_int[3])
137 cv2.rectangle(
138 img, left_top, right_bottom, bbox_color, thickness=thickness)
139 label_text = class_names[
140 label] if class_names is not None else f'cls {label}'
141 if len(bbox) > 4:
142 label_text += f'|{bbox[-1]:.02f}'
143 cv2.putText(img, label_text, (bbox_int[0], bbox_int[1] - 2),
144 cv2.FONT_HERSHEY_COMPLEX, font_scale, text_color)
145
146 if show:
147 imshow(img, win_name, wait_time)
148 if out_file is not None:
149 imwrite(img, out_file)
150 return img
151
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mmcv/visualization/image.py b/mmcv/visualization/image.py
--- a/mmcv/visualization/image.py
+++ b/mmcv/visualization/image.py
@@ -53,6 +53,7 @@
ndarray: The image with bboxes drawn on it.
"""
img = imread(img)
+ img = np.ascontiguousarray(img)
if isinstance(bboxes, np.ndarray):
bboxes = [bboxes]
@@ -119,6 +120,7 @@
assert bboxes.shape[0] == labels.shape[0]
assert bboxes.shape[1] == 4 or bboxes.shape[1] == 5
img = imread(img)
+ img = np.ascontiguousarray(img)
if score_thr > 0:
assert bboxes.shape[1] == 5
@@ -129,7 +131,7 @@
bbox_color = color_val(bbox_color)
text_color = color_val(text_color)
- img = np.ascontiguousarray(img)
+
for bbox, label in zip(bboxes, labels):
bbox_int = bbox.astype(np.int32)
left_top = (bbox_int[0], bbox_int[1])
| {"golden_diff": "diff --git a/mmcv/visualization/image.py b/mmcv/visualization/image.py\n--- a/mmcv/visualization/image.py\n+++ b/mmcv/visualization/image.py\n@@ -53,6 +53,7 @@\n ndarray: The image with bboxes drawn on it.\n \"\"\"\n img = imread(img)\n+ img = np.ascontiguousarray(img)\n \n if isinstance(bboxes, np.ndarray):\n bboxes = [bboxes]\n@@ -119,6 +120,7 @@\n assert bboxes.shape[0] == labels.shape[0]\n assert bboxes.shape[1] == 4 or bboxes.shape[1] == 5\n img = imread(img)\n+ img = np.ascontiguousarray(img)\n \n if score_thr > 0:\n assert bboxes.shape[1] == 5\n@@ -129,7 +131,7 @@\n \n bbox_color = color_val(bbox_color)\n text_color = color_val(text_color)\n- img = np.ascontiguousarray(img)\n+\n for bbox, label in zip(bboxes, labels):\n bbox_int = bbox.astype(np.int32)\n left_top = (bbox_int[0], bbox_int[1])\n", "issue": "imshow_bboxes does not show bboxes if img is incontiguous \nAs [DKDKDDK](https://stackoverflow.com/questions/57586449/why-cv2-rectangle-sometimes-return-np-ndarray-while-sometimes-cv2-umat) asked, cv2.rectangle seems unable to draw inplacely on incontiguous arrays. When calling `mmcv.imshow_bboxes` or `mmcv.imshow_det_bboxes`, the contiguousness of argument `img` is consistent with what the user passed in. Would it be convenient to add `img = np.ascontiguousarray(img)` inside `mmcv.imshow_bboxes` and `mmcv.imshow_det_bboxes`?\n", "before_files": [{"content": "# Copyright (c) Open-MMLab. All rights reserved.\nimport cv2\nimport numpy as np\n\nfrom mmcv.image import imread, imwrite\nfrom .color import color_val\n\n\ndef imshow(img, win_name='', wait_time=0):\n \"\"\"Show an image.\n\n Args:\n img (str or ndarray): The image to be displayed.\n win_name (str): The window name.\n wait_time (int): Value of waitKey param.\n \"\"\"\n cv2.imshow(win_name, imread(img))\n if wait_time == 0: # prevent from hangning if windows was closed\n while True:\n ret = cv2.waitKey(1)\n\n closed = cv2.getWindowProperty(win_name, cv2.WND_PROP_VISIBLE) < 1\n # if user closed window or if some key pressed\n if closed or ret != -1:\n break\n else:\n ret = cv2.waitKey(wait_time)\n\n\ndef imshow_bboxes(img,\n bboxes,\n colors='green',\n top_k=-1,\n thickness=1,\n show=True,\n win_name='',\n wait_time=0,\n out_file=None):\n \"\"\"Draw bboxes on an image.\n\n Args:\n img (str or ndarray): The image to be displayed.\n bboxes (list or ndarray): A list of ndarray of shape (k, 4).\n colors (list[str or tuple or Color]): A list of colors.\n top_k (int): Plot the first k bboxes only if set positive.\n thickness (int): Thickness of lines.\n show (bool): Whether to show the image.\n win_name (str): The window name.\n wait_time (int): Value of waitKey param.\n out_file (str, optional): The filename to write the image.\n\n Returns:\n ndarray: The image with bboxes drawn on it.\n \"\"\"\n img = imread(img)\n\n if isinstance(bboxes, np.ndarray):\n bboxes = [bboxes]\n if not isinstance(colors, list):\n colors = [colors for _ in range(len(bboxes))]\n colors = [color_val(c) for c in colors]\n assert len(bboxes) == len(colors)\n\n for i, _bboxes in enumerate(bboxes):\n _bboxes = _bboxes.astype(np.int32)\n if top_k <= 0:\n _top_k = _bboxes.shape[0]\n else:\n _top_k = min(top_k, _bboxes.shape[0])\n for j in range(_top_k):\n left_top = (_bboxes[j, 0], _bboxes[j, 1])\n right_bottom = (_bboxes[j, 2], _bboxes[j, 3])\n cv2.rectangle(\n img, left_top, right_bottom, colors[i], thickness=thickness)\n\n if show:\n imshow(img, win_name, wait_time)\n if out_file is not None:\n imwrite(img, out_file)\n return img\n\n\ndef 
imshow_det_bboxes(img,\n bboxes,\n labels,\n class_names=None,\n score_thr=0,\n bbox_color='green',\n text_color='green',\n thickness=1,\n font_scale=0.5,\n show=True,\n win_name='',\n wait_time=0,\n out_file=None):\n \"\"\"Draw bboxes and class labels (with scores) on an image.\n\n Args:\n img (str or ndarray): The image to be displayed.\n bboxes (ndarray): Bounding boxes (with scores), shaped (n, 4) or\n (n, 5).\n labels (ndarray): Labels of bboxes.\n class_names (list[str]): Names of each classes.\n score_thr (float): Minimum score of bboxes to be shown.\n bbox_color (str or tuple or :obj:`Color`): Color of bbox lines.\n text_color (str or tuple or :obj:`Color`): Color of texts.\n thickness (int): Thickness of lines.\n font_scale (float): Font scales of texts.\n show (bool): Whether to show the image.\n win_name (str): The window name.\n wait_time (int): Value of waitKey param.\n out_file (str or None): The filename to write the image.\n\n Returns:\n ndarray: The image with bboxes drawn on it.\n \"\"\"\n assert bboxes.ndim == 2\n assert labels.ndim == 1\n assert bboxes.shape[0] == labels.shape[0]\n assert bboxes.shape[1] == 4 or bboxes.shape[1] == 5\n img = imread(img)\n\n if score_thr > 0:\n assert bboxes.shape[1] == 5\n scores = bboxes[:, -1]\n inds = scores > score_thr\n bboxes = bboxes[inds, :]\n labels = labels[inds]\n\n bbox_color = color_val(bbox_color)\n text_color = color_val(text_color)\n img = np.ascontiguousarray(img)\n for bbox, label in zip(bboxes, labels):\n bbox_int = bbox.astype(np.int32)\n left_top = (bbox_int[0], bbox_int[1])\n right_bottom = (bbox_int[2], bbox_int[3])\n cv2.rectangle(\n img, left_top, right_bottom, bbox_color, thickness=thickness)\n label_text = class_names[\n label] if class_names is not None else f'cls {label}'\n if len(bbox) > 4:\n label_text += f'|{bbox[-1]:.02f}'\n cv2.putText(img, label_text, (bbox_int[0], bbox_int[1] - 2),\n cv2.FONT_HERSHEY_COMPLEX, font_scale, text_color)\n\n if show:\n imshow(img, win_name, wait_time)\n if out_file is not None:\n imwrite(img, out_file)\n return img\n", "path": "mmcv/visualization/image.py"}], "after_files": [{"content": "# Copyright (c) Open-MMLab. 
All rights reserved.\nimport cv2\nimport numpy as np\n\nfrom mmcv.image import imread, imwrite\nfrom .color import color_val\n\n\ndef imshow(img, win_name='', wait_time=0):\n \"\"\"Show an image.\n\n Args:\n img (str or ndarray): The image to be displayed.\n win_name (str): The window name.\n wait_time (int): Value of waitKey param.\n \"\"\"\n cv2.imshow(win_name, imread(img))\n if wait_time == 0: # prevent from hangning if windows was closed\n while True:\n ret = cv2.waitKey(1)\n\n closed = cv2.getWindowProperty(win_name, cv2.WND_PROP_VISIBLE) < 1\n # if user closed window or if some key pressed\n if closed or ret != -1:\n break\n else:\n ret = cv2.waitKey(wait_time)\n\n\ndef imshow_bboxes(img,\n bboxes,\n colors='green',\n top_k=-1,\n thickness=1,\n show=True,\n win_name='',\n wait_time=0,\n out_file=None):\n \"\"\"Draw bboxes on an image.\n\n Args:\n img (str or ndarray): The image to be displayed.\n bboxes (list or ndarray): A list of ndarray of shape (k, 4).\n colors (list[str or tuple or Color]): A list of colors.\n top_k (int): Plot the first k bboxes only if set positive.\n thickness (int): Thickness of lines.\n show (bool): Whether to show the image.\n win_name (str): The window name.\n wait_time (int): Value of waitKey param.\n out_file (str, optional): The filename to write the image.\n\n Returns:\n ndarray: The image with bboxes drawn on it.\n \"\"\"\n img = imread(img)\n img = np.ascontiguousarray(img)\n\n if isinstance(bboxes, np.ndarray):\n bboxes = [bboxes]\n if not isinstance(colors, list):\n colors = [colors for _ in range(len(bboxes))]\n colors = [color_val(c) for c in colors]\n assert len(bboxes) == len(colors)\n\n for i, _bboxes in enumerate(bboxes):\n _bboxes = _bboxes.astype(np.int32)\n if top_k <= 0:\n _top_k = _bboxes.shape[0]\n else:\n _top_k = min(top_k, _bboxes.shape[0])\n for j in range(_top_k):\n left_top = (_bboxes[j, 0], _bboxes[j, 1])\n right_bottom = (_bboxes[j, 2], _bboxes[j, 3])\n cv2.rectangle(\n img, left_top, right_bottom, colors[i], thickness=thickness)\n\n if show:\n imshow(img, win_name, wait_time)\n if out_file is not None:\n imwrite(img, out_file)\n return img\n\n\ndef imshow_det_bboxes(img,\n bboxes,\n labels,\n class_names=None,\n score_thr=0,\n bbox_color='green',\n text_color='green',\n thickness=1,\n font_scale=0.5,\n show=True,\n win_name='',\n wait_time=0,\n out_file=None):\n \"\"\"Draw bboxes and class labels (with scores) on an image.\n\n Args:\n img (str or ndarray): The image to be displayed.\n bboxes (ndarray): Bounding boxes (with scores), shaped (n, 4) or\n (n, 5).\n labels (ndarray): Labels of bboxes.\n class_names (list[str]): Names of each classes.\n score_thr (float): Minimum score of bboxes to be shown.\n bbox_color (str or tuple or :obj:`Color`): Color of bbox lines.\n text_color (str or tuple or :obj:`Color`): Color of texts.\n thickness (int): Thickness of lines.\n font_scale (float): Font scales of texts.\n show (bool): Whether to show the image.\n win_name (str): The window name.\n wait_time (int): Value of waitKey param.\n out_file (str or None): The filename to write the image.\n\n Returns:\n ndarray: The image with bboxes drawn on it.\n \"\"\"\n assert bboxes.ndim == 2\n assert labels.ndim == 1\n assert bboxes.shape[0] == labels.shape[0]\n assert bboxes.shape[1] == 4 or bboxes.shape[1] == 5\n img = imread(img)\n img = np.ascontiguousarray(img)\n\n if score_thr > 0:\n assert bboxes.shape[1] == 5\n scores = bboxes[:, -1]\n inds = scores > score_thr\n bboxes = bboxes[inds, :]\n labels = labels[inds]\n\n bbox_color = 
color_val(bbox_color)\n text_color = color_val(text_color)\n\n for bbox, label in zip(bboxes, labels):\n bbox_int = bbox.astype(np.int32)\n left_top = (bbox_int[0], bbox_int[1])\n right_bottom = (bbox_int[2], bbox_int[3])\n cv2.rectangle(\n img, left_top, right_bottom, bbox_color, thickness=thickness)\n label_text = class_names[\n label] if class_names is not None else f'cls {label}'\n if len(bbox) > 4:\n label_text += f'|{bbox[-1]:.02f}'\n cv2.putText(img, label_text, (bbox_int[0], bbox_int[1] - 2),\n cv2.FONT_HERSHEY_COMPLEX, font_scale, text_color)\n\n if show:\n imshow(img, win_name, wait_time)\n if out_file is not None:\n imwrite(img, out_file)\n return img\n", "path": "mmcv/visualization/image.py"}]} | 2,027 | 272 |
gh_patches_debug_39977 | rasdani/github-patches | git_diff | intel__dffml-568 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
operations: io: Fixup example
https://github.com/intel/dffml/blob/c0946b2d212239cfe4e470e63ab3da22c9cd97c4/dffml/operation/io.py#L116
copy the code here into a Python file and format it with black, then copy it back.
We also want to change
```python
... definition=dataflow.definitions["DataToPrint"],
... parents=None,)]
```
to
```python
... definition=print_output.inputs["data"],
... )
... ]
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dffml/operation/preprocess.py`
Content:
```
1 import ast
2 from dffml.df.types import Definition
3 from dffml.df.base import op
4
5
6 # Definitions
7 InputStr = Definition(name="InputStr", primitive="str")
8 EvaluatedStr = Definition(name="EvaluatedStr", primitive="generic")
9
10
11 @op(
12 inputs={"str_to_eval": InputStr},
13 outputs={"str_after_eval": EvaluatedStr},
14 conditions=[],
15 )
16 async def literal_eval(str_to_eval: str):
17 """
18 Evaluate the input using ast.literal_eval()
19
20 Parameters
21 ++++++++++
22 inputs : str
23 A string to be evaluated.
24
25 Returns
26 +++++++
27 A python literal.
28
29 Examples
30 ++++++++
31
32 The following example shows how to use literal_eval.
33
34 >>> dataflow = DataFlow.auto(literal_eval, GetSingle)
35 >>> dataflow.seed.append(
36 ... Input(
37 ... value=[literal_eval.op.outputs["str_after_eval"].name,],
38 ... definition=GetSingle.op.inputs["spec"],
39 ... )
40 ... )
41 >>> inputs = [
42 ... Input(
43 ... value="[1,2,3]",
44 ... definition=literal_eval.op.inputs["str_to_eval"],
45 ... parents=None,
46 ... )
47 ... ]
48 >>>
49 >>> async def main():
50 ... async for ctx, results in MemoryOrchestrator.run(dataflow, inputs):
51 ... print(results)
52 >>>
53 >>> asyncio.run(main())
54 {'EvaluatedStr': [1, 2, 3]}
55 """
56 value = ast.literal_eval(str_to_eval)
57 return {"str_after_eval": value}
58
```
Path: `dffml/operation/io.py`
Content:
```
1 import asyncio
2 import concurrent.futures
3 from typing import Dict, Any
4
5 from dffml.df.types import Operation, Definition
6 from dffml.df.base import (
7 op,
8 OperationImplementationContext,
9 OperationImplementation,
10 )
11
12
13 # Definitions
14 UserInput = Definition(name="UserInput", primitive="str")
15 DataToPrint = Definition(name="DataToPrint", primitive="str")
16
17 AcceptUserInput = Operation(
18 name="AcceptUserInput",
19 inputs={},
20 outputs={"InputData": UserInput},
21 conditions=[],
22 )
23
24
25 class AcceptUserInputContext(OperationImplementationContext):
26 @staticmethod
27 def receive_input():
28 print("Enter the value: ", end="")
29 return input()
30
31 async def run(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
32 user_input = await self.parent.loop.run_in_executor(
33 self.parent.pool, self.receive_input
34 )
35 return {"InputData": user_input}
36
37
38 class AcceptUserInput(OperationImplementation):
39 """
40 Accept input from stdin using python input()
41
42 Parameters
43 ++++++++++
44 inputs : dict
45 A dictionary with a key and empty list as value.
46
47 Returns
48 +++++++
49 dict
50 A dictionary containing user input.
51
52 Examples
53 ++++++++
54
55 The following example shows how to use AcceptUserInput.
56 (Assumes that the input from stdio is "Data flow is awesome"!)
57
58 >>> dataflow = DataFlow.auto(AcceptUserInput, GetSingle)
59 >>> dataflow.seed.append(
60 ... Input(
61 ... value=[AcceptUserInput.op.outputs["InputData"].name],
62 ... definition=GetSingle.op.inputs["spec"]
63 ... )
64 ... )
65 >>>
66 >>> async def main():
67 ... async for ctx, results in MemoryOrchestrator.run(dataflow, {"input":[]}):
68 ... print(results)
69 >>>
70 >>> asyncio.run(main())
71 Enter the value: {'UserInput': 'Data flow is awesome'}
72 """
73
74 op = AcceptUserInput
75 CONTEXT = AcceptUserInputContext
76
77 def __init__(self, *args, **kwargs):
78 super().__init__(*args, **kwargs)
79 self.loop = None
80 self.pool = None
81 self.__pool = None
82
83 async def __aenter__(self) -> "OperationImplementationContext":
84 self.loop = asyncio.get_event_loop()
85 self.pool = concurrent.futures.ThreadPoolExecutor()
86 self.__pool = self.pool.__enter__()
87 return self
88
89 async def __aexit__(self, exc_type, exc_value, traceback):
90 self.__pool.__exit__(exc_type, exc_value, traceback)
91 self.__pool = None
92 self.pool = None
93 self.loop = None
94
95
96 @op(inputs={"data": DataToPrint}, outputs={}, conditions=[])
97 async def print_output(data: str):
98 """
99 Print the output on stdout using python print()
100
101 Parameters
102 ++++++++++
103 inputs : list
104 A list of Inputs whose value is to be printed.
105
106 Examples
107 ++++++++
108
109 The following example shows how to use print_output.
110
111 >>> dataflow = DataFlow.auto(print_output, GetSingle)
112 >>> inputs = [
113 ... Input(
114 ... value="print_output example",
115 ... definition=dataflow.definitions["DataToPrint"],
116 ... parents=None,)]
117 >>>
118 >>> async def main():
119 ... async for ctx, results in MemoryOrchestrator.run(dataflow, inputs):
120 ... print("String to be printed is 'print_output example'")
121 >>>
122 >>> asyncio.run(main())
123 print_output example
124 String to be printed is 'print_output example'
125 """
126 print(data)
127
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dffml/operation/io.py b/dffml/operation/io.py
--- a/dffml/operation/io.py
+++ b/dffml/operation/io.py
@@ -12,7 +12,7 @@
# Definitions
UserInput = Definition(name="UserInput", primitive="str")
-DataToPrint = Definition(name="DataToPrint", primitive="str")
+DataToPrint = Definition(name="DataToPrint", primitive="generic")
AcceptUserInput = Operation(
name="AcceptUserInput",
@@ -39,11 +39,6 @@
"""
Accept input from stdin using python input()
- Parameters
- ++++++++++
- inputs : dict
- A dictionary with a key and empty list as value.
-
Returns
+++++++
dict
@@ -59,12 +54,12 @@
>>> dataflow.seed.append(
... Input(
... value=[AcceptUserInput.op.outputs["InputData"].name],
- ... definition=GetSingle.op.inputs["spec"]
+ ... definition=GetSingle.op.inputs["spec"],
... )
... )
>>>
>>> async def main():
- ... async for ctx, results in MemoryOrchestrator.run(dataflow, {"input":[]}):
+ ... async for ctx, results in MemoryOrchestrator.run(dataflow, {"input": []}):
... print(results)
>>>
>>> asyncio.run(main())
@@ -94,33 +89,32 @@
@op(inputs={"data": DataToPrint}, outputs={}, conditions=[])
-async def print_output(data: str):
+async def print_output(data: Any):
"""
Print the output on stdout using python print()
Parameters
++++++++++
- inputs : list
- A list of Inputs whose value is to be printed.
+ data : Any
+ A python literal to be printed.
Examples
++++++++
The following example shows how to use print_output.
- >>> dataflow = DataFlow.auto(print_output, GetSingle)
+ >>> dataflow = DataFlow.auto(print_output)
>>> inputs = [
... Input(
- ... value="print_output example",
- ... definition=dataflow.definitions["DataToPrint"],
- ... parents=None,)]
+ ... value="print_output example", definition=print_output.op.inputs["data"]
+ ... )
+ ... ]
>>>
>>> async def main():
... async for ctx, results in MemoryOrchestrator.run(dataflow, inputs):
- ... print("String to be printed is 'print_output example'")
+ ... pass
>>>
>>> asyncio.run(main())
print_output example
- String to be printed is 'print_output example'
"""
print(data)
diff --git a/dffml/operation/preprocess.py b/dffml/operation/preprocess.py
--- a/dffml/operation/preprocess.py
+++ b/dffml/operation/preprocess.py
@@ -19,12 +19,13 @@
Parameters
++++++++++
- inputs : str
+ str_to_eval : str
A string to be evaluated.
Returns
+++++++
- A python literal.
+ dict
+ A dict containing python literal.
Examples
++++++++
| {"golden_diff": "diff --git a/dffml/operation/io.py b/dffml/operation/io.py\n--- a/dffml/operation/io.py\n+++ b/dffml/operation/io.py\n@@ -12,7 +12,7 @@\n \n # Definitions\n UserInput = Definition(name=\"UserInput\", primitive=\"str\")\n-DataToPrint = Definition(name=\"DataToPrint\", primitive=\"str\")\n+DataToPrint = Definition(name=\"DataToPrint\", primitive=\"generic\")\n \n AcceptUserInput = Operation(\n name=\"AcceptUserInput\",\n@@ -39,11 +39,6 @@\n \"\"\"\n Accept input from stdin using python input()\n \n- Parameters\n- ++++++++++\n- inputs : dict\n- A dictionary with a key and empty list as value.\n-\n Returns\n +++++++\n dict\n@@ -59,12 +54,12 @@\n >>> dataflow.seed.append(\n ... Input(\n ... value=[AcceptUserInput.op.outputs[\"InputData\"].name],\n- ... definition=GetSingle.op.inputs[\"spec\"]\n+ ... definition=GetSingle.op.inputs[\"spec\"],\n ... )\n ... )\n >>>\n >>> async def main():\n- ... async for ctx, results in MemoryOrchestrator.run(dataflow, {\"input\":[]}):\n+ ... async for ctx, results in MemoryOrchestrator.run(dataflow, {\"input\": []}):\n ... print(results)\n >>>\n >>> asyncio.run(main())\n@@ -94,33 +89,32 @@\n \n \n @op(inputs={\"data\": DataToPrint}, outputs={}, conditions=[])\n-async def print_output(data: str):\n+async def print_output(data: Any):\n \"\"\"\n Print the output on stdout using python print()\n \n Parameters\n ++++++++++\n- inputs : list\n- A list of Inputs whose value is to be printed.\n+ data : Any\n+ A python literal to be printed.\n \n Examples\n ++++++++\n \n The following example shows how to use print_output.\n \n- >>> dataflow = DataFlow.auto(print_output, GetSingle)\n+ >>> dataflow = DataFlow.auto(print_output)\n >>> inputs = [\n ... Input(\n- ... value=\"print_output example\",\n- ... definition=dataflow.definitions[\"DataToPrint\"],\n- ... parents=None,)]\n+ ... value=\"print_output example\", definition=print_output.op.inputs[\"data\"]\n+ ... )\n+ ... ]\n >>>\n >>> async def main():\n ... async for ctx, results in MemoryOrchestrator.run(dataflow, inputs):\n- ... print(\"String to be printed is 'print_output example'\")\n+ ... pass\n >>>\n >>> asyncio.run(main())\n print_output example\n- String to be printed is 'print_output example'\n \"\"\"\n print(data)\ndiff --git a/dffml/operation/preprocess.py b/dffml/operation/preprocess.py\n--- a/dffml/operation/preprocess.py\n+++ b/dffml/operation/preprocess.py\n@@ -19,12 +19,13 @@\n \n Parameters\n ++++++++++\n- inputs : str\n+ str_to_eval : str\n A string to be evaluated.\n \n Returns\n +++++++\n- A python literal.\n+ dict\n+ A dict containing python literal.\n \n Examples\n ++++++++\n", "issue": "operations: io: Fixup example\nhttps://github.com/intel/dffml/blob/c0946b2d212239cfe4e470e63ab3da22c9cd97c4/dffml/operation/io.py#L116\r\n\r\ncopy the code here into a Python file and format it with black, then copy it back.\r\n\r\nWe also want to change\r\n\r\n```python\r\n ... definition=dataflow.definitions[\"DataToPrint\"],\r\n ... parents=None,)]\r\n```\r\n\r\nto\r\n\r\n```python\r\n ... definition=print_output.inputs[\"data\"],\r\n ... )\r\n ... 
]\r\n```\n", "before_files": [{"content": "import ast\nfrom dffml.df.types import Definition\nfrom dffml.df.base import op\n\n\n# Definitions\nInputStr = Definition(name=\"InputStr\", primitive=\"str\")\nEvaluatedStr = Definition(name=\"EvaluatedStr\", primitive=\"generic\")\n\n\n@op(\n inputs={\"str_to_eval\": InputStr},\n outputs={\"str_after_eval\": EvaluatedStr},\n conditions=[],\n)\nasync def literal_eval(str_to_eval: str):\n \"\"\"\n Evaluate the input using ast.literal_eval()\n\n Parameters\n ++++++++++\n inputs : str\n A string to be evaluated.\n\n Returns\n +++++++\n A python literal.\n\n Examples\n ++++++++\n\n The following example shows how to use literal_eval.\n\n >>> dataflow = DataFlow.auto(literal_eval, GetSingle)\n >>> dataflow.seed.append(\n ... Input(\n ... value=[literal_eval.op.outputs[\"str_after_eval\"].name,],\n ... definition=GetSingle.op.inputs[\"spec\"],\n ... )\n ... )\n >>> inputs = [\n ... Input(\n ... value=\"[1,2,3]\",\n ... definition=literal_eval.op.inputs[\"str_to_eval\"],\n ... parents=None,\n ... )\n ... ]\n >>>\n >>> async def main():\n ... async for ctx, results in MemoryOrchestrator.run(dataflow, inputs):\n ... print(results)\n >>>\n >>> asyncio.run(main())\n {'EvaluatedStr': [1, 2, 3]}\n \"\"\"\n value = ast.literal_eval(str_to_eval)\n return {\"str_after_eval\": value}\n", "path": "dffml/operation/preprocess.py"}, {"content": "import asyncio\nimport concurrent.futures\nfrom typing import Dict, Any\n\nfrom dffml.df.types import Operation, Definition\nfrom dffml.df.base import (\n op,\n OperationImplementationContext,\n OperationImplementation,\n)\n\n\n# Definitions\nUserInput = Definition(name=\"UserInput\", primitive=\"str\")\nDataToPrint = Definition(name=\"DataToPrint\", primitive=\"str\")\n\nAcceptUserInput = Operation(\n name=\"AcceptUserInput\",\n inputs={},\n outputs={\"InputData\": UserInput},\n conditions=[],\n)\n\n\nclass AcceptUserInputContext(OperationImplementationContext):\n @staticmethod\n def receive_input():\n print(\"Enter the value: \", end=\"\")\n return input()\n\n async def run(self, inputs: Dict[str, Any]) -> Dict[str, Any]:\n user_input = await self.parent.loop.run_in_executor(\n self.parent.pool, self.receive_input\n )\n return {\"InputData\": user_input}\n\n\nclass AcceptUserInput(OperationImplementation):\n \"\"\"\n Accept input from stdin using python input()\n\n Parameters\n ++++++++++\n inputs : dict\n A dictionary with a key and empty list as value.\n\n Returns\n +++++++\n dict\n A dictionary containing user input.\n\n Examples\n ++++++++\n\n The following example shows how to use AcceptUserInput.\n (Assumes that the input from stdio is \"Data flow is awesome\"!)\n\n >>> dataflow = DataFlow.auto(AcceptUserInput, GetSingle)\n >>> dataflow.seed.append(\n ... Input(\n ... value=[AcceptUserInput.op.outputs[\"InputData\"].name],\n ... definition=GetSingle.op.inputs[\"spec\"]\n ... )\n ... )\n >>>\n >>> async def main():\n ... async for ctx, results in MemoryOrchestrator.run(dataflow, {\"input\":[]}):\n ... 
print(results)\n >>>\n >>> asyncio.run(main())\n Enter the value: {'UserInput': 'Data flow is awesome'}\n \"\"\"\n\n op = AcceptUserInput\n CONTEXT = AcceptUserInputContext\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.loop = None\n self.pool = None\n self.__pool = None\n\n async def __aenter__(self) -> \"OperationImplementationContext\":\n self.loop = asyncio.get_event_loop()\n self.pool = concurrent.futures.ThreadPoolExecutor()\n self.__pool = self.pool.__enter__()\n return self\n\n async def __aexit__(self, exc_type, exc_value, traceback):\n self.__pool.__exit__(exc_type, exc_value, traceback)\n self.__pool = None\n self.pool = None\n self.loop = None\n\n\n@op(inputs={\"data\": DataToPrint}, outputs={}, conditions=[])\nasync def print_output(data: str):\n \"\"\"\n Print the output on stdout using python print()\n\n Parameters\n ++++++++++\n inputs : list\n A list of Inputs whose value is to be printed.\n\n Examples\n ++++++++\n\n The following example shows how to use print_output.\n\n >>> dataflow = DataFlow.auto(print_output, GetSingle)\n >>> inputs = [\n ... Input(\n ... value=\"print_output example\",\n ... definition=dataflow.definitions[\"DataToPrint\"],\n ... parents=None,)]\n >>>\n >>> async def main():\n ... async for ctx, results in MemoryOrchestrator.run(dataflow, inputs):\n ... print(\"String to be printed is 'print_output example'\")\n >>>\n >>> asyncio.run(main())\n print_output example\n String to be printed is 'print_output example'\n \"\"\"\n print(data)\n", "path": "dffml/operation/io.py"}], "after_files": [{"content": "import ast\nfrom dffml.df.types import Definition\nfrom dffml.df.base import op\n\n\n# Definitions\nInputStr = Definition(name=\"InputStr\", primitive=\"str\")\nEvaluatedStr = Definition(name=\"EvaluatedStr\", primitive=\"generic\")\n\n\n@op(\n inputs={\"str_to_eval\": InputStr},\n outputs={\"str_after_eval\": EvaluatedStr},\n conditions=[],\n)\nasync def literal_eval(str_to_eval: str):\n \"\"\"\n Evaluate the input using ast.literal_eval()\n\n Parameters\n ++++++++++\n str_to_eval : str\n A string to be evaluated.\n\n Returns\n +++++++\n dict\n A dict containing python literal.\n\n Examples\n ++++++++\n\n The following example shows how to use literal_eval.\n\n >>> dataflow = DataFlow.auto(literal_eval, GetSingle)\n >>> dataflow.seed.append(\n ... Input(\n ... value=[literal_eval.op.outputs[\"str_after_eval\"].name,],\n ... definition=GetSingle.op.inputs[\"spec\"],\n ... )\n ... )\n >>> inputs = [\n ... Input(\n ... value=\"[1,2,3]\",\n ... definition=literal_eval.op.inputs[\"str_to_eval\"],\n ... parents=None,\n ... )\n ... ]\n >>>\n >>> async def main():\n ... async for ctx, results in MemoryOrchestrator.run(dataflow, inputs):\n ... 
print(results)\n >>>\n >>> asyncio.run(main())\n {'EvaluatedStr': [1, 2, 3]}\n \"\"\"\n value = ast.literal_eval(str_to_eval)\n return {\"str_after_eval\": value}\n", "path": "dffml/operation/preprocess.py"}, {"content": "import asyncio\nimport concurrent.futures\nfrom typing import Dict, Any\n\nfrom dffml.df.types import Operation, Definition\nfrom dffml.df.base import (\n op,\n OperationImplementationContext,\n OperationImplementation,\n)\n\n\n# Definitions\nUserInput = Definition(name=\"UserInput\", primitive=\"str\")\nDataToPrint = Definition(name=\"DataToPrint\", primitive=\"generic\")\n\nAcceptUserInput = Operation(\n name=\"AcceptUserInput\",\n inputs={},\n outputs={\"InputData\": UserInput},\n conditions=[],\n)\n\n\nclass AcceptUserInputContext(OperationImplementationContext):\n @staticmethod\n def receive_input():\n print(\"Enter the value: \", end=\"\")\n return input()\n\n async def run(self, inputs: Dict[str, Any]) -> Dict[str, Any]:\n user_input = await self.parent.loop.run_in_executor(\n self.parent.pool, self.receive_input\n )\n return {\"InputData\": user_input}\n\n\nclass AcceptUserInput(OperationImplementation):\n \"\"\"\n Accept input from stdin using python input()\n\n Returns\n +++++++\n dict\n A dictionary containing user input.\n\n Examples\n ++++++++\n\n The following example shows how to use AcceptUserInput.\n (Assumes that the input from stdio is \"Data flow is awesome\"!)\n\n >>> dataflow = DataFlow.auto(AcceptUserInput, GetSingle)\n >>> dataflow.seed.append(\n ... Input(\n ... value=[AcceptUserInput.op.outputs[\"InputData\"].name],\n ... definition=GetSingle.op.inputs[\"spec\"],\n ... )\n ... )\n >>>\n >>> async def main():\n ... async for ctx, results in MemoryOrchestrator.run(dataflow, {\"input\": []}):\n ... print(results)\n >>>\n >>> asyncio.run(main())\n Enter the value: {'UserInput': 'Data flow is awesome'}\n \"\"\"\n\n op = AcceptUserInput\n CONTEXT = AcceptUserInputContext\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.loop = None\n self.pool = None\n self.__pool = None\n\n async def __aenter__(self) -> \"OperationImplementationContext\":\n self.loop = asyncio.get_event_loop()\n self.pool = concurrent.futures.ThreadPoolExecutor()\n self.__pool = self.pool.__enter__()\n return self\n\n async def __aexit__(self, exc_type, exc_value, traceback):\n self.__pool.__exit__(exc_type, exc_value, traceback)\n self.__pool = None\n self.pool = None\n self.loop = None\n\n\n@op(inputs={\"data\": DataToPrint}, outputs={}, conditions=[])\nasync def print_output(data: Any):\n \"\"\"\n Print the output on stdout using python print()\n\n Parameters\n ++++++++++\n data : Any\n A python literal to be printed.\n\n Examples\n ++++++++\n\n The following example shows how to use print_output.\n\n >>> dataflow = DataFlow.auto(print_output)\n >>> inputs = [\n ... Input(\n ... value=\"print_output example\", definition=print_output.op.inputs[\"data\"]\n ... )\n ... ]\n >>>\n >>> async def main():\n ... async for ctx, results in MemoryOrchestrator.run(dataflow, inputs):\n ... pass\n >>>\n >>> asyncio.run(main())\n print_output example\n \"\"\"\n print(data)\n", "path": "dffml/operation/io.py"}]} | 1,956 | 753 |
gh_patches_debug_7947 | rasdani/github-patches | git_diff | coreruleset__coreruleset-3232 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Review links to OWASP wiki
### Describe the bug
We have references to other OWASP projects in our files:
```
rules/REQUEST-933-APPLICATION-ATTACK-PHP.conf
28:# https://www.owasp.org/index.php/PHP_Top_5#P1:_Remote_Code_Executionh
366:# https://www.owasp.org/index.php/PHP_Object_Injection
rules/REQUEST-921-PROTOCOL-ATTACK.conf
194:# Reference: https://www.owasp.org/index.php/Testing_for_HTTP_Splitting/Smuggling_(OTG-INPVAL-016)
rules/REQUEST-920-PROTOCOL-ENFORCEMENT.conf
97:# https://www.owasp.org/index.php/ModSecurity_CRS_RuleID-96000
CHANGES.md
977: https://www.owasp.org/index.php/AppSensor_DetectionPoints
rules/REQUEST-932-APPLICATION-ATTACK-RCE.conf
690:# https://www.owasp.org/index.php/Unrestricted_File_Upload
rules/scanners-user-agents.data
58:# https://www.owasp.org/index.php/Category:OWASP_DirBuster_Project
```
We need to double-check that they are still valid and update them if not.
--- END ISSUE ---
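For illustration, a minimal sketch of a link checker that could support this review (assumes the third-party `requests` package, which is not part of the CRS toolchain; URLs copied from the list above):

```python
import requests

urls = [
    "https://www.owasp.org/index.php/PHP_Object_Injection",
    "https://www.owasp.org/index.php/Unrestricted_File_Upload",
    "https://www.owasp.org/index.php/Category:OWASP_DirBuster_Project",
]

for url in urls:
    try:
        r = requests.head(url, allow_redirects=True, timeout=10)
        # Report the status and the final URL after any redirects,
        # so stale wiki links can be replaced with their new targets.
        print(r.status_code, url, "->", r.url)
    except requests.RequestException as exc:
        print("error", url, exc)
```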
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `util/regexp-tricks/negative-lookahead.py`
Content:
```
1 import argparse
2
3 # WARNING: This script is EXPERIMENTAL. Use with caution.
4 #
5 # Known issues:
6 # * At the moment, it will probably not work with more than two strings.
7 #
8 # Known limitations:
9 # * Any substrings of a target string will also NOT be matched. This is probably due to a limitation in this technique,
10 # make sure that subtrings of the negative lookahead are not harmful in any way.
11
12 parser = argparse.ArgumentParser(description="This script takes a list of strings and converts them into \
13 a regex that acts like a negative lookahead")
14 parser.add_argument("strings", type=str, nargs='+',
15 help="the strings to convert into a negative lookahead")
16 parser.add_argument("--prefix", type=str, default="",
17 help="sets a prefix for the resulting regex")
18 parser.add_argument("--suffix", type=str, default="",
19 help="sets a suffix for the resulting regex")
20
21 args = parser.parse_args()
22
23 # Return the longest prefix of all list elements. Shamelessly copied from:
24 # https://stackoverflow.com/questions/6718196/determine-prefix-from-a-set-of-similar-strings
25 def commonprefix(m):
26 "Given a list of pathnames, returns the longest common leading component"
27 if not m: return ''
28 s1 = min(m)
29 s2 = max(m)
30 for i, c in enumerate(s1):
31 if c != s2[i]:
32 return s1[:i]
33 return s1
34
35 # flatten returns a string with concatenated dictionary keys
36 def flatten(dict):
37 s = ""
38
39 for key in dict.keys():
40 s += key
41
42 return s
43
44 # set returns a character set containing the unique characters across all strings for the given index
45 def set(strings, index, flags):
46 dict = {}
47
48 for s in strings:
49 # Continue so we don't panic
50 if index > len(s) -1:
51 continue
52
53 dict[s[index]] = ''
54
55 return "[" + flags + flatten(dict) + "]"
56
57 # prepare converts a string for negative lookaheads emulation
58 def prepare(s, offset):
59 r = ""
60
61 if len(s) == 0:
62 return r
63
64 for i in range(offset, len(s)):
65 for j in range(0, i + 1):
66 if j == i:
67 r += "[^" + s[j] + "]"
68 else:
69 r += s[j]
70
71 if i != len(s) - 1:
72 r += "|"
73
74 return r
75
76 # run runs the
77 def run():
78 strings = args.strings
79
80 r = ""
81 r += set(strings, 0, "^")
82
83 c = ""
84 d = {}
85
86 # Only find common string if we have more than one
87 if len(strings) > 1:
88 c = commonprefix(strings)
89
90 # Collect all characters after the common substring from every string
91 for s in strings:
92 if len(s) > len(c) and s.startswith(c):
93 d[s[len(c)]] = ''
94
95 # Add the common string to the regex to prevent accidental matching
96 if len(c) > 0:
97 if len(c) > 1:
98 r += "|" + "(?:" + prepare(c, 1) + ")"
99
100 r += "|" + "(?:" + c + "[^" + flatten(d) + "]" + ")"
101
102 for s in strings:
103 g = ""
104
105 # When the common string is > 0, offset with len(c) + 1 because we handled this earlier
106 if len(c) > 0:
107 g = prepare(s, len(c) + 1)
108 else:
109 g = prepare(s, 1)
110
111 # Add OR boolean if necessary
112 if len(g) > 0:
113 r += "|"
114
115 r += g
116
117 print(args.prefix + "(?:" + r + ")" + args.suffix)
118
119 # Only run if script is called directly
120 if __name__ == "__main__":
121 run()
122
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/util/regexp-tricks/negative-lookahead.py b/util/regexp-tricks/negative-lookahead.py
--- a/util/regexp-tricks/negative-lookahead.py
+++ b/util/regexp-tricks/negative-lookahead.py
@@ -21,7 +21,7 @@
args = parser.parse_args()
# Return the longest prefix of all list elements. Shamelessly copied from:
-# https://stackoverflow.com/questions/6718196/determine-prefix-from-a-set-of-similar-strings
+# https://stackoverflow.com/questions/6718196/determine-the-common-prefix-of-multiple-strings
def commonprefix(m):
"Given a list of pathnames, returns the longest common leading component"
if not m: return ''
| {"golden_diff": "diff --git a/util/regexp-tricks/negative-lookahead.py b/util/regexp-tricks/negative-lookahead.py\n--- a/util/regexp-tricks/negative-lookahead.py\n+++ b/util/regexp-tricks/negative-lookahead.py\n@@ -21,7 +21,7 @@\n args = parser.parse_args()\n \n # Return the longest prefix of all list elements. Shamelessly copied from:\n-# https://stackoverflow.com/questions/6718196/determine-prefix-from-a-set-of-similar-strings\n+# https://stackoverflow.com/questions/6718196/determine-the-common-prefix-of-multiple-strings\n def commonprefix(m):\n \"Given a list of pathnames, returns the longest common leading component\"\n if not m: return ''\n", "issue": "Review links to OWASP wiki\n### Describe the bug\r\n\r\nWe have references to other OWASP projects in our files:\r\n\r\n```\r\nrules/REQUEST-933-APPLICATION-ATTACK-PHP.conf\r\n28:# https://www.owasp.org/index.php/PHP_Top_5#P1:_Remote_Code_Executionh\r\n366:# https://www.owasp.org/index.php/PHP_Object_Injection\r\n\r\nrules/REQUEST-921-PROTOCOL-ATTACK.conf\r\n194:# Reference: https://www.owasp.org/index.php/Testing_for_HTTP_Splitting/Smuggling_(OTG-INPVAL-016)\r\n\r\nrules/REQUEST-920-PROTOCOL-ENFORCEMENT.conf\r\n97:# https://www.owasp.org/index.php/ModSecurity_CRS_RuleID-96000\r\n\r\nCHANGES.md\r\n977: https://www.owasp.org/index.php/AppSensor_DetectionPoints\r\n\r\nrules/REQUEST-932-APPLICATION-ATTACK-RCE.conf\r\n690:# https://www.owasp.org/index.php/Unrestricted_File_Upload\r\n\r\nrules/scanners-user-agents.data\r\n58:# https://www.owasp.org/index.php/Category:OWASP_DirBuster_Project\r\n```\r\nWe need to double check they are still valid and update if not.\n", "before_files": [{"content": "import argparse\n\n# WARNING: This script is EXPERIMENTAL. Use with caution.\n#\n# Known issues:\n# * At the moment, it will probably not work with more than two strings.\n#\n# Known limitations:\n# * Any substrings of a target string will also NOT be matched. This is probably due to a limitation in this technique,\n# make sure that subtrings of the negative lookahead are not harmful in any way.\n\nparser = argparse.ArgumentParser(description=\"This script takes a list of strings and converts them into \\\n a regex that acts like a negative lookahead\")\nparser.add_argument(\"strings\", type=str, nargs='+',\n help=\"the strings to convert into a negative lookahead\")\nparser.add_argument(\"--prefix\", type=str, default=\"\",\n help=\"sets a prefix for the resulting regex\")\nparser.add_argument(\"--suffix\", type=str, default=\"\",\n help=\"sets a suffix for the resulting regex\")\n\nargs = parser.parse_args()\n\n# Return the longest prefix of all list elements. 
Shamelessly copied from:\n# https://stackoverflow.com/questions/6718196/determine-prefix-from-a-set-of-similar-strings\ndef commonprefix(m):\n \"Given a list of pathnames, returns the longest common leading component\"\n if not m: return ''\n s1 = min(m)\n s2 = max(m)\n for i, c in enumerate(s1):\n if c != s2[i]:\n return s1[:i]\n return s1\n\n# flatten returns a string with concatenated dictionary keys\ndef flatten(dict):\n s = \"\"\n\n for key in dict.keys():\n s += key\n\n return s\n\n# set returns a character set containing the unique characters across all strings for the given index\ndef set(strings, index, flags):\n dict = {}\n\n for s in strings:\n # Continue so we don't panic\n if index > len(s) -1:\n continue\n \n dict[s[index]] = ''\n \n return \"[\" + flags + flatten(dict) + \"]\"\n\n# prepare converts a string for negative lookaheads emulation\ndef prepare(s, offset):\n r = \"\"\n\n if len(s) == 0:\n return r\n\n for i in range(offset, len(s)):\n for j in range(0, i + 1):\n if j == i:\n r += \"[^\" + s[j] + \"]\"\n else:\n r += s[j]\n\n if i != len(s) - 1:\n r += \"|\"\n\n return r\n\n# run runs the \ndef run():\n strings = args.strings\n\n r = \"\"\n r += set(strings, 0, \"^\")\n\n c = \"\"\n d = {}\n\n # Only find common string if we have more than one\n if len(strings) > 1:\n c = commonprefix(strings)\n \n # Collect all characters after the common substring from every string\n for s in strings:\n if len(s) > len(c) and s.startswith(c):\n d[s[len(c)]] = ''\n\n # Add the common string to the regex to prevent accidental matching\n if len(c) > 0:\n if len(c) > 1:\n r += \"|\" + \"(?:\" + prepare(c, 1) + \")\"\n\n r += \"|\" + \"(?:\" + c + \"[^\" + flatten(d) + \"]\" + \")\"\n\n for s in strings:\n g = \"\"\n\n # When the common string is > 0, offset with len(c) + 1 because we handled this earlier\n if len(c) > 0:\n g = prepare(s, len(c) + 1)\n else:\n g = prepare(s, 1)\n \n # Add OR boolean if necessary\n if len(g) > 0:\n r += \"|\"\n\n r += g\n\n print(args.prefix + \"(?:\" + r + \")\" + args.suffix)\n\n# Only run if script is called directly\nif __name__ == \"__main__\":\n run()\n", "path": "util/regexp-tricks/negative-lookahead.py"}], "after_files": [{"content": "import argparse\n\n# WARNING: This script is EXPERIMENTAL. Use with caution.\n#\n# Known issues:\n# * At the moment, it will probably not work with more than two strings.\n#\n# Known limitations:\n# * Any substrings of a target string will also NOT be matched. This is probably due to a limitation in this technique,\n# make sure that subtrings of the negative lookahead are not harmful in any way.\n\nparser = argparse.ArgumentParser(description=\"This script takes a list of strings and converts them into \\\n a regex that acts like a negative lookahead\")\nparser.add_argument(\"strings\", type=str, nargs='+',\n help=\"the strings to convert into a negative lookahead\")\nparser.add_argument(\"--prefix\", type=str, default=\"\",\n help=\"sets a prefix for the resulting regex\")\nparser.add_argument(\"--suffix\", type=str, default=\"\",\n help=\"sets a suffix for the resulting regex\")\n\nargs = parser.parse_args()\n\n# Return the longest prefix of all list elements. 
Shamelessly copied from:\n# https://stackoverflow.com/questions/6718196/determine-the-common-prefix-of-multiple-strings\ndef commonprefix(m):\n \"Given a list of pathnames, returns the longest common leading component\"\n if not m: return ''\n s1 = min(m)\n s2 = max(m)\n for i, c in enumerate(s1):\n if c != s2[i]:\n return s1[:i]\n return s1\n\n# flatten returns a string with concatenated dictionary keys\ndef flatten(dict):\n s = \"\"\n\n for key in dict.keys():\n s += key\n\n return s\n\n# set returns a character set containing the unique characters across all strings for the given index\ndef set(strings, index, flags):\n dict = {}\n\n for s in strings:\n # Continue so we don't panic\n if index > len(s) -1:\n continue\n \n dict[s[index]] = ''\n \n return \"[\" + flags + flatten(dict) + \"]\"\n\n# prepare converts a string for negative lookaheads emulation\ndef prepare(s, offset):\n r = \"\"\n\n if len(s) == 0:\n return r\n\n for i in range(offset, len(s)):\n for j in range(0, i + 1):\n if j == i:\n r += \"[^\" + s[j] + \"]\"\n else:\n r += s[j]\n\n if i != len(s) - 1:\n r += \"|\"\n\n return r\n\n# run runs the \ndef run():\n strings = args.strings\n\n r = \"\"\n r += set(strings, 0, \"^\")\n\n c = \"\"\n d = {}\n\n # Only find common string if we have more than one\n if len(strings) > 1:\n c = commonprefix(strings)\n \n # Collect all characters after the common substring from every string\n for s in strings:\n if len(s) > len(c) and s.startswith(c):\n d[s[len(c)]] = ''\n\n # Add the common string to the regex to prevent accidental matching\n if len(c) > 0:\n if len(c) > 1:\n r += \"|\" + \"(?:\" + prepare(c, 1) + \")\"\n\n r += \"|\" + \"(?:\" + c + \"[^\" + flatten(d) + \"]\" + \")\"\n\n for s in strings:\n g = \"\"\n\n # When the common string is > 0, offset with len(c) + 1 because we handled this earlier\n if len(c) > 0:\n g = prepare(s, len(c) + 1)\n else:\n g = prepare(s, 1)\n \n # Add OR boolean if necessary\n if len(g) > 0:\n r += \"|\"\n\n r += g\n\n print(args.prefix + \"(?:\" + r + \")\" + args.suffix)\n\n# Only run if script is called directly\nif __name__ == \"__main__\":\n run()\n", "path": "util/regexp-tricks/negative-lookahead.py"}]} | 1,680 | 170 |
gh_patches_debug_26830 | rasdani/github-patches | git_diff | nilearn__nilearn-1219 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
sklearn.covariance.LedoitWolf or ConnectivityMeasure in plot_adhd_spheres
`ConnectivityMeasure` can be used here, and its default covariance estimator is `LedoitWolf` from `sklearn.covariance`.
I also prefer using partial correlations rather than precision, because there is no need to negate the connections.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/03_connectivity/plot_adhd_spheres.py`
Content:
```
1 """
2 Extracting brain signal from spheres
3 ====================================
4
5 This example extract brain signals from spheres described by the coordinates
6 of their center in MNI space and a given radius in millimeters. In particular,
7 this example extracts signals from Default Mode Network regions and compute a
8 connectome from them.
9
10 """
11
12 ##########################################################################
13 # Retrieve the dataset
14 from nilearn import datasets
15 adhd_dataset = datasets.fetch_adhd(n_subjects=1)
16
17 # print basic information on the dataset
18 print('First subject functional nifti image (4D) is at: %s' %
19 adhd_dataset.func[0]) # 4D data
20
21
22 ##########################################################################
23 # Coordinates of Default Mode Network
24 dmn_coords = [(0, -52, 18), (-46, -68, 32), (46, -68, 32), (1, 50, -5)]
25 labels = [
26 'Posterior Cingulate Cortex',
27 'Left Temporoparietal junction',
28 'Right Temporoparietal junction',
29 'Medial prefrontal cortex'
30 ]
31
32
33 ##########################################################################
34 # Extracts signal from sphere around DMN seeds
35 from nilearn import input_data
36
37 masker = input_data.NiftiSpheresMasker(
38 dmn_coords, radius=8,
39 detrend=True, standardize=True,
40 low_pass=0.1, high_pass=0.01, t_r=2.5,
41 memory='nilearn_cache', memory_level=1, verbose=2)
42
43 func_filename = adhd_dataset.func[0]
44 confound_filename = adhd_dataset.confounds[0]
45
46 time_series = masker.fit_transform(func_filename,
47 confounds=[confound_filename])
48
49 ##########################################################################
50 # Display time series
51 import matplotlib.pyplot as plt
52 for time_serie, label in zip(time_series.T, labels):
53 plt.plot(time_serie, label=label)
54
55 plt.title('Default Mode Network Time Series')
56 plt.xlabel('Scan number')
57 plt.ylabel('Normalized signal')
58 plt.legend()
59 plt.tight_layout()
60
61
62 ##########################################################################
63 # Compute precision matrices
64 from sklearn.covariance import LedoitWolf
65 cve = LedoitWolf()
66 cve.fit(time_series)
67
68
69 ##########################################################################
70 # Display connectome
71 from nilearn import plotting
72
73 plotting.plot_connectome(cve.precision_, dmn_coords,
74 title="Default Mode Network Connectivity")
75
76 # Display connectome with hemispheric projections.
77 # Notice (0, -52, 18) is included in both hemispheres since x == 0.
78 title = "Connectivity projected on hemispheres"
79 plotting.plot_connectome(cve.precision_, dmn_coords, title=title,
80 display_mode='lyrz')
81
82 plotting.show()
83
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/03_connectivity/plot_adhd_spheres.py b/examples/03_connectivity/plot_adhd_spheres.py
--- a/examples/03_connectivity/plot_adhd_spheres.py
+++ b/examples/03_connectivity/plot_adhd_spheres.py
@@ -60,23 +60,25 @@
##########################################################################
-# Compute precision matrices
-from sklearn.covariance import LedoitWolf
-cve = LedoitWolf()
-cve.fit(time_series)
-
+# Compute partial correlation matrix using object
+# :class:`nilearn.connectome.ConnectivityMeasure`: Its default covariance
+# estimator is Ledoit-Wolf, allowing to obtain accurate partial correlations.
+from nilearn.connectome import ConnectivityMeasure
+connectivity_measure = ConnectivityMeasure(kind='partial correlation')
+partial_correlation_matrix = connectivity_measure.fit_transform(
+ [time_series])[0]
##########################################################################
# Display connectome
from nilearn import plotting
-plotting.plot_connectome(cve.precision_, dmn_coords,
+plotting.plot_connectome(partial_correlation_matrix, dmn_coords,
title="Default Mode Network Connectivity")
# Display connectome with hemispheric projections.
# Notice (0, -52, 18) is included in both hemispheres since x == 0.
title = "Connectivity projected on hemispheres"
-plotting.plot_connectome(cve.precision_, dmn_coords, title=title,
+plotting.plot_connectome(partial_correlation_matrix, dmn_coords, title=title,
display_mode='lyrz')
plotting.show()
| {"golden_diff": "diff --git a/examples/03_connectivity/plot_adhd_spheres.py b/examples/03_connectivity/plot_adhd_spheres.py\n--- a/examples/03_connectivity/plot_adhd_spheres.py\n+++ b/examples/03_connectivity/plot_adhd_spheres.py\n@@ -60,23 +60,25 @@\n \n \n ##########################################################################\n-# Compute precision matrices\n-from sklearn.covariance import LedoitWolf\n-cve = LedoitWolf()\n-cve.fit(time_series)\n-\n+# Compute partial correlation matrix using object\n+# :class:`nilearn.connectome.ConnectivityMeasure`: Its default covariance\n+# estimator is Ledoit-Wolf, allowing to obtain accurate partial correlations.\n+from nilearn.connectome import ConnectivityMeasure\n+connectivity_measure = ConnectivityMeasure(kind='partial correlation')\n+partial_correlation_matrix = connectivity_measure.fit_transform(\n+ [time_series])[0]\n \n ##########################################################################\n # Display connectome\n from nilearn import plotting\n \n-plotting.plot_connectome(cve.precision_, dmn_coords,\n+plotting.plot_connectome(partial_correlation_matrix, dmn_coords,\n title=\"Default Mode Network Connectivity\")\n \n # Display connectome with hemispheric projections.\n # Notice (0, -52, 18) is included in both hemispheres since x == 0.\n title = \"Connectivity projected on hemispheres\"\n-plotting.plot_connectome(cve.precision_, dmn_coords, title=title,\n+plotting.plot_connectome(partial_correlation_matrix, dmn_coords, title=title,\n display_mode='lyrz')\n \n plotting.show()\n", "issue": "sklearn.covariance.LedoitWolf or ConnectivityMeasure in plot_adhd_spheres\n`ConnectivityMeasure` can be used here, and its default covariance estimator is `LedoitWolf`from `sklearn.covariance`.\nI also prefer using partial correlations rather than precision, because no need for negating the connections.\n\n", "before_files": [{"content": "\"\"\"\nExtracting brain signal from spheres\n====================================\n\nThis example extract brain signals from spheres described by the coordinates\nof their center in MNI space and a given radius in millimeters. 
In particular,\nthis example extracts signals from Default Mode Network regions and compute a\nconnectome from them.\n\n\"\"\"\n\n##########################################################################\n# Retrieve the dataset\nfrom nilearn import datasets\nadhd_dataset = datasets.fetch_adhd(n_subjects=1)\n\n# print basic information on the dataset\nprint('First subject functional nifti image (4D) is at: %s' %\n adhd_dataset.func[0]) # 4D data\n\n\n##########################################################################\n# Coordinates of Default Mode Network\ndmn_coords = [(0, -52, 18), (-46, -68, 32), (46, -68, 32), (1, 50, -5)]\nlabels = [\n 'Posterior Cingulate Cortex',\n 'Left Temporoparietal junction',\n 'Right Temporoparietal junction',\n 'Medial prefrontal cortex'\n]\n\n\n##########################################################################\n# Extracts signal from sphere around DMN seeds\nfrom nilearn import input_data\n\nmasker = input_data.NiftiSpheresMasker(\n dmn_coords, radius=8,\n detrend=True, standardize=True,\n low_pass=0.1, high_pass=0.01, t_r=2.5,\n memory='nilearn_cache', memory_level=1, verbose=2)\n\nfunc_filename = adhd_dataset.func[0]\nconfound_filename = adhd_dataset.confounds[0]\n\ntime_series = masker.fit_transform(func_filename,\n confounds=[confound_filename])\n\n##########################################################################\n# Display time series\nimport matplotlib.pyplot as plt\nfor time_serie, label in zip(time_series.T, labels):\n plt.plot(time_serie, label=label)\n\nplt.title('Default Mode Network Time Series')\nplt.xlabel('Scan number')\nplt.ylabel('Normalized signal')\nplt.legend()\nplt.tight_layout()\n\n\n##########################################################################\n# Compute precision matrices\nfrom sklearn.covariance import LedoitWolf\ncve = LedoitWolf()\ncve.fit(time_series)\n\n\n##########################################################################\n# Display connectome\nfrom nilearn import plotting\n\nplotting.plot_connectome(cve.precision_, dmn_coords,\n title=\"Default Mode Network Connectivity\")\n\n# Display connectome with hemispheric projections.\n# Notice (0, -52, 18) is included in both hemispheres since x == 0.\ntitle = \"Connectivity projected on hemispheres\"\nplotting.plot_connectome(cve.precision_, dmn_coords, title=title,\n display_mode='lyrz')\n\nplotting.show()\n", "path": "examples/03_connectivity/plot_adhd_spheres.py"}], "after_files": [{"content": "\"\"\"\nExtracting brain signal from spheres\n====================================\n\nThis example extract brain signals from spheres described by the coordinates\nof their center in MNI space and a given radius in millimeters. 
In particular,\nthis example extracts signals from Default Mode Network regions and compute a\nconnectome from them.\n\n\"\"\"\n\n##########################################################################\n# Retrieve the dataset\nfrom nilearn import datasets\nadhd_dataset = datasets.fetch_adhd(n_subjects=1)\n\n# print basic information on the dataset\nprint('First subject functional nifti image (4D) is at: %s' %\n adhd_dataset.func[0]) # 4D data\n\n\n##########################################################################\n# Coordinates of Default Mode Network\ndmn_coords = [(0, -52, 18), (-46, -68, 32), (46, -68, 32), (1, 50, -5)]\nlabels = [\n 'Posterior Cingulate Cortex',\n 'Left Temporoparietal junction',\n 'Right Temporoparietal junction',\n 'Medial prefrontal cortex'\n]\n\n\n##########################################################################\n# Extracts signal from sphere around DMN seeds\nfrom nilearn import input_data\n\nmasker = input_data.NiftiSpheresMasker(\n dmn_coords, radius=8,\n detrend=True, standardize=True,\n low_pass=0.1, high_pass=0.01, t_r=2.5,\n memory='nilearn_cache', memory_level=1, verbose=2)\n\nfunc_filename = adhd_dataset.func[0]\nconfound_filename = adhd_dataset.confounds[0]\n\ntime_series = masker.fit_transform(func_filename,\n confounds=[confound_filename])\n\n##########################################################################\n# Display time series\nimport matplotlib.pyplot as plt\nfor time_serie, label in zip(time_series.T, labels):\n plt.plot(time_serie, label=label)\n\nplt.title('Default Mode Network Time Series')\nplt.xlabel('Scan number')\nplt.ylabel('Normalized signal')\nplt.legend()\nplt.tight_layout()\n\n\n##########################################################################\n# Compute partial correlation matrix using object\n# :class:`nilearn.connectome.ConnectivityMeasure`: Its default covariance\n# estimator is Ledoit-Wolf, allowing to obtain accurate partial correlations.\nfrom nilearn.connectome import ConnectivityMeasure\nconnectivity_measure = ConnectivityMeasure(kind='partial correlation')\npartial_correlation_matrix = connectivity_measure.fit_transform(\n [time_series])[0]\n\n##########################################################################\n# Display connectome\nfrom nilearn import plotting\n\nplotting.plot_connectome(partial_correlation_matrix, dmn_coords,\n title=\"Default Mode Network Connectivity\")\n\n# Display connectome with hemispheric projections.\n# Notice (0, -52, 18) is included in both hemispheres since x == 0.\ntitle = \"Connectivity projected on hemispheres\"\nplotting.plot_connectome(partial_correlation_matrix, dmn_coords, title=title,\n display_mode='lyrz')\n\nplotting.show()\n", "path": "examples/03_connectivity/plot_adhd_spheres.py"}]} | 1,074 | 341 |
gh_patches_debug_15405 | rasdani/github-patches | git_diff | pyjanitor-devs__pyjanitor-987 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Some modules are missing from the top-level import
Issue referenced during PR here: https://github.com/pyjanitor-devs/pyjanitor/pull/977#discussion_r781732964
For example, now running:
```python
import janitor as jn
jn.io.read_csvs("") # throws AttributeError: module 'janitor' has no attribute 'io'
```
Similarly for other modules like biology or timeseries.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `janitor/__init__.py`
Content:
```
1 """Top-level janitor API lives here."""
2 try:
3 import janitor.xarray # noqa: F401
4 except ImportError:
5 pass
6
7 from .functions import * # noqa: F403, F401
8 from .math import * # noqa: F403, F401
9 from .ml import get_features_targets as _get_features_targets
10 from .utils import refactored_function
11 from .accessors import * # noqa: F403, F401
12
13
14 @refactored_function(
15 "get_features_targets() has moved. Please use ml.get_features_targets()."
16 )
17 def get_features_targets(*args, **kwargs):
18 """Wrapper for get_features_targets."""
19 return _get_features_targets(*args, **kwargs)
20
21
22 __version__ = "0.22.0"
23
```
Path: `janitor/io.py`
Content:
```
1 import os
2 import subprocess
3 from glob import glob
4 from io import StringIO
5 from typing import Iterable, Union
6
7 import pandas as pd
8
9 from .errors import JanitorError
10 from .utils import deprecated_alias, check
11
12
13 @deprecated_alias(seperate_df="separate_df", filespath="files_path")
14 def read_csvs(
15 files_path: Union[str, Iterable[str]], separate_df: bool = False, **kwargs
16 ) -> Union[pd.DataFrame, dict]:
17 """
18 Read multiple CSV files and return a dictionary of DataFrames, or
19 one concatenated DataFrame.
20
21 :param files_path: The filepath pattern matching the CSV files.
22 Accepts regular expressions, with or without `.csv` extension.
23 Also accepts iterable of file paths.
24 :param separate_df: If `False` (default), returns a single Dataframe
25 with the concatenation of the csv files.
26 If `True`, returns a dictionary of separate DataFrames
27 for each CSV file.
28 :param kwargs: Keyword arguments to pass into the
29 original pandas `read_csv`.
30 :returns: DataFrame of concatenated DataFrames or dictionary of DataFrames.
31 :raises JanitorError: if `None` provided for `files_path`.
32 :raises JanitorError: if length of `files_path` is `0`.
33 :raises ValueError: if no CSV files exist in `files_path`.
34 :raises ValueError: if columns in input CSV files do not match.
35 """
36 # Sanitize input
37 if files_path is None:
38 raise JanitorError("`None` provided for `files_path`")
39 if len(files_path) == 0:
40 raise JanitorError("0 length `files_path` provided")
41
42 # Read the csv files
43 # String to file/folder or file pattern provided
44 if isinstance(files_path, str):
45 dfs_dict = {
46 os.path.basename(f): pd.read_csv(f, **kwargs)
47 for f in glob(files_path)
48 }
49 # Iterable of file paths provided
50 else:
51 dfs_dict = {
52 os.path.basename(f): pd.read_csv(f, **kwargs) for f in files_path
53 }
54 # Check if dataframes have been read
55 if len(dfs_dict) == 0:
56 raise ValueError("No CSV files to read with the given `files_path`")
57 # Concatenate the dataframes if requested (default)
58 col_names = list(dfs_dict.values())[0].columns # noqa: PD011
59 if not separate_df:
60 # If columns do not match raise an error
61 for df in dfs_dict.values(): # noqa: PD011
62 if not all(df.columns == col_names):
63 raise ValueError(
64 "Columns in input CSV files do not match."
65 "Files cannot be concatenated"
66 )
67 return pd.concat(
68 list(dfs_dict.values()),
69 ignore_index=True,
70 sort=False, # noqa: PD011
71 )
72 else:
73 return dfs_dict
74
75
76 def read_commandline(cmd: str, **kwargs) -> pd.DataFrame:
77 """
78 Read a CSV file based on a command-line command.
79
80 For example, you may wish to run the following command on `sep-quarter.csv`
81 before reading it into a pandas DataFrame:
82
83 ```bash
84 cat sep-quarter.csv | grep .SEA1AA
85 ```
86
87 In this case, you can use the following Python code to load the dataframe:
88
89 ```python
90 import janitor as jn
91 df = jn.io.read_commandline("cat data.csv | grep .SEA1AA")
92
93 This function assumes that your command line command will return
94 an output that is parsable using pandas.read_csv and StringIO.
95 We default to using pd.read_csv underneath the hood.
96 Keyword arguments are passed through to read_csv.
97 ```
98
99 :param cmd: Shell command to preprocess a file on disk.
100 :param kwargs: Keyword arguments that are passed through to pd.read_csv().
101 :raises JanitorError: If commandline command is malformed or invalid.
102 :returns: A pandas DataFrame parsed from the stdout of the underlying
103 shell.
104 """
105
106 check("cmd", cmd, [str])
107 outcome = subprocess.run(cmd, shell=True, capture_output=True, text=True)
108 if outcome.returncode != 0:
109 raise JanitorError(outcome.stderr)
110 else:
111 outcome = outcome.stdout
112 return pd.read_csv(StringIO(outcome), **kwargs)
113
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/janitor/__init__.py b/janitor/__init__.py
--- a/janitor/__init__.py
+++ b/janitor/__init__.py
@@ -5,6 +5,7 @@
pass
from .functions import * # noqa: F403, F401
+from .io import * # noqa: F403, F401
from .math import * # noqa: F403, F401
from .ml import get_features_targets as _get_features_targets
from .utils import refactored_function
diff --git a/janitor/io.py b/janitor/io.py
--- a/janitor/io.py
+++ b/janitor/io.py
@@ -88,7 +88,7 @@
```python
import janitor as jn
- df = jn.io.read_commandline("cat data.csv | grep .SEA1AA")
+ df = jn.read_commandline("cat data.csv | grep .SEA1AA")
This function assumes that your command line command will return
an output that is parsable using pandas.read_csv and StringIO.
| {"golden_diff": "diff --git a/janitor/__init__.py b/janitor/__init__.py\n--- a/janitor/__init__.py\n+++ b/janitor/__init__.py\n@@ -5,6 +5,7 @@\n pass\n \n from .functions import * # noqa: F403, F401\n+from .io import * # noqa: F403, F401\n from .math import * # noqa: F403, F401\n from .ml import get_features_targets as _get_features_targets\n from .utils import refactored_function\ndiff --git a/janitor/io.py b/janitor/io.py\n--- a/janitor/io.py\n+++ b/janitor/io.py\n@@ -88,7 +88,7 @@\n \n ```python\n import janitor as jn\n- df = jn.io.read_commandline(\"cat data.csv | grep .SEA1AA\")\n+ df = jn.read_commandline(\"cat data.csv | grep .SEA1AA\")\n \n This function assumes that your command line command will return\n an output that is parsable using pandas.read_csv and StringIO.\n", "issue": "Some modules are missing from the top-level import\nIssue referenced during PR here: https://github.com/pyjanitor-devs/pyjanitor/pull/977#discussion_r781732964\r\n\r\nFor example, now running:\r\n\r\n```python\r\nimport janitor as jn\r\njn.io.read_csvs(\"\") # throws AttributeError: module 'janitor' has no attribute 'io'\r\n```\r\n\r\nSimilarly for other modules like biology or timeseries.\n", "before_files": [{"content": "\"\"\"Top-level janitor API lives here.\"\"\"\ntry:\n import janitor.xarray # noqa: F401\nexcept ImportError:\n pass\n\nfrom .functions import * # noqa: F403, F401\nfrom .math import * # noqa: F403, F401\nfrom .ml import get_features_targets as _get_features_targets\nfrom .utils import refactored_function\nfrom .accessors import * # noqa: F403, F401\n\n\n@refactored_function(\n \"get_features_targets() has moved. Please use ml.get_features_targets().\"\n)\ndef get_features_targets(*args, **kwargs):\n \"\"\"Wrapper for get_features_targets.\"\"\"\n return _get_features_targets(*args, **kwargs)\n\n\n__version__ = \"0.22.0\"\n", "path": "janitor/__init__.py"}, {"content": "import os\nimport subprocess\nfrom glob import glob\nfrom io import StringIO\nfrom typing import Iterable, Union\n\nimport pandas as pd\n\nfrom .errors import JanitorError\nfrom .utils import deprecated_alias, check\n\n\n@deprecated_alias(seperate_df=\"separate_df\", filespath=\"files_path\")\ndef read_csvs(\n files_path: Union[str, Iterable[str]], separate_df: bool = False, **kwargs\n) -> Union[pd.DataFrame, dict]:\n \"\"\"\n Read multiple CSV files and return a dictionary of DataFrames, or\n one concatenated DataFrame.\n\n :param files_path: The filepath pattern matching the CSV files.\n Accepts regular expressions, with or without `.csv` extension.\n Also accepts iterable of file paths.\n :param separate_df: If `False` (default), returns a single Dataframe\n with the concatenation of the csv files.\n If `True`, returns a dictionary of separate DataFrames\n for each CSV file.\n :param kwargs: Keyword arguments to pass into the\n original pandas `read_csv`.\n :returns: DataFrame of concatenated DataFrames or dictionary of DataFrames.\n :raises JanitorError: if `None` provided for `files_path`.\n :raises JanitorError: if length of `files_path` is `0`.\n :raises ValueError: if no CSV files exist in `files_path`.\n :raises ValueError: if columns in input CSV files do not match.\n \"\"\"\n # Sanitize input\n if files_path is None:\n raise JanitorError(\"`None` provided for `files_path`\")\n if len(files_path) == 0:\n raise JanitorError(\"0 length `files_path` provided\")\n\n # Read the csv files\n # String to file/folder or file pattern provided\n if isinstance(files_path, str):\n dfs_dict = {\n os.path.basename(f): 
pd.read_csv(f, **kwargs)\n for f in glob(files_path)\n }\n # Iterable of file paths provided\n else:\n dfs_dict = {\n os.path.basename(f): pd.read_csv(f, **kwargs) for f in files_path\n }\n # Check if dataframes have been read\n if len(dfs_dict) == 0:\n raise ValueError(\"No CSV files to read with the given `files_path`\")\n # Concatenate the dataframes if requested (default)\n col_names = list(dfs_dict.values())[0].columns # noqa: PD011\n if not separate_df:\n # If columns do not match raise an error\n for df in dfs_dict.values(): # noqa: PD011\n if not all(df.columns == col_names):\n raise ValueError(\n \"Columns in input CSV files do not match.\"\n \"Files cannot be concatenated\"\n )\n return pd.concat(\n list(dfs_dict.values()),\n ignore_index=True,\n sort=False, # noqa: PD011\n )\n else:\n return dfs_dict\n\n\ndef read_commandline(cmd: str, **kwargs) -> pd.DataFrame:\n \"\"\"\n Read a CSV file based on a command-line command.\n\n For example, you may wish to run the following command on `sep-quarter.csv`\n before reading it into a pandas DataFrame:\n\n ```bash\n cat sep-quarter.csv | grep .SEA1AA\n ```\n\n In this case, you can use the following Python code to load the dataframe:\n\n ```python\n import janitor as jn\n df = jn.io.read_commandline(\"cat data.csv | grep .SEA1AA\")\n\n This function assumes that your command line command will return\n an output that is parsable using pandas.read_csv and StringIO.\n We default to using pd.read_csv underneath the hood.\n Keyword arguments are passed through to read_csv.\n ```\n\n :param cmd: Shell command to preprocess a file on disk.\n :param kwargs: Keyword arguments that are passed through to pd.read_csv().\n :raises JanitorError: If commandline command is malformed or invalid.\n :returns: A pandas DataFrame parsed from the stdout of the underlying\n shell.\n \"\"\"\n\n check(\"cmd\", cmd, [str])\n outcome = subprocess.run(cmd, shell=True, capture_output=True, text=True)\n if outcome.returncode != 0:\n raise JanitorError(outcome.stderr)\n else:\n outcome = outcome.stdout\n return pd.read_csv(StringIO(outcome), **kwargs)\n", "path": "janitor/io.py"}], "after_files": [{"content": "\"\"\"Top-level janitor API lives here.\"\"\"\ntry:\n import janitor.xarray # noqa: F401\nexcept ImportError:\n pass\n\nfrom .functions import * # noqa: F403, F401\nfrom .io import * # noqa: F403, F401\nfrom .math import * # noqa: F403, F401\nfrom .ml import get_features_targets as _get_features_targets\nfrom .utils import refactored_function\nfrom .accessors import * # noqa: F403, F401\n\n\n@refactored_function(\n \"get_features_targets() has moved. 
Please use ml.get_features_targets().\"\n)\ndef get_features_targets(*args, **kwargs):\n \"\"\"Wrapper for get_features_targets.\"\"\"\n return _get_features_targets(*args, **kwargs)\n\n\n__version__ = \"0.22.0\"\n", "path": "janitor/__init__.py"}, {"content": "import os\nimport subprocess\nfrom glob import glob\nfrom io import StringIO\nfrom typing import Iterable, Union\n\nimport pandas as pd\n\nfrom .errors import JanitorError\nfrom .utils import deprecated_alias, check\n\n\n@deprecated_alias(seperate_df=\"separate_df\", filespath=\"files_path\")\ndef read_csvs(\n files_path: Union[str, Iterable[str]], separate_df: bool = False, **kwargs\n) -> Union[pd.DataFrame, dict]:\n \"\"\"\n Read multiple CSV files and return a dictionary of DataFrames, or\n one concatenated DataFrame.\n\n :param files_path: The filepath pattern matching the CSV files.\n Accepts regular expressions, with or without `.csv` extension.\n Also accepts iterable of file paths.\n :param separate_df: If `False` (default), returns a single Dataframe\n with the concatenation of the csv files.\n If `True`, returns a dictionary of separate DataFrames\n for each CSV file.\n :param kwargs: Keyword arguments to pass into the\n original pandas `read_csv`.\n :returns: DataFrame of concatenated DataFrames or dictionary of DataFrames.\n :raises JanitorError: if `None` provided for `files_path`.\n :raises JanitorError: if length of `files_path` is `0`.\n :raises ValueError: if no CSV files exist in `files_path`.\n :raises ValueError: if columns in input CSV files do not match.\n \"\"\"\n # Sanitize input\n if files_path is None:\n raise JanitorError(\"`None` provided for `files_path`\")\n if len(files_path) == 0:\n raise JanitorError(\"0 length `files_path` provided\")\n\n # Read the csv files\n # String to file/folder or file pattern provided\n if isinstance(files_path, str):\n dfs_dict = {\n os.path.basename(f): pd.read_csv(f, **kwargs)\n for f in glob(files_path)\n }\n # Iterable of file paths provided\n else:\n dfs_dict = {\n os.path.basename(f): pd.read_csv(f, **kwargs) for f in files_path\n }\n # Check if dataframes have been read\n if len(dfs_dict) == 0:\n raise ValueError(\"No CSV files to read with the given `files_path`\")\n # Concatenate the dataframes if requested (default)\n col_names = list(dfs_dict.values())[0].columns # noqa: PD011\n if not separate_df:\n # If columns do not match raise an error\n for df in dfs_dict.values(): # noqa: PD011\n if not all(df.columns == col_names):\n raise ValueError(\n \"Columns in input CSV files do not match.\"\n \"Files cannot be concatenated\"\n )\n return pd.concat(\n list(dfs_dict.values()),\n ignore_index=True,\n sort=False, # noqa: PD011\n )\n else:\n return dfs_dict\n\n\ndef read_commandline(cmd: str, **kwargs) -> pd.DataFrame:\n \"\"\"\n Read a CSV file based on a command-line command.\n\n For example, you may wish to run the following command on `sep-quarter.csv`\n before reading it into a pandas DataFrame:\n\n ```bash\n cat sep-quarter.csv | grep .SEA1AA\n ```\n\n In this case, you can use the following Python code to load the dataframe:\n\n ```python\n import janitor as jn\n df = jn.read_commandline(\"cat data.csv | grep .SEA1AA\")\n\n This function assumes that your command line command will return\n an output that is parsable using pandas.read_csv and StringIO.\n We default to using pd.read_csv underneath the hood.\n Keyword arguments are passed through to read_csv.\n ```\n\n :param cmd: Shell command to preprocess a file on disk.\n :param kwargs: Keyword arguments that 
are passed through to pd.read_csv().\n :raises JanitorError: If commandline command is malformed or invalid.\n :returns: A pandas DataFrame parsed from the stdout of the underlying\n shell.\n \"\"\"\n\n check(\"cmd\", cmd, [str])\n outcome = subprocess.run(cmd, shell=True, capture_output=True, text=True)\n if outcome.returncode != 0:\n raise JanitorError(outcome.stderr)\n else:\n outcome = outcome.stdout\n return pd.read_csv(StringIO(outcome), **kwargs)\n", "path": "janitor/io.py"}]} | 1,773 | 258 |
gh_patches_debug_6697 | rasdani/github-patches | git_diff | SeldonIO__MLServer-911 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Problems with own logging configuration
Currently I have the problem that my logging configuration is not accepted everywhere. As soon as the REST server starts (Uvicorn Worker), my logging configuration is ignored. I have created a repo that represents my scenario and also shows which configuration is used. Maybe my configuration is just wrong. In the model itself, I print out all the loggers and the associated handlers and formatters and can see here that it should actually fit. Do you have any ideas?
Here is my small example repo: https://github.com/JustinDroege/mlserver-logging
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mlserver/metrics/server.py`
Content:
```
1 import uvicorn
2
3 from fastapi import FastAPI
4 from starlette_exporter import handle_metrics
5
6 from ..settings import Settings
7 from .logging import logger
8 from typing import Optional
9
10
11 class _NoSignalServer(uvicorn.Server):
12 def install_signal_handlers(self):
13 pass
14
15
16 class MetricsServer:
17 def __init__(self, settings: Settings):
18 self._settings = settings
19 self._app = self._get_app()
20
21 def _get_app(self):
22 app = FastAPI(debug=self._settings.debug)
23 app.add_route(self._settings.metrics_endpoint, handle_metrics)
24 return app
25
26 async def start(self):
27 cfg = self._get_config()
28 self._server = _NoSignalServer(cfg)
29
30 metrics_server = f"http://{self._settings.host}:{self._settings.metrics_port}"
31 logger.info(f"Metrics server running on {metrics_server}")
32 logger.info(
33 "Prometheus scraping endpoint can be accessed on "
34 f"{metrics_server}{self._settings.metrics_endpoint}"
35 )
36 await self._server.serve()
37
38 def _get_config(self):
39 kwargs = {}
40
41 if self._settings._custom_metrics_server_settings:
42 logger.warning(
43 "REST custom configuration is out of support. Use as your own risk"
44 )
45 kwargs.update(self._settings._custom_metrics_server_settings)
46
47 kwargs.update(
48 {
49 "host": self._settings.host,
50 "port": self._settings.metrics_port,
51 "access_log": self._settings.debug,
52 }
53 )
54
55 # TODO: we want to disable logger unless debug is enabled (otherwise,
56 # prom reqs can be spammy)
57 return uvicorn.Config(self._app, **kwargs)
58
59 async def stop(self, sig: Optional[int] = None):
60 self._server.handle_exit(sig=sig, frame=None)
61
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mlserver/metrics/server.py b/mlserver/metrics/server.py
--- a/mlserver/metrics/server.py
+++ b/mlserver/metrics/server.py
@@ -52,8 +52,11 @@
}
)
- # TODO: we want to disable logger unless debug is enabled (otherwise,
- # prom reqs can be spammy)
+ if self._settings.logging_settings:
+ # If not None, use ours. Otherwise, let Uvicorn fall back on its
+ # own config.
+ kwargs.update({"log_config": self._settings.logging_settings})
+
return uvicorn.Config(self._app, **kwargs)
async def stop(self, sig: Optional[int] = None):
| {"golden_diff": "diff --git a/mlserver/metrics/server.py b/mlserver/metrics/server.py\n--- a/mlserver/metrics/server.py\n+++ b/mlserver/metrics/server.py\n@@ -52,8 +52,11 @@\n }\n )\n \n- # TODO: we want to disable logger unless debug is enabled (otherwise,\n- # prom reqs can be spammy)\n+ if self._settings.logging_settings:\n+ # If not None, use ours. Otherwise, let Uvicorn fall back on its\n+ # own config.\n+ kwargs.update({\"log_config\": self._settings.logging_settings})\n+\n return uvicorn.Config(self._app, **kwargs)\n \n async def stop(self, sig: Optional[int] = None):\n", "issue": "Problems with own logging configuration\nCurrently I have the problem that my logging configuration is not accepted everywhere. As soon as the REST server starts (Uvicorn Worker), my logging configuration is ignored. I have created a repo that represents my scenario and also which is configuration used. Maybe my configuration is just wrong. In the model itself, I print out all the loggers and the associated handlers and formatter and can see here that it should actually fit. Do you have any ideas?\r\n\r\nHere is my small example repo: https://github.com/JustinDroege/mlserver-logging\n", "before_files": [{"content": "import uvicorn\n\nfrom fastapi import FastAPI\nfrom starlette_exporter import handle_metrics\n\nfrom ..settings import Settings\nfrom .logging import logger\nfrom typing import Optional\n\n\nclass _NoSignalServer(uvicorn.Server):\n def install_signal_handlers(self):\n pass\n\n\nclass MetricsServer:\n def __init__(self, settings: Settings):\n self._settings = settings\n self._app = self._get_app()\n\n def _get_app(self):\n app = FastAPI(debug=self._settings.debug)\n app.add_route(self._settings.metrics_endpoint, handle_metrics)\n return app\n\n async def start(self):\n cfg = self._get_config()\n self._server = _NoSignalServer(cfg)\n\n metrics_server = f\"http://{self._settings.host}:{self._settings.metrics_port}\"\n logger.info(f\"Metrics server running on {metrics_server}\")\n logger.info(\n \"Prometheus scraping endpoint can be accessed on \"\n f\"{metrics_server}{self._settings.metrics_endpoint}\"\n )\n await self._server.serve()\n\n def _get_config(self):\n kwargs = {}\n\n if self._settings._custom_metrics_server_settings:\n logger.warning(\n \"REST custom configuration is out of support. 
Use as your own risk\"\n )\n kwargs.update(self._settings._custom_metrics_server_settings)\n\n kwargs.update(\n {\n \"host\": self._settings.host,\n \"port\": self._settings.metrics_port,\n \"access_log\": self._settings.debug,\n }\n )\n\n # TODO: we want to disable logger unless debug is enabled (otherwise,\n # prom reqs can be spammy)\n return uvicorn.Config(self._app, **kwargs)\n\n async def stop(self, sig: Optional[int] = None):\n self._server.handle_exit(sig=sig, frame=None)\n", "path": "mlserver/metrics/server.py"}], "after_files": [{"content": "import uvicorn\n\nfrom fastapi import FastAPI\nfrom starlette_exporter import handle_metrics\n\nfrom ..settings import Settings\nfrom .logging import logger\nfrom typing import Optional\n\n\nclass _NoSignalServer(uvicorn.Server):\n def install_signal_handlers(self):\n pass\n\n\nclass MetricsServer:\n def __init__(self, settings: Settings):\n self._settings = settings\n self._app = self._get_app()\n\n def _get_app(self):\n app = FastAPI(debug=self._settings.debug)\n app.add_route(self._settings.metrics_endpoint, handle_metrics)\n return app\n\n async def start(self):\n cfg = self._get_config()\n self._server = _NoSignalServer(cfg)\n\n metrics_server = f\"http://{self._settings.host}:{self._settings.metrics_port}\"\n logger.info(f\"Metrics server running on {metrics_server}\")\n logger.info(\n \"Prometheus scraping endpoint can be accessed on \"\n f\"{metrics_server}{self._settings.metrics_endpoint}\"\n )\n await self._server.serve()\n\n def _get_config(self):\n kwargs = {}\n\n if self._settings._custom_metrics_server_settings:\n logger.warning(\n \"REST custom configuration is out of support. Use as your own risk\"\n )\n kwargs.update(self._settings._custom_metrics_server_settings)\n\n kwargs.update(\n {\n \"host\": self._settings.host,\n \"port\": self._settings.metrics_port,\n \"access_log\": self._settings.debug,\n }\n )\n\n if self._settings.logging_settings:\n # If not None, use ours. Otherwise, let Uvicorn fall back on its\n # own config.\n kwargs.update({\"log_config\": self._settings.logging_settings})\n\n return uvicorn.Config(self._app, **kwargs)\n\n async def stop(self, sig: Optional[int] = None):\n self._server.handle_exit(sig=sig, frame=None)\n", "path": "mlserver/metrics/server.py"}]} | 883 | 160 |
gh_patches_debug_38993 | rasdani/github-patches | git_diff | zulip__zulip-29641 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update first message ID when first message is deleted
When a message is deleted, we should update the stored ID of the first message in the stream. Because we currently do not, deleting the first message may result in an extraneous "more topics" link in the left sidebar, with no additional topics shown when you click it.
Note: The symptom may be hard to replicate; we should focus on fixing the technical issue, as described in @timabbott 's comment below.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zerver/actions/message_delete.py`
Content:
```
1 from typing import Iterable, List, TypedDict
2
3 from zerver.lib import retention
4 from zerver.lib.retention import move_messages_to_archive
5 from zerver.lib.stream_subscription import get_active_subscriptions_for_stream_id
6 from zerver.models import Message, Realm, UserMessage, UserProfile
7 from zerver.tornado.django_api import send_event_on_commit
8
9
10 class DeleteMessagesEvent(TypedDict, total=False):
11 type: str
12 message_ids: List[int]
13 message_type: str
14 topic: str
15 stream_id: int
16
17
18 def do_delete_messages(realm: Realm, messages: Iterable[Message]) -> None:
19 # messages in delete_message event belong to the same topic
20 # or is a single direct message, as any other behaviour is not possible with
21 # the current callers to this method.
22 messages = list(messages)
23 message_ids = [message.id for message in messages]
24 if not message_ids:
25 return
26
27 event: DeleteMessagesEvent = {
28 "type": "delete_message",
29 "message_ids": message_ids,
30 }
31
32 sample_message = messages[0]
33 message_type = "stream"
34 users_to_notify = []
35 if not sample_message.is_stream_message():
36 assert len(messages) == 1
37 message_type = "private"
38 ums = UserMessage.objects.filter(message_id__in=message_ids)
39 users_to_notify = [um.user_profile_id for um in ums]
40 archiving_chunk_size = retention.MESSAGE_BATCH_SIZE
41
42 if message_type == "stream":
43 stream_id = sample_message.recipient.type_id
44 event["stream_id"] = stream_id
45 event["topic"] = sample_message.topic_name()
46 subscriptions = get_active_subscriptions_for_stream_id(
47 stream_id, include_deactivated_users=False
48 )
49 # We exclude long-term idle users, since they by definition have no active clients.
50 subscriptions = subscriptions.exclude(user_profile__long_term_idle=True)
51 users_to_notify = list(subscriptions.values_list("user_profile_id", flat=True))
52 archiving_chunk_size = retention.STREAM_MESSAGE_BATCH_SIZE
53
54 move_messages_to_archive(message_ids, realm=realm, chunk_size=archiving_chunk_size)
55
56 event["message_type"] = message_type
57 send_event_on_commit(realm, event, users_to_notify)
58
59
60 def do_delete_messages_by_sender(user: UserProfile) -> None:
61 message_ids = list(
62 # Uses index: zerver_message_realm_sender_recipient (prefix)
63 Message.objects.filter(realm_id=user.realm_id, sender=user)
64 .values_list("id", flat=True)
65 .order_by("id")
66 )
67 if message_ids:
68 move_messages_to_archive(message_ids, chunk_size=retention.STREAM_MESSAGE_BATCH_SIZE)
69
```
Path: `version.py`
Content:
```
1 import os
2
3 ZULIP_VERSION = "9.0-dev+git"
4
5 # Add information on number of commits and commit hash to version, if available
6 zulip_git_version_file = os.path.join(
7 os.path.dirname(os.path.abspath(__file__)), "zulip-git-version"
8 )
9 lines = [ZULIP_VERSION, ""]
10 if os.path.exists(zulip_git_version_file):
11 with open(zulip_git_version_file) as f:
12 lines = [*f, "", ""]
13 ZULIP_VERSION = lines.pop(0).strip()
14 ZULIP_MERGE_BASE = lines.pop(0).strip()
15
16 LATEST_MAJOR_VERSION = "8.0"
17 LATEST_RELEASE_VERSION = "8.3"
18 LATEST_RELEASE_ANNOUNCEMENT = "https://blog.zulip.com/2023/12/15/zulip-8-0-released/"
19
20 # Versions of the desktop app below DESKTOP_MINIMUM_VERSION will be
21 # prevented from connecting to the Zulip server. Versions above
22 # DESKTOP_MINIMUM_VERSION but below DESKTOP_WARNING_VERSION will have
23 # a banner at the top of the page asking the user to upgrade.
24 DESKTOP_MINIMUM_VERSION = "5.4.3"
25 DESKTOP_WARNING_VERSION = "5.9.3"
26
27 # Bump the API_FEATURE_LEVEL whenever an API change is made
28 # that clients might want to condition on. If we forget at
29 # the time we make the change, then bump it later as soon
30 # as we notice; clients using API_FEATURE_LEVEL will just not
31 # use the new feature/API until the bump.
32 #
33 # Changes should be accompanied by documentation explaining what the
34 # new level means in api_docs/changelog.md, as well as "**Changes**"
35 # entries in the endpoint's documentation in `zulip.yaml`.
36 API_FEATURE_LEVEL = 255
37
38 # Bump the minor PROVISION_VERSION to indicate that folks should provision
39 # only when going from an old version of the code to a newer version. Bump
40 # the major version to indicate that folks should provision in both
41 # directions.
42
43 # Typically,
44 # * adding a dependency only requires a minor version bump;
45 # * removing a dependency requires a major version bump;
46 # * upgrading a dependency requires a major version bump, unless the
47 # upgraded dependency is backwards compatible with all of our
48 # historical commits sharing the same major version, in which case a
49 # minor version bump suffices.
50
51 PROVISION_VERSION = (269, 3) # last bumped 2024-04-29 for adding pyasyncore
52
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/version.py b/version.py
--- a/version.py
+++ b/version.py
@@ -33,7 +33,7 @@
# Changes should be accompanied by documentation explaining what the
# new level means in api_docs/changelog.md, as well as "**Changes**"
# entries in the endpoint's documentation in `zulip.yaml`.
-API_FEATURE_LEVEL = 255
+API_FEATURE_LEVEL = 256
# Bump the minor PROVISION_VERSION to indicate that folks should provision
# only when going from an old version of the code to a newer version. Bump
diff --git a/zerver/actions/message_delete.py b/zerver/actions/message_delete.py
--- a/zerver/actions/message_delete.py
+++ b/zerver/actions/message_delete.py
@@ -3,7 +3,7 @@
from zerver.lib import retention
from zerver.lib.retention import move_messages_to_archive
from zerver.lib.stream_subscription import get_active_subscriptions_for_stream_id
-from zerver.models import Message, Realm, UserMessage, UserProfile
+from zerver.models import Message, Realm, Stream, UserMessage, UserProfile
from zerver.tornado.django_api import send_event_on_commit
@@ -15,6 +15,34 @@
stream_id: int
+def check_update_first_message_id(
+ realm: Realm, stream: Stream, message_ids: List[int], users_to_notify: Iterable[int]
+) -> None:
+ # This will not update the `first_message_id` of streams where the
+ # first message was deleted prior to the implementation of this function.
+ assert stream.recipient_id is not None
+ if stream.first_message_id not in message_ids:
+ return
+ current_first_message_id = (
+ Message.objects.filter(realm_id=realm.id, recipient_id=stream.recipient_id)
+ .values_list("id", flat=True)
+ .order_by("id")
+ .first()
+ )
+
+ stream.first_message_id = current_first_message_id
+ stream.save(update_fields=["first_message_id"])
+
+ stream_event = dict(
+ type="stream",
+ op="update",
+ property="first_message_id",
+ value=stream.first_message_id,
+ stream_id=stream.id,
+ )
+ send_event_on_commit(realm, stream_event, users_to_notify)
+
+
def do_delete_messages(realm: Realm, messages: Iterable[Message]) -> None:
# messages in delete_message event belong to the same topic
# or is a single direct message, as any other behaviour is not possible with
@@ -52,6 +80,9 @@
archiving_chunk_size = retention.STREAM_MESSAGE_BATCH_SIZE
move_messages_to_archive(message_ids, realm=realm, chunk_size=archiving_chunk_size)
+ if message_type == "stream":
+ stream = Stream.objects.get(id=sample_message.recipient.type_id)
+ check_update_first_message_id(realm, stream, message_ids, users_to_notify)
event["message_type"] = message_type
send_event_on_commit(realm, event, users_to_notify)
| {"golden_diff": "diff --git a/version.py b/version.py\n--- a/version.py\n+++ b/version.py\n@@ -33,7 +33,7 @@\n # Changes should be accompanied by documentation explaining what the\n # new level means in api_docs/changelog.md, as well as \"**Changes**\"\n # entries in the endpoint's documentation in `zulip.yaml`.\n-API_FEATURE_LEVEL = 255\n+API_FEATURE_LEVEL = 256\n \n # Bump the minor PROVISION_VERSION to indicate that folks should provision\n # only when going from an old version of the code to a newer version. Bump\ndiff --git a/zerver/actions/message_delete.py b/zerver/actions/message_delete.py\n--- a/zerver/actions/message_delete.py\n+++ b/zerver/actions/message_delete.py\n@@ -3,7 +3,7 @@\n from zerver.lib import retention\n from zerver.lib.retention import move_messages_to_archive\n from zerver.lib.stream_subscription import get_active_subscriptions_for_stream_id\n-from zerver.models import Message, Realm, UserMessage, UserProfile\n+from zerver.models import Message, Realm, Stream, UserMessage, UserProfile\n from zerver.tornado.django_api import send_event_on_commit\n \n \n@@ -15,6 +15,34 @@\n stream_id: int\n \n \n+def check_update_first_message_id(\n+ realm: Realm, stream: Stream, message_ids: List[int], users_to_notify: Iterable[int]\n+) -> None:\n+ # This will not update the `first_message_id` of streams where the\n+ # first message was deleted prior to the implementation of this function.\n+ assert stream.recipient_id is not None\n+ if stream.first_message_id not in message_ids:\n+ return\n+ current_first_message_id = (\n+ Message.objects.filter(realm_id=realm.id, recipient_id=stream.recipient_id)\n+ .values_list(\"id\", flat=True)\n+ .order_by(\"id\")\n+ .first()\n+ )\n+\n+ stream.first_message_id = current_first_message_id\n+ stream.save(update_fields=[\"first_message_id\"])\n+\n+ stream_event = dict(\n+ type=\"stream\",\n+ op=\"update\",\n+ property=\"first_message_id\",\n+ value=stream.first_message_id,\n+ stream_id=stream.id,\n+ )\n+ send_event_on_commit(realm, stream_event, users_to_notify)\n+\n+\n def do_delete_messages(realm: Realm, messages: Iterable[Message]) -> None:\n # messages in delete_message event belong to the same topic\n # or is a single direct message, as any other behaviour is not possible with\n@@ -52,6 +80,9 @@\n archiving_chunk_size = retention.STREAM_MESSAGE_BATCH_SIZE\n \n move_messages_to_archive(message_ids, realm=realm, chunk_size=archiving_chunk_size)\n+ if message_type == \"stream\":\n+ stream = Stream.objects.get(id=sample_message.recipient.type_id)\n+ check_update_first_message_id(realm, stream, message_ids, users_to_notify)\n \n event[\"message_type\"] = message_type\n send_event_on_commit(realm, event, users_to_notify)\n", "issue": "Update first message ID when first message is deleted\nWhen a message is deleted, we should update the stored ID of the first message in the stream. 
Because we currently do not, deleting the first message may result in an extraneous \"more topics\" link in the left sidebar, with no additional topics shown when you click it.\r\n\r\nNote: The symptom may be hard to replicate; we should focus on fixing the technical issue, as described in @timabbott 's comment below.\n", "before_files": [{"content": "from typing import Iterable, List, TypedDict\n\nfrom zerver.lib import retention\nfrom zerver.lib.retention import move_messages_to_archive\nfrom zerver.lib.stream_subscription import get_active_subscriptions_for_stream_id\nfrom zerver.models import Message, Realm, UserMessage, UserProfile\nfrom zerver.tornado.django_api import send_event_on_commit\n\n\nclass DeleteMessagesEvent(TypedDict, total=False):\n type: str\n message_ids: List[int]\n message_type: str\n topic: str\n stream_id: int\n\n\ndef do_delete_messages(realm: Realm, messages: Iterable[Message]) -> None:\n # messages in delete_message event belong to the same topic\n # or is a single direct message, as any other behaviour is not possible with\n # the current callers to this method.\n messages = list(messages)\n message_ids = [message.id for message in messages]\n if not message_ids:\n return\n\n event: DeleteMessagesEvent = {\n \"type\": \"delete_message\",\n \"message_ids\": message_ids,\n }\n\n sample_message = messages[0]\n message_type = \"stream\"\n users_to_notify = []\n if not sample_message.is_stream_message():\n assert len(messages) == 1\n message_type = \"private\"\n ums = UserMessage.objects.filter(message_id__in=message_ids)\n users_to_notify = [um.user_profile_id for um in ums]\n archiving_chunk_size = retention.MESSAGE_BATCH_SIZE\n\n if message_type == \"stream\":\n stream_id = sample_message.recipient.type_id\n event[\"stream_id\"] = stream_id\n event[\"topic\"] = sample_message.topic_name()\n subscriptions = get_active_subscriptions_for_stream_id(\n stream_id, include_deactivated_users=False\n )\n # We exclude long-term idle users, since they by definition have no active clients.\n subscriptions = subscriptions.exclude(user_profile__long_term_idle=True)\n users_to_notify = list(subscriptions.values_list(\"user_profile_id\", flat=True))\n archiving_chunk_size = retention.STREAM_MESSAGE_BATCH_SIZE\n\n move_messages_to_archive(message_ids, realm=realm, chunk_size=archiving_chunk_size)\n\n event[\"message_type\"] = message_type\n send_event_on_commit(realm, event, users_to_notify)\n\n\ndef do_delete_messages_by_sender(user: UserProfile) -> None:\n message_ids = list(\n # Uses index: zerver_message_realm_sender_recipient (prefix)\n Message.objects.filter(realm_id=user.realm_id, sender=user)\n .values_list(\"id\", flat=True)\n .order_by(\"id\")\n )\n if message_ids:\n move_messages_to_archive(message_ids, chunk_size=retention.STREAM_MESSAGE_BATCH_SIZE)\n", "path": "zerver/actions/message_delete.py"}, {"content": "import os\n\nZULIP_VERSION = \"9.0-dev+git\"\n\n# Add information on number of commits and commit hash to version, if available\nzulip_git_version_file = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"zulip-git-version\"\n)\nlines = [ZULIP_VERSION, \"\"]\nif os.path.exists(zulip_git_version_file):\n with open(zulip_git_version_file) as f:\n lines = [*f, \"\", \"\"]\nZULIP_VERSION = lines.pop(0).strip()\nZULIP_MERGE_BASE = lines.pop(0).strip()\n\nLATEST_MAJOR_VERSION = \"8.0\"\nLATEST_RELEASE_VERSION = \"8.3\"\nLATEST_RELEASE_ANNOUNCEMENT = \"https://blog.zulip.com/2023/12/15/zulip-8-0-released/\"\n\n# Versions of the desktop app below 
DESKTOP_MINIMUM_VERSION will be\n# prevented from connecting to the Zulip server. Versions above\n# DESKTOP_MINIMUM_VERSION but below DESKTOP_WARNING_VERSION will have\n# a banner at the top of the page asking the user to upgrade.\nDESKTOP_MINIMUM_VERSION = \"5.4.3\"\nDESKTOP_WARNING_VERSION = \"5.9.3\"\n\n# Bump the API_FEATURE_LEVEL whenever an API change is made\n# that clients might want to condition on. If we forget at\n# the time we make the change, then bump it later as soon\n# as we notice; clients using API_FEATURE_LEVEL will just not\n# use the new feature/API until the bump.\n#\n# Changes should be accompanied by documentation explaining what the\n# new level means in api_docs/changelog.md, as well as \"**Changes**\"\n# entries in the endpoint's documentation in `zulip.yaml`.\nAPI_FEATURE_LEVEL = 255\n\n# Bump the minor PROVISION_VERSION to indicate that folks should provision\n# only when going from an old version of the code to a newer version. Bump\n# the major version to indicate that folks should provision in both\n# directions.\n\n# Typically,\n# * adding a dependency only requires a minor version bump;\n# * removing a dependency requires a major version bump;\n# * upgrading a dependency requires a major version bump, unless the\n# upgraded dependency is backwards compatible with all of our\n# historical commits sharing the same major version, in which case a\n# minor version bump suffices.\n\nPROVISION_VERSION = (269, 3) # last bumped 2024-04-29 for adding pyasyncore\n", "path": "version.py"}], "after_files": [{"content": "from typing import Iterable, List, TypedDict\n\nfrom zerver.lib import retention\nfrom zerver.lib.retention import move_messages_to_archive\nfrom zerver.lib.stream_subscription import get_active_subscriptions_for_stream_id\nfrom zerver.models import Message, Realm, Stream, UserMessage, UserProfile\nfrom zerver.tornado.django_api import send_event_on_commit\n\n\nclass DeleteMessagesEvent(TypedDict, total=False):\n type: str\n message_ids: List[int]\n message_type: str\n topic: str\n stream_id: int\n\n\ndef check_update_first_message_id(\n realm: Realm, stream: Stream, message_ids: List[int], users_to_notify: Iterable[int]\n) -> None:\n # This will not update the `first_message_id` of streams where the\n # first message was deleted prior to the implementation of this function.\n assert stream.recipient_id is not None\n if stream.first_message_id not in message_ids:\n return\n current_first_message_id = (\n Message.objects.filter(realm_id=realm.id, recipient_id=stream.recipient_id)\n .values_list(\"id\", flat=True)\n .order_by(\"id\")\n .first()\n )\n\n stream.first_message_id = current_first_message_id\n stream.save(update_fields=[\"first_message_id\"])\n\n stream_event = dict(\n type=\"stream\",\n op=\"update\",\n property=\"first_message_id\",\n value=stream.first_message_id,\n stream_id=stream.id,\n )\n send_event_on_commit(realm, stream_event, users_to_notify)\n\n\ndef do_delete_messages(realm: Realm, messages: Iterable[Message]) -> None:\n # messages in delete_message event belong to the same topic\n # or is a single direct message, as any other behaviour is not possible with\n # the current callers to this method.\n messages = list(messages)\n message_ids = [message.id for message in messages]\n if not message_ids:\n return\n\n event: DeleteMessagesEvent = {\n \"type\": \"delete_message\",\n \"message_ids\": message_ids,\n }\n\n sample_message = messages[0]\n message_type = \"stream\"\n users_to_notify = []\n if not 
sample_message.is_stream_message():\n assert len(messages) == 1\n message_type = \"private\"\n ums = UserMessage.objects.filter(message_id__in=message_ids)\n users_to_notify = [um.user_profile_id for um in ums]\n archiving_chunk_size = retention.MESSAGE_BATCH_SIZE\n\n if message_type == \"stream\":\n stream_id = sample_message.recipient.type_id\n event[\"stream_id\"] = stream_id\n event[\"topic\"] = sample_message.topic_name()\n subscriptions = get_active_subscriptions_for_stream_id(\n stream_id, include_deactivated_users=False\n )\n # We exclude long-term idle users, since they by definition have no active clients.\n subscriptions = subscriptions.exclude(user_profile__long_term_idle=True)\n users_to_notify = list(subscriptions.values_list(\"user_profile_id\", flat=True))\n archiving_chunk_size = retention.STREAM_MESSAGE_BATCH_SIZE\n\n move_messages_to_archive(message_ids, realm=realm, chunk_size=archiving_chunk_size)\n if message_type == \"stream\":\n stream = Stream.objects.get(id=sample_message.recipient.type_id)\n check_update_first_message_id(realm, stream, message_ids, users_to_notify)\n\n event[\"message_type\"] = message_type\n send_event_on_commit(realm, event, users_to_notify)\n\n\ndef do_delete_messages_by_sender(user: UserProfile) -> None:\n message_ids = list(\n # Uses index: zerver_message_realm_sender_recipient (prefix)\n Message.objects.filter(realm_id=user.realm_id, sender=user)\n .values_list(\"id\", flat=True)\n .order_by(\"id\")\n )\n if message_ids:\n move_messages_to_archive(message_ids, chunk_size=retention.STREAM_MESSAGE_BATCH_SIZE)\n", "path": "zerver/actions/message_delete.py"}, {"content": "import os\n\nZULIP_VERSION = \"9.0-dev+git\"\n\n# Add information on number of commits and commit hash to version, if available\nzulip_git_version_file = os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"zulip-git-version\"\n)\nlines = [ZULIP_VERSION, \"\"]\nif os.path.exists(zulip_git_version_file):\n with open(zulip_git_version_file) as f:\n lines = [*f, \"\", \"\"]\nZULIP_VERSION = lines.pop(0).strip()\nZULIP_MERGE_BASE = lines.pop(0).strip()\n\nLATEST_MAJOR_VERSION = \"8.0\"\nLATEST_RELEASE_VERSION = \"8.3\"\nLATEST_RELEASE_ANNOUNCEMENT = \"https://blog.zulip.com/2023/12/15/zulip-8-0-released/\"\n\n# Versions of the desktop app below DESKTOP_MINIMUM_VERSION will be\n# prevented from connecting to the Zulip server. Versions above\n# DESKTOP_MINIMUM_VERSION but below DESKTOP_WARNING_VERSION will have\n# a banner at the top of the page asking the user to upgrade.\nDESKTOP_MINIMUM_VERSION = \"5.4.3\"\nDESKTOP_WARNING_VERSION = \"5.9.3\"\n\n# Bump the API_FEATURE_LEVEL whenever an API change is made\n# that clients might want to condition on. If we forget at\n# the time we make the change, then bump it later as soon\n# as we notice; clients using API_FEATURE_LEVEL will just not\n# use the new feature/API until the bump.\n#\n# Changes should be accompanied by documentation explaining what the\n# new level means in api_docs/changelog.md, as well as \"**Changes**\"\n# entries in the endpoint's documentation in `zulip.yaml`.\nAPI_FEATURE_LEVEL = 256\n\n# Bump the minor PROVISION_VERSION to indicate that folks should provision\n# only when going from an old version of the code to a newer version. 
Bump\n# the major version to indicate that folks should provision in both\n# directions.\n\n# Typically,\n# * adding a dependency only requires a minor version bump;\n# * removing a dependency requires a major version bump;\n# * upgrading a dependency requires a major version bump, unless the\n# upgraded dependency is backwards compatible with all of our\n# historical commits sharing the same major version, in which case a\n# minor version bump suffices.\n\nPROVISION_VERSION = (269, 3) # last bumped 2024-04-29 for adding pyasyncore\n", "path": "version.py"}]} | 1,743 | 677 |
gh_patches_debug_26011 | rasdani/github-patches | git_diff | ray-project__ray-3711 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[tune] partial function cannot be registered as trainable
### System information
- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Ubuntu 18.04
- **Ray installed from (source or binary)**: binary
- **Ray version**: 0.6.1
- **Python version**: 3.7
- **Exact command to reproduce**:
The following code fails:
```
def dummy_fn(c, a, b):
print("Called")
from functools import partial
from ray.tune import register_trainable
register_trainable("test", partial(dummy_fn, c=None))
```
while the following code works:
```
def dummy_fn(a, b):
print("Called")
from functools import partial
from ray.tune import register_trainable
register_trainable("test", dummy_fn)
```
### Describe the problem
The first code sample does not work, even though the function (after applying `partial`) fulfills all requirements to be properly registered.
### Source code / logs
Traceback:
```
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/temp/schock/conda/envs/delira_new/lib/python3.7/site-packages/ray/tune/registry.py", line 35, in register_trainable
if not issubclass(trainable, Trainable):
TypeError: issubclass() arg 1 must be a class
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/ray/tune/registry.py`
Content:
```
1 from __future__ import absolute_import
2 from __future__ import division
3 from __future__ import print_function
4
5 from types import FunctionType
6
7 import ray
8 import ray.cloudpickle as pickle
9 from ray.experimental.internal_kv import _internal_kv_initialized, \
10 _internal_kv_get, _internal_kv_put
11
12 TRAINABLE_CLASS = "trainable_class"
13 ENV_CREATOR = "env_creator"
14 RLLIB_MODEL = "rllib_model"
15 RLLIB_PREPROCESSOR = "rllib_preprocessor"
16 KNOWN_CATEGORIES = [
17 TRAINABLE_CLASS, ENV_CREATOR, RLLIB_MODEL, RLLIB_PREPROCESSOR
18 ]
19
20
21 def register_trainable(name, trainable):
22 """Register a trainable function or class.
23
24 Args:
25 name (str): Name to register.
26 trainable (obj): Function or tune.Trainable class. Functions must
27 take (config, status_reporter) as arguments and will be
28 automatically converted into a class during registration.
29 """
30
31 from ray.tune.trainable import Trainable, wrap_function
32
33 if isinstance(trainable, FunctionType):
34 trainable = wrap_function(trainable)
35 if not issubclass(trainable, Trainable):
36 raise TypeError("Second argument must be convertable to Trainable",
37 trainable)
38 _global_registry.register(TRAINABLE_CLASS, name, trainable)
39
40
41 def register_env(name, env_creator):
42 """Register a custom environment for use with RLlib.
43
44 Args:
45 name (str): Name to register.
46 env_creator (obj): Function that creates an env.
47 """
48
49 if not isinstance(env_creator, FunctionType):
50 raise TypeError("Second argument must be a function.", env_creator)
51 _global_registry.register(ENV_CREATOR, name, env_creator)
52
53
54 def _make_key(category, key):
55 """Generate a binary key for the given category and key.
56
57 Args:
58 category (str): The category of the item
59 key (str): The unique identifier for the item
60
61 Returns:
62 The key to use for storing a the value.
63 """
64 return (b"TuneRegistry:" + category.encode("ascii") + b"/" +
65 key.encode("ascii"))
66
67
68 class _Registry(object):
69 def __init__(self):
70 self._to_flush = {}
71
72 def register(self, category, key, value):
73 if category not in KNOWN_CATEGORIES:
74 from ray.tune import TuneError
75 raise TuneError("Unknown category {} not among {}".format(
76 category, KNOWN_CATEGORIES))
77 self._to_flush[(category, key)] = pickle.dumps(value)
78 if _internal_kv_initialized():
79 self.flush_values()
80
81 def contains(self, category, key):
82 if _internal_kv_initialized():
83 value = _internal_kv_get(_make_key(category, key))
84 return value is not None
85 else:
86 return (category, key) in self._to_flush
87
88 def get(self, category, key):
89 if _internal_kv_initialized():
90 value = _internal_kv_get(_make_key(category, key))
91 if value is None:
92 raise ValueError(
93 "Registry value for {}/{} doesn't exist.".format(
94 category, key))
95 return pickle.loads(value)
96 else:
97 return pickle.loads(self._to_flush[(category, key)])
98
99 def flush_values(self):
100 for (category, key), value in self._to_flush.items():
101 _internal_kv_put(_make_key(category, key), value, overwrite=True)
102 self._to_flush.clear()
103
104
105 _global_registry = _Registry()
106 ray.worker._post_init_hooks.append(_global_registry.flush_values)
107
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/python/ray/tune/registry.py b/python/ray/tune/registry.py
--- a/python/ray/tune/registry.py
+++ b/python/ray/tune/registry.py
@@ -2,6 +2,7 @@
from __future__ import division
from __future__ import print_function
+import logging
from types import FunctionType
import ray
@@ -17,6 +18,8 @@
TRAINABLE_CLASS, ENV_CREATOR, RLLIB_MODEL, RLLIB_PREPROCESSOR
]
+logger = logging.getLogger(__name__)
+
def register_trainable(name, trainable):
"""Register a trainable function or class.
@@ -30,8 +33,16 @@
from ray.tune.trainable import Trainable, wrap_function
- if isinstance(trainable, FunctionType):
+ if isinstance(trainable, type):
+ logger.debug("Detected class for trainable.")
+ elif isinstance(trainable, FunctionType):
+ logger.debug("Detected function for trainable.")
+ trainable = wrap_function(trainable)
+ elif callable(trainable):
+ logger.warning(
+ "Detected unknown callable for trainable. Converting to class.")
trainable = wrap_function(trainable)
+
if not issubclass(trainable, Trainable):
raise TypeError("Second argument must be convertable to Trainable",
trainable)
| {"golden_diff": "diff --git a/python/ray/tune/registry.py b/python/ray/tune/registry.py\n--- a/python/ray/tune/registry.py\n+++ b/python/ray/tune/registry.py\n@@ -2,6 +2,7 @@\n from __future__ import division\n from __future__ import print_function\n \n+import logging\n from types import FunctionType\n \n import ray\n@@ -17,6 +18,8 @@\n TRAINABLE_CLASS, ENV_CREATOR, RLLIB_MODEL, RLLIB_PREPROCESSOR\n ]\n \n+logger = logging.getLogger(__name__)\n+\n \n def register_trainable(name, trainable):\n \"\"\"Register a trainable function or class.\n@@ -30,8 +33,16 @@\n \n from ray.tune.trainable import Trainable, wrap_function\n \n- if isinstance(trainable, FunctionType):\n+ if isinstance(trainable, type):\n+ logger.debug(\"Detected class for trainable.\")\n+ elif isinstance(trainable, FunctionType):\n+ logger.debug(\"Detected function for trainable.\")\n+ trainable = wrap_function(trainable)\n+ elif callable(trainable):\n+ logger.warning(\n+ \"Detected unknown callable for trainable. Converting to class.\")\n trainable = wrap_function(trainable)\n+\n if not issubclass(trainable, Trainable):\n raise TypeError(\"Second argument must be convertable to Trainable\",\n trainable)\n", "issue": "[tune[ partial function cannot be registered as trainable\n### System information\r\n- **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Ubuntu 18.04\r\n- **Ray installed from (source or binary)**: binary\r\n- **Ray version**: 0.6.1\r\n- **Python version**: 3.7\r\n- **Exact command to reproduce**:\r\n\r\nThe following code fails:\r\n```\r\ndef dummy_fn(c, a, b):\r\n print(\"Called\")\r\n\r\nfrom functools import partial\r\nfrom ray.tune import register_trainable\r\nregister_trainable(\"test\", partial(dummy_fn, c=None))\r\n\r\n```\r\n\r\nwhile the following code works:\r\n```\r\ndef dummy_fn(a, b):\r\n print(\"Called\")\r\n\r\nfrom functools import partial\r\nfrom ray.tune import register_trainable\r\nregister_trainable(\"test\", dummy_fn)\r\n\r\n```\r\n### Describe the problem\r\nThe first code sample does not work, despite the function (after the `partial`) fullfills all requirements to be properly registered.\r\n\r\n### Source code / logs\r\nTraceback:\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/home/temp/schock/conda/envs/delira_new/lib/python3.7/site-packages/ray/tune/registry.py\", line 35, in register_trainable\r\n if not issubclass(trainable, Trainable):\r\nTypeError: issubclass() arg 1 must be a class\r\n```\r\n\n", "before_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom types import FunctionType\n\nimport ray\nimport ray.cloudpickle as pickle\nfrom ray.experimental.internal_kv import _internal_kv_initialized, \\\n _internal_kv_get, _internal_kv_put\n\nTRAINABLE_CLASS = \"trainable_class\"\nENV_CREATOR = \"env_creator\"\nRLLIB_MODEL = \"rllib_model\"\nRLLIB_PREPROCESSOR = \"rllib_preprocessor\"\nKNOWN_CATEGORIES = [\n TRAINABLE_CLASS, ENV_CREATOR, RLLIB_MODEL, RLLIB_PREPROCESSOR\n]\n\n\ndef register_trainable(name, trainable):\n \"\"\"Register a trainable function or class.\n\n Args:\n name (str): Name to register.\n trainable (obj): Function or tune.Trainable class. 
Functions must\n take (config, status_reporter) as arguments and will be\n automatically converted into a class during registration.\n \"\"\"\n\n from ray.tune.trainable import Trainable, wrap_function\n\n if isinstance(trainable, FunctionType):\n trainable = wrap_function(trainable)\n if not issubclass(trainable, Trainable):\n raise TypeError(\"Second argument must be convertable to Trainable\",\n trainable)\n _global_registry.register(TRAINABLE_CLASS, name, trainable)\n\n\ndef register_env(name, env_creator):\n \"\"\"Register a custom environment for use with RLlib.\n\n Args:\n name (str): Name to register.\n env_creator (obj): Function that creates an env.\n \"\"\"\n\n if not isinstance(env_creator, FunctionType):\n raise TypeError(\"Second argument must be a function.\", env_creator)\n _global_registry.register(ENV_CREATOR, name, env_creator)\n\n\ndef _make_key(category, key):\n \"\"\"Generate a binary key for the given category and key.\n\n Args:\n category (str): The category of the item\n key (str): The unique identifier for the item\n\n Returns:\n The key to use for storing a the value.\n \"\"\"\n return (b\"TuneRegistry:\" + category.encode(\"ascii\") + b\"/\" +\n key.encode(\"ascii\"))\n\n\nclass _Registry(object):\n def __init__(self):\n self._to_flush = {}\n\n def register(self, category, key, value):\n if category not in KNOWN_CATEGORIES:\n from ray.tune import TuneError\n raise TuneError(\"Unknown category {} not among {}\".format(\n category, KNOWN_CATEGORIES))\n self._to_flush[(category, key)] = pickle.dumps(value)\n if _internal_kv_initialized():\n self.flush_values()\n\n def contains(self, category, key):\n if _internal_kv_initialized():\n value = _internal_kv_get(_make_key(category, key))\n return value is not None\n else:\n return (category, key) in self._to_flush\n\n def get(self, category, key):\n if _internal_kv_initialized():\n value = _internal_kv_get(_make_key(category, key))\n if value is None:\n raise ValueError(\n \"Registry value for {}/{} doesn't exist.\".format(\n category, key))\n return pickle.loads(value)\n else:\n return pickle.loads(self._to_flush[(category, key)])\n\n def flush_values(self):\n for (category, key), value in self._to_flush.items():\n _internal_kv_put(_make_key(category, key), value, overwrite=True)\n self._to_flush.clear()\n\n\n_global_registry = _Registry()\nray.worker._post_init_hooks.append(_global_registry.flush_values)\n", "path": "python/ray/tune/registry.py"}], "after_files": [{"content": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport logging\nfrom types import FunctionType\n\nimport ray\nimport ray.cloudpickle as pickle\nfrom ray.experimental.internal_kv import _internal_kv_initialized, \\\n _internal_kv_get, _internal_kv_put\n\nTRAINABLE_CLASS = \"trainable_class\"\nENV_CREATOR = \"env_creator\"\nRLLIB_MODEL = \"rllib_model\"\nRLLIB_PREPROCESSOR = \"rllib_preprocessor\"\nKNOWN_CATEGORIES = [\n TRAINABLE_CLASS, ENV_CREATOR, RLLIB_MODEL, RLLIB_PREPROCESSOR\n]\n\nlogger = logging.getLogger(__name__)\n\n\ndef register_trainable(name, trainable):\n \"\"\"Register a trainable function or class.\n\n Args:\n name (str): Name to register.\n trainable (obj): Function or tune.Trainable class. 
Functions must\n take (config, status_reporter) as arguments and will be\n automatically converted into a class during registration.\n \"\"\"\n\n from ray.tune.trainable import Trainable, wrap_function\n\n if isinstance(trainable, type):\n logger.debug(\"Detected class for trainable.\")\n elif isinstance(trainable, FunctionType):\n logger.debug(\"Detected function for trainable.\")\n trainable = wrap_function(trainable)\n elif callable(trainable):\n logger.warning(\n \"Detected unknown callable for trainable. Converting to class.\")\n trainable = wrap_function(trainable)\n\n if not issubclass(trainable, Trainable):\n raise TypeError(\"Second argument must be convertable to Trainable\",\n trainable)\n _global_registry.register(TRAINABLE_CLASS, name, trainable)\n\n\ndef register_env(name, env_creator):\n \"\"\"Register a custom environment for use with RLlib.\n\n Args:\n name (str): Name to register.\n env_creator (obj): Function that creates an env.\n \"\"\"\n\n if not isinstance(env_creator, FunctionType):\n raise TypeError(\"Second argument must be a function.\", env_creator)\n _global_registry.register(ENV_CREATOR, name, env_creator)\n\n\ndef _make_key(category, key):\n \"\"\"Generate a binary key for the given category and key.\n\n Args:\n category (str): The category of the item\n key (str): The unique identifier for the item\n\n Returns:\n The key to use for storing a the value.\n \"\"\"\n return (b\"TuneRegistry:\" + category.encode(\"ascii\") + b\"/\" +\n key.encode(\"ascii\"))\n\n\nclass _Registry(object):\n def __init__(self):\n self._to_flush = {}\n\n def register(self, category, key, value):\n if category not in KNOWN_CATEGORIES:\n from ray.tune import TuneError\n raise TuneError(\"Unknown category {} not among {}\".format(\n category, KNOWN_CATEGORIES))\n self._to_flush[(category, key)] = pickle.dumps(value)\n if _internal_kv_initialized():\n self.flush_values()\n\n def contains(self, category, key):\n if _internal_kv_initialized():\n value = _internal_kv_get(_make_key(category, key))\n return value is not None\n else:\n return (category, key) in self._to_flush\n\n def get(self, category, key):\n if _internal_kv_initialized():\n value = _internal_kv_get(_make_key(category, key))\n if value is None:\n raise ValueError(\n \"Registry value for {}/{} doesn't exist.\".format(\n category, key))\n return pickle.loads(value)\n else:\n return pickle.loads(self._to_flush[(category, key)])\n\n def flush_values(self):\n for (category, key), value in self._to_flush.items():\n _internal_kv_put(_make_key(category, key), value, overwrite=True)\n self._to_flush.clear()\n\n\n_global_registry = _Registry()\nray.worker._post_init_hooks.append(_global_registry.flush_values)\n", "path": "python/ray/tune/registry.py"}]} | 1,554 | 296 |
gh_patches_debug_20771 | rasdani/github-patches | git_diff | cupy__cupy-7068 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
cupy.apply_along_axis failed with cupy.nonzero
### Description
cp.apply_along_axis(cp.nonzero, 1, cp.array([[1,2],[2,3]])) - failed with error
*** AttributeError: 'tuple' object has no attribute 'shape'
np.apply_along_axis(np.nonzero, 1, np.array([[1,2],[2,3]])) - is OK
UPDATE. Problem in _shape_base.py.
line 53:
buff = cupy.empty(inarr_view.shape[:-1] + res.shape, res.dtype)
res is a tuple (since cp.nonzero on a 1-d array returns a tuple containing a single cupy array), so line 44 (if cupy.isscalar(res):) doesn't convert it from a tuple to a cupy array.
As a temporary workaround it is possible to use a small wrapper function:
def cupy_nonzero (a):
return cp.nonzero(a)[0]
### To Reproduce
```py
cp.apply_along_axis(cp.nonzero, 1, cp.array([[1,2],[2,3]]))
```
### Installation
_No response_
### Environment
```
# Paste the output here
```
### Additional Information
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cupy/lib/_shape_base.py`
Content:
```
1 from numpy.lib import index_tricks
2
3 import cupy
4 from cupy._core import internal
5
6
7 def apply_along_axis(func1d, axis, arr, *args, **kwargs):
8 """Apply a function to 1-D slices along the given axis.
9
10 Args:
11 func1d (function (M,) -> (Nj...)): This function should accept 1-D
12 arrays. It is applied to 1-D slices of ``arr`` along the specified
13 axis. It must return a 1-D ``cupy.ndarray``.
14 axis (integer): Axis along which ``arr`` is sliced.
15 arr (cupy.ndarray (Ni..., M, Nk...)): Input array.
16 args: Additional arguments for ``func1d``.
17 kwargs: Additional keyword arguments for ``func1d``.
18
19 Returns:
20 cupy.ndarray: The output array. The shape of ``out`` is identical to
21 the shape of ``arr``, except along the ``axis`` dimension. This
22 axis is removed, and replaced with new dimensions equal to the
23 shape of the return value of ``func1d``. So if ``func1d`` returns a
24 scalar ``out`` will have one fewer dimensions than ``arr``.
25
26 .. seealso:: :func:`numpy.apply_along_axis`
27 """
28 ndim = arr.ndim
29 axis = internal._normalize_axis_index(axis, ndim)
30 inarr_view = cupy.moveaxis(arr, axis, -1)
31
32 # compute indices for the iteration axes, and append a trailing ellipsis to
33 # prevent 0d arrays decaying to scalars
34 inds = index_tricks.ndindex(inarr_view.shape[:-1])
35 inds = (ind + (Ellipsis,) for ind in inds)
36
37 # invoke the function on the first item
38 try:
39 ind0 = next(inds)
40 except StopIteration:
41 raise ValueError(
42 'Cannot apply_along_axis when any iteration dimensions are 0'
43 )
44 res = func1d(inarr_view[ind0], *args, **kwargs)
45 if cupy.isscalar(res):
46 # scalar outputs need to be transfered to a device ndarray
47 res = cupy.asarray(res)
48
49 # build a buffer for storing evaluations of func1d.
50 # remove the requested axis, and add the new ones on the end.
51 # laid out so that each write is contiguous.
52 # for a tuple index inds, buff[inds] = func1d(inarr_view[inds])
53 buff = cupy.empty(inarr_view.shape[:-1] + res.shape, res.dtype)
54
55 # save the first result, then compute and save all remaining results
56 buff[ind0] = res
57 for ind in inds:
58 buff[ind] = func1d(inarr_view[ind], *args, **kwargs)
59
60 # restore the inserted axes back to where they belong
61 for i in range(res.ndim):
62 buff = cupy.moveaxis(buff, -1, axis)
63
64 return buff
65
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cupy/lib/_shape_base.py b/cupy/lib/_shape_base.py
--- a/cupy/lib/_shape_base.py
+++ b/cupy/lib/_shape_base.py
@@ -42,9 +42,7 @@
'Cannot apply_along_axis when any iteration dimensions are 0'
)
res = func1d(inarr_view[ind0], *args, **kwargs)
- if cupy.isscalar(res):
- # scalar outputs need to be transfered to a device ndarray
- res = cupy.asarray(res)
+ res = cupy.asarray(res)
# build a buffer for storing evaluations of func1d.
# remove the requested axis, and add the new ones on the end.
@@ -55,7 +53,8 @@
# save the first result, then compute and save all remaining results
buff[ind0] = res
for ind in inds:
- buff[ind] = func1d(inarr_view[ind], *args, **kwargs)
+ out = func1d(inarr_view[ind], *args, **kwargs)
+ buff[ind] = cupy.asarray(out)
# restore the inserted axes back to where they belong
for i in range(res.ndim):
| {"golden_diff": "diff --git a/cupy/lib/_shape_base.py b/cupy/lib/_shape_base.py\n--- a/cupy/lib/_shape_base.py\n+++ b/cupy/lib/_shape_base.py\n@@ -42,9 +42,7 @@\n 'Cannot apply_along_axis when any iteration dimensions are 0'\n )\n res = func1d(inarr_view[ind0], *args, **kwargs)\n- if cupy.isscalar(res):\n- # scalar outputs need to be transfered to a device ndarray\n- res = cupy.asarray(res)\n+ res = cupy.asarray(res)\n \n # build a buffer for storing evaluations of func1d.\n # remove the requested axis, and add the new ones on the end.\n@@ -55,7 +53,8 @@\n # save the first result, then compute and save all remaining results\n buff[ind0] = res\n for ind in inds:\n- buff[ind] = func1d(inarr_view[ind], *args, **kwargs)\n+ out = func1d(inarr_view[ind], *args, **kwargs)\n+ buff[ind] = cupy.asarray(out)\n \n # restore the inserted axes back to where they belong\n for i in range(res.ndim):\n", "issue": "cupy.apply_along_axis failed with cupy.nonzero\n### Description\r\n\r\ncp.apply_along_axis(cp.nonzero, 1, cp.array([[1,2],[2,3]])) - failed with error\r\n\r\n*** AttributeError: 'tuple' object has no attribute 'shape'\r\n\r\nnp.apply_along_axis(np.nonzero, 1, np.array([[1,2],[2,3]])) - is OK\r\n\r\nUPDATE. Problem in _shape_base.py. \r\nline 53:\r\nbuff = cupy.empty(inarr_view.shape[:-1] + res.shape, res.dtype)\r\n\r\nres - is tuple (as a result of cp.nonzero(1d array) of single cupy-array, so line 44 ( if cupy.isscalar(res):) doesnt convert it from tuple to cupy-array\r\n\r\nas a temporal solution is possible to use \"buffer-like\" function\r\ndef cupy_nonzero (a):\r\n return cp.nonzero(a)[0]\r\n\r\n### To Reproduce\r\n\r\n```py\r\ncp.apply_along_axis(cp.nonzero, 1, cp.array([[1,2],[2,3]]))\r\n```\r\n\r\n\r\n### Installation\r\n\r\n_No response_\r\n\r\n### Environment\r\n\r\n```\r\n# Paste the output here\r\n```\r\n\r\n\r\n### Additional Information\r\n\r\n_No response_\n", "before_files": [{"content": "from numpy.lib import index_tricks\n\nimport cupy\nfrom cupy._core import internal\n\n\ndef apply_along_axis(func1d, axis, arr, *args, **kwargs):\n \"\"\"Apply a function to 1-D slices along the given axis.\n\n Args:\n func1d (function (M,) -> (Nj...)): This function should accept 1-D\n arrays. It is applied to 1-D slices of ``arr`` along the specified\n axis. It must return a 1-D ``cupy.ndarray``.\n axis (integer): Axis along which ``arr`` is sliced.\n arr (cupy.ndarray (Ni..., M, Nk...)): Input array.\n args: Additional arguments for ``func1d``.\n kwargs: Additional keyword arguments for ``func1d``.\n\n Returns:\n cupy.ndarray: The output array. The shape of ``out`` is identical to\n the shape of ``arr``, except along the ``axis`` dimension. This\n axis is removed, and replaced with new dimensions equal to the\n shape of the return value of ``func1d``. So if ``func1d`` returns a\n scalar ``out`` will have one fewer dimensions than ``arr``.\n\n .. 
seealso:: :func:`numpy.apply_along_axis`\n \"\"\"\n ndim = arr.ndim\n axis = internal._normalize_axis_index(axis, ndim)\n inarr_view = cupy.moveaxis(arr, axis, -1)\n\n # compute indices for the iteration axes, and append a trailing ellipsis to\n # prevent 0d arrays decaying to scalars\n inds = index_tricks.ndindex(inarr_view.shape[:-1])\n inds = (ind + (Ellipsis,) for ind in inds)\n\n # invoke the function on the first item\n try:\n ind0 = next(inds)\n except StopIteration:\n raise ValueError(\n 'Cannot apply_along_axis when any iteration dimensions are 0'\n )\n res = func1d(inarr_view[ind0], *args, **kwargs)\n if cupy.isscalar(res):\n # scalar outputs need to be transfered to a device ndarray\n res = cupy.asarray(res)\n\n # build a buffer for storing evaluations of func1d.\n # remove the requested axis, and add the new ones on the end.\n # laid out so that each write is contiguous.\n # for a tuple index inds, buff[inds] = func1d(inarr_view[inds])\n buff = cupy.empty(inarr_view.shape[:-1] + res.shape, res.dtype)\n\n # save the first result, then compute and save all remaining results\n buff[ind0] = res\n for ind in inds:\n buff[ind] = func1d(inarr_view[ind], *args, **kwargs)\n\n # restore the inserted axes back to where they belong\n for i in range(res.ndim):\n buff = cupy.moveaxis(buff, -1, axis)\n\n return buff\n", "path": "cupy/lib/_shape_base.py"}], "after_files": [{"content": "from numpy.lib import index_tricks\n\nimport cupy\nfrom cupy._core import internal\n\n\ndef apply_along_axis(func1d, axis, arr, *args, **kwargs):\n \"\"\"Apply a function to 1-D slices along the given axis.\n\n Args:\n func1d (function (M,) -> (Nj...)): This function should accept 1-D\n arrays. It is applied to 1-D slices of ``arr`` along the specified\n axis. It must return a 1-D ``cupy.ndarray``.\n axis (integer): Axis along which ``arr`` is sliced.\n arr (cupy.ndarray (Ni..., M, Nk...)): Input array.\n args: Additional arguments for ``func1d``.\n kwargs: Additional keyword arguments for ``func1d``.\n\n Returns:\n cupy.ndarray: The output array. The shape of ``out`` is identical to\n the shape of ``arr``, except along the ``axis`` dimension. This\n axis is removed, and replaced with new dimensions equal to the\n shape of the return value of ``func1d``. So if ``func1d`` returns a\n scalar ``out`` will have one fewer dimensions than ``arr``.\n\n .. 
seealso:: :func:`numpy.apply_along_axis`\n \"\"\"\n ndim = arr.ndim\n axis = internal._normalize_axis_index(axis, ndim)\n inarr_view = cupy.moveaxis(arr, axis, -1)\n\n # compute indices for the iteration axes, and append a trailing ellipsis to\n # prevent 0d arrays decaying to scalars\n inds = index_tricks.ndindex(inarr_view.shape[:-1])\n inds = (ind + (Ellipsis,) for ind in inds)\n\n # invoke the function on the first item\n try:\n ind0 = next(inds)\n except StopIteration:\n raise ValueError(\n 'Cannot apply_along_axis when any iteration dimensions are 0'\n )\n res = func1d(inarr_view[ind0], *args, **kwargs)\n res = cupy.asarray(res)\n\n # build a buffer for storing evaluations of func1d.\n # remove the requested axis, and add the new ones on the end.\n # laid out so that each write is contiguous.\n # for a tuple index inds, buff[inds] = func1d(inarr_view[inds])\n buff = cupy.empty(inarr_view.shape[:-1] + res.shape, res.dtype)\n\n # save the first result, then compute and save all remaining results\n buff[ind0] = res\n for ind in inds:\n out = func1d(inarr_view[ind], *args, **kwargs)\n buff[ind] = cupy.asarray(out)\n\n # restore the inserted axes back to where they belong\n for i in range(res.ndim):\n buff = cupy.moveaxis(buff, -1, axis)\n\n return buff\n", "path": "cupy/lib/_shape_base.py"}]} | 1,292 | 271 |
gh_patches_debug_27156 | rasdani/github-patches | git_diff | falconry__falcon-364 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Not all modules are cythonized
Missing some modules, such as hooks and those in the util package.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import imp
2 import io
3 import sys
4 from os import path
5 from setuptools import setup, find_packages, Extension
6
7 VERSION = imp.load_source('version', path.join('.', 'falcon', 'version.py'))
8 VERSION = VERSION.__version__
9
10 # NOTE(kgriffs): python-mimeparse is newer than mimeparse, supports Py3
11 # TODO(kgriffs): Fork and optimize/modernize python-mimeparse
12 REQUIRES = ['six', 'python-mimeparse']
13
14 PYPY = True
15 CYTHON = False
16 try:
17 sys.pypy_version_info
18 except AttributeError:
19 PYPY = False
20
21 if not PYPY:
22 try:
23 from Cython.Distutils import build_ext
24 CYTHON = True
25 except ImportError:
26 print('\nWARNING: Cython not installed. '
27 'Falcon will still work fine, but may run '
28 'a bit slower.\n')
29 CYTHON = False
30
31 if CYTHON:
32 ext_names = (
33 'api',
34 'api_helpers',
35 'errors',
36 'http_error',
37 'request',
38 'request_helpers',
39 'responders',
40 'response',
41 'response_helpers',
42 )
43
44 cmdclass = {'build_ext': build_ext}
45 ext_modules = [
46 Extension('falcon.' + ext, [path.join('falcon', ext + '.py')])
47 for ext in ext_names]
48 else:
49 cmdclass = {}
50 ext_modules = []
51
52 setup(
53 name='falcon',
54 version=VERSION,
55 description='An unladen web framework for building APIs and app backends.',
56 long_description=io.open('README.rst', 'r', encoding='utf-8').read(),
57 classifiers=[
58 'Development Status :: 5 - Production/Stable',
59 'Environment :: Web Environment',
60 'Natural Language :: English',
61 'Intended Audience :: Developers',
62 'Intended Audience :: System Administrators',
63 'License :: OSI Approved :: Apache Software License',
64 'Operating System :: MacOS :: MacOS X',
65 'Operating System :: Microsoft :: Windows',
66 'Operating System :: POSIX',
67 'Topic :: Internet :: WWW/HTTP :: WSGI',
68 'Topic :: Software Development :: Libraries :: Application Frameworks',
69 'Programming Language :: Python',
70 'Programming Language :: Python :: Implementation :: CPython',
71 'Programming Language :: Python :: Implementation :: PyPy',
72 'Programming Language :: Python :: 2.6',
73 'Programming Language :: Python :: 2.7',
74 'Programming Language :: Python :: 3.3',
75 'Programming Language :: Python :: 3.4',
76 ],
77 keywords='wsgi web api framework rest http cloud',
78 author='Kurt Griffiths',
79 author_email='[email protected]',
80 url='http://falconframework.org',
81 license='Apache 2.0',
82 packages=find_packages(exclude=['tests']),
83 include_package_data=True,
84 zip_safe=False,
85 install_requires=REQUIRES,
86 setup_requires=[],
87 cmdclass=cmdclass,
88 ext_modules=ext_modules,
89 test_suite='nose.collector',
90 entry_points={
91 'console_scripts': [
92 'falcon-bench = falcon.cmd.bench:main'
93 ]
94 }
95 )
96
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,8 +1,12 @@
+import glob
import imp
import io
-import sys
+import os
from os import path
from setuptools import setup, find_packages, Extension
+import sys
+
+MYDIR = path.abspath(os.path.dirname(__file__))
VERSION = imp.load_source('version', path.join('.', 'falcon', 'version.py'))
VERSION = VERSION.__version__
@@ -29,22 +33,29 @@
CYTHON = False
if CYTHON:
- ext_names = (
- 'api',
- 'api_helpers',
- 'errors',
- 'http_error',
- 'request',
- 'request_helpers',
- 'responders',
- 'response',
- 'response_helpers',
- )
+ def list_modules(dirname):
+ filenames = glob.glob(path.join(dirname, '*.py'))
+
+ module_names = []
+ for name in filenames:
+ module, ext = path.splitext(path.basename(name))
+ if module != '__init__':
+ module_names.append(module)
+
+ return module_names
- cmdclass = {'build_ext': build_ext}
ext_modules = [
Extension('falcon.' + ext, [path.join('falcon', ext + '.py')])
- for ext in ext_names]
+ for ext in list_modules(path.join(MYDIR, 'falcon'))]
+
+ ext_modules += [
+ Extension('falcon.util.' + ext,
+ [path.join('falcon', 'util', ext + '.py')])
+
+ for ext in list_modules(path.join(MYDIR, 'falcon', 'util'))]
+
+ cmdclass = {'build_ext': build_ext}
+
else:
cmdclass = {}
ext_modules = []
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,8 +1,12 @@\n+import glob\n import imp\n import io\n-import sys\n+import os\n from os import path\n from setuptools import setup, find_packages, Extension\n+import sys\n+\n+MYDIR = path.abspath(os.path.dirname(__file__))\n \n VERSION = imp.load_source('version', path.join('.', 'falcon', 'version.py'))\n VERSION = VERSION.__version__\n@@ -29,22 +33,29 @@\n CYTHON = False\n \n if CYTHON:\n- ext_names = (\n- 'api',\n- 'api_helpers',\n- 'errors',\n- 'http_error',\n- 'request',\n- 'request_helpers',\n- 'responders',\n- 'response',\n- 'response_helpers',\n- )\n+ def list_modules(dirname):\n+ filenames = glob.glob(path.join(dirname, '*.py'))\n+\n+ module_names = []\n+ for name in filenames:\n+ module, ext = path.splitext(path.basename(name))\n+ if module != '__init__':\n+ module_names.append(module)\n+\n+ return module_names\n \n- cmdclass = {'build_ext': build_ext}\n ext_modules = [\n Extension('falcon.' + ext, [path.join('falcon', ext + '.py')])\n- for ext in ext_names]\n+ for ext in list_modules(path.join(MYDIR, 'falcon'))]\n+\n+ ext_modules += [\n+ Extension('falcon.util.' + ext,\n+ [path.join('falcon', 'util', ext + '.py')])\n+\n+ for ext in list_modules(path.join(MYDIR, 'falcon', 'util'))]\n+\n+ cmdclass = {'build_ext': build_ext}\n+\n else:\n cmdclass = {}\n ext_modules = []\n", "issue": "Not all modules are cythonized\nMissing some modules, such as hooks and those in the util package.\n\n", "before_files": [{"content": "import imp\nimport io\nimport sys\nfrom os import path\nfrom setuptools import setup, find_packages, Extension\n\nVERSION = imp.load_source('version', path.join('.', 'falcon', 'version.py'))\nVERSION = VERSION.__version__\n\n# NOTE(kgriffs): python-mimeparse is newer than mimeparse, supports Py3\n# TODO(kgriffs): Fork and optimize/modernize python-mimeparse\nREQUIRES = ['six', 'python-mimeparse']\n\nPYPY = True\nCYTHON = False\ntry:\n sys.pypy_version_info\nexcept AttributeError:\n PYPY = False\n\nif not PYPY:\n try:\n from Cython.Distutils import build_ext\n CYTHON = True\n except ImportError:\n print('\\nWARNING: Cython not installed. '\n 'Falcon will still work fine, but may run '\n 'a bit slower.\\n')\n CYTHON = False\n\nif CYTHON:\n ext_names = (\n 'api',\n 'api_helpers',\n 'errors',\n 'http_error',\n 'request',\n 'request_helpers',\n 'responders',\n 'response',\n 'response_helpers',\n )\n\n cmdclass = {'build_ext': build_ext}\n ext_modules = [\n Extension('falcon.' 
+ ext, [path.join('falcon', ext + '.py')])\n for ext in ext_names]\nelse:\n cmdclass = {}\n ext_modules = []\n\nsetup(\n name='falcon',\n version=VERSION,\n description='An unladen web framework for building APIs and app backends.',\n long_description=io.open('README.rst', 'r', encoding='utf-8').read(),\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Natural Language :: English',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Topic :: Internet :: WWW/HTTP :: WSGI',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n ],\n keywords='wsgi web api framework rest http cloud',\n author='Kurt Griffiths',\n author_email='[email protected]',\n url='http://falconframework.org',\n license='Apache 2.0',\n packages=find_packages(exclude=['tests']),\n include_package_data=True,\n zip_safe=False,\n install_requires=REQUIRES,\n setup_requires=[],\n cmdclass=cmdclass,\n ext_modules=ext_modules,\n test_suite='nose.collector',\n entry_points={\n 'console_scripts': [\n 'falcon-bench = falcon.cmd.bench:main'\n ]\n }\n)\n", "path": "setup.py"}], "after_files": [{"content": "import glob\nimport imp\nimport io\nimport os\nfrom os import path\nfrom setuptools import setup, find_packages, Extension\nimport sys\n\nMYDIR = path.abspath(os.path.dirname(__file__))\n\nVERSION = imp.load_source('version', path.join('.', 'falcon', 'version.py'))\nVERSION = VERSION.__version__\n\n# NOTE(kgriffs): python-mimeparse is newer than mimeparse, supports Py3\n# TODO(kgriffs): Fork and optimize/modernize python-mimeparse\nREQUIRES = ['six', 'python-mimeparse']\n\nPYPY = True\nCYTHON = False\ntry:\n sys.pypy_version_info\nexcept AttributeError:\n PYPY = False\n\nif not PYPY:\n try:\n from Cython.Distutils import build_ext\n CYTHON = True\n except ImportError:\n print('\\nWARNING: Cython not installed. '\n 'Falcon will still work fine, but may run '\n 'a bit slower.\\n')\n CYTHON = False\n\nif CYTHON:\n def list_modules(dirname):\n filenames = glob.glob(path.join(dirname, '*.py'))\n\n module_names = []\n for name in filenames:\n module, ext = path.splitext(path.basename(name))\n if module != '__init__':\n module_names.append(module)\n\n return module_names\n\n ext_modules = [\n Extension('falcon.' + ext, [path.join('falcon', ext + '.py')])\n for ext in list_modules(path.join(MYDIR, 'falcon'))]\n\n ext_modules += [\n Extension('falcon.util.' 
+ ext,\n [path.join('falcon', 'util', ext + '.py')])\n\n for ext in list_modules(path.join(MYDIR, 'falcon', 'util'))]\n\n cmdclass = {'build_ext': build_ext}\n\nelse:\n cmdclass = {}\n ext_modules = []\n\nsetup(\n name='falcon',\n version=VERSION,\n description='An unladen web framework for building APIs and app backends.',\n long_description=io.open('README.rst', 'r', encoding='utf-8').read(),\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Web Environment',\n 'Natural Language :: English',\n 'Intended Audience :: Developers',\n 'Intended Audience :: System Administrators',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Topic :: Internet :: WWW/HTTP :: WSGI',\n 'Topic :: Software Development :: Libraries :: Application Frameworks',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Programming Language :: Python :: 2.6',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n ],\n keywords='wsgi web api framework rest http cloud',\n author='Kurt Griffiths',\n author_email='[email protected]',\n url='http://falconframework.org',\n license='Apache 2.0',\n packages=find_packages(exclude=['tests']),\n include_package_data=True,\n zip_safe=False,\n install_requires=REQUIRES,\n setup_requires=[],\n cmdclass=cmdclass,\n ext_modules=ext_modules,\n test_suite='nose.collector',\n entry_points={\n 'console_scripts': [\n 'falcon-bench = falcon.cmd.bench:main'\n ]\n }\n)\n", "path": "setup.py"}]} | 1,151 | 404 |
gh_patches_debug_34750 | rasdani/github-patches | git_diff | plotly__dash-2859 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Patch += operation not working on the patch object.
If you have a patch object for a number prop and try to do `patched += 1`, the callback will fail with an error, `returned non serializable object`.
Example:
```
app = Dash()
app.layout = [dcc.Store(data=0, id="store"), html.Button("click", id="click")]
@app.callback(Output("store", "data"), Input("click", "n_clicks"))
def on_click(_):
patched = Patch()
patched += 1
return patched
```
Clicking on the button results in the error; it should instead increment the store's data prop by 1.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dash/_patch.py`
Content:
```
1 def _operation(name, location, **kwargs):
2 return {"operation": name, "location": location, "params": dict(**kwargs)}
3
4
5 _noop = object()
6
7
8 def validate_slice(obj):
9 if isinstance(obj, slice):
10 raise TypeError("a slice is not a valid index for patch")
11
12
13 class Patch:
14 """
15 Patch a callback output value
16
17 Act like a proxy of the output prop value on the frontend.
18
19 Supported prop types: Dictionaries and lists.
20 """
21
22 def __init__(self, location=None, parent=None):
23 if location is not None:
24 self._location = location
25 else:
26 # pylint: disable=consider-using-ternary
27 self._location = (parent and parent._location) or []
28 if parent is not None:
29 self._operations = parent._operations
30 else:
31 self._operations = []
32
33 def __getstate__(self):
34 return vars(self)
35
36 def __setstate__(self, state):
37 vars(self).update(state)
38
39 def __getitem__(self, item):
40 validate_slice(item)
41 return Patch(location=self._location + [item], parent=self)
42
43 def __getattr__(self, item):
44 if item == "tolist":
45 # to_json fix
46 raise AttributeError
47 if item == "_location":
48 return self._location
49 if item == "_operations":
50 return self._operations
51 return self.__getitem__(item)
52
53 def __setattr__(self, key, value):
54 if key in ("_location", "_operations"):
55 self.__dict__[key] = value
56 else:
57 self.__setitem__(key, value)
58
59 def __delattr__(self, item):
60 self.__delitem__(item)
61
62 def __setitem__(self, key, value):
63 validate_slice(key)
64 if value is _noop:
65 # The += set themselves.
66 return
67 self._operations.append(
68 _operation(
69 "Assign",
70 self._location + [key],
71 value=value,
72 )
73 )
74
75 def __delitem__(self, key):
76 validate_slice(key)
77 self._operations.append(_operation("Delete", self._location + [key]))
78
79 def __iadd__(self, other):
80 if isinstance(other, (list, tuple)):
81 self.extend(other)
82 else:
83 self._operations.append(_operation("Add", self._location, value=other))
84 return _noop
85
86 def __isub__(self, other):
87 self._operations.append(_operation("Sub", self._location, value=other))
88 return _noop
89
90 def __imul__(self, other):
91 self._operations.append(_operation("Mul", self._location, value=other))
92 return _noop
93
94 def __itruediv__(self, other):
95 self._operations.append(_operation("Div", self._location, value=other))
96 return _noop
97
98 def __ior__(self, other):
99 self.update(E=other)
100 return _noop
101
102 def __iter__(self):
103 raise TypeError("Patch objects are write-only, you cannot iterate them.")
104
105 def __repr__(self):
106 return f"<write-only dash.Patch object at {self._location}>"
107
108 def append(self, item):
109 """Add the item to the end of a list"""
110 self._operations.append(_operation("Append", self._location, value=item))
111
112 def prepend(self, item):
113 """Add the item to the start of a list"""
114 self._operations.append(_operation("Prepend", self._location, value=item))
115
116 def insert(self, index, item):
117 """Add the item at the index of a list"""
118 self._operations.append(
119 _operation("Insert", self._location, value=item, index=index)
120 )
121
122 def clear(self):
123 """Remove all items in a list"""
124 self._operations.append(_operation("Clear", self._location))
125
126 def reverse(self):
127 """Reversal of the order of items in a list"""
128 self._operations.append(_operation("Reverse", self._location))
129
130 def extend(self, item):
131 """Add all the items to the end of a list"""
132 if not isinstance(item, (list, tuple)):
133 raise TypeError(f"{item} should be a list or tuple")
134 self._operations.append(_operation("Extend", self._location, value=item))
135
136 def remove(self, item):
137 """filter the item out of a list on the frontend"""
138 self._operations.append(_operation("Remove", self._location, value=item))
139
140 def update(self, E=None, **F):
141 """Merge a dict or keyword arguments with another dictionary"""
142 value = E or {}
143 value.update(F)
144 self._operations.append(_operation("Merge", self._location, value=value))
145
146 # pylint: disable=no-self-use
147 def sort(self):
148 raise KeyError(
149 "sort is reserved for future use, use brackets to access this key on your object"
150 )
151
152 def to_plotly_json(self):
153 return {
154 "__dash_patch_update": "__dash_patch_update",
155 "operations": self._operations,
156 }
157
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dash/_patch.py b/dash/_patch.py
--- a/dash/_patch.py
+++ b/dash/_patch.py
@@ -36,18 +36,18 @@
def __setstate__(self, state):
vars(self).update(state)
- def __getitem__(self, item):
+ def __getitem__(self, item) -> "Patch":
validate_slice(item)
return Patch(location=self._location + [item], parent=self)
- def __getattr__(self, item):
+ def __getattr__(self, item) -> "Patch":
if item == "tolist":
# to_json fix
raise AttributeError
if item == "_location":
- return self._location
+ return self._location # type: ignore
if item == "_operations":
- return self._operations
+ return self._operations # type: ignore
return self.__getitem__(item)
def __setattr__(self, key, value):
@@ -81,22 +81,32 @@
self.extend(other)
else:
self._operations.append(_operation("Add", self._location, value=other))
+ if not self._location:
+ return self
return _noop
def __isub__(self, other):
self._operations.append(_operation("Sub", self._location, value=other))
+ if not self._location:
+ return self
return _noop
def __imul__(self, other):
self._operations.append(_operation("Mul", self._location, value=other))
+ if not self._location:
+ return self
return _noop
def __itruediv__(self, other):
self._operations.append(_operation("Div", self._location, value=other))
+ if not self._location:
+ return self
return _noop
def __ior__(self, other):
self.update(E=other)
+ if not self._location:
+ return self
return _noop
def __iter__(self):
| {"golden_diff": "diff --git a/dash/_patch.py b/dash/_patch.py\n--- a/dash/_patch.py\n+++ b/dash/_patch.py\n@@ -36,18 +36,18 @@\n def __setstate__(self, state):\n vars(self).update(state)\n \n- def __getitem__(self, item):\n+ def __getitem__(self, item) -> \"Patch\":\n validate_slice(item)\n return Patch(location=self._location + [item], parent=self)\n \n- def __getattr__(self, item):\n+ def __getattr__(self, item) -> \"Patch\":\n if item == \"tolist\":\n # to_json fix\n raise AttributeError\n if item == \"_location\":\n- return self._location\n+ return self._location # type: ignore\n if item == \"_operations\":\n- return self._operations\n+ return self._operations # type: ignore\n return self.__getitem__(item)\n \n def __setattr__(self, key, value):\n@@ -81,22 +81,32 @@\n self.extend(other)\n else:\n self._operations.append(_operation(\"Add\", self._location, value=other))\n+ if not self._location:\n+ return self\n return _noop\n \n def __isub__(self, other):\n self._operations.append(_operation(\"Sub\", self._location, value=other))\n+ if not self._location:\n+ return self\n return _noop\n \n def __imul__(self, other):\n self._operations.append(_operation(\"Mul\", self._location, value=other))\n+ if not self._location:\n+ return self\n return _noop\n \n def __itruediv__(self, other):\n self._operations.append(_operation(\"Div\", self._location, value=other))\n+ if not self._location:\n+ return self\n return _noop\n \n def __ior__(self, other):\n self.update(E=other)\n+ if not self._location:\n+ return self\n return _noop\n \n def __iter__(self):\n", "issue": "[BUG] Patch += operation not working on the patch object.\nIf you have a patch object for a number prop and try to do `patched += 1`, the callback will fail with an error, `returned non serializable object`.\r\n\r\nExample:\r\n```\r\napp = Dash()\r\napp.layout = [dcc.Store(data=0, store=\"store\"), html.Button(\"click\", id=\"click\")]\r\[email protected](Output(\"store\", \"data\"), Input(\"click\", \"n_clicks\"))\r\ndef on_click(_):\r\n patched = Patch()\r\n patched += 1\r\n return patched\r\n```\r\n\r\nClicking on the button result in the error, it should instead update the store data prop to +1.\n", "before_files": [{"content": "def _operation(name, location, **kwargs):\n return {\"operation\": name, \"location\": location, \"params\": dict(**kwargs)}\n\n\n_noop = object()\n\n\ndef validate_slice(obj):\n if isinstance(obj, slice):\n raise TypeError(\"a slice is not a valid index for patch\")\n\n\nclass Patch:\n \"\"\"\n Patch a callback output value\n\n Act like a proxy of the output prop value on the frontend.\n\n Supported prop types: Dictionaries and lists.\n \"\"\"\n\n def __init__(self, location=None, parent=None):\n if location is not None:\n self._location = location\n else:\n # pylint: disable=consider-using-ternary\n self._location = (parent and parent._location) or []\n if parent is not None:\n self._operations = parent._operations\n else:\n self._operations = []\n\n def __getstate__(self):\n return vars(self)\n\n def __setstate__(self, state):\n vars(self).update(state)\n\n def __getitem__(self, item):\n validate_slice(item)\n return Patch(location=self._location + [item], parent=self)\n\n def __getattr__(self, item):\n if item == \"tolist\":\n # to_json fix\n raise AttributeError\n if item == \"_location\":\n return self._location\n if item == \"_operations\":\n return self._operations\n return self.__getitem__(item)\n\n def __setattr__(self, key, value):\n if key in (\"_location\", \"_operations\"):\n 
self.__dict__[key] = value\n else:\n self.__setitem__(key, value)\n\n def __delattr__(self, item):\n self.__delitem__(item)\n\n def __setitem__(self, key, value):\n validate_slice(key)\n if value is _noop:\n # The += set themselves.\n return\n self._operations.append(\n _operation(\n \"Assign\",\n self._location + [key],\n value=value,\n )\n )\n\n def __delitem__(self, key):\n validate_slice(key)\n self._operations.append(_operation(\"Delete\", self._location + [key]))\n\n def __iadd__(self, other):\n if isinstance(other, (list, tuple)):\n self.extend(other)\n else:\n self._operations.append(_operation(\"Add\", self._location, value=other))\n return _noop\n\n def __isub__(self, other):\n self._operations.append(_operation(\"Sub\", self._location, value=other))\n return _noop\n\n def __imul__(self, other):\n self._operations.append(_operation(\"Mul\", self._location, value=other))\n return _noop\n\n def __itruediv__(self, other):\n self._operations.append(_operation(\"Div\", self._location, value=other))\n return _noop\n\n def __ior__(self, other):\n self.update(E=other)\n return _noop\n\n def __iter__(self):\n raise TypeError(\"Patch objects are write-only, you cannot iterate them.\")\n\n def __repr__(self):\n return f\"<write-only dash.Patch object at {self._location}>\"\n\n def append(self, item):\n \"\"\"Add the item to the end of a list\"\"\"\n self._operations.append(_operation(\"Append\", self._location, value=item))\n\n def prepend(self, item):\n \"\"\"Add the item to the start of a list\"\"\"\n self._operations.append(_operation(\"Prepend\", self._location, value=item))\n\n def insert(self, index, item):\n \"\"\"Add the item at the index of a list\"\"\"\n self._operations.append(\n _operation(\"Insert\", self._location, value=item, index=index)\n )\n\n def clear(self):\n \"\"\"Remove all items in a list\"\"\"\n self._operations.append(_operation(\"Clear\", self._location))\n\n def reverse(self):\n \"\"\"Reversal of the order of items in a list\"\"\"\n self._operations.append(_operation(\"Reverse\", self._location))\n\n def extend(self, item):\n \"\"\"Add all the items to the end of a list\"\"\"\n if not isinstance(item, (list, tuple)):\n raise TypeError(f\"{item} should be a list or tuple\")\n self._operations.append(_operation(\"Extend\", self._location, value=item))\n\n def remove(self, item):\n \"\"\"filter the item out of a list on the frontend\"\"\"\n self._operations.append(_operation(\"Remove\", self._location, value=item))\n\n def update(self, E=None, **F):\n \"\"\"Merge a dict or keyword arguments with another dictionary\"\"\"\n value = E or {}\n value.update(F)\n self._operations.append(_operation(\"Merge\", self._location, value=value))\n\n # pylint: disable=no-self-use\n def sort(self):\n raise KeyError(\n \"sort is reserved for future use, use brackets to access this key on your object\"\n )\n\n def to_plotly_json(self):\n return {\n \"__dash_patch_update\": \"__dash_patch_update\",\n \"operations\": self._operations,\n }\n", "path": "dash/_patch.py"}], "after_files": [{"content": "def _operation(name, location, **kwargs):\n return {\"operation\": name, \"location\": location, \"params\": dict(**kwargs)}\n\n\n_noop = object()\n\n\ndef validate_slice(obj):\n if isinstance(obj, slice):\n raise TypeError(\"a slice is not a valid index for patch\")\n\n\nclass Patch:\n \"\"\"\n Patch a callback output value\n\n Act like a proxy of the output prop value on the frontend.\n\n Supported prop types: Dictionaries and lists.\n \"\"\"\n\n def __init__(self, location=None, 
parent=None):\n if location is not None:\n self._location = location\n else:\n # pylint: disable=consider-using-ternary\n self._location = (parent and parent._location) or []\n if parent is not None:\n self._operations = parent._operations\n else:\n self._operations = []\n\n def __getstate__(self):\n return vars(self)\n\n def __setstate__(self, state):\n vars(self).update(state)\n\n def __getitem__(self, item) -> \"Patch\":\n validate_slice(item)\n return Patch(location=self._location + [item], parent=self)\n\n def __getattr__(self, item) -> \"Patch\":\n if item == \"tolist\":\n # to_json fix\n raise AttributeError\n if item == \"_location\":\n return self._location # type: ignore\n if item == \"_operations\":\n return self._operations # type: ignore\n return self.__getitem__(item)\n\n def __setattr__(self, key, value):\n if key in (\"_location\", \"_operations\"):\n self.__dict__[key] = value\n else:\n self.__setitem__(key, value)\n\n def __delattr__(self, item):\n self.__delitem__(item)\n\n def __setitem__(self, key, value):\n validate_slice(key)\n if value is _noop:\n # The += set themselves.\n return\n self._operations.append(\n _operation(\n \"Assign\",\n self._location + [key],\n value=value,\n )\n )\n\n def __delitem__(self, key):\n validate_slice(key)\n self._operations.append(_operation(\"Delete\", self._location + [key]))\n\n def __iadd__(self, other):\n if isinstance(other, (list, tuple)):\n self.extend(other)\n else:\n self._operations.append(_operation(\"Add\", self._location, value=other))\n if not self._location:\n return self\n return _noop\n\n def __isub__(self, other):\n self._operations.append(_operation(\"Sub\", self._location, value=other))\n if not self._location:\n return self\n return _noop\n\n def __imul__(self, other):\n self._operations.append(_operation(\"Mul\", self._location, value=other))\n if not self._location:\n return self\n return _noop\n\n def __itruediv__(self, other):\n self._operations.append(_operation(\"Div\", self._location, value=other))\n if not self._location:\n return self\n return _noop\n\n def __ior__(self, other):\n self.update(E=other)\n if not self._location:\n return self\n return _noop\n\n def __iter__(self):\n raise TypeError(\"Patch objects are write-only, you cannot iterate them.\")\n\n def __repr__(self):\n return f\"<write-only dash.Patch object at {self._location}>\"\n\n def append(self, item):\n \"\"\"Add the item to the end of a list\"\"\"\n self._operations.append(_operation(\"Append\", self._location, value=item))\n\n def prepend(self, item):\n \"\"\"Add the item to the start of a list\"\"\"\n self._operations.append(_operation(\"Prepend\", self._location, value=item))\n\n def insert(self, index, item):\n \"\"\"Add the item at the index of a list\"\"\"\n self._operations.append(\n _operation(\"Insert\", self._location, value=item, index=index)\n )\n\n def clear(self):\n \"\"\"Remove all items in a list\"\"\"\n self._operations.append(_operation(\"Clear\", self._location))\n\n def reverse(self):\n \"\"\"Reversal of the order of items in a list\"\"\"\n self._operations.append(_operation(\"Reverse\", self._location))\n\n def extend(self, item):\n \"\"\"Add all the items to the end of a list\"\"\"\n if not isinstance(item, (list, tuple)):\n raise TypeError(f\"{item} should be a list or tuple\")\n self._operations.append(_operation(\"Extend\", self._location, value=item))\n\n def remove(self, item):\n \"\"\"filter the item out of a list on the frontend\"\"\"\n self._operations.append(_operation(\"Remove\", self._location, 
value=item))\n\n def update(self, E=None, **F):\n \"\"\"Merge a dict or keyword arguments with another dictionary\"\"\"\n value = E or {}\n value.update(F)\n self._operations.append(_operation(\"Merge\", self._location, value=value))\n\n # pylint: disable=no-self-use\n def sort(self):\n raise KeyError(\n \"sort is reserved for future use, use brackets to access this key on your object\"\n )\n\n def to_plotly_json(self):\n return {\n \"__dash_patch_update\": \"__dash_patch_update\",\n \"operations\": self._operations,\n }\n", "path": "dash/_patch.py"}]} | 1,874 | 460 |
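A quick way to see the effect of the `Patch` diff above on a root-level patch: with an empty `_location`, the in-place operators now return the patch itself rather than the `_noop` sentinel, so `patched += 1` leaves the callback with a serializable `Patch`. A minimal sketch, assuming Dash ≥ 2.9 (where `Patch` is exported from the top-level `dash` package) with the fix applied:

```python
from dash import Patch

# Root-level patch targeting a numeric prop (e.g. a dcc.Store's `data`)
patched = Patch()
patched += 1  # __iadd__ records an "Add" operation and returns the Patch itself

# What would be serialized back to the frontend for this output
print(patched.to_plotly_json())
# {'__dash_patch_update': '__dash_patch_update',
#  'operations': [{'operation': 'Add', 'location': [], 'params': {'value': 1}}]}
```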
gh_patches_debug_286 | rasdani/github-patches | git_diff | Mailu__Mailu-2049 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fetchmail: /var/lib/fetchmail needs persistence
According to the [fetchmail documentation](https://www.fetchmail.info/fetchmail-man.html#12), an `.idfile` is used to keep track of previously downloaded messages. Shouldn't that file be persistent across container restarts?
I'm not a Fetchmail user; perhaps somebody can shine a light on how this currently works?
cc: @Nebukadneza, @hoellen, @kaiyou
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `optional/fetchmail/fetchmail.py`
Content:
```
1 #!/usr/bin/python3
2
3 import time
4 import os
5 import tempfile
6 import shlex
7 import subprocess
8 import re
9 import requests
10 import sys
11 import traceback
12
13
14 FETCHMAIL = """
15 fetchmail -N \
16 --sslcertck --sslcertpath /etc/ssl/certs \
17 -f {}
18 """
19
20
21 RC_LINE = """
22 poll "{host}" proto {protocol} port {port}
23 user "{username}" password "{password}"
24 is "{user_email}"
25 smtphost "{smtphost}"
26 {options}
27 """
28
29
30 def extract_host_port(host_and_port, default_port):
31 host, _, port = re.match('^(.*?)(:([0-9]*))?$', host_and_port).groups()
32 return host, int(port) if port else default_port
33
34
35 def escape_rc_string(arg):
36 return "".join("\\x%2x" % ord(char) for char in arg)
37
38
39 def fetchmail(fetchmailrc):
40 with tempfile.NamedTemporaryFile() as handler:
41 handler.write(fetchmailrc.encode("utf8"))
42 handler.flush()
43 command = FETCHMAIL.format(shlex.quote(handler.name))
44 output = subprocess.check_output(command, shell=True)
45 return output
46
47
48 def run(debug):
49 try:
50 fetches = requests.get("http://" + os.environ.get("HOST_ADMIN", "admin") + "/internal/fetch").json()
51 smtphost, smtpport = extract_host_port(os.environ.get("HOST_SMTP", "smtp"), None)
52 if smtpport is None:
53 smtphostport = smtphost
54 else:
55 smtphostport = "%s/%d" % (smtphost, smtpport)
56 for fetch in fetches:
57 fetchmailrc = ""
58 options = "options antispam 501, 504, 550, 553, 554"
59 options += " ssl" if fetch["tls"] else ""
60 options += " keep" if fetch["keep"] else " fetchall"
61 fetchmailrc += RC_LINE.format(
62 user_email=escape_rc_string(fetch["user_email"]),
63 protocol=fetch["protocol"],
64 host=escape_rc_string(fetch["host"]),
65 port=fetch["port"],
66 smtphost=smtphostport,
67 username=escape_rc_string(fetch["username"]),
68 password=escape_rc_string(fetch["password"]),
69 options=options
70 )
71 if debug:
72 print(fetchmailrc)
73 try:
74 print(fetchmail(fetchmailrc))
75 error_message = ""
76 except subprocess.CalledProcessError as error:
77 error_message = error.output.decode("utf8")
78 # No mail is not an error
79 if not error_message.startswith("fetchmail: No mail"):
80 print(error_message)
81 user_info = "for %s at %s" % (fetch["user_email"], fetch["host"])
82 # Number of messages seen is not a error as well
83 if ("messages" in error_message and
84 "(seen " in error_message and
85 user_info in error_message):
86 print(error_message)
87 finally:
88 requests.post("http://" + os.environ.get("HOST_ADMIN", "admin") + "/internal/fetch/{}".format(fetch["id"]),
89 json=error_message.split("\n")[0]
90 )
91 except Exception:
92 traceback.print_exc()
93
94
95 if __name__ == "__main__":
96 while True:
97 time.sleep(int(os.environ.get("FETCHMAIL_DELAY", 60)))
98 run(os.environ.get("DEBUG", None) == "True")
99 sys.stdout.flush()
100
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/optional/fetchmail/fetchmail.py b/optional/fetchmail/fetchmail.py
--- a/optional/fetchmail/fetchmail.py
+++ b/optional/fetchmail/fetchmail.py
@@ -13,6 +13,7 @@
FETCHMAIL = """
fetchmail -N \
+ --idfile /data/fetchids --uidl \
--sslcertck --sslcertpath /etc/ssl/certs \
-f {}
"""
| {"golden_diff": "diff --git a/optional/fetchmail/fetchmail.py b/optional/fetchmail/fetchmail.py\n--- a/optional/fetchmail/fetchmail.py\n+++ b/optional/fetchmail/fetchmail.py\n@@ -13,6 +13,7 @@\n \n FETCHMAIL = \"\"\"\n fetchmail -N \\\n+ --idfile /data/fetchids --uidl \\\n --sslcertck --sslcertpath /etc/ssl/certs \\\n -f {}\n \"\"\"\n", "issue": "Fetchmail: /var/lib/fetchmail needs persistence\nAccording [fetchmail documentation](https://www.fetchmail.info/fetchmail-man.html#12), an `.idfile` is used to keep track of previously downloaded messages. Shouldn't that file persistent over container restarts?\r\n\r\nI'm not a Fetchmail user, perhaps somebody can shine a light on how this currently works?\r\n\r\ncc: @Nebukadneza, @hoellen, @kaiyou \n", "before_files": [{"content": "#!/usr/bin/python3\n\nimport time\nimport os\nimport tempfile\nimport shlex\nimport subprocess\nimport re\nimport requests\nimport sys\nimport traceback\n\n\nFETCHMAIL = \"\"\"\nfetchmail -N \\\n --sslcertck --sslcertpath /etc/ssl/certs \\\n -f {}\n\"\"\"\n\n\nRC_LINE = \"\"\"\npoll \"{host}\" proto {protocol} port {port}\n user \"{username}\" password \"{password}\"\n is \"{user_email}\"\n smtphost \"{smtphost}\"\n {options}\n\"\"\"\n\n\ndef extract_host_port(host_and_port, default_port):\n host, _, port = re.match('^(.*?)(:([0-9]*))?$', host_and_port).groups()\n return host, int(port) if port else default_port\n\n\ndef escape_rc_string(arg):\n return \"\".join(\"\\\\x%2x\" % ord(char) for char in arg)\n\n\ndef fetchmail(fetchmailrc):\n with tempfile.NamedTemporaryFile() as handler:\n handler.write(fetchmailrc.encode(\"utf8\"))\n handler.flush()\n command = FETCHMAIL.format(shlex.quote(handler.name))\n output = subprocess.check_output(command, shell=True)\n return output\n\n\ndef run(debug):\n try:\n fetches = requests.get(\"http://\" + os.environ.get(\"HOST_ADMIN\", \"admin\") + \"/internal/fetch\").json()\n smtphost, smtpport = extract_host_port(os.environ.get(\"HOST_SMTP\", \"smtp\"), None)\n if smtpport is None:\n smtphostport = smtphost\n else:\n smtphostport = \"%s/%d\" % (smtphost, smtpport)\n for fetch in fetches:\n fetchmailrc = \"\"\n options = \"options antispam 501, 504, 550, 553, 554\"\n options += \" ssl\" if fetch[\"tls\"] else \"\"\n options += \" keep\" if fetch[\"keep\"] else \" fetchall\"\n fetchmailrc += RC_LINE.format(\n user_email=escape_rc_string(fetch[\"user_email\"]),\n protocol=fetch[\"protocol\"],\n host=escape_rc_string(fetch[\"host\"]),\n port=fetch[\"port\"],\n smtphost=smtphostport,\n username=escape_rc_string(fetch[\"username\"]),\n password=escape_rc_string(fetch[\"password\"]),\n options=options\n )\n if debug:\n print(fetchmailrc)\n try:\n print(fetchmail(fetchmailrc))\n error_message = \"\"\n except subprocess.CalledProcessError as error:\n error_message = error.output.decode(\"utf8\")\n # No mail is not an error\n if not error_message.startswith(\"fetchmail: No mail\"):\n print(error_message)\n user_info = \"for %s at %s\" % (fetch[\"user_email\"], fetch[\"host\"])\n # Number of messages seen is not a error as well\n if (\"messages\" in error_message and\n \"(seen \" in error_message and\n user_info in error_message):\n print(error_message)\n finally:\n requests.post(\"http://\" + os.environ.get(\"HOST_ADMIN\", \"admin\") + \"/internal/fetch/{}\".format(fetch[\"id\"]),\n json=error_message.split(\"\\n\")[0]\n )\n except Exception:\n traceback.print_exc()\n\n\nif __name__ == \"__main__\":\n while True:\n time.sleep(int(os.environ.get(\"FETCHMAIL_DELAY\", 60)))\n 
run(os.environ.get(\"DEBUG\", None) == \"True\")\n sys.stdout.flush()\n", "path": "optional/fetchmail/fetchmail.py"}], "after_files": [{"content": "#!/usr/bin/python3\n\nimport time\nimport os\nimport tempfile\nimport shlex\nimport subprocess\nimport re\nimport requests\nimport sys\nimport traceback\n\n\nFETCHMAIL = \"\"\"\nfetchmail -N \\\n --idfile /data/fetchids --uidl \\\n --sslcertck --sslcertpath /etc/ssl/certs \\\n -f {}\n\"\"\"\n\n\nRC_LINE = \"\"\"\npoll \"{host}\" proto {protocol} port {port}\n user \"{username}\" password \"{password}\"\n is \"{user_email}\"\n smtphost \"{smtphost}\"\n {options}\n\"\"\"\n\n\ndef extract_host_port(host_and_port, default_port):\n host, _, port = re.match('^(.*?)(:([0-9]*))?$', host_and_port).groups()\n return host, int(port) if port else default_port\n\n\ndef escape_rc_string(arg):\n return \"\".join(\"\\\\x%2x\" % ord(char) for char in arg)\n\n\ndef fetchmail(fetchmailrc):\n with tempfile.NamedTemporaryFile() as handler:\n handler.write(fetchmailrc.encode(\"utf8\"))\n handler.flush()\n command = FETCHMAIL.format(shlex.quote(handler.name))\n output = subprocess.check_output(command, shell=True)\n return output\n\n\ndef run(debug):\n try:\n fetches = requests.get(\"http://\" + os.environ.get(\"HOST_ADMIN\", \"admin\") + \"/internal/fetch\").json()\n smtphost, smtpport = extract_host_port(os.environ.get(\"HOST_SMTP\", \"smtp\"), None)\n if smtpport is None:\n smtphostport = smtphost\n else:\n smtphostport = \"%s/%d\" % (smtphost, smtpport)\n for fetch in fetches:\n fetchmailrc = \"\"\n options = \"options antispam 501, 504, 550, 553, 554\"\n options += \" ssl\" if fetch[\"tls\"] else \"\"\n options += \" keep\" if fetch[\"keep\"] else \" fetchall\"\n fetchmailrc += RC_LINE.format(\n user_email=escape_rc_string(fetch[\"user_email\"]),\n protocol=fetch[\"protocol\"],\n host=escape_rc_string(fetch[\"host\"]),\n port=fetch[\"port\"],\n smtphost=smtphostport,\n username=escape_rc_string(fetch[\"username\"]),\n password=escape_rc_string(fetch[\"password\"]),\n options=options\n )\n if debug:\n print(fetchmailrc)\n try:\n print(fetchmail(fetchmailrc))\n error_message = \"\"\n except subprocess.CalledProcessError as error:\n error_message = error.output.decode(\"utf8\")\n # No mail is not an error\n if not error_message.startswith(\"fetchmail: No mail\"):\n print(error_message)\n user_info = \"for %s at %s\" % (fetch[\"user_email\"], fetch[\"host\"])\n # Number of messages seen is not a error as well\n if (\"messages\" in error_message and\n \"(seen \" in error_message and\n user_info in error_message):\n print(error_message)\n finally:\n requests.post(\"http://\" + os.environ.get(\"HOST_ADMIN\", \"admin\") + \"/internal/fetch/{}\".format(fetch[\"id\"]),\n json=error_message.split(\"\\n\")[0]\n )\n except Exception:\n traceback.print_exc()\n\n\nif __name__ == \"__main__\":\n while True:\n time.sleep(int(os.environ.get(\"FETCHMAIL_DELAY\", 60)))\n run(os.environ.get(\"DEBUG\", None) == \"True\")\n sys.stdout.flush()\n", "path": "optional/fetchmail/fetchmail.py"}]} | 1,320 | 105 |
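One practical note on the patch above: pointing fetchmail's UID bookkeeping at `/data/fetchids` only survives restarts if `/data` inside the fetchmail container is backed by a persistent volume; the flag by itself does not provide persistence. A minimal sketch of the patched command template (the rc-file path below is a placeholder, not part of the diff):

```python
import shlex

# Patched template: --idfile/--uidl make fetchmail record already-fetched
# message UIDs in /data/fetchids, which should live on a mounted volume.
FETCHMAIL = """
fetchmail -N \
    --idfile /data/fetchids --uidl \
    --sslcertck --sslcertpath /etc/ssl/certs \
    -f {}
"""

print(FETCHMAIL.format(shlex.quote("/tmp/fetchmailrc")))
```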
gh_patches_debug_18158 | rasdani/github-patches | git_diff | openai__gym-1966 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug in PixelObservationWrapper
In `pixel_observation.py`, we have a bit of code that looks like this:
```
def _add_pixel_observation(self, observation):
if self._pixels_only:
observation = collections.OrderedDict()
elif self._observation_is_dict:
observation = type(observation)(observation)
else:
observation = collections.OrderedDict()
observation[STATE_KEY] = observation
```
As you can see, the argument `observation` is being clobbered in the else case, so the observation dictionary stored at STATE_KEY ends up referring to itself instead of to the underlying env's observation. 
I'm happy to fix this and submit a pull request, but I wanted to bring it to the community's attention first. 
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gym/wrappers/pixel_observation.py`
Content:
```
1 """An observation wrapper that augments observations by pixel values."""
2
3 import collections
4 import copy
5
6 import numpy as np
7
8 from gym import spaces
9 from gym import ObservationWrapper
10
11 STATE_KEY = 'state'
12
13
14 class PixelObservationWrapper(ObservationWrapper):
15 """Augment observations by pixel values."""
16
17 def __init__(self,
18 env,
19 pixels_only=True,
20 render_kwargs=None,
21 pixel_keys=('pixels', )):
22 """Initializes a new pixel Wrapper.
23
24 Args:
25 env: The environment to wrap.
26 pixels_only: If `True` (default), the original observation returned
27 by the wrapped environment will be discarded, and a dictionary
28 observation will only include pixels. If `False`, the
29 observation dictionary will contain both the original
30 observations and the pixel observations.
31 render_kwargs: Optional `dict` containing keyword arguments passed
32 to the `self.render` method.
33 pixel_keys: Optional custom string specifying the pixel
34 observation's key in the `OrderedDict` of observations.
35 Defaults to 'pixels'.
36
37 Raises:
38 ValueError: If `env`'s observation spec is not compatible with the
39 wrapper. Supported formats are a single array, or a dict of
40 arrays.
41 ValueError: If `env`'s observation already contains any of the
42 specified `pixel_keys`.
43 """
44
45 super(PixelObservationWrapper, self).__init__(env)
46
47 if render_kwargs is None:
48 render_kwargs = {}
49
50 for key in pixel_keys:
51 render_kwargs.setdefault(key, {})
52
53 render_mode = render_kwargs[key].pop('mode', 'rgb_array')
54 assert render_mode == 'rgb_array', render_mode
55 render_kwargs[key]['mode'] = 'rgb_array'
56
57 wrapped_observation_space = env.observation_space
58
59 if isinstance(wrapped_observation_space, spaces.Box):
60 self._observation_is_dict = False
61 invalid_keys = set([STATE_KEY])
62 elif isinstance(wrapped_observation_space,
63 (spaces.Dict, collections.MutableMapping)):
64 self._observation_is_dict = True
65 invalid_keys = set(wrapped_observation_space.spaces.keys())
66 else:
67 raise ValueError("Unsupported observation space structure.")
68
69 if not pixels_only:
70 # Make sure that now keys in the `pixel_keys` overlap with
71 # `observation_keys`
72 overlapping_keys = set(pixel_keys) & set(invalid_keys)
73 if overlapping_keys:
74 raise ValueError("Duplicate or reserved pixel keys {!r}."
75 .format(overlapping_keys))
76
77 if pixels_only:
78 self.observation_space = spaces.Dict()
79 elif self._observation_is_dict:
80 self.observation_space = copy.deepcopy(wrapped_observation_space)
81 else:
82 self.observation_space = spaces.Dict()
83 self.observation_space.spaces[STATE_KEY] = wrapped_observation_space
84
85 # Extend observation space with pixels.
86
87 pixels_spaces = {}
88 for pixel_key in pixel_keys:
89 pixels = self.env.render(**render_kwargs[pixel_key])
90
91 if np.issubdtype(pixels.dtype, np.integer):
92 low, high = (0, 255)
93 elif np.issubdtype(pixels.dtype, np.float):
94 low, high = (-float('inf'), float('inf'))
95 else:
96 raise TypeError(pixels.dtype)
97
98 pixels_space = spaces.Box(
99 shape=pixels.shape, low=low, high=high, dtype=pixels.dtype)
100 pixels_spaces[pixel_key] = pixels_space
101
102 self.observation_space.spaces.update(pixels_spaces)
103
104 self._env = env
105 self._pixels_only = pixels_only
106 self._render_kwargs = render_kwargs
107 self._pixel_keys = pixel_keys
108
109 def observation(self, observation):
110 pixel_observation = self._add_pixel_observation(observation)
111 return pixel_observation
112
113 def _add_pixel_observation(self, observation):
114 if self._pixels_only:
115 observation = collections.OrderedDict()
116 elif self._observation_is_dict:
117 observation = type(observation)(observation)
118 else:
119 observation = collections.OrderedDict()
120 observation[STATE_KEY] = observation
121
122 pixel_observations = {
123 pixel_key: self.env.render(**self._render_kwargs[pixel_key])
124 for pixel_key in self._pixel_keys
125 }
126
127 observation.update(pixel_observations)
128
129 return observation
130
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gym/wrappers/pixel_observation.py b/gym/wrappers/pixel_observation.py
--- a/gym/wrappers/pixel_observation.py
+++ b/gym/wrappers/pixel_observation.py
@@ -110,14 +110,14 @@
pixel_observation = self._add_pixel_observation(observation)
return pixel_observation
- def _add_pixel_observation(self, observation):
+ def _add_pixel_observation(self, wrapped_observation):
if self._pixels_only:
observation = collections.OrderedDict()
elif self._observation_is_dict:
- observation = type(observation)(observation)
+ observation = type(wrapped_observation)(wrapped_observation)
else:
observation = collections.OrderedDict()
- observation[STATE_KEY] = observation
+ observation[STATE_KEY] = wrapped_observation
pixel_observations = {
pixel_key: self.env.render(**self._render_kwargs[pixel_key])
| {"golden_diff": "diff --git a/gym/wrappers/pixel_observation.py b/gym/wrappers/pixel_observation.py\n--- a/gym/wrappers/pixel_observation.py\n+++ b/gym/wrappers/pixel_observation.py\n@@ -110,14 +110,14 @@\n pixel_observation = self._add_pixel_observation(observation)\n return pixel_observation\n \n- def _add_pixel_observation(self, observation):\n+ def _add_pixel_observation(self, wrapped_observation):\n if self._pixels_only:\n observation = collections.OrderedDict()\n elif self._observation_is_dict:\n- observation = type(observation)(observation)\n+ observation = type(wrapped_observation)(wrapped_observation)\n else:\n observation = collections.OrderedDict()\n- observation[STATE_KEY] = observation\n+ observation[STATE_KEY] = wrapped_observation\n \n pixel_observations = {\n pixel_key: self.env.render(**self._render_kwargs[pixel_key])\n", "issue": "Bug in PixelObservationWrapper\nIn the pixel_observation.py, we have a bit of code that looks like this: \r\n\r\n```\r\n def _add_pixel_observation(self, observation):\r\n if self._pixels_only:\r\n observation = collections.OrderedDict()\r\n elif self._observation_is_dict:\r\n observation = type(observation)(observation)\r\n else:\r\n observation = collections.OrderedDict()\r\n observation[STATE_KEY] = observation\r\n```\r\n\r\nIf you note, the argument `observation` is being clobbered in the else case, so now the observation dictionary at the STATE_KEY refers to itself instead of the underlying env's observation. \r\n\r\nI'm happy to fix this and submit a pull request but I wanted to raise the community's attention to this first. \n", "before_files": [{"content": "\"\"\"An observation wrapper that augments observations by pixel values.\"\"\"\n\nimport collections\nimport copy\n\nimport numpy as np\n\nfrom gym import spaces\nfrom gym import ObservationWrapper\n\nSTATE_KEY = 'state'\n\n\nclass PixelObservationWrapper(ObservationWrapper):\n \"\"\"Augment observations by pixel values.\"\"\"\n\n def __init__(self,\n env,\n pixels_only=True,\n render_kwargs=None,\n pixel_keys=('pixels', )):\n \"\"\"Initializes a new pixel Wrapper.\n\n Args:\n env: The environment to wrap.\n pixels_only: If `True` (default), the original observation returned\n by the wrapped environment will be discarded, and a dictionary\n observation will only include pixels. If `False`, the\n observation dictionary will contain both the original\n observations and the pixel observations.\n render_kwargs: Optional `dict` containing keyword arguments passed\n to the `self.render` method.\n pixel_keys: Optional custom string specifying the pixel\n observation's key in the `OrderedDict` of observations.\n Defaults to 'pixels'.\n\n Raises:\n ValueError: If `env`'s observation spec is not compatible with the\n wrapper. 
Supported formats are a single array, or a dict of\n arrays.\n ValueError: If `env`'s observation already contains any of the\n specified `pixel_keys`.\n \"\"\"\n\n super(PixelObservationWrapper, self).__init__(env)\n\n if render_kwargs is None:\n render_kwargs = {}\n\n for key in pixel_keys:\n render_kwargs.setdefault(key, {})\n\n render_mode = render_kwargs[key].pop('mode', 'rgb_array')\n assert render_mode == 'rgb_array', render_mode\n render_kwargs[key]['mode'] = 'rgb_array'\n\n wrapped_observation_space = env.observation_space\n\n if isinstance(wrapped_observation_space, spaces.Box):\n self._observation_is_dict = False\n invalid_keys = set([STATE_KEY])\n elif isinstance(wrapped_observation_space,\n (spaces.Dict, collections.MutableMapping)):\n self._observation_is_dict = True\n invalid_keys = set(wrapped_observation_space.spaces.keys())\n else:\n raise ValueError(\"Unsupported observation space structure.\")\n\n if not pixels_only:\n # Make sure that now keys in the `pixel_keys` overlap with\n # `observation_keys`\n overlapping_keys = set(pixel_keys) & set(invalid_keys)\n if overlapping_keys:\n raise ValueError(\"Duplicate or reserved pixel keys {!r}.\"\n .format(overlapping_keys))\n\n if pixels_only:\n self.observation_space = spaces.Dict()\n elif self._observation_is_dict:\n self.observation_space = copy.deepcopy(wrapped_observation_space)\n else:\n self.observation_space = spaces.Dict()\n self.observation_space.spaces[STATE_KEY] = wrapped_observation_space\n\n # Extend observation space with pixels.\n\n pixels_spaces = {}\n for pixel_key in pixel_keys:\n pixels = self.env.render(**render_kwargs[pixel_key])\n\n if np.issubdtype(pixels.dtype, np.integer):\n low, high = (0, 255)\n elif np.issubdtype(pixels.dtype, np.float):\n low, high = (-float('inf'), float('inf'))\n else:\n raise TypeError(pixels.dtype)\n\n pixels_space = spaces.Box(\n shape=pixels.shape, low=low, high=high, dtype=pixels.dtype)\n pixels_spaces[pixel_key] = pixels_space\n\n self.observation_space.spaces.update(pixels_spaces)\n\n self._env = env\n self._pixels_only = pixels_only\n self._render_kwargs = render_kwargs\n self._pixel_keys = pixel_keys\n\n def observation(self, observation):\n pixel_observation = self._add_pixel_observation(observation)\n return pixel_observation\n\n def _add_pixel_observation(self, observation):\n if self._pixels_only:\n observation = collections.OrderedDict()\n elif self._observation_is_dict:\n observation = type(observation)(observation)\n else:\n observation = collections.OrderedDict()\n observation[STATE_KEY] = observation\n\n pixel_observations = {\n pixel_key: self.env.render(**self._render_kwargs[pixel_key])\n for pixel_key in self._pixel_keys\n }\n\n observation.update(pixel_observations)\n\n return observation\n", "path": "gym/wrappers/pixel_observation.py"}], "after_files": [{"content": "\"\"\"An observation wrapper that augments observations by pixel values.\"\"\"\n\nimport collections\nimport copy\n\nimport numpy as np\n\nfrom gym import spaces\nfrom gym import ObservationWrapper\n\nSTATE_KEY = 'state'\n\n\nclass PixelObservationWrapper(ObservationWrapper):\n \"\"\"Augment observations by pixel values.\"\"\"\n\n def __init__(self,\n env,\n pixels_only=True,\n render_kwargs=None,\n pixel_keys=('pixels', )):\n \"\"\"Initializes a new pixel Wrapper.\n\n Args:\n env: The environment to wrap.\n pixels_only: If `True` (default), the original observation returned\n by the wrapped environment will be discarded, and a dictionary\n observation will only include pixels. 
If `False`, the\n observation dictionary will contain both the original\n observations and the pixel observations.\n render_kwargs: Optional `dict` containing keyword arguments passed\n to the `self.render` method.\n pixel_keys: Optional custom string specifying the pixel\n observation's key in the `OrderedDict` of observations.\n Defaults to 'pixels'.\n\n Raises:\n ValueError: If `env`'s observation spec is not compatible with the\n wrapper. Supported formats are a single array, or a dict of\n arrays.\n ValueError: If `env`'s observation already contains any of the\n specified `pixel_keys`.\n \"\"\"\n\n super(PixelObservationWrapper, self).__init__(env)\n\n if render_kwargs is None:\n render_kwargs = {}\n\n for key in pixel_keys:\n render_kwargs.setdefault(key, {})\n\n render_mode = render_kwargs[key].pop('mode', 'rgb_array')\n assert render_mode == 'rgb_array', render_mode\n render_kwargs[key]['mode'] = 'rgb_array'\n\n wrapped_observation_space = env.observation_space\n\n if isinstance(wrapped_observation_space, spaces.Box):\n self._observation_is_dict = False\n invalid_keys = set([STATE_KEY])\n elif isinstance(wrapped_observation_space,\n (spaces.Dict, collections.MutableMapping)):\n self._observation_is_dict = True\n invalid_keys = set(wrapped_observation_space.spaces.keys())\n else:\n raise ValueError(\"Unsupported observation space structure.\")\n\n if not pixels_only:\n # Make sure that now keys in the `pixel_keys` overlap with\n # `observation_keys`\n overlapping_keys = set(pixel_keys) & set(invalid_keys)\n if overlapping_keys:\n raise ValueError(\"Duplicate or reserved pixel keys {!r}.\"\n .format(overlapping_keys))\n\n if pixels_only:\n self.observation_space = spaces.Dict()\n elif self._observation_is_dict:\n self.observation_space = copy.deepcopy(wrapped_observation_space)\n else:\n self.observation_space = spaces.Dict()\n self.observation_space.spaces[STATE_KEY] = wrapped_observation_space\n\n # Extend observation space with pixels.\n\n pixels_spaces = {}\n for pixel_key in pixel_keys:\n pixels = self.env.render(**render_kwargs[pixel_key])\n\n if np.issubdtype(pixels.dtype, np.integer):\n low, high = (0, 255)\n elif np.issubdtype(pixels.dtype, np.float):\n low, high = (-float('inf'), float('inf'))\n else:\n raise TypeError(pixels.dtype)\n\n pixels_space = spaces.Box(\n shape=pixels.shape, low=low, high=high, dtype=pixels.dtype)\n pixels_spaces[pixel_key] = pixels_space\n\n self.observation_space.spaces.update(pixels_spaces)\n\n self._env = env\n self._pixels_only = pixels_only\n self._render_kwargs = render_kwargs\n self._pixel_keys = pixel_keys\n\n def observation(self, observation):\n pixel_observation = self._add_pixel_observation(observation)\n return pixel_observation\n\n def _add_pixel_observation(self, wrapped_observation):\n if self._pixels_only:\n observation = collections.OrderedDict()\n elif self._observation_is_dict:\n observation = type(wrapped_observation)(wrapped_observation)\n else:\n observation = collections.OrderedDict()\n observation[STATE_KEY] = wrapped_observation\n\n pixel_observations = {\n pixel_key: self.env.render(**self._render_kwargs[pixel_key])\n for pixel_key in self._pixel_keys\n }\n\n observation.update(pixel_observations)\n\n return observation\n", "path": "gym/wrappers/pixel_observation.py"}]} | 1,635 | 216 |
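The clobbering described in the issue above is easy to reproduce without gym: once the argument name is reused for the output dict, the `STATE_KEY` entry points at the dict itself and the wrapped observation is lost. A minimal sketch contrasting the two versions (plain Python, no environment required; function names are illustrative):

```python
import collections

STATE_KEY = 'state'

def add_pixel_observation_buggy(observation):
    # The argument is rebound to a fresh dict before it is stored,
    # so the dict ends up referencing itself.
    observation = collections.OrderedDict()
    observation[STATE_KEY] = observation
    return observation

def add_pixel_observation_fixed(wrapped_observation):
    observation = collections.OrderedDict()
    observation[STATE_KEY] = wrapped_observation
    return observation

obs = [0.1, 0.2, 0.3]  # stand-in for the wrapped env's observation
buggy = add_pixel_observation_buggy(obs)
fixed = add_pixel_observation_fixed(obs)

print(buggy[STATE_KEY] is buggy)  # True  -> self-referential, original obs lost
print(fixed[STATE_KEY] is obs)    # True  -> underlying observation preserved
```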
gh_patches_debug_51330 | rasdani/github-patches | git_diff | scikit-image__scikit-image-1281 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug: io.imshow() and io.show() do not work as expected in Windows
In my win7-x64 environment, io.imshow() and io.show() do not work as expected. I use io.imshow() to show multiple images, and when I call io.show() to show all the images, only the last image shows. In Linux, it works well and all the images show when I call io.show().
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `skimage/io/_plugins/matplotlib_plugin.py`
Content:
```
1 import matplotlib.pyplot as plt
2
3
4 def imshow(*args, **kwargs):
5 kwargs.setdefault('interpolation', 'nearest')
6 kwargs.setdefault('cmap', 'gray')
7 plt.imshow(*args, **kwargs)
8
9 imread = plt.imread
10 show = plt.show
11
12
13 def _app_show():
14 show()
15
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/skimage/io/_plugins/matplotlib_plugin.py b/skimage/io/_plugins/matplotlib_plugin.py
--- a/skimage/io/_plugins/matplotlib_plugin.py
+++ b/skimage/io/_plugins/matplotlib_plugin.py
@@ -2,6 +2,8 @@
def imshow(*args, **kwargs):
+ if plt.gca().has_data():
+ plt.figure()
kwargs.setdefault('interpolation', 'nearest')
kwargs.setdefault('cmap', 'gray')
plt.imshow(*args, **kwargs)
| {"golden_diff": "diff --git a/skimage/io/_plugins/matplotlib_plugin.py b/skimage/io/_plugins/matplotlib_plugin.py\n--- a/skimage/io/_plugins/matplotlib_plugin.py\n+++ b/skimage/io/_plugins/matplotlib_plugin.py\n@@ -2,6 +2,8 @@\n \n \n def imshow(*args, **kwargs):\n+ if plt.gca().has_data():\n+ plt.figure()\n kwargs.setdefault('interpolation', 'nearest')\n kwargs.setdefault('cmap', 'gray')\n plt.imshow(*args, **kwargs)\n", "issue": "Bug: io.imshow() and io.show() do not work as expected in winows\nIn my win7-x64 environemnt, io.imshow() and io.show() do not work as expected. I use io.imshow() to show mutiple images, and when I call io.show() to show all the images, only the last image shows. In linux, it works well and all the images will show when I call io.show()\n\n", "before_files": [{"content": "import matplotlib.pyplot as plt\n\n\ndef imshow(*args, **kwargs):\n kwargs.setdefault('interpolation', 'nearest')\n kwargs.setdefault('cmap', 'gray')\n plt.imshow(*args, **kwargs)\n\nimread = plt.imread\nshow = plt.show\n\n\ndef _app_show():\n show()\n", "path": "skimage/io/_plugins/matplotlib_plugin.py"}], "after_files": [{"content": "import matplotlib.pyplot as plt\n\n\ndef imshow(*args, **kwargs):\n if plt.gca().has_data():\n plt.figure()\n kwargs.setdefault('interpolation', 'nearest')\n kwargs.setdefault('cmap', 'gray')\n plt.imshow(*args, **kwargs)\n\nimread = plt.imread\nshow = plt.show\n\n\ndef _app_show():\n show()\n", "path": "skimage/io/_plugins/matplotlib_plugin.py"}]} | 441 | 114 |
gh_patches_debug_64317 | rasdani/github-patches | git_diff | pex-tool__pex-1112 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release 2.1.21
On the docket:
+ [x] "FileNotFoundError: [Errno 2] No such file or directory" in pex #1098
+ [x] Unclosed resource warning for `/dev/null` in PEX teardown. #1101
+ [x] Remove `--sources-directory` / `--resources-directory` distinction. #1100
+ [x] Invalid requirement, parse error at "'python_v' #940
+ [x] Pex skipping pandas activation #1017
+ [x] Changing vendored versions does not fully clean up previous version #1096
+ [x] Pex discards the current interpreter's PATH entry when it is a directory entry. #1109
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pex/version.py`
Content:
```
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.20"
5
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.20"
+__version__ = "2.1.21"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.20\"\n+__version__ = \"2.1.21\"\n", "issue": "Release 2.1.21\nOn the docket:\r\n+ [x] \"FileNotFoundError: [Errno 2] No such file or directory\" in pex #1098\r\n+ [x] Unclosed resource warning for `/dev/null` in PEX teardown. #1101\r\n+ [x] Remove `--sources-directory` / `--resources-directory` distinction. #1100\r\n+ [x] Invalid requirement, parse error at \"'python_v' #940\r\n+ [x] Pex skipping pandas activation #1017\r\n+ [x] Changing vendored versions does not fully clean up previous version #1096\r\n+ [x] Pex discards the current interpreter's PATH entry when it is a directory entry. #1109\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.20\"\n", "path": "pex/version.py"}], "after_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.21\"\n", "path": "pex/version.py"}]} | 480 | 96 |
gh_patches_debug_38331 | rasdani/github-patches | git_diff | spectrochempy__spectrochempy-11 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Automate building of docs for new release and dev version.
Author: @fernandezc (Christian Fernandez )
Date: 2020-05-28
Redmine Issue: 74, https://redmine.spectrochempy.fr/issues/74
---
None
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `spectrochempy/core/readers/readopus.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # ======================================================================================================================
4 # Copyright (©) 2015-2020 LCS
5 # Laboratoire Catalyse et Spectrochimie, Caen, France.
6 # CeCILL-B FREE SOFTWARE LICENSE AGREEMENT
7 # See full LICENSE agreement in the root directory
8 # ======================================================================================================================
9
10 """This module to extend NDDataset with the import methods method.
11
12 """
13 __all__ = ['read_opus']
14
15 __dataset_methods__ = __all__
16
17 # ----------------------------------------------------------------------------------------------------------------------
18 # standard imports
19 # ----------------------------------------------------------------------------------------------------------------------
20
21
22 from brukeropusreader import read_file
23 from warnings import warn
24 from datetime import datetime, timezone, timedelta
25
26
27
28
29
30 # ----------------------------------------------------------------------------------------------------------------------
31 # third party imports
32 # ----------------------------------------------------------------------------------------------------------------------
33 # ----------------------------------------------------------------------------------------------------------------------
34 # local imports
35 # ----------------------------------------------------------------------------------------------------------------------
36 from spectrochempy.core import debug_
37 from spectrochempy.core.dataset.nddataset import NDDataset
38 from spectrochempy.core.dataset.ndcoord import Coord
39 from spectrochempy.utils import readfilename
40 # ======================================================================================================================
41 # Public functions
42 # ======================================================================================================================
43
44 # .............................................................................
45 def read_opus(dataset=None, **kwargs):
46 """Open Bruker Opus file(s) and group them in a single dataset. Only the spectrum is
47 extracted ("AB" field). Returns an error if dimensions are incompatibles.
48
49 Parameters
50 ----------
51 filename : `None`, `str`, or list of `str`
52 Filename of the file(s) to load. If `None` : opens a dialog box to select
53 files. If `str` : a single filename. It list of str :
54 a list of filenames.
55 directory : str, optional, default="".
56 From where to read the specified filename. If not specified, read in
57 the defaults datadir.
58
59 Returns
60 -------
61 dataset : |NDDataset|
62 A dataset corresponding to the (set of) bruker file(s).
63
64 Examples
65 --------
66 >>> A = NDDataset.read_opus('irdata\\spectrum.0001')
67 >>> print(A)
68 NDDataset: [float64] a.u. (shape: (y:1, x:2568))
69 """
70 debug_("reading bruker opus files")
71
72 # filename will be given by a keyword parameter except if the first parameters is already
73 # the filename
74 filename = kwargs.get('filename', None)
75
76 # check if the first parameter is a dataset because we allow not to pass it
77 if not isinstance(dataset, NDDataset):
78 # probably did not specify a dataset
79 # so the first parameters must be the filename
80 if isinstance(dataset, (str, list)) and dataset != '':
81 filename = dataset
82
83 # check if directory was specified
84 directory = kwargs.get("directory", None)
85 sortbydate = kwargs.get("sortbydate", True)
86
87 # returns a list of files to read
88 files = readfilename(filename,
89 directory=directory,
90 filetypes=['Bruker files (*.*)',
91 'all files (*)'],
92 dictionary=False)
93 #todo: see how to use regular expression in Qt filters
94
95 if not files:
96 # there is no files, return nothing
97 return None
98
99 xaxis = None
100 intensities = []
101 names = []
102 acquisitiondates = []
103 timestamps = []
104 for file in files:
105 opus_data = read_file(file)
106 try:
107 opus_data["AB"]
108 except KeyError: # not an absorbance spectrum
109 warn("opus file {} could not be read".format(file))
110 continue
111
112 if not xaxis:
113 xaxis = Coord(opus_data.get_range("AB"), title='Wavenumbers', units='cm^-1')
114
115 elif (opus_data.get_range("AB") != xaxis.data).any():
116 raise ValueError("spectra have incompatible dimensions (xaxis)")
117
118 intensities.append(opus_data["AB"])
119 names.append(opus_data["Sample"]['SNM'])
120 acqdate = opus_data["AB Data Parameter"]["DAT"]
121 acqtime = opus_data["AB Data Parameter"]["TIM"]
122 GMT_offset_hour = float(acqtime.split('GMT')[1].split(')')[0])
123 date_time = datetime.strptime(acqdate + '_' + acqtime.split()[0],
124 '%d/%m/%Y_%H:%M:%S.%f')
125 UTC_date_time = date_time - timedelta(hours=GMT_offset_hour)
126 UTC_date_time = UTC_date_time.replace(tzinfo=timezone.utc)
127 # Transform to timestamp for storage in the Coord object
128 # use datetime.fromtimestamp(d, timezone.utc)) to transform back to datetime
129 timestamp = UTC_date_time.timestamp()
130 acquisitiondates.append(UTC_date_time)
131 timestamps.append(timestamp)
132
133 # return if none of the files could be read:
134 if not xaxis:
135 return
136
137 yaxis = Coord(timestamps,
138 title='Acquisition timestamp (GMT)',
139 units='s',
140 labels=(acquisitiondates, names))
141
142 dataset = NDDataset(intensities)
143 dataset.set_coords(y=yaxis, x=xaxis)
144 dataset.units = 'absorbance'
145 dataset.title = 'Absorbance'
146
147 # Set origin, description and history
148 dataset.origin = "opus"
149 dataset.description = ('Dataset from opus files. \n')
150
151 if sortbydate:
152 dataset.sort(dim='y', inplace=True)
153
154 dataset.history = str(datetime.now()) + ':import from opus files \n'
155
156 # Set the NDDataset date
157 dataset._date = datetime.now()
158 dataset._modified = dataset.date
159 # debug_("end of reading")
160
161 return dataset
162
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/spectrochempy/core/readers/readopus.py b/spectrochempy/core/readers/readopus.py
--- a/spectrochempy/core/readers/readopus.py
+++ b/spectrochempy/core/readers/readopus.py
@@ -22,10 +22,7 @@
from brukeropusreader import read_file
from warnings import warn
from datetime import datetime, timezone, timedelta
-
-
-
-
+from numpy import linspace
# ----------------------------------------------------------------------------------------------------------------------
# third party imports
@@ -37,6 +34,8 @@
from spectrochempy.core.dataset.nddataset import NDDataset
from spectrochempy.core.dataset.ndcoord import Coord
from spectrochempy.utils import readfilename
+
+
# ======================================================================================================================
# Public functions
# ======================================================================================================================
@@ -90,7 +89,7 @@
filetypes=['Bruker files (*.*)',
'all files (*)'],
dictionary=False)
- #todo: see how to use regular expression in Qt filters
+ # todo: see how to use regular expression in Qt filters
if not files:
# there is no files, return nothing
@@ -109,13 +108,18 @@
warn("opus file {} could not be read".format(file))
continue
+ npt = opus_data['AB Data Parameter']['NPT']
+ fxv = opus_data['AB Data Parameter']['FXV']
+ lxv = opus_data['AB Data Parameter']['LXV']
+ xdata = linspace(fxv, lxv, npt)
+
if not xaxis:
- xaxis = Coord(opus_data.get_range("AB"), title='Wavenumbers', units='cm^-1')
+ xaxis = Coord(x=xdata, title='Wavenumbers', units='cm^-1')
- elif (opus_data.get_range("AB") != xaxis.data).any():
+ elif (xdata != xaxis.data).any():
raise ValueError("spectra have incompatible dimensions (xaxis)")
- intensities.append(opus_data["AB"])
+ intensities.append(opus_data["AB"][:npt])
names.append(opus_data["Sample"]['SNM'])
acqdate = opus_data["AB Data Parameter"]["DAT"]
acqtime = opus_data["AB Data Parameter"]["TIM"]
| {"golden_diff": "diff --git a/spectrochempy/core/readers/readopus.py b/spectrochempy/core/readers/readopus.py\n--- a/spectrochempy/core/readers/readopus.py\n+++ b/spectrochempy/core/readers/readopus.py\n@@ -22,10 +22,7 @@\n from brukeropusreader import read_file\n from warnings import warn\n from datetime import datetime, timezone, timedelta\n-\n-\n-\n-\n+from numpy import linspace\n \n # ----------------------------------------------------------------------------------------------------------------------\n # third party imports\n@@ -37,6 +34,8 @@\n from spectrochempy.core.dataset.nddataset import NDDataset\n from spectrochempy.core.dataset.ndcoord import Coord\n from spectrochempy.utils import readfilename\n+\n+\n # ======================================================================================================================\n # Public functions\n # ======================================================================================================================\n@@ -90,7 +89,7 @@\n filetypes=['Bruker files (*.*)',\n 'all files (*)'],\n dictionary=False)\n- #todo: see how to use regular expression in Qt filters\n+ # todo: see how to use regular expression in Qt filters\n \n if not files:\n # there is no files, return nothing\n@@ -109,13 +108,18 @@\n warn(\"opus file {} could not be read\".format(file))\n continue\n \n+ npt = opus_data['AB Data Parameter']['NPT']\n+ fxv = opus_data['AB Data Parameter']['FXV']\n+ lxv = opus_data['AB Data Parameter']['LXV']\n+ xdata = linspace(fxv, lxv, npt)\n+\n if not xaxis:\n- xaxis = Coord(opus_data.get_range(\"AB\"), title='Wavenumbers', units='cm^-1')\n+ xaxis = Coord(x=xdata, title='Wavenumbers', units='cm^-1')\n \n- elif (opus_data.get_range(\"AB\") != xaxis.data).any():\n+ elif (xdata != xaxis.data).any():\n raise ValueError(\"spectra have incompatible dimensions (xaxis)\")\n \n- intensities.append(opus_data[\"AB\"])\n+ intensities.append(opus_data[\"AB\"][:npt])\n names.append(opus_data[\"Sample\"]['SNM'])\n acqdate = opus_data[\"AB Data Parameter\"][\"DAT\"]\n acqtime = opus_data[\"AB Data Parameter\"][\"TIM\"]\n", "issue": "Automate building of docs for new release and dev version.\nAuthor: @fernandezc (Christian Fernandez )\nDate: 2020-05-28\nRedmine Issue: 74, https://redmine.spectrochempy.fr/issues/74\n\n---\n\nNone\n\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# ======================================================================================================================\n# Copyright (\u00a9) 2015-2020 LCS\n# Laboratoire Catalyse et Spectrochimie, Caen, France.\n# CeCILL-B FREE SOFTWARE LICENSE AGREEMENT\n# See full LICENSE agreement in the root directory\n# ======================================================================================================================\n\n\"\"\"This module to extend NDDataset with the import methods method.\n\n\"\"\"\n__all__ = ['read_opus']\n\n__dataset_methods__ = __all__\n\n# ----------------------------------------------------------------------------------------------------------------------\n# standard imports\n# ----------------------------------------------------------------------------------------------------------------------\n\n\nfrom brukeropusreader import read_file\nfrom warnings import warn\nfrom datetime import datetime, timezone, timedelta\n\n\n\n\n\n# ----------------------------------------------------------------------------------------------------------------------\n# third party imports\n# 
----------------------------------------------------------------------------------------------------------------------\n# ----------------------------------------------------------------------------------------------------------------------\n# local imports\n# ----------------------------------------------------------------------------------------------------------------------\nfrom spectrochempy.core import debug_\nfrom spectrochempy.core.dataset.nddataset import NDDataset\nfrom spectrochempy.core.dataset.ndcoord import Coord\nfrom spectrochempy.utils import readfilename\n# ======================================================================================================================\n# Public functions\n# ======================================================================================================================\n\n# .............................................................................\ndef read_opus(dataset=None, **kwargs):\n \"\"\"Open Bruker Opus file(s) and group them in a single dataset. Only the spectrum is\n extracted (\"AB\" field). Returns an error if dimensions are incompatibles.\n\n Parameters\n ----------\n filename : `None`, `str`, or list of `str`\n Filename of the file(s) to load. If `None` : opens a dialog box to select\n files. If `str` : a single filename. It list of str :\n a list of filenames.\n directory : str, optional, default=\"\".\n From where to read the specified filename. If not specified, read in\n the defaults datadir.\n\n Returns\n -------\n dataset : |NDDataset|\n A dataset corresponding to the (set of) bruker file(s).\n\n Examples\n --------\n >>> A = NDDataset.read_opus('irdata\\\\spectrum.0001')\n >>> print(A)\n NDDataset: [float64] a.u. (shape: (y:1, x:2568))\n \"\"\"\n debug_(\"reading bruker opus files\")\n\n # filename will be given by a keyword parameter except if the first parameters is already\n # the filename\n filename = kwargs.get('filename', None)\n\n # check if the first parameter is a dataset because we allow not to pass it\n if not isinstance(dataset, NDDataset):\n # probably did not specify a dataset\n # so the first parameters must be the filename\n if isinstance(dataset, (str, list)) and dataset != '':\n filename = dataset\n\n # check if directory was specified\n directory = kwargs.get(\"directory\", None)\n sortbydate = kwargs.get(\"sortbydate\", True)\n\n # returns a list of files to read\n files = readfilename(filename,\n directory=directory,\n filetypes=['Bruker files (*.*)',\n 'all files (*)'],\n dictionary=False)\n #todo: see how to use regular expression in Qt filters\n\n if not files:\n # there is no files, return nothing\n return None\n\n xaxis = None\n intensities = []\n names = []\n acquisitiondates = []\n timestamps = []\n for file in files:\n opus_data = read_file(file)\n try:\n opus_data[\"AB\"]\n except KeyError: # not an absorbance spectrum\n warn(\"opus file {} could not be read\".format(file))\n continue\n\n if not xaxis:\n xaxis = Coord(opus_data.get_range(\"AB\"), title='Wavenumbers', units='cm^-1')\n\n elif (opus_data.get_range(\"AB\") != xaxis.data).any():\n raise ValueError(\"spectra have incompatible dimensions (xaxis)\")\n\n intensities.append(opus_data[\"AB\"])\n names.append(opus_data[\"Sample\"]['SNM'])\n acqdate = opus_data[\"AB Data Parameter\"][\"DAT\"]\n acqtime = opus_data[\"AB Data Parameter\"][\"TIM\"]\n GMT_offset_hour = float(acqtime.split('GMT')[1].split(')')[0])\n date_time = datetime.strptime(acqdate + '_' + acqtime.split()[0],\n '%d/%m/%Y_%H:%M:%S.%f')\n UTC_date_time = 
date_time - timedelta(hours=GMT_offset_hour)\n UTC_date_time = UTC_date_time.replace(tzinfo=timezone.utc)\n # Transform to timestamp for storage in the Coord object\n # use datetime.fromtimestamp(d, timezone.utc)) to transform back to datetime\n timestamp = UTC_date_time.timestamp()\n acquisitiondates.append(UTC_date_time)\n timestamps.append(timestamp)\n\n # return if none of the files could be read:\n if not xaxis:\n return\n\n yaxis = Coord(timestamps,\n title='Acquisition timestamp (GMT)',\n units='s',\n labels=(acquisitiondates, names))\n\n dataset = NDDataset(intensities)\n dataset.set_coords(y=yaxis, x=xaxis)\n dataset.units = 'absorbance'\n dataset.title = 'Absorbance'\n\n # Set origin, description and history\n dataset.origin = \"opus\"\n dataset.description = ('Dataset from opus files. \\n')\n\n if sortbydate:\n dataset.sort(dim='y', inplace=True)\n\n dataset.history = str(datetime.now()) + ':import from opus files \\n'\n\n # Set the NDDataset date\n dataset._date = datetime.now()\n dataset._modified = dataset.date\n # debug_(\"end of reading\")\n\n return dataset\n", "path": "spectrochempy/core/readers/readopus.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# ======================================================================================================================\n# Copyright (\u00a9) 2015-2020 LCS\n# Laboratoire Catalyse et Spectrochimie, Caen, France.\n# CeCILL-B FREE SOFTWARE LICENSE AGREEMENT\n# See full LICENSE agreement in the root directory\n# ======================================================================================================================\n\n\"\"\"This module to extend NDDataset with the import methods method.\n\n\"\"\"\n__all__ = ['read_opus']\n\n__dataset_methods__ = __all__\n\n# ----------------------------------------------------------------------------------------------------------------------\n# standard imports\n# ----------------------------------------------------------------------------------------------------------------------\n\n\nfrom brukeropusreader import read_file\nfrom warnings import warn\nfrom datetime import datetime, timezone, timedelta\nfrom numpy import linspace\n\n# ----------------------------------------------------------------------------------------------------------------------\n# third party imports\n# ----------------------------------------------------------------------------------------------------------------------\n# ----------------------------------------------------------------------------------------------------------------------\n# local imports\n# ----------------------------------------------------------------------------------------------------------------------\nfrom spectrochempy.core import debug_\nfrom spectrochempy.core.dataset.nddataset import NDDataset\nfrom spectrochempy.core.dataset.ndcoord import Coord\nfrom spectrochempy.utils import readfilename\n\n\n# ======================================================================================================================\n# Public functions\n# ======================================================================================================================\n\n# .............................................................................\ndef read_opus(dataset=None, **kwargs):\n \"\"\"Open Bruker Opus file(s) and group them in a single dataset. Only the spectrum is\n extracted (\"AB\" field). 
Returns an error if dimensions are incompatibles.\n\n Parameters\n ----------\n filename : `None`, `str`, or list of `str`\n Filename of the file(s) to load. If `None` : opens a dialog box to select\n files. If `str` : a single filename. It list of str :\n a list of filenames.\n directory : str, optional, default=\"\".\n From where to read the specified filename. If not specified, read in\n the defaults datadir.\n\n Returns\n -------\n dataset : |NDDataset|\n A dataset corresponding to the (set of) bruker file(s).\n\n Examples\n --------\n >>> A = NDDataset.read_opus('irdata\\\\spectrum.0001')\n >>> print(A)\n NDDataset: [float64] a.u. (shape: (y:1, x:2568))\n \"\"\"\n debug_(\"reading bruker opus files\")\n\n # filename will be given by a keyword parameter except if the first parameters is already\n # the filename\n filename = kwargs.get('filename', None)\n\n # check if the first parameter is a dataset because we allow not to pass it\n if not isinstance(dataset, NDDataset):\n # probably did not specify a dataset\n # so the first parameters must be the filename\n if isinstance(dataset, (str, list)) and dataset != '':\n filename = dataset\n\n # check if directory was specified\n directory = kwargs.get(\"directory\", None)\n sortbydate = kwargs.get(\"sortbydate\", True)\n\n # returns a list of files to read\n files = readfilename(filename,\n directory=directory,\n filetypes=['Bruker files (*.*)',\n 'all files (*)'],\n dictionary=False)\n # todo: see how to use regular expression in Qt filters\n\n if not files:\n # there is no files, return nothing\n return None\n\n xaxis = None\n intensities = []\n names = []\n acquisitiondates = []\n timestamps = []\n for file in files:\n opus_data = read_file(file)\n try:\n opus_data[\"AB\"]\n except KeyError: # not an absorbance spectrum\n warn(\"opus file {} could not be read\".format(file))\n continue\n\n npt = opus_data['AB Data Parameter']['NPT']\n fxv = opus_data['AB Data Parameter']['FXV']\n lxv = opus_data['AB Data Parameter']['LXV']\n xdata = linspace(fxv, lxv, npt)\n\n if not xaxis:\n xaxis = Coord(x=xdata, title='Wavenumbers', units='cm^-1')\n\n elif (xdata != xaxis.data).any():\n raise ValueError(\"spectra have incompatible dimensions (xaxis)\")\n\n intensities.append(opus_data[\"AB\"][:npt])\n names.append(opus_data[\"Sample\"]['SNM'])\n acqdate = opus_data[\"AB Data Parameter\"][\"DAT\"]\n acqtime = opus_data[\"AB Data Parameter\"][\"TIM\"]\n GMT_offset_hour = float(acqtime.split('GMT')[1].split(')')[0])\n date_time = datetime.strptime(acqdate + '_' + acqtime.split()[0],\n '%d/%m/%Y_%H:%M:%S.%f')\n UTC_date_time = date_time - timedelta(hours=GMT_offset_hour)\n UTC_date_time = UTC_date_time.replace(tzinfo=timezone.utc)\n # Transform to timestamp for storage in the Coord object\n # use datetime.fromtimestamp(d, timezone.utc)) to transform back to datetime\n timestamp = UTC_date_time.timestamp()\n acquisitiondates.append(UTC_date_time)\n timestamps.append(timestamp)\n\n # return if none of the files could be read:\n if not xaxis:\n return\n\n yaxis = Coord(timestamps,\n title='Acquisition timestamp (GMT)',\n units='s',\n labels=(acquisitiondates, names))\n\n dataset = NDDataset(intensities)\n dataset.set_coords(y=yaxis, x=xaxis)\n dataset.units = 'absorbance'\n dataset.title = 'Absorbance'\n\n # Set origin, description and history\n dataset.origin = \"opus\"\n dataset.description = ('Dataset from opus files. 
\\n')\n\n if sortbydate:\n dataset.sort(dim='y', inplace=True)\n\n dataset.history = str(datetime.now()) + ':import from opus files \\n'\n\n # Set the NDDataset date\n dataset._date = datetime.now()\n dataset._modified = dataset.date\n # debug_(\"end of reading\")\n\n return dataset\n", "path": "spectrochempy/core/readers/readopus.py"}]} | 1,931 | 516 |
gh_patches_debug_66169 | rasdani/github-patches | git_diff | cisagov__manage.get.gov-1071 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Show simplified domain statuses to the registrant
### Story
As a domain manager
I want to know the status of my domain in simple language
so that I know if any action is needed or if any functions are limited
### Acceptance Criteria
- [x] Domains table on the dashboard shows a user-friendly domain status
- [ ] Show the domain status on the "Domain Overview" page
- [ ] For Domain statuses Deleted and Hold, change manage link to View with eye icon [(Figma)](https://www.figma.com/file/aJbuDLJNNikqQObTuCNKQa/GetGov_Designs_ECS-Truss?type=design&node-id=428%3A11621&mode=design&t=eottghQ6NoX98F6q-1)
### Additional Context
**BACKGROUND**
In general, EPP / domain statuses are not easily understandable to most users; therefore, we need to ensure we're showing "user-friendly" versions of the domain status.
User-friendly statuses include:
- _Note:_ "Unknown" _Domain status shows as_ "DNS needed"
- DNS needed
- Ready
- On hold
- Deleted
Refer to[ status diagram Miro](https://miro.com/app/board/uXjVMuqbLOk=/?moveToWidget=3458764561795634398&cot=14)
**DOMAINS TABLE**
Currently, the approved Domains table displays the application status. But, because the application process is complete, we should update this to show the _domain_ status.
Reference [Figma mock up](https://www.figma.com/file/aJbuDLJNNikqQObTuCNKQa/GetGov_Designs_ECS-Truss?type=design&node-id=428-25637&mode=design&t=VBfj3tkkg2p3f8UT-0)
**DOMAIN OVERVIEW**
Currently, we do not show the domain status when viewing the "Manage Domains" pages. The "Manage Domains" pages can be accessed by clicking the "Manage" button next to an approved domain.
The first page is the "Domain Overview." Add a stylized message to the top of that page to indicate the user-friendly domain status.
Reference [Figma mock up](https://www.figma.com/file/aJbuDLJNNikqQObTuCNKQa/GetGov_Designs_ECS-Truss?type=design&node-id=4829-39693&mode=design&t=VBfj3tkkg2p3f8UT-0)
### Issue Links
Depends on: Domain Status presence for testing
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/registrar/views/index.py`
Content:
```
1 from django.db.models import F
2 from django.shortcuts import render
3
4 from registrar.models import DomainApplication
5
6
7 def index(request):
8 """This page is available to anyone without logging in."""
9 context = {}
10 if request.user.is_authenticated:
11 applications = DomainApplication.objects.filter(creator=request.user)
12 # Let's exclude the approved applications since our
13 # domain_applications context will be used to populate
14 # the active applications table
15 context["domain_applications"] = applications.exclude(status="approved")
16
17 domains = request.user.permissions.values(
18 "role",
19 pk=F("domain__id"),
20 name=F("domain__name"),
21 created_time=F("domain__created_at"),
22 application_status=F("domain__domain_application__status"),
23 )
24 context["domains"] = domains
25 return render(request, "home.html", context)
26
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/registrar/views/index.py b/src/registrar/views/index.py
--- a/src/registrar/views/index.py
+++ b/src/registrar/views/index.py
@@ -19,7 +19,7 @@
pk=F("domain__id"),
name=F("domain__name"),
created_time=F("domain__created_at"),
- application_status=F("domain__domain_application__status"),
+ state=F("domain__state"),
)
context["domains"] = domains
return render(request, "home.html", context)
| {"golden_diff": "diff --git a/src/registrar/views/index.py b/src/registrar/views/index.py\n--- a/src/registrar/views/index.py\n+++ b/src/registrar/views/index.py\n@@ -19,7 +19,7 @@\n pk=F(\"domain__id\"),\n name=F(\"domain__name\"),\n created_time=F(\"domain__created_at\"),\n- application_status=F(\"domain__domain_application__status\"),\n+ state=F(\"domain__state\"),\n )\n context[\"domains\"] = domains\n return render(request, \"home.html\", context)\n", "issue": "Show simplified domain statuses to the registrant\n### Story\r\n\r\nAs a domain manager\r\nI want to know the status of my domain in simple language\r\nso that I know if any action is needed or if any functions are limited\r\n\r\n\r\n### Acceptance Criteria\r\n\r\n- [x] Domains table on the dashboard shows a user-friendly domain status\r\n- [ ] Show the domain status on the \"Domain Overview\" page\r\n- [ ] For Domain statuses Deleted and Hold, change manage link to View with eye icon [(Figma)](https://www.figma.com/file/aJbuDLJNNikqQObTuCNKQa/GetGov_Designs_ECS-Truss?type=design&node-id=428%3A11621&mode=design&t=eottghQ6NoX98F6q-1)\r\n\r\n\r\n### Additional Context\r\n**BACKGROUND**\r\nIn general, EPP / domain statuses are not easily understandable to most users; therefore, we need to ensure we're showing \"user-friendly\" versions of the domain status. \r\n\r\nUser-friendly statuses include: \r\n- _Note:_ \"Unknown\" _Domain status shows as_ \"DNS needed\"\r\n- DNS needed \r\n- Ready\r\n- On hold\r\n- Deleted\r\n\r\nRefer to[ status diagram Miro](https://miro.com/app/board/uXjVMuqbLOk=/?moveToWidget=3458764561795634398&cot=14)\r\n\r\n**DOMAINS TABLE**\r\nCurrently, the approved Domains table displays the application status. But, because the application process is complete, we should update this to show the _domain_ status. \r\n\r\nReference [Figma mock up](https://www.figma.com/file/aJbuDLJNNikqQObTuCNKQa/GetGov_Designs_ECS-Truss?type=design&node-id=428-25637&mode=design&t=VBfj3tkkg2p3f8UT-0)\r\n\r\n**DOMAIN OVERVIEW**\r\nCurrently, we do not show the domain status when viewing the \"Manage Domains\" pages. The \"Manage Domains\" pages can be accessed by clicking the \"Manage\" button next to an approved domain. 
\r\n\r\nThe first page is the \"Domain Overview.\" Add stylized message to the top of that page to indicate the user-friendly domain status.\r\n\r\nReference [Figma mock up](https://www.figma.com/file/aJbuDLJNNikqQObTuCNKQa/GetGov_Designs_ECS-Truss?type=design&node-id=4829-39693&mode=design&t=VBfj3tkkg2p3f8UT-0)\r\n\r\n### Issue Links\r\n\r\nDepends on: Domain Status presence for testing\n", "before_files": [{"content": "from django.db.models import F\nfrom django.shortcuts import render\n\nfrom registrar.models import DomainApplication\n\n\ndef index(request):\n \"\"\"This page is available to anyone without logging in.\"\"\"\n context = {}\n if request.user.is_authenticated:\n applications = DomainApplication.objects.filter(creator=request.user)\n # Let's exclude the approved applications since our\n # domain_applications context will be used to populate\n # the active applications table\n context[\"domain_applications\"] = applications.exclude(status=\"approved\")\n\n domains = request.user.permissions.values(\n \"role\",\n pk=F(\"domain__id\"),\n name=F(\"domain__name\"),\n created_time=F(\"domain__created_at\"),\n application_status=F(\"domain__domain_application__status\"),\n )\n context[\"domains\"] = domains\n return render(request, \"home.html\", context)\n", "path": "src/registrar/views/index.py"}], "after_files": [{"content": "from django.db.models import F\nfrom django.shortcuts import render\n\nfrom registrar.models import DomainApplication\n\n\ndef index(request):\n \"\"\"This page is available to anyone without logging in.\"\"\"\n context = {}\n if request.user.is_authenticated:\n applications = DomainApplication.objects.filter(creator=request.user)\n # Let's exclude the approved applications since our\n # domain_applications context will be used to populate\n # the active applications table\n context[\"domain_applications\"] = applications.exclude(status=\"approved\")\n\n domains = request.user.permissions.values(\n \"role\",\n pk=F(\"domain__id\"),\n name=F(\"domain__name\"),\n created_time=F(\"domain__created_at\"),\n state=F(\"domain__state\"),\n )\n context[\"domains\"] = domains\n return render(request, \"home.html\", context)\n", "path": "src/registrar/views/index.py"}]} | 1,073 | 117 |
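
The patch in this row only swaps the annotated column from the application status to the raw domain state; the user-friendly wording the issue asks for (Unknown shown as "DNS needed", plus Ready, On hold, Deleted) still has to be mapped somewhere, typically in a template filter or small helper. A minimal sketch of such a mapping follows — the lowercase state strings and the function name are assumptions for illustration, not the project's actual enum:

```python
# Hypothetical helper mapping raw domain states to the user-facing labels
# listed in the issue; the lowercase state strings are assumptions.
FRIENDLY_STATUS = {
    "unknown": "DNS needed",   # issue: "Unknown" should read as "DNS needed"
    "dns needed": "DNS needed",
    "ready": "Ready",
    "on hold": "On hold",
    "deleted": "Deleted",
}


def friendly_domain_status(state: str) -> str:
    """Return the user-friendly label for a raw domain state."""
    return FRIENDLY_STATUS.get(state.lower(), "DNS needed")
```
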
gh_patches_debug_20140 | rasdani/github-patches | git_diff | Flexget__Flexget-1599 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bakabt URL change
Bakabt's URL has changed from http://www.bakabt.com to https://bakabt.me, so the URL rewriter plugin needs to be updated to match.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `flexget/plugins/sites/bakabt.py`
Content:
```
1 from __future__ import unicode_literals, division, absolute_import
2 from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
3
4 import logging
5
6 from flexget import plugin
7 from flexget.event import event
8 from flexget.plugins.internal.urlrewriting import UrlRewritingError
9 from flexget.utils.soup import get_soup
10
11 log = logging.getLogger('bakabt')
12
13
14 class UrlRewriteBakaBT(object):
15 """BakaBT urlrewriter."""
16
17 # urlrewriter API
18 def url_rewritable(self, task, entry):
19 url = entry['url']
20 if url.startswith('http://www.bakabt.com/download/'):
21 return False
22 if url.startswith('http://www.bakabt.com/') or url.startswith('http://bakabt.com/'):
23 return True
24 return False
25
26 # urlrewriter API
27 def url_rewrite(self, task, entry):
28 entry['url'] = self.parse_download_page(entry['url'], task.requests)
29
30 @plugin.internet(log)
31 def parse_download_page(self, url, requests):
32 txheaders = {'User-agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'}
33 page = requests.get(url, headers=txheaders)
34 try:
35 soup = get_soup(page.text)
36 except Exception as e:
37 raise UrlRewritingError(e)
38 tag_a = soup.find('a', attrs={'class': 'download_link'})
39 if not tag_a:
40 raise UrlRewritingError('Unable to locate download link from url %s' % url)
41 torrent_url = 'http://www.bakabt.com' + tag_a.get('href')
42 return torrent_url
43
44
45 @event('plugin.register')
46 def register_plugin():
47 plugin.register(UrlRewriteBakaBT, 'bakabt', groups=['urlrewriter'], api_ver=2)
48
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/flexget/plugins/sites/bakabt.py b/flexget/plugins/sites/bakabt.py
--- a/flexget/plugins/sites/bakabt.py
+++ b/flexget/plugins/sites/bakabt.py
@@ -17,9 +17,9 @@
# urlrewriter API
def url_rewritable(self, task, entry):
url = entry['url']
- if url.startswith('http://www.bakabt.com/download/'):
+ if url.startswith('http://www.bakabt.me/download/'):
return False
- if url.startswith('http://www.bakabt.com/') or url.startswith('http://bakabt.com/'):
+ if url.startswith('http://www.bakabt.me/') or url.startswith('http://bakabt.me/'):
return True
return False
@@ -38,7 +38,7 @@
tag_a = soup.find('a', attrs={'class': 'download_link'})
if not tag_a:
raise UrlRewritingError('Unable to locate download link from url %s' % url)
- torrent_url = 'http://www.bakabt.com' + tag_a.get('href')
+ torrent_url = 'http://www.bakabt.me' + tag_a.get('href')
return torrent_url
| {"golden_diff": "diff --git a/flexget/plugins/sites/bakabt.py b/flexget/plugins/sites/bakabt.py\n--- a/flexget/plugins/sites/bakabt.py\n+++ b/flexget/plugins/sites/bakabt.py\n@@ -17,9 +17,9 @@\n # urlrewriter API\n def url_rewritable(self, task, entry):\n url = entry['url']\n- if url.startswith('http://www.bakabt.com/download/'):\n+ if url.startswith('http://www.bakabt.me/download/'):\n return False\n- if url.startswith('http://www.bakabt.com/') or url.startswith('http://bakabt.com/'):\n+ if url.startswith('http://www.bakabt.me/') or url.startswith('http://bakabt.me/'):\n return True\n return False\n \n@@ -38,7 +38,7 @@\n tag_a = soup.find('a', attrs={'class': 'download_link'})\n if not tag_a:\n raise UrlRewritingError('Unable to locate download link from url %s' % url)\n- torrent_url = 'http://www.bakabt.com' + tag_a.get('href')\n+ torrent_url = 'http://www.bakabt.me' + tag_a.get('href')\n return torrent_url\n", "issue": "Bakabt URL change\nBakabt url has change from http://www.bakabt.com to https://bakabt.me, the url rewriter plugin needs to be updated to match\n", "before_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nfrom builtins import * # noqa pylint: disable=unused-import, redefined-builtin\n\nimport logging\n\nfrom flexget import plugin\nfrom flexget.event import event\nfrom flexget.plugins.internal.urlrewriting import UrlRewritingError\nfrom flexget.utils.soup import get_soup\n\nlog = logging.getLogger('bakabt')\n\n\nclass UrlRewriteBakaBT(object):\n \"\"\"BakaBT urlrewriter.\"\"\"\n\n # urlrewriter API\n def url_rewritable(self, task, entry):\n url = entry['url']\n if url.startswith('http://www.bakabt.com/download/'):\n return False\n if url.startswith('http://www.bakabt.com/') or url.startswith('http://bakabt.com/'):\n return True\n return False\n\n # urlrewriter API\n def url_rewrite(self, task, entry):\n entry['url'] = self.parse_download_page(entry['url'], task.requests)\n\n @plugin.internet(log)\n def parse_download_page(self, url, requests):\n txheaders = {'User-agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'}\n page = requests.get(url, headers=txheaders)\n try:\n soup = get_soup(page.text)\n except Exception as e:\n raise UrlRewritingError(e)\n tag_a = soup.find('a', attrs={'class': 'download_link'})\n if not tag_a:\n raise UrlRewritingError('Unable to locate download link from url %s' % url)\n torrent_url = 'http://www.bakabt.com' + tag_a.get('href')\n return torrent_url\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(UrlRewriteBakaBT, 'bakabt', groups=['urlrewriter'], api_ver=2)\n", "path": "flexget/plugins/sites/bakabt.py"}], "after_files": [{"content": "from __future__ import unicode_literals, division, absolute_import\nfrom builtins import * # noqa pylint: disable=unused-import, redefined-builtin\n\nimport logging\n\nfrom flexget import plugin\nfrom flexget.event import event\nfrom flexget.plugins.internal.urlrewriting import UrlRewritingError\nfrom flexget.utils.soup import get_soup\n\nlog = logging.getLogger('bakabt')\n\n\nclass UrlRewriteBakaBT(object):\n \"\"\"BakaBT urlrewriter.\"\"\"\n\n # urlrewriter API\n def url_rewritable(self, task, entry):\n url = entry['url']\n if url.startswith('http://www.bakabt.me/download/'):\n return False\n if url.startswith('http://www.bakabt.me/') or url.startswith('http://bakabt.me/'):\n return True\n return False\n\n # urlrewriter API\n def url_rewrite(self, task, entry):\n entry['url'] = self.parse_download_page(entry['url'], task.requests)\n\n 
@plugin.internet(log)\n def parse_download_page(self, url, requests):\n txheaders = {'User-agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'}\n page = requests.get(url, headers=txheaders)\n try:\n soup = get_soup(page.text)\n except Exception as e:\n raise UrlRewritingError(e)\n tag_a = soup.find('a', attrs={'class': 'download_link'})\n if not tag_a:\n raise UrlRewritingError('Unable to locate download link from url %s' % url)\n torrent_url = 'http://www.bakabt.me' + tag_a.get('href')\n return torrent_url\n\n\n@event('plugin.register')\ndef register_plugin():\n plugin.register(UrlRewriteBakaBT, 'bakabt', groups=['urlrewriter'], api_ver=2)\n", "path": "flexget/plugins/sites/bakabt.py"}]} | 805 | 294 |
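
One thing the accepted diff in this row does not address is that the check still hard-codes the scheme and the optional `www.` prefix, even though the issue reports the new site at `https://bakabt.me`. A scheme-agnostic host check is one possible alternative; this is only a standard-library sketch of that idea, not what the plugin actually ships:

```python
from urllib.parse import urlparse


def url_rewritable(url: str) -> bool:
    """Match bakabt.me pages regardless of scheme or a leading 'www.'."""
    parts = urlparse(url)
    host = parts.netloc.lower()
    if host.startswith("www."):
        host = host[4:]
    # download links are already direct, so they don't need rewriting
    return host == "bakabt.me" and not parts.path.startswith("/download/")
```
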
gh_patches_debug_23767 | rasdani/github-patches | git_diff | microsoft__presidio-650 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Image anonymization is slow
Hi,
First of all, thank you guys for making this awesome project :)
I wanted to ask if there are any ways to improve image anonymization performance. I'm using Presidio as a Python package, and it takes about 5 seconds to process pretty small images. I'm using the example code from the Presidio docs.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `presidio-image-redactor/presidio_image_redactor/image_analyzer_engine.py`
Content:
```
1 from typing import List
2
3 from presidio_analyzer import AnalyzerEngine
4 from presidio_analyzer import RecognizerResult
5 from presidio_image_redactor.entities.image_recognizer_result import (
6 ImageRecognizerResult,
7 )
8 from presidio_image_redactor.ocr import OCR
9
10
11 class ImageAnalyzerEngine:
12 """ImageAnalyzerEngine class."""
13
14 def analyze(self, image: object, **kwargs) -> List[ImageRecognizerResult]:
15 """Analyse method to analyse the given image.
16
17 :param image: PIL Image/numpy array or file path(str) to be processed
18
19 :return: list of the extract entities with image bounding boxes
20 """
21 ocr_result = OCR().perform_ocr(image)
22 text = OCR().get_text_from_ocr_dict(ocr_result)
23
24 analyzer = AnalyzerEngine()
25 analyzer_result = analyzer.analyze(text=text, language="en", **kwargs)
26 bboxes = self.map_analyzer_results_to_bounding_boxes(
27 analyzer_result, ocr_result, text
28 )
29 return bboxes
30
31 @staticmethod
32 def map_analyzer_results_to_bounding_boxes(
33 text_analyzer_results: List[RecognizerResult], ocr_result: dict, text: str
34 ) -> List[ImageRecognizerResult]:
35 """Map extracted PII entities to image bounding boxes.
36
37 Matching is based on the position of the recognized entity from analyzer
38 and word (in ocr dict) in the text.
39
40 :param text_analyzer_results: PII entities recognized by presidio analyzer
41 :param ocr_result: dict results with words and bboxes from OCR
42 :param text: text the results are based on
43
44 return: list of extracted entities with image bounding boxes
45 """
46 if (not ocr_result) or (not text_analyzer_results):
47 return []
48
49 bboxes = []
50 proc_indexes = 0
51 indexes = len(text_analyzer_results)
52
53 pos = 0
54 iter_ocr = enumerate(ocr_result["text"])
55 for index, word in iter_ocr:
56 if not word:
57 pos += 1
58 else:
59 for element in text_analyzer_results:
60 text_element = text[element.start : element.end]
61 # check position and text of ocr word matches recognized entity
62 if (
63 max(pos, element.start) < min(element.end, pos + len(word))
64 ) and ((text_element in word) or (word in text_element)):
65 bboxes.append(
66 ImageRecognizerResult(
67 element.entity_type,
68 element.start,
69 element.end,
70 element.score,
71 ocr_result["left"][index],
72 ocr_result["top"][index],
73 ocr_result["width"][index],
74 ocr_result["height"][index],
75 )
76 )
77
78 # add bounding boxes for all words in ocr dict
79 # contained within the text of recognized entity
80 # based on relative position in the full text
81 while pos + len(word) < element.end:
82 index, word = next(iter_ocr)
83 if word:
84 bboxes.append(
85 ImageRecognizerResult(
86 element.entity_type,
87 element.start,
88 element.end,
89 element.score,
90 ocr_result["left"][index],
91 ocr_result["top"][index],
92 ocr_result["width"][index],
93 ocr_result["height"][index],
94 )
95 )
96 pos += len(word) + 1
97 proc_indexes += 1
98
99 if proc_indexes == indexes:
100 break
101 pos += len(word) + 1
102
103 return bboxes
104
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/presidio-image-redactor/presidio_image_redactor/image_analyzer_engine.py b/presidio-image-redactor/presidio_image_redactor/image_analyzer_engine.py
--- a/presidio-image-redactor/presidio_image_redactor/image_analyzer_engine.py
+++ b/presidio-image-redactor/presidio_image_redactor/image_analyzer_engine.py
@@ -9,7 +9,16 @@
class ImageAnalyzerEngine:
- """ImageAnalyzerEngine class."""
+ """ImageAnalyzerEngine class.
+
+ :param analyzer_engine: The Presidio AnalyzerEngine instance
+ to be used to detect PII in text.
+ """
+
+ def __init__(self, analyzer_engine: AnalyzerEngine = None):
+ if not analyzer_engine:
+ analyzer_engine = AnalyzerEngine()
+ self.analyzer_engine = analyzer_engine
def analyze(self, image: object, **kwargs) -> List[ImageRecognizerResult]:
"""Analyse method to analyse the given image.
@@ -21,8 +30,8 @@
ocr_result = OCR().perform_ocr(image)
text = OCR().get_text_from_ocr_dict(ocr_result)
- analyzer = AnalyzerEngine()
- analyzer_result = analyzer.analyze(text=text, language="en", **kwargs)
+ analyzer_result = self.analyzer_engine.analyze(
+ text=text, language="en", **kwargs)
bboxes = self.map_analyzer_results_to_bounding_boxes(
analyzer_result, ocr_result, text
)
| {"golden_diff": "diff --git a/presidio-image-redactor/presidio_image_redactor/image_analyzer_engine.py b/presidio-image-redactor/presidio_image_redactor/image_analyzer_engine.py\n--- a/presidio-image-redactor/presidio_image_redactor/image_analyzer_engine.py\n+++ b/presidio-image-redactor/presidio_image_redactor/image_analyzer_engine.py\n@@ -9,7 +9,16 @@\n \n \n class ImageAnalyzerEngine:\n- \"\"\"ImageAnalyzerEngine class.\"\"\"\n+ \"\"\"ImageAnalyzerEngine class.\n+\n+ :param analyzer_engine: The Presidio AnalyzerEngine instance\n+ to be used to detect PII in text.\n+ \"\"\"\n+\n+ def __init__(self, analyzer_engine: AnalyzerEngine = None):\n+ if not analyzer_engine:\n+ analyzer_engine = AnalyzerEngine()\n+ self.analyzer_engine = analyzer_engine\n \n def analyze(self, image: object, **kwargs) -> List[ImageRecognizerResult]:\n \"\"\"Analyse method to analyse the given image.\n@@ -21,8 +30,8 @@\n ocr_result = OCR().perform_ocr(image)\n text = OCR().get_text_from_ocr_dict(ocr_result)\n \n- analyzer = AnalyzerEngine()\n- analyzer_result = analyzer.analyze(text=text, language=\"en\", **kwargs)\n+ analyzer_result = self.analyzer_engine.analyze(\n+ text=text, language=\"en\", **kwargs)\n bboxes = self.map_analyzer_results_to_bounding_boxes(\n analyzer_result, ocr_result, text\n )\n", "issue": "Image anonymization is slow\nHi,\r\n\r\nFirst of all, thank you guys for making this awesome project :)\r\n\r\nI wanted to ask if there are any ways to improve image anonymization performance. I'm using Presidio as a python package and it takes about 5~ seconds to process pretty small images. I'm using the example code from the Presidio docs.\n", "before_files": [{"content": "from typing import List\n\nfrom presidio_analyzer import AnalyzerEngine\nfrom presidio_analyzer import RecognizerResult\nfrom presidio_image_redactor.entities.image_recognizer_result import (\n ImageRecognizerResult,\n)\nfrom presidio_image_redactor.ocr import OCR\n\n\nclass ImageAnalyzerEngine:\n \"\"\"ImageAnalyzerEngine class.\"\"\"\n\n def analyze(self, image: object, **kwargs) -> List[ImageRecognizerResult]:\n \"\"\"Analyse method to analyse the given image.\n\n :param image: PIL Image/numpy array or file path(str) to be processed\n\n :return: list of the extract entities with image bounding boxes\n \"\"\"\n ocr_result = OCR().perform_ocr(image)\n text = OCR().get_text_from_ocr_dict(ocr_result)\n\n analyzer = AnalyzerEngine()\n analyzer_result = analyzer.analyze(text=text, language=\"en\", **kwargs)\n bboxes = self.map_analyzer_results_to_bounding_boxes(\n analyzer_result, ocr_result, text\n )\n return bboxes\n\n @staticmethod\n def map_analyzer_results_to_bounding_boxes(\n text_analyzer_results: List[RecognizerResult], ocr_result: dict, text: str\n ) -> List[ImageRecognizerResult]:\n \"\"\"Map extracted PII entities to image bounding boxes.\n\n Matching is based on the position of the recognized entity from analyzer\n and word (in ocr dict) in the text.\n\n :param text_analyzer_results: PII entities recognized by presidio analyzer\n :param ocr_result: dict results with words and bboxes from OCR\n :param text: text the results are based on\n\n return: list of extracted entities with image bounding boxes\n \"\"\"\n if (not ocr_result) or (not text_analyzer_results):\n return []\n\n bboxes = []\n proc_indexes = 0\n indexes = len(text_analyzer_results)\n\n pos = 0\n iter_ocr = enumerate(ocr_result[\"text\"])\n for index, word in iter_ocr:\n if not word:\n pos += 1\n else:\n for element in text_analyzer_results:\n text_element = 
text[element.start : element.end]\n # check position and text of ocr word matches recognized entity\n if (\n max(pos, element.start) < min(element.end, pos + len(word))\n ) and ((text_element in word) or (word in text_element)):\n bboxes.append(\n ImageRecognizerResult(\n element.entity_type,\n element.start,\n element.end,\n element.score,\n ocr_result[\"left\"][index],\n ocr_result[\"top\"][index],\n ocr_result[\"width\"][index],\n ocr_result[\"height\"][index],\n )\n )\n\n # add bounding boxes for all words in ocr dict\n # contained within the text of recognized entity\n # based on relative position in the full text\n while pos + len(word) < element.end:\n index, word = next(iter_ocr)\n if word:\n bboxes.append(\n ImageRecognizerResult(\n element.entity_type,\n element.start,\n element.end,\n element.score,\n ocr_result[\"left\"][index],\n ocr_result[\"top\"][index],\n ocr_result[\"width\"][index],\n ocr_result[\"height\"][index],\n )\n )\n pos += len(word) + 1\n proc_indexes += 1\n\n if proc_indexes == indexes:\n break\n pos += len(word) + 1\n\n return bboxes\n", "path": "presidio-image-redactor/presidio_image_redactor/image_analyzer_engine.py"}], "after_files": [{"content": "from typing import List\n\nfrom presidio_analyzer import AnalyzerEngine\nfrom presidio_analyzer import RecognizerResult\nfrom presidio_image_redactor.entities.image_recognizer_result import (\n ImageRecognizerResult,\n)\nfrom presidio_image_redactor.ocr import OCR\n\n\nclass ImageAnalyzerEngine:\n \"\"\"ImageAnalyzerEngine class.\n\n :param analyzer_engine: The Presidio AnalyzerEngine instance\n to be used to detect PII in text.\n \"\"\"\n\n def __init__(self, analyzer_engine: AnalyzerEngine = None):\n if not analyzer_engine:\n analyzer_engine = AnalyzerEngine()\n self.analyzer_engine = analyzer_engine\n\n def analyze(self, image: object, **kwargs) -> List[ImageRecognizerResult]:\n \"\"\"Analyse method to analyse the given image.\n\n :param image: PIL Image/numpy array or file path(str) to be processed\n\n :return: list of the extract entities with image bounding boxes\n \"\"\"\n ocr_result = OCR().perform_ocr(image)\n text = OCR().get_text_from_ocr_dict(ocr_result)\n\n analyzer_result = self.analyzer_engine.analyze(\n text=text, language=\"en\", **kwargs)\n bboxes = self.map_analyzer_results_to_bounding_boxes(\n analyzer_result, ocr_result, text\n )\n return bboxes\n\n @staticmethod\n def map_analyzer_results_to_bounding_boxes(\n text_analyzer_results: List[RecognizerResult], ocr_result: dict, text: str\n ) -> List[ImageRecognizerResult]:\n \"\"\"Map extracted PII entities to image bounding boxes.\n\n Matching is based on the position of the recognized entity from analyzer\n and word (in ocr dict) in the text.\n\n :param text_analyzer_results: PII entities recognized by presidio analyzer\n :param ocr_result: dict results with words and bboxes from OCR\n :param text: text the results are based on\n\n return: list of extracted entities with image bounding boxes\n \"\"\"\n if (not ocr_result) or (not text_analyzer_results):\n return []\n\n bboxes = []\n proc_indexes = 0\n indexes = len(text_analyzer_results)\n\n pos = 0\n iter_ocr = enumerate(ocr_result[\"text\"])\n for index, word in iter_ocr:\n if not word:\n pos += 1\n else:\n for element in text_analyzer_results:\n text_element = text[element.start : element.end]\n # check position and text of ocr word matches recognized entity\n if (\n max(pos, element.start) < min(element.end, pos + len(word))\n ) and ((text_element in word) or (word in text_element)):\n 
bboxes.append(\n ImageRecognizerResult(\n element.entity_type,\n element.start,\n element.end,\n element.score,\n ocr_result[\"left\"][index],\n ocr_result[\"top\"][index],\n ocr_result[\"width\"][index],\n ocr_result[\"height\"][index],\n )\n )\n\n # add bounding boxes for all words in ocr dict\n # contained within the text of recognized entity\n # based on relative position in the full text\n while pos + len(word) < element.end:\n index, word = next(iter_ocr)\n if word:\n bboxes.append(\n ImageRecognizerResult(\n element.entity_type,\n element.start,\n element.end,\n element.score,\n ocr_result[\"left\"][index],\n ocr_result[\"top\"][index],\n ocr_result[\"width\"][index],\n ocr_result[\"height\"][index],\n )\n )\n pos += len(word) + 1\n proc_indexes += 1\n\n if proc_indexes == indexes:\n break\n pos += len(word) + 1\n\n return bboxes\n", "path": "presidio-image-redactor/presidio_image_redactor/image_analyzer_engine.py"}]} | 1,316 | 339 |
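
The slowdown reported in this issue comes from `AnalyzerEngine()` being constructed — and its NLP model loaded — inside every `analyze()` call; the patch moves that cost into the constructor. A short usage sketch of the pattern this enables is below; the file names are placeholders, the keyword argument is the one introduced by the patch, and the import assumes the package's top-level export of `ImageAnalyzerEngine`:

```python
from presidio_analyzer import AnalyzerEngine
from presidio_image_redactor import ImageAnalyzerEngine

analyzer = AnalyzerEngine()  # pay the NLP model load once
image_analyzer = ImageAnalyzerEngine(analyzer_engine=analyzer)

for path in ["scan1.png", "scan2.png"]:  # placeholder file paths
    results = image_analyzer.analyze(path)
    print(path, [(r.entity_type, r.score) for r in results])
```
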
gh_patches_debug_15494 | rasdani/github-patches | git_diff | EleutherAI__gpt-neox-72 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Implement 1-Bit Adam
Integrate 1-bit Adam into our model. The DeepSpeed tutorial can be found [here](https://www.deepspeed.ai/tutorials/onebit-adam/)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `train_pipeline.py`
Content:
```
1 import random
2 import deepspeed
3 import torch
4 from torch.utils.data import DataLoader
5 from tqdm.auto import trange
6 import torch.distributed as distributed
7
8 from gpt_neox import (GPTNeoX_Pipe, AutoregressiveWrapper, GPT2Dataset, extract_tarfile,
9 prepare_optimizer_parameters, get_tokenizer, is_main, prepare_data)
10
11 from gpt_neox.utils import get_args, get_params
12
13 import GPUtil
14
15 # arguments
16 train_args = get_args()
17 params = get_params(train_args.model)
18
19 # tokenizer
20 tokenizer = get_tokenizer(tokenizer_type=params["tokenizer"].get("type", None),
21 from_pretrained=params["tokenizer"].get("from_pretrained", True),
22 add_padding_token=params["tokenizer"].get("add_padding_token", False))
23 vocab_size = len(tokenizer) if params["vocab_size"] is None else params["vocab_size"]
24
25 # model
26 deepspeed.init_distributed(dist_backend='nccl')
27 torch.distributed.barrier() # barrier will force processes to stop until *all* processes have reached the barrier
28
29 def loss_function(x, y):
30 losses = torch.nn.functional.cross_entropy(x, y, reduction='none')
31 loss = losses.mean()
32 return loss
33
34 model = GPTNeoX_Pipe(
35 num_tokens=params["vocab_size"],
36 dim=params["hidden_dim"],
37 seq_len=params["seq_len"],
38 depth=params["n_layers"],
39 heads=params["n_heads"],
40 dim_head=params["dim_head"],
41 loss_fn = loss_function,#torch.nn.CrossEntropyLoss(),
42 num_stages = params.get("pipeline_num_stages", 2)
43 )
44 model = AutoregressiveWrapper(model)
45
46 # optimizer
47 ds_model_params = prepare_optimizer_parameters(model)
48 optim = torch.optim.Adam(model.parameters(), lr=params["learning_rate"])
49
50 # prepare data
51 dset_params = params["dataset"]
52 assert dset_params is not None
53
54 if is_main(train_args):
55 prepare_data(dset_params["name"])
56 torch.distributed.barrier() # barrier will force processes to stop until *all* processes have reached the barrier
57 else:
58 torch.distributed.barrier()
59
60 # data loading
61 train_dataset = GPT2Dataset(glob_pattern=dset_params["train_path"],
62 seq_len=params["seq_len"],
63 train=True,
64 **dset_params)
65 train_loader = model_engine.deepspeed_io(train_dataset, pin_memory=params.get("pin_memory", False))
66
67 eval_dataset = GPT2Dataset(glob_pattern=dset_params["eval_path"],
68 seq_len=params["seq_len"],
69 train=False,
70 **dset_params)
71
72 val_loader = DataLoader(eval_dataset, batch_size=params["eval_batch_size"])
73 val_loader = iter(val_loader)
74
75 # deepspeed loader
76 model_engine, optim, train_loader, _ = deepspeed.initialize(args=train_args,
77 model=model,
78 optimizer=optim,
79 model_parameters=ds_model_params,
80 training_data=train_dataset)
81
82
83 batches_to_train = 10000
84
85 pbar = trange(params["num_epochs"], mininterval=10., desc='Training Model', dynamic_ncols=True)
86 for _ in pbar:
87 for i in range(batches_to_train):
88
89 is_main = model_engine.local_rank == 0
90
91 loss = model_engine.train_batch()
92
93 pbar.set_description(f'Training Loss: {loss.item():.4f}')
94 pbar.update()
95
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/train_pipeline.py b/train_pipeline.py
--- a/train_pipeline.py
+++ b/train_pipeline.py
@@ -1,16 +1,21 @@
+import argparse
+import json
import random
+from collections import defaultdict
+import os
import deepspeed
import torch
from torch.utils.data import DataLoader
from tqdm.auto import trange
-import torch.distributed as distributed
-from gpt_neox import (GPTNeoX_Pipe, AutoregressiveWrapper, GPT2Dataset, extract_tarfile,
- prepare_optimizer_parameters, get_tokenizer, is_main, prepare_data)
+from gpt_neox import (GPTNeoX, AutoregressiveWrapper, TextSamplerDataset,
+ cycle, prepare_optimizer_parameters, decode_tokens, prepare_data,
+ GPTNeoX_Pipe)
+from gpt_neox.datasets import GPT2Dataset
+from gpt_neox.utils import is_main
+import gpt_neox
-from gpt_neox.utils import get_args, get_params
-
-import GPUtil
+WORLD_SIZE = os.getenv('WORLD_SIZE')
# arguments
train_args = get_args()
| {"golden_diff": "diff --git a/train_pipeline.py b/train_pipeline.py\n--- a/train_pipeline.py\n+++ b/train_pipeline.py\n@@ -1,16 +1,21 @@\n+import argparse\n+import json\n import random\n+from collections import defaultdict\n+import os\n import deepspeed\n import torch\n from torch.utils.data import DataLoader\n from tqdm.auto import trange\n-import torch.distributed as distributed\n \n-from gpt_neox import (GPTNeoX_Pipe, AutoregressiveWrapper, GPT2Dataset, extract_tarfile,\n- prepare_optimizer_parameters, get_tokenizer, is_main, prepare_data)\n+from gpt_neox import (GPTNeoX, AutoregressiveWrapper, TextSamplerDataset,\n+ cycle, prepare_optimizer_parameters, decode_tokens, prepare_data,\n+ GPTNeoX_Pipe)\n+from gpt_neox.datasets import GPT2Dataset\n+from gpt_neox.utils import is_main\n+import gpt_neox\n \n-from gpt_neox.utils import get_args, get_params\n-\n-import GPUtil\n+WORLD_SIZE = os.getenv('WORLD_SIZE')\n \n # arguments\n train_args = get_args()\n", "issue": "Implement 1-Bit Adam\nIntegrate 1-bit Adam into our model. The DeepSpeed tutorial can be found [here](https://www.deepspeed.ai/tutorials/onebit-adam/)\n", "before_files": [{"content": "import random\nimport deepspeed\nimport torch\nfrom torch.utils.data import DataLoader\nfrom tqdm.auto import trange\nimport torch.distributed as distributed\n\nfrom gpt_neox import (GPTNeoX_Pipe, AutoregressiveWrapper, GPT2Dataset, extract_tarfile,\n prepare_optimizer_parameters, get_tokenizer, is_main, prepare_data)\n\nfrom gpt_neox.utils import get_args, get_params\n\nimport GPUtil\n\n# arguments\ntrain_args = get_args()\nparams = get_params(train_args.model)\n\n# tokenizer\ntokenizer = get_tokenizer(tokenizer_type=params[\"tokenizer\"].get(\"type\", None),\n from_pretrained=params[\"tokenizer\"].get(\"from_pretrained\", True),\n add_padding_token=params[\"tokenizer\"].get(\"add_padding_token\", False))\nvocab_size = len(tokenizer) if params[\"vocab_size\"] is None else params[\"vocab_size\"]\n\n# model\ndeepspeed.init_distributed(dist_backend='nccl')\ntorch.distributed.barrier() # barrier will force processes to stop until *all* processes have reached the barrier\n\ndef loss_function(x, y):\n losses = torch.nn.functional.cross_entropy(x, y, reduction='none')\n loss = losses.mean()\n return loss\n \nmodel = GPTNeoX_Pipe(\n num_tokens=params[\"vocab_size\"],\n dim=params[\"hidden_dim\"],\n seq_len=params[\"seq_len\"],\n depth=params[\"n_layers\"],\n heads=params[\"n_heads\"],\n dim_head=params[\"dim_head\"],\n loss_fn = loss_function,#torch.nn.CrossEntropyLoss(),\n num_stages = params.get(\"pipeline_num_stages\", 2)\n)\nmodel = AutoregressiveWrapper(model)\n\n# optimizer\nds_model_params = prepare_optimizer_parameters(model)\noptim = torch.optim.Adam(model.parameters(), lr=params[\"learning_rate\"])\n\n# prepare data\ndset_params = params[\"dataset\"]\nassert dset_params is not None\n\nif is_main(train_args):\n prepare_data(dset_params[\"name\"])\n torch.distributed.barrier() # barrier will force processes to stop until *all* processes have reached the barrier\nelse:\n torch.distributed.barrier()\n \n# data loading\ntrain_dataset = GPT2Dataset(glob_pattern=dset_params[\"train_path\"],\n seq_len=params[\"seq_len\"],\n train=True,\n **dset_params)\ntrain_loader = model_engine.deepspeed_io(train_dataset, pin_memory=params.get(\"pin_memory\", False))\n\neval_dataset = GPT2Dataset(glob_pattern=dset_params[\"eval_path\"],\n seq_len=params[\"seq_len\"],\n train=False,\n **dset_params)\n\nval_loader = DataLoader(eval_dataset, 
batch_size=params[\"eval_batch_size\"])\nval_loader = iter(val_loader)\n\n# deepspeed loader\nmodel_engine, optim, train_loader, _ = deepspeed.initialize(args=train_args,\n model=model,\n optimizer=optim,\n model_parameters=ds_model_params,\n training_data=train_dataset)\n\n\nbatches_to_train = 10000\n\npbar = trange(params[\"num_epochs\"], mininterval=10., desc='Training Model', dynamic_ncols=True)\nfor _ in pbar:\n for i in range(batches_to_train):\n\n is_main = model_engine.local_rank == 0\n\n loss = model_engine.train_batch()\n\n pbar.set_description(f'Training Loss: {loss.item():.4f}')\n pbar.update()\n", "path": "train_pipeline.py"}], "after_files": [{"content": "import argparse\nimport json\nimport random\nfrom collections import defaultdict\nimport os\nimport deepspeed\nimport torch\nfrom torch.utils.data import DataLoader\nfrom tqdm.auto import trange\n\nfrom gpt_neox import (GPTNeoX, AutoregressiveWrapper, TextSamplerDataset,\n cycle, prepare_optimizer_parameters, decode_tokens, prepare_data,\n GPTNeoX_Pipe)\nfrom gpt_neox.datasets import GPT2Dataset\nfrom gpt_neox.utils import is_main\nimport gpt_neox\n\nWORLD_SIZE = os.getenv('WORLD_SIZE')\n\n# arguments\ntrain_args = get_args()\nparams = get_params(train_args.model)\n\n# tokenizer\ntokenizer = get_tokenizer(tokenizer_type=params[\"tokenizer\"].get(\"type\", None),\n from_pretrained=params[\"tokenizer\"].get(\"from_pretrained\", True),\n add_padding_token=params[\"tokenizer\"].get(\"add_padding_token\", False))\nvocab_size = len(tokenizer) if params[\"vocab_size\"] is None else params[\"vocab_size\"]\n\n# model\ndeepspeed.init_distributed(dist_backend='nccl')\ntorch.distributed.barrier() # barrier will force processes to stop until *all* processes have reached the barrier\n\ndef loss_function(x, y):\n losses = torch.nn.functional.cross_entropy(x, y, reduction='none')\n loss = losses.mean()\n return loss\n \nmodel = GPTNeoX_Pipe(\n num_tokens=params[\"vocab_size\"],\n dim=params[\"hidden_dim\"],\n seq_len=params[\"seq_len\"],\n depth=params[\"n_layers\"],\n heads=params[\"n_heads\"],\n dim_head=params[\"dim_head\"],\n loss_fn = loss_function,#torch.nn.CrossEntropyLoss(),\n num_stages = params.get(\"pipeline_num_stages\", 2)\n)\nmodel = AutoregressiveWrapper(model)\n\n# optimizer\nds_model_params = prepare_optimizer_parameters(model)\noptim = torch.optim.Adam(model.parameters(), lr=params[\"learning_rate\"])\n\n# prepare data\ndset_params = params[\"dataset\"]\nassert dset_params is not None\n\nif is_main(train_args):\n prepare_data(dset_params[\"name\"])\n torch.distributed.barrier() # barrier will force processes to stop until *all* processes have reached the barrier\nelse:\n torch.distributed.barrier()\n \n# data loading\ntrain_dataset = GPT2Dataset(glob_pattern=dset_params[\"train_path\"],\n seq_len=params[\"seq_len\"],\n train=True,\n **dset_params)\ntrain_loader = model_engine.deepspeed_io(train_dataset, pin_memory=params.get(\"pin_memory\", False))\n\neval_dataset = GPT2Dataset(glob_pattern=dset_params[\"eval_path\"],\n seq_len=params[\"seq_len\"],\n train=False,\n **dset_params)\n\nval_loader = DataLoader(eval_dataset, batch_size=params[\"eval_batch_size\"])\nval_loader = iter(val_loader)\n\n# deepspeed loader\nmodel_engine, optim, train_loader, _ = deepspeed.initialize(args=train_args,\n model=model,\n optimizer=optim,\n model_parameters=ds_model_params,\n training_data=train_dataset)\n\n\nbatches_to_train = 10000\n\npbar = trange(params[\"num_epochs\"], mininterval=10., desc='Training Model', dynamic_ncols=True)\nfor _ 
in pbar:\n for i in range(batches_to_train):\n\n is_main = model_engine.local_rank == 0\n\n loss = model_engine.train_batch()\n\n pbar.set_description(f'Training Loss: {loss.item():.4f}')\n pbar.update()\n", "path": "train_pipeline.py"}]} | 1,202 | 244 |
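
The merged diff in this row only reorganizes imports; the actual 1-bit Adam switch happens in the DeepSpeed config rather than in Python code. Below is a sketch of the relevant config fragment, written as a Python dict for readability — the hyperparameter values are placeholders in the spirit of the DeepSpeed tutorial, not tuned settings, and with a config-defined optimizer the explicit `torch.optim.Adam` instance in `train_pipeline.py` would normally no longer be passed to `deepspeed.initialize`:

```python
# Hypothetical fragment of the DeepSpeed config (normally a JSON file passed
# via --deepspeed_config). "OneBitAdam" is the optimizer type from the
# DeepSpeed 1-bit Adam tutorial; lr and freeze_step are placeholder values.
onebit_adam_config = {
    "optimizer": {
        "type": "OneBitAdam",
        "params": {
            "lr": 2e-4,
            "weight_decay": 0.01,
            "freeze_step": 2000,   # plain-Adam warmup steps before compression starts
            "cuda_aware": False,   # requires a CUDA-aware MPI build when True
        },
    },
    "fp16": {"enabled": True},
}
```
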
gh_patches_debug_33275 | rasdani/github-patches | git_diff | liberapay__liberapay.com-82 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Switch Jinja templates to line statements
Because they're easier to type (especially on an azerty keyboard): `% if ...` instead of `{% if ... %}`. [Documentation](http://jinja.pocoo.org/docs/dev/templates/#line-statements).
Any objections?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `liberapay/utils/emails.py`
Content:
```
1 from __future__ import unicode_literals
2
3 from aspen.simplates.pagination import parse_specline, split_and_escape
4 from aspen_jinja2_renderer import SimplateLoader
5 from jinja2 import Environment
6
7
8 ( VERIFICATION_MISSING
9 , VERIFICATION_FAILED
10 , VERIFICATION_EXPIRED
11 , VERIFICATION_REDUNDANT
12 , VERIFICATION_STYMIED
13 , VERIFICATION_SUCCEEDED
14 ) = range(6)
15
16
17 jinja_env = Environment(trim_blocks=True, lstrip_blocks=True)
18 jinja_env_html = Environment(
19 trim_blocks=True, lstrip_blocks=True,
20 autoescape=True, extensions=['jinja2.ext.autoescape'],
21 )
22
23 def compile_email_spt(fpath):
24 r = {}
25 with open(fpath) as f:
26 pages = list(split_and_escape(f.read()))
27 for i, page in enumerate(pages, 1):
28 tmpl = b'\n' * page.offset + page.content
29 content_type, renderer = parse_specline(page.header)
30 key = 'subject' if i == 1 else content_type
31 env = jinja_env_html if content_type == 'text/html' else jinja_env
32 r[key] = SimplateLoader(fpath, tmpl).load(env, fpath)
33 return r
34
```
Path: `liberapay/constants.py`
Content:
```
1 from __future__ import print_function, unicode_literals
2
3 from collections import OrderedDict
4 from datetime import date, timedelta
5 from decimal import Decimal
6 import re
7
8
9 ASCII_ALLOWED_IN_USERNAME = set("0123456789"
10 "abcdefghijklmnopqrstuvwxyz"
11 "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
12 "-_")
13
14
15 BIRTHDAY = date(2015, 5, 22)
16
17 EMAIL_VERIFICATION_TIMEOUT = timedelta(hours=24)
18 EMAIL_RE = re.compile(r'^[^@]+@[^@]+\.[^@]+$')
19
20 MAX_TIP = Decimal('100.00')
21 MIN_TIP = Decimal('0.01')
22
23 QUARANTINE = timedelta(weeks=4)
24
25 PASSWORD_MIN_SIZE = 8
26 PASSWORD_MAX_SIZE = 150
27
28 _ = lambda a: a
29 PRIVACY_FIELDS = OrderedDict([
30 ('hide_giving', _("Hide total giving from others.")),
31 ('hide_receiving', _("Hide total receiving from others.")),
32 ('hide_from_search', _("Hide myself from search results.")),
33 ])
34 del _
35 PRIVACY_FIELDS_S = ' '.join(PRIVACY_FIELDS.keys())
36
37 SESSION = b'session'
38 SESSION_REFRESH = timedelta(hours=1)
39 SESSION_TIMEOUT = timedelta(hours=6)
40
41 USERNAME_MAX_SIZE = 32
42
```
Path: `liberapay/renderers/jinja2_htmlescaped.py`
Content:
```
1 import aspen_jinja2_renderer as base
2
3 from markupsafe import escape as htmlescape
4
5
6 class HTMLRenderer(base.Renderer):
7 def render_content(self, context):
8
9 # Extend to inject an HTML-escaping function. Since autoescape is on,
10 # template authors shouldn't normally need to use this function, but
11 # having it in the simplate context makes it easier to implement i18n.
12
13 context['escape'] = context['state']['escape'] = htmlescape
14
15 return base.Renderer.render_content(self, context)
16
17
18 class Factory(base.Factory):
19
20 Renderer = HTMLRenderer
21
22 def compile_meta(self, configuration):
23 # Override to turn on autoescaping.
24 loader = base.FileSystemLoader(configuration.project_root)
25 return base.Environment( loader=loader
26 , autoescape=True
27 , extensions=['jinja2.ext.autoescape']
28 )
29
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/liberapay/constants.py b/liberapay/constants.py
--- a/liberapay/constants.py
+++ b/liberapay/constants.py
@@ -17,6 +17,11 @@
EMAIL_VERIFICATION_TIMEOUT = timedelta(hours=24)
EMAIL_RE = re.compile(r'^[^@]+@[^@]+\.[^@]+$')
+JINJA_ENV_COMMON = dict(
+ trim_blocks=True, lstrip_blocks=True,
+ line_statement_prefix='%', line_comment_prefix='#',
+)
+
MAX_TIP = Decimal('100.00')
MIN_TIP = Decimal('0.01')
diff --git a/liberapay/renderers/jinja2_htmlescaped.py b/liberapay/renderers/jinja2_htmlescaped.py
--- a/liberapay/renderers/jinja2_htmlescaped.py
+++ b/liberapay/renderers/jinja2_htmlescaped.py
@@ -2,6 +2,8 @@
from markupsafe import escape as htmlescape
+from liberapay.constants import JINJA_ENV_COMMON
+
class HTMLRenderer(base.Renderer):
def render_content(self, context):
@@ -22,7 +24,8 @@
def compile_meta(self, configuration):
# Override to turn on autoescaping.
loader = base.FileSystemLoader(configuration.project_root)
- return base.Environment( loader=loader
- , autoescape=True
- , extensions=['jinja2.ext.autoescape']
- )
+ return base.Environment(
+ loader=loader,
+ autoescape=True, extensions=['jinja2.ext.autoescape'],
+ **JINJA_ENV_COMMON
+ )
diff --git a/liberapay/utils/emails.py b/liberapay/utils/emails.py
--- a/liberapay/utils/emails.py
+++ b/liberapay/utils/emails.py
@@ -4,6 +4,8 @@
from aspen_jinja2_renderer import SimplateLoader
from jinja2 import Environment
+from liberapay.constants import JINJA_ENV_COMMON
+
( VERIFICATION_MISSING
, VERIFICATION_FAILED
@@ -14,10 +16,10 @@
) = range(6)
-jinja_env = Environment(trim_blocks=True, lstrip_blocks=True)
+jinja_env = Environment(**JINJA_ENV_COMMON)
jinja_env_html = Environment(
- trim_blocks=True, lstrip_blocks=True,
autoescape=True, extensions=['jinja2.ext.autoescape'],
+ **JINJA_ENV_COMMON
)
def compile_email_spt(fpath):
| {"golden_diff": "diff --git a/liberapay/constants.py b/liberapay/constants.py\n--- a/liberapay/constants.py\n+++ b/liberapay/constants.py\n@@ -17,6 +17,11 @@\n EMAIL_VERIFICATION_TIMEOUT = timedelta(hours=24)\n EMAIL_RE = re.compile(r'^[^@]+@[^@]+\\.[^@]+$')\n \n+JINJA_ENV_COMMON = dict(\n+ trim_blocks=True, lstrip_blocks=True,\n+ line_statement_prefix='%', line_comment_prefix='#',\n+)\n+\n MAX_TIP = Decimal('100.00')\n MIN_TIP = Decimal('0.01')\n \ndiff --git a/liberapay/renderers/jinja2_htmlescaped.py b/liberapay/renderers/jinja2_htmlescaped.py\n--- a/liberapay/renderers/jinja2_htmlescaped.py\n+++ b/liberapay/renderers/jinja2_htmlescaped.py\n@@ -2,6 +2,8 @@\n \n from markupsafe import escape as htmlescape\n \n+from liberapay.constants import JINJA_ENV_COMMON\n+\n \n class HTMLRenderer(base.Renderer):\n def render_content(self, context):\n@@ -22,7 +24,8 @@\n def compile_meta(self, configuration):\n # Override to turn on autoescaping.\n loader = base.FileSystemLoader(configuration.project_root)\n- return base.Environment( loader=loader\n- , autoescape=True\n- , extensions=['jinja2.ext.autoescape']\n- )\n+ return base.Environment(\n+ loader=loader,\n+ autoescape=True, extensions=['jinja2.ext.autoescape'],\n+ **JINJA_ENV_COMMON\n+ )\ndiff --git a/liberapay/utils/emails.py b/liberapay/utils/emails.py\n--- a/liberapay/utils/emails.py\n+++ b/liberapay/utils/emails.py\n@@ -4,6 +4,8 @@\n from aspen_jinja2_renderer import SimplateLoader\n from jinja2 import Environment\n \n+from liberapay.constants import JINJA_ENV_COMMON\n+\n \n ( VERIFICATION_MISSING\n , VERIFICATION_FAILED\n@@ -14,10 +16,10 @@\n ) = range(6)\n \n \n-jinja_env = Environment(trim_blocks=True, lstrip_blocks=True)\n+jinja_env = Environment(**JINJA_ENV_COMMON)\n jinja_env_html = Environment(\n- trim_blocks=True, lstrip_blocks=True,\n autoescape=True, extensions=['jinja2.ext.autoescape'],\n+ **JINJA_ENV_COMMON\n )\n \n def compile_email_spt(fpath):\n", "issue": "Switch Jinja templates to line statements\nBecause they're easier to type (especially on an azerty keyboard): `% if ...` instead of `{% if ... %}`. 
[Documentation](http://jinja.pocoo.org/docs/dev/templates/#line-statements).\n\nAny objections?\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nfrom aspen.simplates.pagination import parse_specline, split_and_escape\nfrom aspen_jinja2_renderer import SimplateLoader\nfrom jinja2 import Environment\n\n\n( VERIFICATION_MISSING\n, VERIFICATION_FAILED\n, VERIFICATION_EXPIRED\n, VERIFICATION_REDUNDANT\n, VERIFICATION_STYMIED\n, VERIFICATION_SUCCEEDED\n ) = range(6)\n\n\njinja_env = Environment(trim_blocks=True, lstrip_blocks=True)\njinja_env_html = Environment(\n trim_blocks=True, lstrip_blocks=True,\n autoescape=True, extensions=['jinja2.ext.autoescape'],\n)\n\ndef compile_email_spt(fpath):\n r = {}\n with open(fpath) as f:\n pages = list(split_and_escape(f.read()))\n for i, page in enumerate(pages, 1):\n tmpl = b'\\n' * page.offset + page.content\n content_type, renderer = parse_specline(page.header)\n key = 'subject' if i == 1 else content_type\n env = jinja_env_html if content_type == 'text/html' else jinja_env\n r[key] = SimplateLoader(fpath, tmpl).load(env, fpath)\n return r\n", "path": "liberapay/utils/emails.py"}, {"content": "from __future__ import print_function, unicode_literals\n\nfrom collections import OrderedDict\nfrom datetime import date, timedelta\nfrom decimal import Decimal\nimport re\n\n\nASCII_ALLOWED_IN_USERNAME = set(\"0123456789\"\n \"abcdefghijklmnopqrstuvwxyz\"\n \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n \"-_\")\n\n\nBIRTHDAY = date(2015, 5, 22)\n\nEMAIL_VERIFICATION_TIMEOUT = timedelta(hours=24)\nEMAIL_RE = re.compile(r'^[^@]+@[^@]+\\.[^@]+$')\n\nMAX_TIP = Decimal('100.00')\nMIN_TIP = Decimal('0.01')\n\nQUARANTINE = timedelta(weeks=4)\n\nPASSWORD_MIN_SIZE = 8\nPASSWORD_MAX_SIZE = 150\n\n_ = lambda a: a\nPRIVACY_FIELDS = OrderedDict([\n ('hide_giving', _(\"Hide total giving from others.\")),\n ('hide_receiving', _(\"Hide total receiving from others.\")),\n ('hide_from_search', _(\"Hide myself from search results.\")),\n])\ndel _\nPRIVACY_FIELDS_S = ' '.join(PRIVACY_FIELDS.keys())\n\nSESSION = b'session'\nSESSION_REFRESH = timedelta(hours=1)\nSESSION_TIMEOUT = timedelta(hours=6)\n\nUSERNAME_MAX_SIZE = 32\n", "path": "liberapay/constants.py"}, {"content": "import aspen_jinja2_renderer as base\n\nfrom markupsafe import escape as htmlescape\n\n\nclass HTMLRenderer(base.Renderer):\n def render_content(self, context):\n\n # Extend to inject an HTML-escaping function. 
Since autoescape is on,\n # template authors shouldn't normally need to use this function, but\n # having it in the simplate context makes it easier to implement i18n.\n\n context['escape'] = context['state']['escape'] = htmlescape\n\n return base.Renderer.render_content(self, context)\n\n\nclass Factory(base.Factory):\n\n Renderer = HTMLRenderer\n\n def compile_meta(self, configuration):\n # Override to turn on autoescaping.\n loader = base.FileSystemLoader(configuration.project_root)\n return base.Environment( loader=loader\n , autoescape=True\n , extensions=['jinja2.ext.autoescape']\n )\n", "path": "liberapay/renderers/jinja2_htmlescaped.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nfrom aspen.simplates.pagination import parse_specline, split_and_escape\nfrom aspen_jinja2_renderer import SimplateLoader\nfrom jinja2 import Environment\n\nfrom liberapay.constants import JINJA_ENV_COMMON\n\n\n( VERIFICATION_MISSING\n, VERIFICATION_FAILED\n, VERIFICATION_EXPIRED\n, VERIFICATION_REDUNDANT\n, VERIFICATION_STYMIED\n, VERIFICATION_SUCCEEDED\n ) = range(6)\n\n\njinja_env = Environment(**JINJA_ENV_COMMON)\njinja_env_html = Environment(\n autoescape=True, extensions=['jinja2.ext.autoescape'],\n **JINJA_ENV_COMMON\n)\n\ndef compile_email_spt(fpath):\n r = {}\n with open(fpath) as f:\n pages = list(split_and_escape(f.read()))\n for i, page in enumerate(pages, 1):\n tmpl = b'\\n' * page.offset + page.content\n content_type, renderer = parse_specline(page.header)\n key = 'subject' if i == 1 else content_type\n env = jinja_env_html if content_type == 'text/html' else jinja_env\n r[key] = SimplateLoader(fpath, tmpl).load(env, fpath)\n return r\n", "path": "liberapay/utils/emails.py"}, {"content": "from __future__ import print_function, unicode_literals\n\nfrom collections import OrderedDict\nfrom datetime import date, timedelta\nfrom decimal import Decimal\nimport re\n\n\nASCII_ALLOWED_IN_USERNAME = set(\"0123456789\"\n \"abcdefghijklmnopqrstuvwxyz\"\n \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n \"-_\")\n\n\nBIRTHDAY = date(2015, 5, 22)\n\nEMAIL_VERIFICATION_TIMEOUT = timedelta(hours=24)\nEMAIL_RE = re.compile(r'^[^@]+@[^@]+\\.[^@]+$')\n\nJINJA_ENV_COMMON = dict(\n trim_blocks=True, lstrip_blocks=True,\n line_statement_prefix='%', line_comment_prefix='#',\n)\n\nMAX_TIP = Decimal('100.00')\nMIN_TIP = Decimal('0.01')\n\nQUARANTINE = timedelta(weeks=4)\n\nPASSWORD_MIN_SIZE = 8\nPASSWORD_MAX_SIZE = 150\n\n_ = lambda a: a\nPRIVACY_FIELDS = OrderedDict([\n ('hide_giving', _(\"Hide total giving from others.\")),\n ('hide_receiving', _(\"Hide total receiving from others.\")),\n ('hide_from_search', _(\"Hide myself from search results.\")),\n])\ndel _\nPRIVACY_FIELDS_S = ' '.join(PRIVACY_FIELDS.keys())\n\nSESSION = b'session'\nSESSION_REFRESH = timedelta(hours=1)\nSESSION_TIMEOUT = timedelta(hours=6)\n\nUSERNAME_MAX_SIZE = 32\n", "path": "liberapay/constants.py"}, {"content": "import aspen_jinja2_renderer as base\n\nfrom markupsafe import escape as htmlescape\n\nfrom liberapay.constants import JINJA_ENV_COMMON\n\n\nclass HTMLRenderer(base.Renderer):\n def render_content(self, context):\n\n # Extend to inject an HTML-escaping function. 
Since autoescape is on,\n # template authors shouldn't normally need to use this function, but\n # having it in the simplate context makes it easier to implement i18n.\n\n context['escape'] = context['state']['escape'] = htmlescape\n\n return base.Renderer.render_content(self, context)\n\n\nclass Factory(base.Factory):\n\n Renderer = HTMLRenderer\n\n def compile_meta(self, configuration):\n # Override to turn on autoescaping.\n loader = base.FileSystemLoader(configuration.project_root)\n return base.Environment(\n loader=loader,\n autoescape=True, extensions=['jinja2.ext.autoescape'],\n **JINJA_ENV_COMMON\n )\n", "path": "liberapay/renderers/jinja2_htmlescaped.py"}]} | 1,278 | 568 |
gh_patches_debug_35591 | rasdani/github-patches | git_diff | azavea__raster-vision-692 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Avoid downloading all rasters
If you have many large scenes, the rasters will all be downloaded at once and you will run out of disk space. This is because the download occurs in the constructor of `RasterSources`. Instead, we should download rasters https://github.com/azavea/raster-vision/blob/develop/rastervision/data/raster_source/rasterio_source.py#L79 in the _activate method https://github.com/azavea/raster-vision/blob/develop/rastervision/data/raster_source/rasterio_source.py#L100 and delete them in the _deactivate method.
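A minimal sketch of that idea (hypothetical class mirroring `RasterioRasterSource`'s attributes, not the actual patch): the download happens lazily in `_activate` and the files are removed in `_deactivate`.

```python
# Sketch only: defer the download to _activate and delete the files in _deactivate,
# instead of downloading in __init__.
import tempfile
import rasterio


class LazyRasterSource:
    def __init__(self, temp_dir):
        self.temp_dir = temp_dir        # parent directory for per-activation downloads
        self.image_temp_dir = None
        self.image_dataset = None

    def _download_data(self, tmp_dir):
        # Subclasses download the imagery into tmp_dir and return a local path.
        raise NotImplementedError

    def _activate(self):
        # Download only when the source is actually activated.
        self.image_temp_dir = tempfile.TemporaryDirectory(dir=self.temp_dir)
        self.imagery_path = self._download_data(self.image_temp_dir.name)
        self.image_dataset = rasterio.open(self.imagery_path)

    def _deactivate(self):
        # Close the dataset and delete the downloaded files.
        self.image_dataset.close()
        self.image_dataset = None
        self.image_temp_dir.cleanup()
        self.image_temp_dir = None
```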
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rastervision/command/command.py`
Content:
```
1 from abc import ABC, abstractmethod
2
3 from rastervision.rv_config import RVConfig
4
5
6 class Command(ABC):
7 @abstractmethod
8 def run(self, tmp_dir):
9 """Run the command."""
10 pass
11
12 def set_tmp_dir(self, tmp_dir):
13 self._tmp_dir = tmp_dir
14
15 def get_tmp_dir(self):
16 if hasattr(self, '_tmp_dir') and self._tmp_dir:
17 if isinstance(self._tmp_dir, str):
18 return self._tmp_dir
19 else:
20 return self._tmp_dir.name
21 else:
22 return RVConfig.get_tmp_dir().name
23
24
25 class NoOpCommand(Command):
26 """Defines a command that does nothing.
27 """
28
29 def run(self, tmp_dir):
30 pass
31
```
Path: `rastervision/data/raster_source/rasterio_source.py`
Content:
```
1 from abc import abstractmethod
2
3 import numpy as np
4 import rasterio
5 from rasterio.enums import (ColorInterp, MaskFlags)
6
7 from rastervision.data import (ActivateMixin, ActivationError)
8 from rastervision.data.raster_source import RasterSource
9 from rastervision.core.box import Box
10
11
12 def load_window(image_dataset, window=None, channels=None, is_masked=False):
13 """Load a window of an image from a TIFF file.
14
15 Args:
16 window: ((row_start, row_stop), (col_start, col_stop)) or
17 ((y_min, y_max), (x_min, x_max))
18 channels: An optional list of bands to read.
19 is_masked: If True, read a masked array from rasterio
20 """
21 if is_masked:
22 im = image_dataset.read(window=window, boundless=True, masked=True)
23 im = np.ma.filled(im, fill_value=0)
24 else:
25 im = image_dataset.read(window=window, boundless=True)
26
27 # Handle non-zero NODATA values by setting the data to 0.
28 for channel, nodata in enumerate(image_dataset.nodatavals):
29 if nodata is not None and nodata != 0:
30 im[channel, im[channel] == nodata] = 0
31
32 if channels:
33 im = im[channels, :]
34 im = np.transpose(im, axes=[1, 2, 0])
35 return im
36
37
38 class RasterioRasterSource(ActivateMixin, RasterSource):
39 def __init__(self, raster_transformers, temp_dir, channel_order=None):
40 self.temp_dir = temp_dir
41 self.imagery_path = self._download_data(temp_dir)
42
43 num_channels = None
44
45 # Activate in order to get information out of the raster
46 with self.activate():
47 colorinterp = self.image_dataset.colorinterp
48 self.channels = [
49 i for i, color_interp in enumerate(colorinterp)
50 if color_interp != ColorInterp.alpha
51 ]
52
53 mask_flags = self.image_dataset.mask_flag_enums
54 self.is_masked = any(
55 [m for m in mask_flags if m != MaskFlags.all_valid])
56
57 self.height = self.image_dataset.height
58 self.width = self.image_dataset.width
59 # Get 1x1 chip (after applying raster transformers) to test dtype
60 # and channel order if needed
61 test_chip = self.get_raw_chip(Box.make_square(0, 0, 1))
62
63 raw_channels = list(range(0, test_chip.shape[2]))
64 self.channel_order = channel_order or raw_channels
65 num_channels = len(raw_channels)
66
67 # Transform the chip to get the final dtype
68 test_chip = test_chip[:, :, self.channel_order]
69 for transformer in raster_transformers:
70 test_chip = transformer.transform(test_chip, channel_order)
71
72 self.dtype = test_chip.dtype
73
74 self._set_crs_transformer()
75
76 super().__init__(channel_order, num_channels, raster_transformers)
77
78 @abstractmethod
79 def _download_data(self, tmp_dir):
80 """Download any data needed for this Raster Source.
81 Return a single local path representing the image or a VRT of the data."""
82 pass
83
84 def get_crs_transformer(self):
85 return self.crs_transformer
86
87 def get_extent(self):
88 return Box(0, 0, self.height, self.width)
89
90 def get_dtype(self):
91 """Return the numpy.dtype of this scene"""
92 return self.dtype
93
94 def _get_chip(self, window):
95 if self.image_dataset is None:
96 raise ActivationError('RasterSource must be activated before use')
97 return load_window(self.image_dataset, window.rasterio_format(),
98 self.channels)
99
100 def _activate(self):
101 self.image_dataset = rasterio.open(self.imagery_path)
102
103 def _deactivate(self):
104 self.image_dataset.close()
105 self.image_dataset = None
106
```
Path: `rastervision/data/raster_source/image_source.py`
Content:
```
1 from rastervision.data.raster_source.rasterio_source import (
2 RasterioRasterSource)
3 from rastervision.data.crs_transformer.identity_crs_transformer import (
4 IdentityCRSTransformer)
5 from rastervision.utils.files import download_if_needed
6
7
8 class ImageSource(RasterioRasterSource):
9 def __init__(self, uri, raster_transformers, temp_dir, channel_order=None):
10 self.uri = uri
11 super().__init__(raster_transformers, temp_dir, channel_order)
12
13 def _download_data(self, temp_dir):
14 return download_if_needed(self.uri, self.temp_dir)
15
16 def _set_crs_transformer(self):
17 self.crs_transformer = IdentityCRSTransformer()
18
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/rastervision/command/command.py b/rastervision/command/command.py
--- a/rastervision/command/command.py
+++ b/rastervision/command/command.py
@@ -19,7 +19,9 @@
else:
return self._tmp_dir.name
else:
- return RVConfig.get_tmp_dir().name
+ tmp_dir = RVConfig.get_tmp_dir()
+ self.set_tmp_dir(tmp_dir)
+ return tmp_dir.name
class NoOpCommand(Command):
diff --git a/rastervision/data/raster_source/image_source.py b/rastervision/data/raster_source/image_source.py
--- a/rastervision/data/raster_source/image_source.py
+++ b/rastervision/data/raster_source/image_source.py
@@ -11,7 +11,7 @@
super().__init__(raster_transformers, temp_dir, channel_order)
def _download_data(self, temp_dir):
- return download_if_needed(self.uri, self.temp_dir)
+ return download_if_needed(self.uri, temp_dir)
def _set_crs_transformer(self):
self.crs_transformer = IdentityCRSTransformer()
diff --git a/rastervision/data/raster_source/rasterio_source.py b/rastervision/data/raster_source/rasterio_source.py
--- a/rastervision/data/raster_source/rasterio_source.py
+++ b/rastervision/data/raster_source/rasterio_source.py
@@ -1,4 +1,5 @@
from abc import abstractmethod
+import tempfile
import numpy as np
import rasterio
@@ -38,8 +39,8 @@
class RasterioRasterSource(ActivateMixin, RasterSource):
def __init__(self, raster_transformers, temp_dir, channel_order=None):
self.temp_dir = temp_dir
- self.imagery_path = self._download_data(temp_dir)
-
+ self.image_temp_dir = None
+ self.image_dataset = None
num_channels = None
# Activate in order to get information out of the raster
@@ -98,8 +99,14 @@
self.channels)
def _activate(self):
+ # Download images to temporary directory and delete it when done.
+ self.image_temp_dir = tempfile.TemporaryDirectory(dir=self.temp_dir)
+ self.imagery_path = self._download_data(self.image_temp_dir.name)
self.image_dataset = rasterio.open(self.imagery_path)
+ self._set_crs_transformer()
def _deactivate(self):
self.image_dataset.close()
self.image_dataset = None
+ self.image_temp_dir.cleanup()
+ self.image_temp_dir = None
| {"golden_diff": "diff --git a/rastervision/command/command.py b/rastervision/command/command.py\n--- a/rastervision/command/command.py\n+++ b/rastervision/command/command.py\n@@ -19,7 +19,9 @@\n else:\n return self._tmp_dir.name\n else:\n- return RVConfig.get_tmp_dir().name\n+ tmp_dir = RVConfig.get_tmp_dir()\n+ self.set_tmp_dir(tmp_dir)\n+ return tmp_dir.name\n \n \n class NoOpCommand(Command):\ndiff --git a/rastervision/data/raster_source/image_source.py b/rastervision/data/raster_source/image_source.py\n--- a/rastervision/data/raster_source/image_source.py\n+++ b/rastervision/data/raster_source/image_source.py\n@@ -11,7 +11,7 @@\n super().__init__(raster_transformers, temp_dir, channel_order)\n \n def _download_data(self, temp_dir):\n- return download_if_needed(self.uri, self.temp_dir)\n+ return download_if_needed(self.uri, temp_dir)\n \n def _set_crs_transformer(self):\n self.crs_transformer = IdentityCRSTransformer()\ndiff --git a/rastervision/data/raster_source/rasterio_source.py b/rastervision/data/raster_source/rasterio_source.py\n--- a/rastervision/data/raster_source/rasterio_source.py\n+++ b/rastervision/data/raster_source/rasterio_source.py\n@@ -1,4 +1,5 @@\n from abc import abstractmethod\n+import tempfile\n \n import numpy as np\n import rasterio\n@@ -38,8 +39,8 @@\n class RasterioRasterSource(ActivateMixin, RasterSource):\n def __init__(self, raster_transformers, temp_dir, channel_order=None):\n self.temp_dir = temp_dir\n- self.imagery_path = self._download_data(temp_dir)\n-\n+ self.image_temp_dir = None\n+ self.image_dataset = None\n num_channels = None\n \n # Activate in order to get information out of the raster\n@@ -98,8 +99,14 @@\n self.channels)\n \n def _activate(self):\n+ # Download images to temporary directory and delete it when done.\n+ self.image_temp_dir = tempfile.TemporaryDirectory(dir=self.temp_dir)\n+ self.imagery_path = self._download_data(self.image_temp_dir.name)\n self.image_dataset = rasterio.open(self.imagery_path)\n+ self._set_crs_transformer()\n \n def _deactivate(self):\n self.image_dataset.close()\n self.image_dataset = None\n+ self.image_temp_dir.cleanup()\n+ self.image_temp_dir = None\n", "issue": "Avoid downloading all rasters\nIf you have many large scenes, the rasters will all be downloaded at once and you will run out of disk space. This is because the download occurs in the constructor of `RasterSources`. Instead, dwe should download rasters https://github.com/azavea/raster-vision/blob/develop/rastervision/data/raster_source/rasterio_source.py#L79 in the _activate method https://github.com/azavea/raster-vision/blob/develop/rastervision/data/raster_source/rasterio_source.py#L100 and delete them in the _deactivate method. 
\n", "before_files": [{"content": "from abc import ABC, abstractmethod\n\nfrom rastervision.rv_config import RVConfig\n\n\nclass Command(ABC):\n @abstractmethod\n def run(self, tmp_dir):\n \"\"\"Run the command.\"\"\"\n pass\n\n def set_tmp_dir(self, tmp_dir):\n self._tmp_dir = tmp_dir\n\n def get_tmp_dir(self):\n if hasattr(self, '_tmp_dir') and self._tmp_dir:\n if isinstance(self._tmp_dir, str):\n return self._tmp_dir\n else:\n return self._tmp_dir.name\n else:\n return RVConfig.get_tmp_dir().name\n\n\nclass NoOpCommand(Command):\n \"\"\"Defines a command that does nothing.\n \"\"\"\n\n def run(self, tmp_dir):\n pass\n", "path": "rastervision/command/command.py"}, {"content": "from abc import abstractmethod\n\nimport numpy as np\nimport rasterio\nfrom rasterio.enums import (ColorInterp, MaskFlags)\n\nfrom rastervision.data import (ActivateMixin, ActivationError)\nfrom rastervision.data.raster_source import RasterSource\nfrom rastervision.core.box import Box\n\n\ndef load_window(image_dataset, window=None, channels=None, is_masked=False):\n \"\"\"Load a window of an image from a TIFF file.\n\n Args:\n window: ((row_start, row_stop), (col_start, col_stop)) or\n ((y_min, y_max), (x_min, x_max))\n channels: An optional list of bands to read.\n is_masked: If True, read a masked array from rasterio\n \"\"\"\n if is_masked:\n im = image_dataset.read(window=window, boundless=True, masked=True)\n im = np.ma.filled(im, fill_value=0)\n else:\n im = image_dataset.read(window=window, boundless=True)\n\n # Handle non-zero NODATA values by setting the data to 0.\n for channel, nodata in enumerate(image_dataset.nodatavals):\n if nodata is not None and nodata != 0:\n im[channel, im[channel] == nodata] = 0\n\n if channels:\n im = im[channels, :]\n im = np.transpose(im, axes=[1, 2, 0])\n return im\n\n\nclass RasterioRasterSource(ActivateMixin, RasterSource):\n def __init__(self, raster_transformers, temp_dir, channel_order=None):\n self.temp_dir = temp_dir\n self.imagery_path = self._download_data(temp_dir)\n\n num_channels = None\n\n # Activate in order to get information out of the raster\n with self.activate():\n colorinterp = self.image_dataset.colorinterp\n self.channels = [\n i for i, color_interp in enumerate(colorinterp)\n if color_interp != ColorInterp.alpha\n ]\n\n mask_flags = self.image_dataset.mask_flag_enums\n self.is_masked = any(\n [m for m in mask_flags if m != MaskFlags.all_valid])\n\n self.height = self.image_dataset.height\n self.width = self.image_dataset.width\n # Get 1x1 chip (after applying raster transformers) to test dtype\n # and channel order if needed\n test_chip = self.get_raw_chip(Box.make_square(0, 0, 1))\n\n raw_channels = list(range(0, test_chip.shape[2]))\n self.channel_order = channel_order or raw_channels\n num_channels = len(raw_channels)\n\n # Transform the chip to get the final dtype\n test_chip = test_chip[:, :, self.channel_order]\n for transformer in raster_transformers:\n test_chip = transformer.transform(test_chip, channel_order)\n\n self.dtype = test_chip.dtype\n\n self._set_crs_transformer()\n\n super().__init__(channel_order, num_channels, raster_transformers)\n\n @abstractmethod\n def _download_data(self, tmp_dir):\n \"\"\"Download any data needed for this Raster Source.\n Return a single local path representing the image or a VRT of the data.\"\"\"\n pass\n\n def get_crs_transformer(self):\n return self.crs_transformer\n\n def get_extent(self):\n return Box(0, 0, self.height, self.width)\n\n def get_dtype(self):\n \"\"\"Return the numpy.dtype of this 
scene\"\"\"\n return self.dtype\n\n def _get_chip(self, window):\n if self.image_dataset is None:\n raise ActivationError('RasterSource must be activated before use')\n return load_window(self.image_dataset, window.rasterio_format(),\n self.channels)\n\n def _activate(self):\n self.image_dataset = rasterio.open(self.imagery_path)\n\n def _deactivate(self):\n self.image_dataset.close()\n self.image_dataset = None\n", "path": "rastervision/data/raster_source/rasterio_source.py"}, {"content": "from rastervision.data.raster_source.rasterio_source import (\n RasterioRasterSource)\nfrom rastervision.data.crs_transformer.identity_crs_transformer import (\n IdentityCRSTransformer)\nfrom rastervision.utils.files import download_if_needed\n\n\nclass ImageSource(RasterioRasterSource):\n def __init__(self, uri, raster_transformers, temp_dir, channel_order=None):\n self.uri = uri\n super().__init__(raster_transformers, temp_dir, channel_order)\n\n def _download_data(self, temp_dir):\n return download_if_needed(self.uri, self.temp_dir)\n\n def _set_crs_transformer(self):\n self.crs_transformer = IdentityCRSTransformer()\n", "path": "rastervision/data/raster_source/image_source.py"}], "after_files": [{"content": "from abc import ABC, abstractmethod\n\nfrom rastervision.rv_config import RVConfig\n\n\nclass Command(ABC):\n @abstractmethod\n def run(self, tmp_dir):\n \"\"\"Run the command.\"\"\"\n pass\n\n def set_tmp_dir(self, tmp_dir):\n self._tmp_dir = tmp_dir\n\n def get_tmp_dir(self):\n if hasattr(self, '_tmp_dir') and self._tmp_dir:\n if isinstance(self._tmp_dir, str):\n return self._tmp_dir\n else:\n return self._tmp_dir.name\n else:\n tmp_dir = RVConfig.get_tmp_dir()\n self.set_tmp_dir(tmp_dir)\n return tmp_dir.name\n\n\nclass NoOpCommand(Command):\n \"\"\"Defines a command that does nothing.\n \"\"\"\n\n def run(self, tmp_dir):\n pass\n", "path": "rastervision/command/command.py"}, {"content": "from abc import abstractmethod\nimport tempfile\n\nimport numpy as np\nimport rasterio\nfrom rasterio.enums import (ColorInterp, MaskFlags)\n\nfrom rastervision.data import (ActivateMixin, ActivationError)\nfrom rastervision.data.raster_source import RasterSource\nfrom rastervision.core.box import Box\n\n\ndef load_window(image_dataset, window=None, channels=None, is_masked=False):\n \"\"\"Load a window of an image from a TIFF file.\n\n Args:\n window: ((row_start, row_stop), (col_start, col_stop)) or\n ((y_min, y_max), (x_min, x_max))\n channels: An optional list of bands to read.\n is_masked: If True, read a masked array from rasterio\n \"\"\"\n if is_masked:\n im = image_dataset.read(window=window, boundless=True, masked=True)\n im = np.ma.filled(im, fill_value=0)\n else:\n im = image_dataset.read(window=window, boundless=True)\n\n # Handle non-zero NODATA values by setting the data to 0.\n for channel, nodata in enumerate(image_dataset.nodatavals):\n if nodata is not None and nodata != 0:\n im[channel, im[channel] == nodata] = 0\n\n if channels:\n im = im[channels, :]\n im = np.transpose(im, axes=[1, 2, 0])\n return im\n\n\nclass RasterioRasterSource(ActivateMixin, RasterSource):\n def __init__(self, raster_transformers, temp_dir, channel_order=None):\n self.temp_dir = temp_dir\n self.image_temp_dir = None\n self.image_dataset = None\n num_channels = None\n\n # Activate in order to get information out of the raster\n with self.activate():\n colorinterp = self.image_dataset.colorinterp\n self.channels = [\n i for i, color_interp in enumerate(colorinterp)\n if color_interp != ColorInterp.alpha\n ]\n\n 
mask_flags = self.image_dataset.mask_flag_enums\n self.is_masked = any(\n [m for m in mask_flags if m != MaskFlags.all_valid])\n\n self.height = self.image_dataset.height\n self.width = self.image_dataset.width\n # Get 1x1 chip (after applying raster transformers) to test dtype\n # and channel order if needed\n test_chip = self.get_raw_chip(Box.make_square(0, 0, 1))\n\n raw_channels = list(range(0, test_chip.shape[2]))\n self.channel_order = channel_order or raw_channels\n num_channels = len(raw_channels)\n\n # Transform the chip to get the final dtype\n test_chip = test_chip[:, :, self.channel_order]\n for transformer in raster_transformers:\n test_chip = transformer.transform(test_chip, channel_order)\n\n self.dtype = test_chip.dtype\n\n self._set_crs_transformer()\n\n super().__init__(channel_order, num_channels, raster_transformers)\n\n @abstractmethod\n def _download_data(self, tmp_dir):\n \"\"\"Download any data needed for this Raster Source.\n Return a single local path representing the image or a VRT of the data.\"\"\"\n pass\n\n def get_crs_transformer(self):\n return self.crs_transformer\n\n def get_extent(self):\n return Box(0, 0, self.height, self.width)\n\n def get_dtype(self):\n \"\"\"Return the numpy.dtype of this scene\"\"\"\n return self.dtype\n\n def _get_chip(self, window):\n if self.image_dataset is None:\n raise ActivationError('RasterSource must be activated before use')\n return load_window(self.image_dataset, window.rasterio_format(),\n self.channels)\n\n def _activate(self):\n # Download images to temporary directory and delete it when done.\n self.image_temp_dir = tempfile.TemporaryDirectory(dir=self.temp_dir)\n self.imagery_path = self._download_data(self.image_temp_dir.name)\n self.image_dataset = rasterio.open(self.imagery_path)\n self._set_crs_transformer()\n\n def _deactivate(self):\n self.image_dataset.close()\n self.image_dataset = None\n self.image_temp_dir.cleanup()\n self.image_temp_dir = None\n", "path": "rastervision/data/raster_source/rasterio_source.py"}, {"content": "from rastervision.data.raster_source.rasterio_source import (\n RasterioRasterSource)\nfrom rastervision.data.crs_transformer.identity_crs_transformer import (\n IdentityCRSTransformer)\nfrom rastervision.utils.files import download_if_needed\n\n\nclass ImageSource(RasterioRasterSource):\n def __init__(self, uri, raster_transformers, temp_dir, channel_order=None):\n self.uri = uri\n super().__init__(raster_transformers, temp_dir, channel_order)\n\n def _download_data(self, temp_dir):\n return download_if_needed(self.uri, temp_dir)\n\n def _set_crs_transformer(self):\n self.crs_transformer = IdentityCRSTransformer()\n", "path": "rastervision/data/raster_source/image_source.py"}]} | 1,907 | 583 |
gh_patches_debug_2885 | rasdani/github-patches | git_diff | paperless-ngx__paperless-ngx-1666 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Wrong version number string within docker 1.9.1
### Description
After a successful pull and deploy via docker with https://ghcr.io/paperless-ngx/paperless-ngx:1.9.1 the version string on the paperless-ngx Web-UI is still 1.9.0.

### Steps to reproduce
1. Pull the new version via docker, docker-compose or portainer via https://ghcr.io/paperless-ngx/paperless-ngx and tag 1.9.1
2. Access the Web-UI.
3. Login
4. Find the version string on the lower left side.
### Webserver logs
_No response_
### Paperless-ngx version
1.9.1
### Host OS
Alpine Linux x86-64
### Installation method
Docker - official image
### Browser
Chrome
### Configuration changes
_No response_
### Other
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/paperless/version.py`
Content:
```
1 from typing import Final
2 from typing import Tuple
3
4 __version__: Final[Tuple[int, int, int]] = (1, 9, 0)
5 # Version string like X.Y.Z
6 __full_version_str__: Final[str] = ".".join(map(str, __version__))
7 # Version string like X.Y
8 __major_minor_version_str__: Final[str] = ".".join(map(str, __version__[:-1]))
9
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/paperless/version.py b/src/paperless/version.py
--- a/src/paperless/version.py
+++ b/src/paperless/version.py
@@ -1,7 +1,7 @@
from typing import Final
from typing import Tuple
-__version__: Final[Tuple[int, int, int]] = (1, 9, 0)
+__version__: Final[Tuple[int, int, int]] = (1, 9, 2)
# Version string like X.Y.Z
__full_version_str__: Final[str] = ".".join(map(str, __version__))
# Version string like X.Y
| {"golden_diff": "diff --git a/src/paperless/version.py b/src/paperless/version.py\n--- a/src/paperless/version.py\n+++ b/src/paperless/version.py\n@@ -1,7 +1,7 @@\n from typing import Final\n from typing import Tuple\n \n-__version__: Final[Tuple[int, int, int]] = (1, 9, 0)\n+__version__: Final[Tuple[int, int, int]] = (1, 9, 2)\n # Version string like X.Y.Z\n __full_version_str__: Final[str] = \".\".join(map(str, __version__))\n # Version string like X.Y\n", "issue": "[BUG] Wrong version number string within docker 1.9.1\n### Description\n\nAfter a successful pull and deploy via docker with https://ghcr.io/paperless-ngx/paperless-ngx:1.9.1 the version string on the paperless-ngx Web-UI is still 1.9.0.\r\n\r\n\r\n\n\n### Steps to reproduce\n\n1. Pull the new version via docker, docker-compose or portainer via https://ghcr.io/paperless-ngx/paperless-ngx and tag 1.9.1\r\n2. Access the Web-UI.\r\n3. Login\r\n4. Find the version string on the lower left side.\n\n### Webserver logs\n\n_No response_\n\n### Paperless-ngx version\n\n1.9.1\n\n### Host OS\n\nAlpine Linux x86-64\n\n### Installation method\n\nDocker - official image\n\n### Browser\n\nChrome\n\n### Configuration changes\n\n_No response_\n\n### Other\n\n_No response_\n", "before_files": [{"content": "from typing import Final\nfrom typing import Tuple\n\n__version__: Final[Tuple[int, int, int]] = (1, 9, 0)\n# Version string like X.Y.Z\n__full_version_str__: Final[str] = \".\".join(map(str, __version__))\n# Version string like X.Y\n__major_minor_version_str__: Final[str] = \".\".join(map(str, __version__[:-1]))\n", "path": "src/paperless/version.py"}], "after_files": [{"content": "from typing import Final\nfrom typing import Tuple\n\n__version__: Final[Tuple[int, int, int]] = (1, 9, 2)\n# Version string like X.Y.Z\n__full_version_str__: Final[str] = \".\".join(map(str, __version__))\n# Version string like X.Y\n__major_minor_version_str__: Final[str] = \".\".join(map(str, __version__[:-1]))\n", "path": "src/paperless/version.py"}]} | 647 | 135 |
gh_patches_debug_17439 | rasdani/github-patches | git_diff | Parsl__parsl-929 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Properly handle func names for bash apps
At the moment bash apps are always `remote_side_bash_executor` in the monitoring DB because that is what is actually running. We should pass through the name of the app's function instead.
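A standalone illustration of the Python mechanism involved (plain `functools`, not Parsl code): copying the wrapped app function's metadata onto the executor wrapper is what lets monitoring report the app's real name.

```python
from functools import update_wrapper

def remote_side_bash_executor(func, *args, **kwargs):
    # Stand-in for the real executor: it just runs the wrapped app function.
    return func(*args, **kwargs)

def my_bash_app():
    return "echo hello"

print(remote_side_bash_executor.__name__)   # -> remote_side_bash_executor

# update_wrapper copies my_bash_app's __name__, __doc__, etc. onto the wrapper,
# so anything that inspects the running callable sees the app's name.
wrapped = update_wrapper(remote_side_bash_executor, my_bash_app)
print(wrapped.__name__)                      # -> my_bash_app
```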
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsl/app/bash.py`
Content:
```
1 import logging
2
3 from inspect import signature, Parameter
4 from parsl.app.errors import wrap_error
5 from parsl.app.futures import DataFuture
6 from parsl.app.app import AppBase
7 from parsl.dataflow.dflow import DataFlowKernelLoader
8
9 logger = logging.getLogger(__name__)
10
11
12 def remote_side_bash_executor(func, *args, **kwargs):
13 """Execute the bash app type function and return the command line string.
14
15 This string is reformatted with the *args, and **kwargs
16 from call time.
17 """
18 import os
19 import time
20 import subprocess
21 import logging
22 import parsl.app.errors as pe
23
24 logging.basicConfig(filename='/tmp/bashexec.{0}.log'.format(time.time()), level=logging.DEBUG)
25
26 # start_t = time.time()
27
28 func_name = func.__name__
29
30 partial_cmdline = None
31
32 # Try to run the func to compose the commandline
33 try:
34 # Execute the func to get the commandline
35 partial_cmdline = func(*args, **kwargs)
36 # Reformat the commandline with current args and kwargs
37 executable = partial_cmdline.format(*args, **kwargs)
38
39 except AttributeError as e:
40 if partial_cmdline is not None:
41 raise pe.AppBadFormatting("App formatting failed for app '{}' with AttributeError: {}".format(func_name, e))
42 else:
43 raise pe.BashAppNoReturn("Bash app '{}' did not return a value, or returned none - with this exception: {}".format(func_name, e), None)
44
45 except IndexError as e:
46 raise pe.AppBadFormatting("App formatting failed for app '{}' with IndexError: {}".format(func_name, e))
47 except Exception as e:
48 logging.error("Caught exception during formatting of app '{}': {}".format(func_name, e))
49 raise e
50
51 logging.debug("Executable: %s", executable)
52
53 # Updating stdout, stderr if values passed at call time.
54
55 def open_std_fd(fdname):
56 # fdname is 'stdout' or 'stderr'
57 stdfspec = kwargs.get(fdname) # spec is str name or tuple (name, mode)
58 if stdfspec is None:
59 return None
60 elif isinstance(stdfspec, str):
61 fname = stdfspec
62 mode = 'a+'
63 elif isinstance(stdfspec, tuple):
64 if len(stdfspec) != 2:
65 raise pe.BadStdStreamFile("std descriptor %s has incorrect tuple length %s" % (fdname, len(stdfspec)), TypeError('Bad Tuple Length'))
66 fname, mode = stdfspec
67 else:
68 raise pe.BadStdStreamFile("std descriptor %s has unexpected type %s" % (fdname, str(type(stdfspec))), TypeError('Bad Tuple Type'))
69 try:
70 fd = open(fname, mode)
71 except Exception as e:
72 raise pe.BadStdStreamFile(fname, e)
73 return fd
74
75 std_out = open_std_fd('stdout')
76 std_err = open_std_fd('stderr')
77 timeout = kwargs.get('walltime')
78
79 returncode = None
80 try:
81 proc = subprocess.Popen(executable, stdout=std_out, stderr=std_err, shell=True, executable='/bin/bash')
82 proc.wait(timeout=timeout)
83 returncode = proc.returncode
84
85 except subprocess.TimeoutExpired:
86 # print("Timeout")
87 raise pe.AppTimeout("[{}] App exceeded walltime: {}".format(func_name, timeout))
88
89 except Exception as e:
90 # print("Caught exception: ", e)
91 raise pe.AppException("[{}] App caught exception: {}".format(func_name, proc.returncode), e)
92
93 if returncode != 0:
94 raise pe.AppFailure("[{}] App failed with exit code: {}".format(func_name, proc.returncode), proc.returncode)
95
96 # TODO : Add support for globs here
97
98 missing = []
99 for outputfile in kwargs.get('outputs', []):
100 fpath = outputfile
101 if type(outputfile) != str:
102 fpath = outputfile.filepath
103
104 if not os.path.exists(fpath):
105 missing.extend([outputfile])
106
107 if missing:
108 raise pe.MissingOutputs("[{}] Missing outputs".format(func_name), missing)
109
110 # exec_duration = time.time() - start_t
111 return returncode
112
113
114 class BashApp(AppBase):
115
116 def __init__(self, func, data_flow_kernel=None, walltime=60, cache=False, executors='all'):
117 super().__init__(func, data_flow_kernel=data_flow_kernel, walltime=60, executors=executors, cache=cache)
118 self.kwargs = {}
119
120 # We duplicate the extraction of parameter defaults
121 # to self.kwargs to ensure availability at point of
122 # command string format. Refer: #349
123 sig = signature(func)
124
125 for s in sig.parameters:
126 if sig.parameters[s].default != Parameter.empty:
127 self.kwargs[s] = sig.parameters[s].default
128
129 def __call__(self, *args, **kwargs):
130 """Handle the call to a Bash app.
131
132 Args:
133 - Arbitrary
134
135 Kwargs:
136 - Arbitrary
137
138 Returns:
139 If outputs=[...] was a kwarg then:
140 App_fut, [Data_Futures...]
141 else:
142 App_fut
143
144 """
145 # Update kwargs in the app definition with ones passed in at calltime
146 self.kwargs.update(kwargs)
147
148 if self.data_flow_kernel is None:
149 dfk = DataFlowKernelLoader.dfk()
150 else:
151 dfk = self.data_flow_kernel
152
153 app_fut = dfk.submit(wrap_error(remote_side_bash_executor), self.func, *args,
154 executors=self.executors,
155 fn_hash=self.func_hash,
156 cache=self.cache,
157 **self.kwargs)
158
159 out_futs = [DataFuture(app_fut, o, tid=app_fut.tid)
160 for o in kwargs.get('outputs', [])]
161 app_fut._outputs = out_futs
162
163 return app_fut
164
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/parsl/app/bash.py b/parsl/app/bash.py
--- a/parsl/app/bash.py
+++ b/parsl/app/bash.py
@@ -1,6 +1,7 @@
import logging
-
+from functools import update_wrapper
from inspect import signature, Parameter
+
from parsl.app.errors import wrap_error
from parsl.app.futures import DataFuture
from parsl.app.app import AppBase
@@ -150,7 +151,8 @@
else:
dfk = self.data_flow_kernel
- app_fut = dfk.submit(wrap_error(remote_side_bash_executor), self.func, *args,
+ app_fut = dfk.submit(wrap_error(update_wrapper(remote_side_bash_executor, self.func)),
+ self.func, *args,
executors=self.executors,
fn_hash=self.func_hash,
cache=self.cache,
| {"golden_diff": "diff --git a/parsl/app/bash.py b/parsl/app/bash.py\n--- a/parsl/app/bash.py\n+++ b/parsl/app/bash.py\n@@ -1,6 +1,7 @@\n import logging\n-\n+from functools import update_wrapper\n from inspect import signature, Parameter\n+\n from parsl.app.errors import wrap_error\n from parsl.app.futures import DataFuture\n from parsl.app.app import AppBase\n@@ -150,7 +151,8 @@\n else:\n dfk = self.data_flow_kernel\n \n- app_fut = dfk.submit(wrap_error(remote_side_bash_executor), self.func, *args,\n+ app_fut = dfk.submit(wrap_error(update_wrapper(remote_side_bash_executor, self.func)),\n+ self.func, *args,\n executors=self.executors,\n fn_hash=self.func_hash,\n cache=self.cache,\n", "issue": "Properly handle func names for bash apps\nAt the moment bash apps are always `remote_side_bash_executor` in the monitoring DB because that is what is actually running. We should pass through the name of the app's function instead.\n", "before_files": [{"content": "import logging\n\nfrom inspect import signature, Parameter\nfrom parsl.app.errors import wrap_error\nfrom parsl.app.futures import DataFuture\nfrom parsl.app.app import AppBase\nfrom parsl.dataflow.dflow import DataFlowKernelLoader\n\nlogger = logging.getLogger(__name__)\n\n\ndef remote_side_bash_executor(func, *args, **kwargs):\n \"\"\"Execute the bash app type function and return the command line string.\n\n This string is reformatted with the *args, and **kwargs\n from call time.\n \"\"\"\n import os\n import time\n import subprocess\n import logging\n import parsl.app.errors as pe\n\n logging.basicConfig(filename='/tmp/bashexec.{0}.log'.format(time.time()), level=logging.DEBUG)\n\n # start_t = time.time()\n\n func_name = func.__name__\n\n partial_cmdline = None\n\n # Try to run the func to compose the commandline\n try:\n # Execute the func to get the commandline\n partial_cmdline = func(*args, **kwargs)\n # Reformat the commandline with current args and kwargs\n executable = partial_cmdline.format(*args, **kwargs)\n\n except AttributeError as e:\n if partial_cmdline is not None:\n raise pe.AppBadFormatting(\"App formatting failed for app '{}' with AttributeError: {}\".format(func_name, e))\n else:\n raise pe.BashAppNoReturn(\"Bash app '{}' did not return a value, or returned none - with this exception: {}\".format(func_name, e), None)\n\n except IndexError as e:\n raise pe.AppBadFormatting(\"App formatting failed for app '{}' with IndexError: {}\".format(func_name, e))\n except Exception as e:\n logging.error(\"Caught exception during formatting of app '{}': {}\".format(func_name, e))\n raise e\n\n logging.debug(\"Executable: %s\", executable)\n\n # Updating stdout, stderr if values passed at call time.\n\n def open_std_fd(fdname):\n # fdname is 'stdout' or 'stderr'\n stdfspec = kwargs.get(fdname) # spec is str name or tuple (name, mode)\n if stdfspec is None:\n return None\n elif isinstance(stdfspec, str):\n fname = stdfspec\n mode = 'a+'\n elif isinstance(stdfspec, tuple):\n if len(stdfspec) != 2:\n raise pe.BadStdStreamFile(\"std descriptor %s has incorrect tuple length %s\" % (fdname, len(stdfspec)), TypeError('Bad Tuple Length'))\n fname, mode = stdfspec\n else:\n raise pe.BadStdStreamFile(\"std descriptor %s has unexpected type %s\" % (fdname, str(type(stdfspec))), TypeError('Bad Tuple Type'))\n try:\n fd = open(fname, mode)\n except Exception as e:\n raise pe.BadStdStreamFile(fname, e)\n return fd\n\n std_out = open_std_fd('stdout')\n std_err = open_std_fd('stderr')\n timeout = kwargs.get('walltime')\n\n returncode = None\n 
try:\n proc = subprocess.Popen(executable, stdout=std_out, stderr=std_err, shell=True, executable='/bin/bash')\n proc.wait(timeout=timeout)\n returncode = proc.returncode\n\n except subprocess.TimeoutExpired:\n # print(\"Timeout\")\n raise pe.AppTimeout(\"[{}] App exceeded walltime: {}\".format(func_name, timeout))\n\n except Exception as e:\n # print(\"Caught exception: \", e)\n raise pe.AppException(\"[{}] App caught exception: {}\".format(func_name, proc.returncode), e)\n\n if returncode != 0:\n raise pe.AppFailure(\"[{}] App failed with exit code: {}\".format(func_name, proc.returncode), proc.returncode)\n\n # TODO : Add support for globs here\n\n missing = []\n for outputfile in kwargs.get('outputs', []):\n fpath = outputfile\n if type(outputfile) != str:\n fpath = outputfile.filepath\n\n if not os.path.exists(fpath):\n missing.extend([outputfile])\n\n if missing:\n raise pe.MissingOutputs(\"[{}] Missing outputs\".format(func_name), missing)\n\n # exec_duration = time.time() - start_t\n return returncode\n\n\nclass BashApp(AppBase):\n\n def __init__(self, func, data_flow_kernel=None, walltime=60, cache=False, executors='all'):\n super().__init__(func, data_flow_kernel=data_flow_kernel, walltime=60, executors=executors, cache=cache)\n self.kwargs = {}\n\n # We duplicate the extraction of parameter defaults\n # to self.kwargs to ensure availability at point of\n # command string format. Refer: #349\n sig = signature(func)\n\n for s in sig.parameters:\n if sig.parameters[s].default != Parameter.empty:\n self.kwargs[s] = sig.parameters[s].default\n\n def __call__(self, *args, **kwargs):\n \"\"\"Handle the call to a Bash app.\n\n Args:\n - Arbitrary\n\n Kwargs:\n - Arbitrary\n\n Returns:\n If outputs=[...] was a kwarg then:\n App_fut, [Data_Futures...]\n else:\n App_fut\n\n \"\"\"\n # Update kwargs in the app definition with ones passed in at calltime\n self.kwargs.update(kwargs)\n\n if self.data_flow_kernel is None:\n dfk = DataFlowKernelLoader.dfk()\n else:\n dfk = self.data_flow_kernel\n\n app_fut = dfk.submit(wrap_error(remote_side_bash_executor), self.func, *args,\n executors=self.executors,\n fn_hash=self.func_hash,\n cache=self.cache,\n **self.kwargs)\n\n out_futs = [DataFuture(app_fut, o, tid=app_fut.tid)\n for o in kwargs.get('outputs', [])]\n app_fut._outputs = out_futs\n\n return app_fut\n", "path": "parsl/app/bash.py"}], "after_files": [{"content": "import logging\nfrom functools import update_wrapper\nfrom inspect import signature, Parameter\n\nfrom parsl.app.errors import wrap_error\nfrom parsl.app.futures import DataFuture\nfrom parsl.app.app import AppBase\nfrom parsl.dataflow.dflow import DataFlowKernelLoader\n\nlogger = logging.getLogger(__name__)\n\n\ndef remote_side_bash_executor(func, *args, **kwargs):\n \"\"\"Execute the bash app type function and return the command line string.\n\n This string is reformatted with the *args, and **kwargs\n from call time.\n \"\"\"\n import os\n import time\n import subprocess\n import logging\n import parsl.app.errors as pe\n\n logging.basicConfig(filename='/tmp/bashexec.{0}.log'.format(time.time()), level=logging.DEBUG)\n\n # start_t = time.time()\n\n func_name = func.__name__\n\n partial_cmdline = None\n\n # Try to run the func to compose the commandline\n try:\n # Execute the func to get the commandline\n partial_cmdline = func(*args, **kwargs)\n # Reformat the commandline with current args and kwargs\n executable = partial_cmdline.format(*args, **kwargs)\n\n except AttributeError as e:\n if partial_cmdline is not None:\n raise 
pe.AppBadFormatting(\"App formatting failed for app '{}' with AttributeError: {}\".format(func_name, e))\n else:\n raise pe.BashAppNoReturn(\"Bash app '{}' did not return a value, or returned none - with this exception: {}\".format(func_name, e), None)\n\n except IndexError as e:\n raise pe.AppBadFormatting(\"App formatting failed for app '{}' with IndexError: {}\".format(func_name, e))\n except Exception as e:\n logging.error(\"Caught exception during formatting of app '{}': {}\".format(func_name, e))\n raise e\n\n logging.debug(\"Executable: %s\", executable)\n\n # Updating stdout, stderr if values passed at call time.\n\n def open_std_fd(fdname):\n # fdname is 'stdout' or 'stderr'\n stdfspec = kwargs.get(fdname) # spec is str name or tuple (name, mode)\n if stdfspec is None:\n return None\n elif isinstance(stdfspec, str):\n fname = stdfspec\n mode = 'a+'\n elif isinstance(stdfspec, tuple):\n if len(stdfspec) != 2:\n raise pe.BadStdStreamFile(\"std descriptor %s has incorrect tuple length %s\" % (fdname, len(stdfspec)), TypeError('Bad Tuple Length'))\n fname, mode = stdfspec\n else:\n raise pe.BadStdStreamFile(\"std descriptor %s has unexpected type %s\" % (fdname, str(type(stdfspec))), TypeError('Bad Tuple Type'))\n try:\n fd = open(fname, mode)\n except Exception as e:\n raise pe.BadStdStreamFile(fname, e)\n return fd\n\n std_out = open_std_fd('stdout')\n std_err = open_std_fd('stderr')\n timeout = kwargs.get('walltime')\n\n returncode = None\n try:\n proc = subprocess.Popen(executable, stdout=std_out, stderr=std_err, shell=True, executable='/bin/bash')\n proc.wait(timeout=timeout)\n returncode = proc.returncode\n\n except subprocess.TimeoutExpired:\n # print(\"Timeout\")\n raise pe.AppTimeout(\"[{}] App exceeded walltime: {}\".format(func_name, timeout))\n\n except Exception as e:\n # print(\"Caught exception: \", e)\n raise pe.AppException(\"[{}] App caught exception: {}\".format(func_name, proc.returncode), e)\n\n if returncode != 0:\n raise pe.AppFailure(\"[{}] App failed with exit code: {}\".format(func_name, proc.returncode), proc.returncode)\n\n # TODO : Add support for globs here\n\n missing = []\n for outputfile in kwargs.get('outputs', []):\n fpath = outputfile\n if type(outputfile) != str:\n fpath = outputfile.filepath\n\n if not os.path.exists(fpath):\n missing.extend([outputfile])\n\n if missing:\n raise pe.MissingOutputs(\"[{}] Missing outputs\".format(func_name), missing)\n\n # exec_duration = time.time() - start_t\n return returncode\n\n\nclass BashApp(AppBase):\n\n def __init__(self, func, data_flow_kernel=None, walltime=60, cache=False, executors='all'):\n super().__init__(func, data_flow_kernel=data_flow_kernel, walltime=60, executors=executors, cache=cache)\n self.kwargs = {}\n\n # We duplicate the extraction of parameter defaults\n # to self.kwargs to ensure availability at point of\n # command string format. Refer: #349\n sig = signature(func)\n\n for s in sig.parameters:\n if sig.parameters[s].default != Parameter.empty:\n self.kwargs[s] = sig.parameters[s].default\n\n def __call__(self, *args, **kwargs):\n \"\"\"Handle the call to a Bash app.\n\n Args:\n - Arbitrary\n\n Kwargs:\n - Arbitrary\n\n Returns:\n If outputs=[...] 
was a kwarg then:\n App_fut, [Data_Futures...]\n else:\n App_fut\n\n \"\"\"\n # Update kwargs in the app definition with ones passed in at calltime\n self.kwargs.update(kwargs)\n\n if self.data_flow_kernel is None:\n dfk = DataFlowKernelLoader.dfk()\n else:\n dfk = self.data_flow_kernel\n\n app_fut = dfk.submit(wrap_error(update_wrapper(remote_side_bash_executor, self.func)),\n self.func, *args,\n executors=self.executors,\n fn_hash=self.func_hash,\n cache=self.cache,\n **self.kwargs)\n\n out_futs = [DataFuture(app_fut, o, tid=app_fut.tid)\n for o in kwargs.get('outputs', [])]\n app_fut._outputs = out_futs\n\n return app_fut\n", "path": "parsl/app/bash.py"}]} | 1,997 | 193 |
gh_patches_debug_34542 | rasdani/github-patches | git_diff | numpy__numpy-13976 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
MAINT: remove numpy/tools/test-installed-numpy.py ?
Looking at Matti's modifications in #13482, I don't see a good reason to keep `test-installed-numpy.py` if we are going to continue using `runtests.py`. Both have separate parsers to accomplish similar things, but `runtests.py` is more developed / actively used while `test-installed-numpy.py` still has discussion of nose / python 2.4, and Matti's proposed change there adds `-n` option which does something different in `runtests.py`.
`runtests.py -n` will test installed NumPy instead of rebuilding, so seems like redundancy / maintenance burden we don't need moving forward
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tools/test-installed-numpy.py`
Content:
```
1 #!/usr/bin/env python
2 from __future__ import division, absolute_import, print_function
3
4 # A simple script to test the installed version of numpy by calling
5 # 'numpy.test()'. Key features:
6 # -- convenient command-line syntax
7 # -- sets exit status appropriately, useful for automated test environments
8
9 # It would be better to set this up as a module in the numpy namespace, so
10 # that it could be run as:
11 # python -m numpy.run_tests <args>
12 # But, python2.4's -m switch only works with top-level modules, not modules
13 # that are inside packages. So, once we drop 2.4 support, maybe...
14
15 import sys, os
16 # In case we are run from the source directory, we don't want to import numpy
17 # from there, we want to import the installed version:
18 sys.path.pop(0)
19
20 from optparse import OptionParser
21 parser = OptionParser("usage: %prog [options] -- [nosetests options]")
22 parser.add_option("-v", "--verbose",
23 action="count", dest="verbose", default=1,
24 help="increase verbosity")
25 parser.add_option("--doctests",
26 action="store_true", dest="doctests", default=False,
27 help="Run doctests in module")
28 parser.add_option("--coverage",
29 action="store_true", dest="coverage", default=False,
30 help="report coverage of NumPy code (requires 'pytest-cov' module")
31 parser.add_option("-m", "--mode",
32 action="store", dest="mode", default="fast",
33 help="'fast', 'full', or something that could be "
34 "passed to pytest [default: %default]")
35 parser.add_option("-n", "--durations",
36 dest="durations", default=-1,
37 help="show time to run slowest N tests [default: -1]")
38 (options, args) = parser.parse_args()
39
40 import numpy
41
42 # Check that NPY_RELAXED_STRIDES_CHECKING is active when set.
43 # The same flags check is also used in the tests to switch behavior.
44 if (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "1") != "0"):
45 if not numpy.ones((10, 1), order='C').flags.f_contiguous:
46 print('NPY_RELAXED_STRIDES_CHECKING set, but not active.')
47 sys.exit(1)
48 elif numpy.ones((10, 1), order='C').flags.f_contiguous:
49 print('NPY_RELAXED_STRIDES_CHECKING not set, but active.')
50 sys.exit(1)
51
52 if options.coverage:
53 # Produce code coverage XML report for codecov.io
54 args += ["--cov-report=xml"]
55
56 result = numpy.test(options.mode,
57 verbose=options.verbose,
58 extra_argv=args,
59 doctests=options.doctests,
60 durations=int(options.durations),
61 coverage=options.coverage)
62
63 if result:
64 sys.exit(0)
65 else:
66 sys.exit(1)
67
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/tools/test-installed-numpy.py b/tools/test-installed-numpy.py
deleted file mode 100755
--- a/tools/test-installed-numpy.py
+++ /dev/null
@@ -1,66 +0,0 @@
-#!/usr/bin/env python
-from __future__ import division, absolute_import, print_function
-
-# A simple script to test the installed version of numpy by calling
-# 'numpy.test()'. Key features:
-# -- convenient command-line syntax
-# -- sets exit status appropriately, useful for automated test environments
-
-# It would be better to set this up as a module in the numpy namespace, so
-# that it could be run as:
-# python -m numpy.run_tests <args>
-# But, python2.4's -m switch only works with top-level modules, not modules
-# that are inside packages. So, once we drop 2.4 support, maybe...
-
-import sys, os
-# In case we are run from the source directory, we don't want to import numpy
-# from there, we want to import the installed version:
-sys.path.pop(0)
-
-from optparse import OptionParser
-parser = OptionParser("usage: %prog [options] -- [nosetests options]")
-parser.add_option("-v", "--verbose",
- action="count", dest="verbose", default=1,
- help="increase verbosity")
-parser.add_option("--doctests",
- action="store_true", dest="doctests", default=False,
- help="Run doctests in module")
-parser.add_option("--coverage",
- action="store_true", dest="coverage", default=False,
- help="report coverage of NumPy code (requires 'pytest-cov' module")
-parser.add_option("-m", "--mode",
- action="store", dest="mode", default="fast",
- help="'fast', 'full', or something that could be "
- "passed to pytest [default: %default]")
-parser.add_option("-n", "--durations",
- dest="durations", default=-1,
- help="show time to run slowest N tests [default: -1]")
-(options, args) = parser.parse_args()
-
-import numpy
-
-# Check that NPY_RELAXED_STRIDES_CHECKING is active when set.
-# The same flags check is also used in the tests to switch behavior.
-if (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "1") != "0"):
- if not numpy.ones((10, 1), order='C').flags.f_contiguous:
- print('NPY_RELAXED_STRIDES_CHECKING set, but not active.')
- sys.exit(1)
-elif numpy.ones((10, 1), order='C').flags.f_contiguous:
- print('NPY_RELAXED_STRIDES_CHECKING not set, but active.')
- sys.exit(1)
-
-if options.coverage:
- # Produce code coverage XML report for codecov.io
- args += ["--cov-report=xml"]
-
-result = numpy.test(options.mode,
- verbose=options.verbose,
- extra_argv=args,
- doctests=options.doctests,
- durations=int(options.durations),
- coverage=options.coverage)
-
-if result:
- sys.exit(0)
-else:
- sys.exit(1)
| {"golden_diff": "diff --git a/tools/test-installed-numpy.py b/tools/test-installed-numpy.py\ndeleted file mode 100755\n--- a/tools/test-installed-numpy.py\n+++ /dev/null\n@@ -1,66 +0,0 @@\n-#!/usr/bin/env python\n-from __future__ import division, absolute_import, print_function\n-\n-# A simple script to test the installed version of numpy by calling\n-# 'numpy.test()'. Key features:\n-# -- convenient command-line syntax\n-# -- sets exit status appropriately, useful for automated test environments\n-\n-# It would be better to set this up as a module in the numpy namespace, so\n-# that it could be run as:\n-# python -m numpy.run_tests <args>\n-# But, python2.4's -m switch only works with top-level modules, not modules\n-# that are inside packages. So, once we drop 2.4 support, maybe...\n-\n-import sys, os\n-# In case we are run from the source directory, we don't want to import numpy\n-# from there, we want to import the installed version:\n-sys.path.pop(0)\n-\n-from optparse import OptionParser\n-parser = OptionParser(\"usage: %prog [options] -- [nosetests options]\")\n-parser.add_option(\"-v\", \"--verbose\",\n- action=\"count\", dest=\"verbose\", default=1,\n- help=\"increase verbosity\")\n-parser.add_option(\"--doctests\",\n- action=\"store_true\", dest=\"doctests\", default=False,\n- help=\"Run doctests in module\")\n-parser.add_option(\"--coverage\",\n- action=\"store_true\", dest=\"coverage\", default=False,\n- help=\"report coverage of NumPy code (requires 'pytest-cov' module\")\n-parser.add_option(\"-m\", \"--mode\",\n- action=\"store\", dest=\"mode\", default=\"fast\",\n- help=\"'fast', 'full', or something that could be \"\n- \"passed to pytest [default: %default]\")\n-parser.add_option(\"-n\", \"--durations\",\n- dest=\"durations\", default=-1,\n- help=\"show time to run slowest N tests [default: -1]\")\n-(options, args) = parser.parse_args()\n-\n-import numpy\n-\n-# Check that NPY_RELAXED_STRIDES_CHECKING is active when set.\n-# The same flags check is also used in the tests to switch behavior.\n-if (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', \"1\") != \"0\"):\n- if not numpy.ones((10, 1), order='C').flags.f_contiguous:\n- print('NPY_RELAXED_STRIDES_CHECKING set, but not active.')\n- sys.exit(1)\n-elif numpy.ones((10, 1), order='C').flags.f_contiguous:\n- print('NPY_RELAXED_STRIDES_CHECKING not set, but active.')\n- sys.exit(1)\n-\n-if options.coverage:\n- # Produce code coverage XML report for codecov.io\n- args += [\"--cov-report=xml\"]\n-\n-result = numpy.test(options.mode,\n- verbose=options.verbose,\n- extra_argv=args,\n- doctests=options.doctests,\n- durations=int(options.durations),\n- coverage=options.coverage)\n-\n-if result:\n- sys.exit(0)\n-else:\n- sys.exit(1)\n", "issue": "MAINT: remove numpy/tools/test-installed-numpy.py ?\nLooking at Matti's modifications in #13482, I don't see a good reason to keep `test-installed-numpy.py` if we are going to continue using `runtests.py`. 
Both have separate parsers to accomplish similar things, but `runtests.py` is more developed / actively used while `test-installed-numpy.py` still has discussion of nose / python 2.4, and Matti's proposed change there adds `-n` option which does something different in `runtests.py`.\r\n\r\n`runtests.py -n` will test installed NumPy instead of rebuilding, so seems like redundancy / maintenance burden we don't need moving forward\n", "before_files": [{"content": "#!/usr/bin/env python\nfrom __future__ import division, absolute_import, print_function\n\n# A simple script to test the installed version of numpy by calling\n# 'numpy.test()'. Key features:\n# -- convenient command-line syntax\n# -- sets exit status appropriately, useful for automated test environments\n\n# It would be better to set this up as a module in the numpy namespace, so\n# that it could be run as:\n# python -m numpy.run_tests <args>\n# But, python2.4's -m switch only works with top-level modules, not modules\n# that are inside packages. So, once we drop 2.4 support, maybe...\n\nimport sys, os\n# In case we are run from the source directory, we don't want to import numpy\n# from there, we want to import the installed version:\nsys.path.pop(0)\n\nfrom optparse import OptionParser\nparser = OptionParser(\"usage: %prog [options] -- [nosetests options]\")\nparser.add_option(\"-v\", \"--verbose\",\n action=\"count\", dest=\"verbose\", default=1,\n help=\"increase verbosity\")\nparser.add_option(\"--doctests\",\n action=\"store_true\", dest=\"doctests\", default=False,\n help=\"Run doctests in module\")\nparser.add_option(\"--coverage\",\n action=\"store_true\", dest=\"coverage\", default=False,\n help=\"report coverage of NumPy code (requires 'pytest-cov' module\")\nparser.add_option(\"-m\", \"--mode\",\n action=\"store\", dest=\"mode\", default=\"fast\",\n help=\"'fast', 'full', or something that could be \"\n \"passed to pytest [default: %default]\")\nparser.add_option(\"-n\", \"--durations\",\n dest=\"durations\", default=-1,\n help=\"show time to run slowest N tests [default: -1]\")\n(options, args) = parser.parse_args()\n\nimport numpy\n\n# Check that NPY_RELAXED_STRIDES_CHECKING is active when set.\n# The same flags check is also used in the tests to switch behavior.\nif (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', \"1\") != \"0\"):\n if not numpy.ones((10, 1), order='C').flags.f_contiguous:\n print('NPY_RELAXED_STRIDES_CHECKING set, but not active.')\n sys.exit(1)\nelif numpy.ones((10, 1), order='C').flags.f_contiguous:\n print('NPY_RELAXED_STRIDES_CHECKING not set, but active.')\n sys.exit(1)\n\nif options.coverage:\n # Produce code coverage XML report for codecov.io\n args += [\"--cov-report=xml\"]\n\nresult = numpy.test(options.mode,\n verbose=options.verbose,\n extra_argv=args,\n doctests=options.doctests,\n durations=int(options.durations),\n coverage=options.coverage)\n\nif result:\n sys.exit(0)\nelse:\n sys.exit(1)\n", "path": "tools/test-installed-numpy.py"}], "after_files": [{"content": null, "path": "tools/test-installed-numpy.py"}]} | 1,178 | 739 |
gh_patches_debug_35722 | rasdani/github-patches | git_diff | mdn__kuma-7782 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[regression] Multi-locale search is not searching in all locales
**Summary**
http://localhost.org:8000/api/v1/search?q=video&locale=fr&locale=de
actually searches on `locale == ['de']` and
http://localhost.org:8000/api/v1/search?q=video&locale=de&locale=fr actually searches on `locale == ['fr']`.
Originally from here:
https://github.com/mdn/yari/pull/1473#pullrequestreview-584750752
--- END ISSUE ---
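*Editor's note (not part of the original issue):* the behavior described above comes from how Django's `MultiValueDict` exposes repeated query parameters. A minimal standalone sketch (plain Django, no Kuma code) of why only the last `locale` survives:

```python
from django.utils.datastructures import MultiValueDict

# Simulate request.GET for ?q=video&locale=fr&locale=de
data = MultiValueDict({"locale": ["fr", "de"]})

# __getitem__ returns only the *last* value, as a plain string ...
assert data["locale"] == "de"
assert isinstance(data["locale"], str)

# ... so lowercasing "the locale" and assigning it back collapses the
# multi-valued entry to a single item, matching the reported behavior.
data["locale"] = data["locale"].lower()
assert data.getlist("locale") == ["de"]
```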
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kuma/api/v1/search/forms.py`
Content:
```
1 from django import forms
2 from django.conf import settings
3 from django.utils.datastructures import MultiValueDict
4
5
6 class SearchForm(forms.Form):
7 q = forms.CharField(max_length=settings.ES_Q_MAXLENGTH)
8 locale = forms.MultipleChoiceField(
9 required=False,
10 # The `settings.LANGUAGES` looks like this:
11 # [('en-US', 'English (US)'), ...]
12 # But all locales are stored in lowercase in Elasticsearch, so
13 # force everything to lowercase.
14 choices=[(code.lower(), name) for code, name in settings.LANGUAGES],
15 )
16
17 SORT_CHOICES = ("best", "relevance", "popularity")
18 sort = forms.ChoiceField(required=False, choices=[(x, x) for x in SORT_CHOICES])
19
20 ARCHIVE_CHOICES = ("exclude", "include", "only")
21 archive = forms.ChoiceField(
22 required=False, choices=[(x, x) for x in ARCHIVE_CHOICES]
23 )
24
25 size = forms.IntegerField(required=True, min_value=1, max_value=100)
26 page = forms.IntegerField(required=True, min_value=1, max_value=10)
27
28 def __init__(self, data, **kwargs):
29 initial = kwargs.get("initial", {})
30 # This makes it possible to supply `initial={some dict}` to the form
31 # and have its values become part of the default. Normally, in Django,
32 # the `SomeForm(data, initial={...})` is just used to prepopulate the
33 # HTML generated form widgets.
34 # See https://www.peterbe.com/plog/initial-values-bound-django-form-rendered
35 data = MultiValueDict({**{k: [v] for k, v in initial.items()}, **data})
36
37 # Because the `?locale=en-US&locale=Fr` might come in from the `request.GET`
38 # we can't edit it there. So instead, we mutate it here in the `data`
39 if "locale" in data:
40 # Always force it to lowercase, because that's what the ChoiceField
41 # is configured to. And the searches should always be in lower case.
42 # Remember, Django forms will allow this to be a single string
43 # (e.g. `?locale=Fr`) or a multi-value (`?locale=fr&locale=En-US`).
44 if isinstance(data["locale"], str):
45 data["locale"] = data["locale"].lower()
46 else:
47 data["locale"] = [x.lower() for x in data["locale"]]
48
49 # If, for keys we have an initial value for, it was passed an empty string,
50 # then swap it for the initial value.
51 # For example `?q=searching&page=` you probably meant to omit it
52 # but "allowing" it to be an empty string makes it convenient for the client.
53 for key, values in data.items():
54 if key in initial and values == "":
55 data[key] = initial[key]
56
57 super().__init__(data, **kwargs)
58
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kuma/api/v1/search/forms.py b/kuma/api/v1/search/forms.py
--- a/kuma/api/v1/search/forms.py
+++ b/kuma/api/v1/search/forms.py
@@ -3,15 +3,26 @@
from django.utils.datastructures import MultiValueDict
+class MultipleChoiceFieldICase(forms.MultipleChoiceField):
+ """Just like forms.MultipleChoiceField but everything's case insentive.
+
+ For simplicity, this field assumes that each choice is a tuple where
+ the first element is always a string.
+ """
+
+ def valid_value(self, value):
+ return str(value).lower() in [x[0].lower() for x in self.choices]
+
+
class SearchForm(forms.Form):
q = forms.CharField(max_length=settings.ES_Q_MAXLENGTH)
- locale = forms.MultipleChoiceField(
+ locale = MultipleChoiceFieldICase(
required=False,
# The `settings.LANGUAGES` looks like this:
# [('en-US', 'English (US)'), ...]
# But all locales are stored in lowercase in Elasticsearch, so
# force everything to lowercase.
- choices=[(code.lower(), name) for code, name in settings.LANGUAGES],
+ choices=[(code, name) for code, name in settings.LANGUAGES],
)
SORT_CHOICES = ("best", "relevance", "popularity")
@@ -34,18 +45,6 @@
# See https://www.peterbe.com/plog/initial-values-bound-django-form-rendered
data = MultiValueDict({**{k: [v] for k, v in initial.items()}, **data})
- # Because the `?locale=en-US&locale=Fr` might come in from the `request.GET`
- # we can't edit it there. So instead, we mutate it here in the `data`
- if "locale" in data:
- # Always force it to lowercase, because that's what the ChoiceField
- # is configured to. And the searches should always be in lower case.
- # Remember, Django forms will allow this to be a single string
- # (e.g. `?locale=Fr`) or a multi-value (`?locale=fr&locale=En-US`).
- if isinstance(data["locale"], str):
- data["locale"] = data["locale"].lower()
- else:
- data["locale"] = [x.lower() for x in data["locale"]]
-
# If, for keys we have an initial value for, it was passed an empty string,
# then swap it for the initial value.
# For example `?q=searching&page=` you probably meant to omit it
| {"golden_diff": "diff --git a/kuma/api/v1/search/forms.py b/kuma/api/v1/search/forms.py\n--- a/kuma/api/v1/search/forms.py\n+++ b/kuma/api/v1/search/forms.py\n@@ -3,15 +3,26 @@\n from django.utils.datastructures import MultiValueDict\n \n \n+class MultipleChoiceFieldICase(forms.MultipleChoiceField):\n+ \"\"\"Just like forms.MultipleChoiceField but everything's case insentive.\n+\n+ For simplicity, this field assumes that each choice is a tuple where\n+ the first element is always a string.\n+ \"\"\"\n+\n+ def valid_value(self, value):\n+ return str(value).lower() in [x[0].lower() for x in self.choices]\n+\n+\n class SearchForm(forms.Form):\n q = forms.CharField(max_length=settings.ES_Q_MAXLENGTH)\n- locale = forms.MultipleChoiceField(\n+ locale = MultipleChoiceFieldICase(\n required=False,\n # The `settings.LANGUAGES` looks like this:\n # [('en-US', 'English (US)'), ...]\n # But all locales are stored in lowercase in Elasticsearch, so\n # force everything to lowercase.\n- choices=[(code.lower(), name) for code, name in settings.LANGUAGES],\n+ choices=[(code, name) for code, name in settings.LANGUAGES],\n )\n \n SORT_CHOICES = (\"best\", \"relevance\", \"popularity\")\n@@ -34,18 +45,6 @@\n # See https://www.peterbe.com/plog/initial-values-bound-django-form-rendered\n data = MultiValueDict({**{k: [v] for k, v in initial.items()}, **data})\n \n- # Because the `?locale=en-US&locale=Fr` might come in from the `request.GET`\n- # we can't edit it there. So instead, we mutate it here in the `data`\n- if \"locale\" in data:\n- # Always force it to lowercase, because that's what the ChoiceField\n- # is configured to. And the searches should always be in lower case.\n- # Remember, Django forms will allow this to be a single string\n- # (e.g. `?locale=Fr`) or a multi-value (`?locale=fr&locale=En-US`).\n- if isinstance(data[\"locale\"], str):\n- data[\"locale\"] = data[\"locale\"].lower()\n- else:\n- data[\"locale\"] = [x.lower() for x in data[\"locale\"]]\n-\n # If, for keys we have an initial value for, it was passed an empty string,\n # then swap it for the initial value.\n # For example `?q=searching&page=` you probably meant to omit it\n", "issue": "[regression] Multi-locale search is not searching in all locales\n**Summary**\r\nhttp://localhost.org:8000/api/v1/search?q=video&locale=fr&locale=de\r\nactually searches on `locale == ['de']` and \r\nhttp://localhost.org:8000/api/v1/search?q=video&locale=de&locale=fr actually searches on `locale== ['fr']`.\r\n\r\nOriginally from here:\r\nhttps://github.com/mdn/yari/pull/1473#pullrequestreview-584750752\r\n\r\n\n", "before_files": [{"content": "from django import forms\nfrom django.conf import settings\nfrom django.utils.datastructures import MultiValueDict\n\n\nclass SearchForm(forms.Form):\n q = forms.CharField(max_length=settings.ES_Q_MAXLENGTH)\n locale = forms.MultipleChoiceField(\n required=False,\n # The `settings.LANGUAGES` looks like this:\n # [('en-US', 'English (US)'), ...]\n # But all locales are stored in lowercase in Elasticsearch, so\n # force everything to lowercase.\n choices=[(code.lower(), name) for code, name in settings.LANGUAGES],\n )\n\n SORT_CHOICES = (\"best\", \"relevance\", \"popularity\")\n sort = forms.ChoiceField(required=False, choices=[(x, x) for x in SORT_CHOICES])\n\n ARCHIVE_CHOICES = (\"exclude\", \"include\", \"only\")\n archive = forms.ChoiceField(\n required=False, choices=[(x, x) for x in ARCHIVE_CHOICES]\n )\n\n size = forms.IntegerField(required=True, min_value=1, max_value=100)\n page = 
forms.IntegerField(required=True, min_value=1, max_value=10)\n\n def __init__(self, data, **kwargs):\n initial = kwargs.get(\"initial\", {})\n # This makes it possible to supply `initial={some dict}` to the form\n # and have its values become part of the default. Normally, in Django,\n # the `SomeForm(data, initial={...})` is just used to prepopulate the\n # HTML generated form widgets.\n # See https://www.peterbe.com/plog/initial-values-bound-django-form-rendered\n data = MultiValueDict({**{k: [v] for k, v in initial.items()}, **data})\n\n # Because the `?locale=en-US&locale=Fr` might come in from the `request.GET`\n # we can't edit it there. So instead, we mutate it here in the `data`\n if \"locale\" in data:\n # Always force it to lowercase, because that's what the ChoiceField\n # is configured to. And the searches should always be in lower case.\n # Remember, Django forms will allow this to be a single string\n # (e.g. `?locale=Fr`) or a multi-value (`?locale=fr&locale=En-US`).\n if isinstance(data[\"locale\"], str):\n data[\"locale\"] = data[\"locale\"].lower()\n else:\n data[\"locale\"] = [x.lower() for x in data[\"locale\"]]\n\n # If, for keys we have an initial value for, it was passed an empty string,\n # then swap it for the initial value.\n # For example `?q=searching&page=` you probably meant to omit it\n # but \"allowing\" it to be an empty string makes it convenient for the client.\n for key, values in data.items():\n if key in initial and values == \"\":\n data[key] = initial[key]\n\n super().__init__(data, **kwargs)\n", "path": "kuma/api/v1/search/forms.py"}], "after_files": [{"content": "from django import forms\nfrom django.conf import settings\nfrom django.utils.datastructures import MultiValueDict\n\n\nclass MultipleChoiceFieldICase(forms.MultipleChoiceField):\n \"\"\"Just like forms.MultipleChoiceField but everything's case insentive.\n\n For simplicity, this field assumes that each choice is a tuple where\n the first element is always a string.\n \"\"\"\n\n def valid_value(self, value):\n return str(value).lower() in [x[0].lower() for x in self.choices]\n\n\nclass SearchForm(forms.Form):\n q = forms.CharField(max_length=settings.ES_Q_MAXLENGTH)\n locale = MultipleChoiceFieldICase(\n required=False,\n # The `settings.LANGUAGES` looks like this:\n # [('en-US', 'English (US)'), ...]\n # But all locales are stored in lowercase in Elasticsearch, so\n # force everything to lowercase.\n choices=[(code, name) for code, name in settings.LANGUAGES],\n )\n\n SORT_CHOICES = (\"best\", \"relevance\", \"popularity\")\n sort = forms.ChoiceField(required=False, choices=[(x, x) for x in SORT_CHOICES])\n\n ARCHIVE_CHOICES = (\"exclude\", \"include\", \"only\")\n archive = forms.ChoiceField(\n required=False, choices=[(x, x) for x in ARCHIVE_CHOICES]\n )\n\n size = forms.IntegerField(required=True, min_value=1, max_value=100)\n page = forms.IntegerField(required=True, min_value=1, max_value=10)\n\n def __init__(self, data, **kwargs):\n initial = kwargs.get(\"initial\", {})\n # This makes it possible to supply `initial={some dict}` to the form\n # and have its values become part of the default. 
Normally, in Django,\n # the `SomeForm(data, initial={...})` is just used to prepopulate the\n # HTML generated form widgets.\n # See https://www.peterbe.com/plog/initial-values-bound-django-form-rendered\n data = MultiValueDict({**{k: [v] for k, v in initial.items()}, **data})\n\n # If, for keys we have an initial value for, it was passed an empty string,\n # then swap it for the initial value.\n # For example `?q=searching&page=` you probably meant to omit it\n # but \"allowing\" it to be an empty string makes it convenient for the client.\n for key, values in data.items():\n if key in initial and values == \"\":\n data[key] = initial[key]\n\n super().__init__(data, **kwargs)\n", "path": "kuma/api/v1/search/forms.py"}]} | 1,151 | 603 |
gh_patches_debug_24202 | rasdani/github-patches | git_diff | helmholtz-analytics__heat-57 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix project description on PyPI
PyPI requires both a description and long_description to be set, with the former being used for listing a project among others and the latter for the detailed project page.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from distutils.core import setup
2
3 setup(
4 name='heat',
5 packages=['heat'],
6 version='0.0.1',
7 description='A framework for high performance data analytics and machine learning.',
8 author='Helmholtz Association',
9 author_email='[email protected]',
10 url='https://github.com/helmholtz-analytics/heat',
11 # download_url = 'https://github.com/helmholtz-analytics/heat/archive/0.1.tar.gz', # TBD
12 keywords=['data', 'analytics', 'tensors', 'distributed', 'gpu'],
13 classifiers=[],
14 install_requires=[
15 'numpy>=1.13.0',
16 # 'torch>=0.4.0'
17 ],
18 extras_require={
19 'hdf5': ['h5py>=2.8.0']
20 }
21 )
22
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -1,19 +1,28 @@
from distutils.core import setup
+with open("README.md", "r") as fh:
+ long_description = fh.read()
+
setup(
name='heat',
packages=['heat'],
version='0.0.1',
description='A framework for high performance data analytics and machine learning.',
+ long_description=long_description,
+ long_description_content_type='text/markdown',
author='Helmholtz Association',
author_email='[email protected]',
url='https://github.com/helmholtz-analytics/heat',
- # download_url = 'https://github.com/helmholtz-analytics/heat/archive/0.1.tar.gz', # TBD
keywords=['data', 'analytics', 'tensors', 'distributed', 'gpu'],
- classifiers=[],
+ classifiers=[
+ 'Development Status :: 2 - Pre-Alpha',
+ 'Programming Language :: Python :: 3.5',
+ 'License :: OSI Approved :: MIT License',
+ 'Intended Audience :: Science/Research',
+ 'Topic :: Scientific/Engineering'
+ ],
install_requires=[
'numpy>=1.13.0',
- # 'torch>=0.4.0'
],
extras_require={
'hdf5': ['h5py>=2.8.0']
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,19 +1,28 @@\n from distutils.core import setup\n \n+with open(\"README.md\", \"r\") as fh:\n+ long_description = fh.read()\n+\n setup(\n name='heat',\n packages=['heat'],\n version='0.0.1',\n description='A framework for high performance data analytics and machine learning.',\n+ long_description=long_description,\n+ long_description_content_type='text/markdown',\n author='Helmholtz Association',\n author_email='[email protected]',\n url='https://github.com/helmholtz-analytics/heat',\n- # download_url = 'https://github.com/helmholtz-analytics/heat/archive/0.1.tar.gz', # TBD\n keywords=['data', 'analytics', 'tensors', 'distributed', 'gpu'],\n- classifiers=[],\n+ classifiers=[\n+ 'Development Status :: 2 - Pre-Alpha',\n+ 'Programming Language :: Python :: 3.5',\n+ 'License :: OSI Approved :: MIT License',\n+ 'Intended Audience :: Science/Research',\n+ 'Topic :: Scientific/Engineering'\n+ ],\n install_requires=[\n 'numpy>=1.13.0',\n- # 'torch>=0.4.0'\n ],\n extras_require={\n 'hdf5': ['h5py>=2.8.0']\n", "issue": "Fix project description on PyPI\nPyPI requires both a description and long_description to be set, with the former being used for listing a project among others and the latter for the detailed project page.\n", "before_files": [{"content": "from distutils.core import setup\n\nsetup(\n name='heat',\n packages=['heat'],\n version='0.0.1',\n description='A framework for high performance data analytics and machine learning.',\n author='Helmholtz Association',\n author_email='[email protected]',\n url='https://github.com/helmholtz-analytics/heat',\n # download_url = 'https://github.com/helmholtz-analytics/heat/archive/0.1.tar.gz', # TBD\n keywords=['data', 'analytics', 'tensors', 'distributed', 'gpu'],\n classifiers=[],\n install_requires=[\n 'numpy>=1.13.0',\n # 'torch>=0.4.0'\n ],\n extras_require={\n 'hdf5': ['h5py>=2.8.0']\n }\n)\n", "path": "setup.py"}], "after_files": [{"content": "from distutils.core import setup\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\nsetup(\n name='heat',\n packages=['heat'],\n version='0.0.1',\n description='A framework for high performance data analytics and machine learning.',\n long_description=long_description,\n long_description_content_type='text/markdown',\n author='Helmholtz Association',\n author_email='[email protected]',\n url='https://github.com/helmholtz-analytics/heat',\n keywords=['data', 'analytics', 'tensors', 'distributed', 'gpu'],\n classifiers=[\n 'Development Status :: 2 - Pre-Alpha',\n 'Programming Language :: Python :: 3.5',\n 'License :: OSI Approved :: MIT License',\n 'Intended Audience :: Science/Research',\n 'Topic :: Scientific/Engineering'\n ],\n install_requires=[\n 'numpy>=1.13.0',\n ],\n extras_require={\n 'hdf5': ['h5py>=2.8.0']\n }\n)\n", "path": "setup.py"}]} | 518 | 323 |
gh_patches_debug_15862 | rasdani/github-patches | git_diff | optuna__optuna-2265 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`study.trials_dataframe` call fails for multi-objective studies
When using the new multi-objective study framework in v2.4.0 (`optuna.create_study(..., directions=[...])`), calling the `trials_dataframe` method raises an exception caused by an incorrect column spec type comparison in the _dataframe framework. It appears the column aggregation contains a bug when used with multiple objectives, which adds an extra `values` Tuple to the `values` Set during the initial mapping of `'value' -> 'values'` for a multi-objective study.
## Expected behavior
Accessing the `trials_dataframe` method of a study works the same for both single and multi-objective studies.
## Environment
- Optuna version: 2.3.0 -> 2.4.0
- Python version: 3.8
- OS: Ubuntu 20.04.1, Linux Kernel v5.4.0-56
## Error messages, stack traces, or logs
```
Traceback (most recent call last):
File "optuna_vis.py", line 12, in <module>
study_df = study.trials_dataframe()
File "/home/user.name/anaconda3/lib/python3.8/site-packages/optuna/study.py", line 499, in trials_dataframe
return _trials_dataframe(self, attrs, multi_index)
File "/home/user.name/anaconda3/lib/python3.8/site-packages/optuna/_dataframe.py", line 80, in _trials_dataframe
columns: List[Tuple[str, str]] = sum(
File "/home/user.name/anaconda3/lib/python3.8/site-packages/optuna/_dataframe.py", line 81, in <genexpr>
(sorted(column_agg[k]) for k in attrs if k in column_agg), []
TypeError: '<' not supported between instances of 'str' and 'int'
```
Inspecting the `attr` field in the `_trials_dataframe` method (line 23, _dataframe.py) reveals an extra `values` item:
```
{'number': {('number', '')}, 'values': {('values', 1), ('values', ''), ('values', 0), ('values', 3), ('values', 2)}, 'datetime_start': {('datetime_start', '')}, 'datetime_complete': {('datetime_complete', '')}, 'duration': {('duration', '')}, 'params': {(<param_tuples>), ...}, 'user_attrs': {('user_attrs', 'total_x'), ('user_attrs', 'total_y'), ('user_attrs', '_time_'), ('user_attrs', 'value_total')}, 'system_attrs': {('system_attrs', 'nsga2:generation')}, 'state': {('state', '')}}
```
For context, I have defined 4 objectives in this study. After the first trial, this `('values', '')` anomaly appears, which causes the above exception.
## Steps to reproduce
1. Create a MPO study with 4 objectives.
2. Start your study and after at least 2 trials, try to call the `study.trials_dataframe()` method.
3. The above exception will be raised.
--- END ISSUE ---
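*Editor's note (not part of the original issue):* the `TypeError` in the traceback is ordinary Python tuple-comparison behavior once the stray `('values', '')` entry is mixed in with the integer-indexed column specs. A tiny illustration, independent of Optuna:

```python
# column_agg["values"] ends up holding both int- and str-suffixed specs,
# and sorting tuples compares the second elements when the first ones match:
columns = [("values", 1), ("values", ""), ("values", 0)]

try:
    sorted(columns)
except TypeError as exc:
    print(exc)  # "'<' not supported between instances of ...", as in the traceback above
```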
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `optuna/_dataframe.py`
Content:
```
1 import collections
2 from typing import Any
3 from typing import DefaultDict
4 from typing import Dict
5 from typing import List
6 from typing import Set
7 from typing import Tuple
8
9 import optuna
10 from optuna._imports import try_import
11 from optuna.trial._state import TrialState
12
13
14 with try_import() as _imports:
15 # `Study.trials_dataframe` is disabled if pandas is not available.
16 import pandas as pd
17
18 # Required for type annotation in `Study.trials_dataframe`.
19 if not _imports.is_successful():
20 pd = object # type: ignore # NOQA
21
22
23 def _trials_dataframe(
24 study: "optuna.Study", attrs: Tuple[str, ...], multi_index: bool
25 ) -> "pd.DataFrame":
26 _imports.check()
27
28 trials = study.get_trials(deepcopy=False)
29
30 # If no trials, return an empty dataframe.
31 if not len(trials):
32 return pd.DataFrame()
33
34 if "value" in attrs and study._is_multi_objective():
35 attrs = tuple("values" if attr == "value" else attr for attr in attrs)
36
37 attrs_to_df_columns: Dict[str, str] = collections.OrderedDict()
38 for attr in attrs:
39 if attr.startswith("_"):
40 # Python conventional underscores are omitted in the dataframe.
41 df_column = attr[1:]
42 else:
43 df_column = attr
44 attrs_to_df_columns[attr] = df_column
45
46 # column_agg is an aggregator of column names.
47 # Keys of column agg are attributes of `FrozenTrial` such as 'trial_id' and 'params'.
48 # Values are dataframe columns such as ('trial_id', '') and ('params', 'n_layers').
49 column_agg: DefaultDict[str, Set] = collections.defaultdict(set)
50 non_nested_attr = ""
51
52 def _create_record_and_aggregate_column(
53 trial: "optuna.trial.FrozenTrial",
54 ) -> Dict[Tuple[str, str], Any]:
55
56 record = {}
57 for attr, df_column in attrs_to_df_columns.items():
58 value = getattr(trial, attr)
59 if isinstance(value, TrialState):
60 # Convert TrialState to str and remove the common prefix.
61 value = str(value).split(".")[-1]
62 if isinstance(value, dict):
63 for nested_attr, nested_value in value.items():
64 record[(df_column, nested_attr)] = nested_value
65 column_agg[attr].add((df_column, nested_attr))
66 elif isinstance(value, list):
67 # Expand trial.values.
68 for nested_attr, nested_value in enumerate(value):
69 record[(df_column, nested_attr)] = nested_value
70 column_agg[attr].add((df_column, nested_attr))
71 else:
72 record[(df_column, non_nested_attr)] = value
73 column_agg[attr].add((df_column, non_nested_attr))
74 return record
75
76 records = list([_create_record_and_aggregate_column(trial) for trial in trials])
77
78 columns: List[Tuple[str, str]] = sum(
79 (sorted(column_agg[k]) for k in attrs if k in column_agg), []
80 )
81
82 df = pd.DataFrame(records, columns=pd.MultiIndex.from_tuples(columns))
83
84 if not multi_index:
85 # Flatten the `MultiIndex` columns where names are concatenated with underscores.
86 # Filtering is required to omit non-nested columns avoiding unwanted trailing
87 # underscores.
88 df.columns = ["_".join(filter(lambda c: c, map(lambda c: str(c), col))) for col in columns]
89
90 return df
91
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/optuna/_dataframe.py b/optuna/_dataframe.py
--- a/optuna/_dataframe.py
+++ b/optuna/_dataframe.py
@@ -68,6 +68,12 @@
for nested_attr, nested_value in enumerate(value):
record[(df_column, nested_attr)] = nested_value
column_agg[attr].add((df_column, nested_attr))
+ elif attr == "values":
+ # trial.values should be None when the trial's state is FAIL or PRUNED.
+ assert value is None
+ for nested_attr in range(len(study.directions)):
+ record[(df_column, nested_attr)] = None
+ column_agg[attr].add((df_column, nested_attr))
else:
record[(df_column, non_nested_attr)] = value
column_agg[attr].add((df_column, non_nested_attr))
| {"golden_diff": "diff --git a/optuna/_dataframe.py b/optuna/_dataframe.py\n--- a/optuna/_dataframe.py\n+++ b/optuna/_dataframe.py\n@@ -68,6 +68,12 @@\n for nested_attr, nested_value in enumerate(value):\n record[(df_column, nested_attr)] = nested_value\n column_agg[attr].add((df_column, nested_attr))\n+ elif attr == \"values\":\n+ # trial.values should be None when the trial's state is FAIL or PRUNED.\n+ assert value is None\n+ for nested_attr in range(len(study.directions)):\n+ record[(df_column, nested_attr)] = None\n+ column_agg[attr].add((df_column, nested_attr))\n else:\n record[(df_column, non_nested_attr)] = value\n column_agg[attr].add((df_column, non_nested_attr))\n", "issue": "`study.trials_dataframe` call fails for multi-objective studies\nWhen using the new multi-objective study framework in v2.4.0 (`optuna.create_study(..., directions=[...])`), calling the `trials_dataframe` message raises an exception caused by some incorrect column spec type comparison in the _dataframe framework. It appears the column aggregation for contains a bug when used with multiple objectives, which adds an extra `values` Tuple to the `values` Set during the initial mapping of `'value' -> 'values'` for a multi-objective study.\r\n\r\n## Expected behavior\r\n\r\nAccessing the `trials_dataframe` method of a study works the same for both single and multi-objective studies.\r\n\r\n## Environment\r\n\r\n- Optuna version: 2.3.0 -> 2.4.0\r\n- Python version: 3.8\r\n- OS: Ubuntu 20.04.1, Linux Kernel v5.4.0-56\r\n\r\n## Error messages, stack traces, or logs\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"optuna_vis.py\", line 12, in <module>\r\n study_df = study.trials_dataframe()\r\n File \"/home/user.name/anaconda3/lib/python3.8/site-packages/optuna/study.py\", line 499, in trials_dataframe\r\n return _trials_dataframe(self, attrs, multi_index)\r\n File \"/home/user.name/anaconda3/lib/python3.8/site-packages/optuna/_dataframe.py\", line 80, in _trials_dataframe\r\n columns: List[Tuple[str, str]] = sum(\r\n File \"/home/user.name/anaconda3/lib/python3.8/site-packages/optuna/_dataframe.py\", line 81, in <genexpr>\r\n (sorted(column_agg[k]) for k in attrs if k in column_agg), []\r\nTypeError: '<' not supported between instances of 'str' and 'int'\r\n```\r\n\r\nBy inspecting the `attr` field in the `_trials_dataframe` method (line 23, _dataframe.py), reveals an extra `values` item:\r\n\r\n```\r\n{'number': {('number', '')}, 'values': {('values', 1), ('values', ''), ('values', 0), ('values', 3), ('values', 2)}, 'datetime_start': {('datetime_start', '')}, 'datetime_complete': {('datetime_complete', '')}, 'duration': {('duration', '')}, 'params': {(<param_tuples>), ...}, 'user_attrs': {('user_attrs', 'total_x'), ('user_attrs', 'total_y'), ('user_attrs', '_time_'), ('user_attrs', 'value_total')}, 'system_attrs': {('system_attrs', 'nsga2:generation')}, 'state': {('state', '')}}\r\n```\r\n\r\nFor context, I have defined 4 objectives in this study. After the first trial, there appears this `('values', '')` anomaly which causes the above exception.\r\n\r\n## Steps to reproduce\r\n\r\n1. Create a MPO study with 4 objectives.\r\n2. Start your study and after at least 2 trials, try to call the `study.trials_dataframe()` method.\r\n3. 
The above exception will be raised.\r\n\n", "before_files": [{"content": "import collections\nfrom typing import Any\nfrom typing import DefaultDict\nfrom typing import Dict\nfrom typing import List\nfrom typing import Set\nfrom typing import Tuple\n\nimport optuna\nfrom optuna._imports import try_import\nfrom optuna.trial._state import TrialState\n\n\nwith try_import() as _imports:\n # `Study.trials_dataframe` is disabled if pandas is not available.\n import pandas as pd\n\n# Required for type annotation in `Study.trials_dataframe`.\nif not _imports.is_successful():\n pd = object # type: ignore # NOQA\n\n\ndef _trials_dataframe(\n study: \"optuna.Study\", attrs: Tuple[str, ...], multi_index: bool\n) -> \"pd.DataFrame\":\n _imports.check()\n\n trials = study.get_trials(deepcopy=False)\n\n # If no trials, return an empty dataframe.\n if not len(trials):\n return pd.DataFrame()\n\n if \"value\" in attrs and study._is_multi_objective():\n attrs = tuple(\"values\" if attr == \"value\" else attr for attr in attrs)\n\n attrs_to_df_columns: Dict[str, str] = collections.OrderedDict()\n for attr in attrs:\n if attr.startswith(\"_\"):\n # Python conventional underscores are omitted in the dataframe.\n df_column = attr[1:]\n else:\n df_column = attr\n attrs_to_df_columns[attr] = df_column\n\n # column_agg is an aggregator of column names.\n # Keys of column agg are attributes of `FrozenTrial` such as 'trial_id' and 'params'.\n # Values are dataframe columns such as ('trial_id', '') and ('params', 'n_layers').\n column_agg: DefaultDict[str, Set] = collections.defaultdict(set)\n non_nested_attr = \"\"\n\n def _create_record_and_aggregate_column(\n trial: \"optuna.trial.FrozenTrial\",\n ) -> Dict[Tuple[str, str], Any]:\n\n record = {}\n for attr, df_column in attrs_to_df_columns.items():\n value = getattr(trial, attr)\n if isinstance(value, TrialState):\n # Convert TrialState to str and remove the common prefix.\n value = str(value).split(\".\")[-1]\n if isinstance(value, dict):\n for nested_attr, nested_value in value.items():\n record[(df_column, nested_attr)] = nested_value\n column_agg[attr].add((df_column, nested_attr))\n elif isinstance(value, list):\n # Expand trial.values.\n for nested_attr, nested_value in enumerate(value):\n record[(df_column, nested_attr)] = nested_value\n column_agg[attr].add((df_column, nested_attr))\n else:\n record[(df_column, non_nested_attr)] = value\n column_agg[attr].add((df_column, non_nested_attr))\n return record\n\n records = list([_create_record_and_aggregate_column(trial) for trial in trials])\n\n columns: List[Tuple[str, str]] = sum(\n (sorted(column_agg[k]) for k in attrs if k in column_agg), []\n )\n\n df = pd.DataFrame(records, columns=pd.MultiIndex.from_tuples(columns))\n\n if not multi_index:\n # Flatten the `MultiIndex` columns where names are concatenated with underscores.\n # Filtering is required to omit non-nested columns avoiding unwanted trailing\n # underscores.\n df.columns = [\"_\".join(filter(lambda c: c, map(lambda c: str(c), col))) for col in columns]\n\n return df\n", "path": "optuna/_dataframe.py"}], "after_files": [{"content": "import collections\nfrom typing import Any\nfrom typing import DefaultDict\nfrom typing import Dict\nfrom typing import List\nfrom typing import Set\nfrom typing import Tuple\n\nimport optuna\nfrom optuna._imports import try_import\nfrom optuna.trial._state import TrialState\n\n\nwith try_import() as _imports:\n # `Study.trials_dataframe` is disabled if pandas is not available.\n import pandas as pd\n\n# Required for 
type annotation in `Study.trials_dataframe`.\nif not _imports.is_successful():\n pd = object # type: ignore # NOQA\n\n\ndef _trials_dataframe(\n study: \"optuna.Study\", attrs: Tuple[str, ...], multi_index: bool\n) -> \"pd.DataFrame\":\n _imports.check()\n\n trials = study.get_trials(deepcopy=False)\n\n # If no trials, return an empty dataframe.\n if not len(trials):\n return pd.DataFrame()\n\n if \"value\" in attrs and study._is_multi_objective():\n attrs = tuple(\"values\" if attr == \"value\" else attr for attr in attrs)\n\n attrs_to_df_columns: Dict[str, str] = collections.OrderedDict()\n for attr in attrs:\n if attr.startswith(\"_\"):\n # Python conventional underscores are omitted in the dataframe.\n df_column = attr[1:]\n else:\n df_column = attr\n attrs_to_df_columns[attr] = df_column\n\n # column_agg is an aggregator of column names.\n # Keys of column agg are attributes of `FrozenTrial` such as 'trial_id' and 'params'.\n # Values are dataframe columns such as ('trial_id', '') and ('params', 'n_layers').\n column_agg: DefaultDict[str, Set] = collections.defaultdict(set)\n non_nested_attr = \"\"\n\n def _create_record_and_aggregate_column(\n trial: \"optuna.trial.FrozenTrial\",\n ) -> Dict[Tuple[str, str], Any]:\n\n record = {}\n for attr, df_column in attrs_to_df_columns.items():\n value = getattr(trial, attr)\n if isinstance(value, TrialState):\n # Convert TrialState to str and remove the common prefix.\n value = str(value).split(\".\")[-1]\n if isinstance(value, dict):\n for nested_attr, nested_value in value.items():\n record[(df_column, nested_attr)] = nested_value\n column_agg[attr].add((df_column, nested_attr))\n elif isinstance(value, list):\n # Expand trial.values.\n for nested_attr, nested_value in enumerate(value):\n record[(df_column, nested_attr)] = nested_value\n column_agg[attr].add((df_column, nested_attr))\n elif attr == \"values\":\n # trial.values should be None when the trial's state is FAIL or PRUNED.\n assert value is None\n for nested_attr in range(len(study.directions)):\n record[(df_column, nested_attr)] = None\n column_agg[attr].add((df_column, nested_attr))\n else:\n record[(df_column, non_nested_attr)] = value\n column_agg[attr].add((df_column, non_nested_attr))\n return record\n\n records = list([_create_record_and_aggregate_column(trial) for trial in trials])\n\n columns: List[Tuple[str, str]] = sum(\n (sorted(column_agg[k]) for k in attrs if k in column_agg), []\n )\n\n df = pd.DataFrame(records, columns=pd.MultiIndex.from_tuples(columns))\n\n if not multi_index:\n # Flatten the `MultiIndex` columns where names are concatenated with underscores.\n # Filtering is required to omit non-nested columns avoiding unwanted trailing\n # underscores.\n df.columns = [\"_\".join(filter(lambda c: c, map(lambda c: str(c), col))) for col in columns]\n\n return df\n", "path": "optuna/_dataframe.py"}]} | 1,871 | 191 |
gh_patches_debug_17616 | rasdani/github-patches | git_diff | python-discord__bot-655 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Write unit tests for `bot/rules/mentions.py`
Write unit tests for [`bot/rules/mentions.py`](../blob/master/bot/rules/mentions.py).
## Implementation details
Please make sure to read the general information in the [meta issue](553) and the [testing README](../blob/master/tests/README.md). We are aiming for 100% [branch coverage](https://coverage.readthedocs.io/en/stable/branch.html) for this file, but if you think that is not possible, please discuss that in this issue.
## Additional information
If you want to work on this issue, **please make sure that you get assigned to it** by one of the core devs before starting to work on it. We would like to avoid a situation where multiple people are working on the same issue. To get assigned, leave a comment showing your interest in tackling this issue.
--- END ISSUE ---
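*Editor's note (not part of the original issue):* the antispam rules in this repository are async functions with the signature `apply(last_message, recent_messages, config)`, so they can be tested with `unittest.IsolatedAsyncioTestCase`. The sketch below targets the `attachments` rule shown further down purely as an illustration (the `mentions` rule from the issue title follows the same shape); the `make_message` helper and config keys are assumptions, not the project's actual test utilities:

```python
import unittest
from unittest import mock

from bot.rules import attachments


def make_message(author: str, total_attachments: int) -> mock.MagicMock:
    """Build a lightweight stand-in for a discord.Message."""
    message = mock.MagicMock()
    message.author = author
    message.attachments = [mock.MagicMock() for _ in range(total_attachments)]
    return message


class AttachmentsRuleTests(unittest.IsolatedAsyncioTestCase):
    async def test_does_not_flag_totals_within_the_limit(self):
        last = make_message("bob", 2)
        self.assertIsNone(await attachments.apply(last, [last], {"max": 5}))

    async def test_flags_totals_above_the_limit(self):
        last = make_message("bob", 4)
        recent = [last, make_message("bob", 3)]
        self.assertIsNotNone(await attachments.apply(last, recent, {"max": 5}))


if __name__ == "__main__":
    unittest.main()
```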
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bot/rules/attachments.py`
Content:
```
1 from typing import Dict, Iterable, List, Optional, Tuple
2
3 from discord import Member, Message
4
5
6 async def apply(
7 last_message: Message, recent_messages: List[Message], config: Dict[str, int]
8 ) -> Optional[Tuple[str, Iterable[Member], Iterable[Message]]]:
9 """Detects total attachments exceeding the limit sent by a single user."""
10 relevant_messages = [last_message] + [
11 msg
12 for msg in recent_messages
13 if (
14 msg.author == last_message.author
15 and len(msg.attachments) > 0
16 )
17 ]
18 total_recent_attachments = sum(len(msg.attachments) for msg in relevant_messages)
19
20 if total_recent_attachments > config['max']:
21 return (
22 f"sent {total_recent_attachments} attachments in {config['max']}s",
23 (last_message.author,),
24 relevant_messages
25 )
26 return None
27
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bot/rules/attachments.py b/bot/rules/attachments.py
--- a/bot/rules/attachments.py
+++ b/bot/rules/attachments.py
@@ -7,14 +7,14 @@
last_message: Message, recent_messages: List[Message], config: Dict[str, int]
) -> Optional[Tuple[str, Iterable[Member], Iterable[Message]]]:
"""Detects total attachments exceeding the limit sent by a single user."""
- relevant_messages = [last_message] + [
+ relevant_messages = tuple(
msg
for msg in recent_messages
if (
msg.author == last_message.author
and len(msg.attachments) > 0
)
- ]
+ )
total_recent_attachments = sum(len(msg.attachments) for msg in relevant_messages)
if total_recent_attachments > config['max']:
| {"golden_diff": "diff --git a/bot/rules/attachments.py b/bot/rules/attachments.py\n--- a/bot/rules/attachments.py\n+++ b/bot/rules/attachments.py\n@@ -7,14 +7,14 @@\n last_message: Message, recent_messages: List[Message], config: Dict[str, int]\n ) -> Optional[Tuple[str, Iterable[Member], Iterable[Message]]]:\n \"\"\"Detects total attachments exceeding the limit sent by a single user.\"\"\"\n- relevant_messages = [last_message] + [\n+ relevant_messages = tuple(\n msg\n for msg in recent_messages\n if (\n msg.author == last_message.author\n and len(msg.attachments) > 0\n )\n- ]\n+ )\n total_recent_attachments = sum(len(msg.attachments) for msg in relevant_messages)\n \n if total_recent_attachments > config['max']:\n", "issue": "Write unit tests for `bot/rules/mentions.py`\nWrite unit tests for [`bot/rules/mentions.py`](../blob/master/bot/rules/mentions.py).\n\n## Implementation details\nPlease make sure to read the general information in the [meta issue](553) and the [testing README](../blob/master/tests/README.md). We are aiming for a 100% [branch coverage](https://coverage.readthedocs.io/en/stable/branch.html) for this file, but if you think that is not possible, please discuss that in this issue.\n\n## Additional information\nIf you want to work on this issue, **please make sure that you get assigned to it** by one of the core devs before starting to work on it. We would like to prevent the situation that multiple people are working on the same issue. To get assigned, leave a comment showing your interesting in tackling this issue.\n\n", "before_files": [{"content": "from typing import Dict, Iterable, List, Optional, Tuple\n\nfrom discord import Member, Message\n\n\nasync def apply(\n last_message: Message, recent_messages: List[Message], config: Dict[str, int]\n) -> Optional[Tuple[str, Iterable[Member], Iterable[Message]]]:\n \"\"\"Detects total attachments exceeding the limit sent by a single user.\"\"\"\n relevant_messages = [last_message] + [\n msg\n for msg in recent_messages\n if (\n msg.author == last_message.author\n and len(msg.attachments) > 0\n )\n ]\n total_recent_attachments = sum(len(msg.attachments) for msg in relevant_messages)\n\n if total_recent_attachments > config['max']:\n return (\n f\"sent {total_recent_attachments} attachments in {config['max']}s\",\n (last_message.author,),\n relevant_messages\n )\n return None\n", "path": "bot/rules/attachments.py"}], "after_files": [{"content": "from typing import Dict, Iterable, List, Optional, Tuple\n\nfrom discord import Member, Message\n\n\nasync def apply(\n last_message: Message, recent_messages: List[Message], config: Dict[str, int]\n) -> Optional[Tuple[str, Iterable[Member], Iterable[Message]]]:\n \"\"\"Detects total attachments exceeding the limit sent by a single user.\"\"\"\n relevant_messages = tuple(\n msg\n for msg in recent_messages\n if (\n msg.author == last_message.author\n and len(msg.attachments) > 0\n )\n )\n total_recent_attachments = sum(len(msg.attachments) for msg in relevant_messages)\n\n if total_recent_attachments > config['max']:\n return (\n f\"sent {total_recent_attachments} attachments in {config['max']}s\",\n (last_message.author,),\n relevant_messages\n )\n return None\n", "path": "bot/rules/attachments.py"}]} | 678 | 185 |
gh_patches_debug_5761 | rasdani/github-patches | git_diff | electricitymaps__electricitymaps-contrib-1456 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Data error for Aruba
For a few days in a row now (possibly longer), data for the island country of Aruba has been offline. I went to check back on the source: https://www.webaruba.com/ and saw that the figures for electricity generation under the "Aruba's renewable energy monitor" block were giving 0 MW for everything.

When I click on [View ->], it takes me to [this page](https://webaruba.com/renewable-energy-dashboard/aruba)

Real-time data for Aruba's electricity generation is present there but not on the front page.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsers/AW.py`
Content:
```
1 #!/usr/bin/env python3
2
3 import arrow
4 import requests
5 import datetime
6
7
8 def fetch_production(zone_key='AW', session=None, target_datetime=None, logger=None):
9 if target_datetime:
10 raise NotImplementedError('This parser is not yet able to parse past dates')
11
12 r = session or requests.session()
13 url = 'https://www.webaruba.com/renewable-energy-dashboard/app/rest/results.json'
14 # User agent is mandatory or services answers 404
15 headers = {'user-agent': 'electricitymap.org'}
16 response = r.get(url, headers=headers)
17 aruba_json = response.json()
18 top_data = aruba_json['dashboard_top_data']
19
20 # Values currenlty used from service
21 fossil = top_data['Fossil']
22 wind = top_data['Wind']
23 solar = top_data['TotalSolar']
24
25 # We're using Fossil data to get timestamp in correct time zone
26 local_date_time = datetime.datetime.strptime(fossil['timestamp'], "%Y-%m-%d %H:%M:%S.%f")
27 zone_date_time = arrow.Arrow.fromdatetime(local_date_time, 'America/Aruba')
28
29 data = {
30 'zoneKey': zone_key,
31 'datetime': zone_date_time.datetime,
32 'production': {
33 'oil': fossil['value'],
34 'wind': wind['value'],
35 'solar': solar['value'],
36 },
37 'storage': {},
38 'source': 'webaruba.com',
39 }
40
41 return data
42
43
44 if __name__ == '__main__':
45 print(fetch_production())
46
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/parsers/AW.py b/parsers/AW.py
--- a/parsers/AW.py
+++ b/parsers/AW.py
@@ -30,9 +30,9 @@
'zoneKey': zone_key,
'datetime': zone_date_time.datetime,
'production': {
- 'oil': fossil['value'],
- 'wind': wind['value'],
- 'solar': solar['value'],
+ 'oil': float(fossil['value']),
+ 'wind': float(wind['value']),
+ 'solar': float(solar['value']),
},
'storage': {},
'source': 'webaruba.com',
| {"golden_diff": "diff --git a/parsers/AW.py b/parsers/AW.py\n--- a/parsers/AW.py\n+++ b/parsers/AW.py\n@@ -30,9 +30,9 @@\n 'zoneKey': zone_key,\n 'datetime': zone_date_time.datetime,\n 'production': {\n- 'oil': fossil['value'],\n- 'wind': wind['value'],\n- 'solar': solar['value'],\n+ 'oil': float(fossil['value']),\n+ 'wind': float(wind['value']),\n+ 'solar': float(solar['value']),\n },\n 'storage': {},\n 'source': 'webaruba.com',\n", "issue": "Data error for Aruba\nFor a few days in a row now (possibly longer), data for the island country of Aruba has been offline. I went to check back on the source: https://www.webaruba.com/ and saw that the figures for electricity generation under the \"Aruba's renewable energy monitor\" block giving 0 MW for everything.\r\n\r\n\r\n\r\nWhen I click on [View ->], it takes me to [this page](https://webaruba.com/renewable-energy-dashboard/aruba)\r\n\r\n\r\n\r\nReal-time data for Aruba's electricity generation is present there but not on the front page.\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport arrow\nimport requests\nimport datetime\n\n\ndef fetch_production(zone_key='AW', session=None, target_datetime=None, logger=None):\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n r = session or requests.session()\n url = 'https://www.webaruba.com/renewable-energy-dashboard/app/rest/results.json'\n # User agent is mandatory or services answers 404\n headers = {'user-agent': 'electricitymap.org'}\n response = r.get(url, headers=headers)\n aruba_json = response.json()\n top_data = aruba_json['dashboard_top_data']\n\n # Values currenlty used from service\n fossil = top_data['Fossil']\n wind = top_data['Wind']\n solar = top_data['TotalSolar']\n\n # We're using Fossil data to get timestamp in correct time zone\n local_date_time = datetime.datetime.strptime(fossil['timestamp'], \"%Y-%m-%d %H:%M:%S.%f\")\n zone_date_time = arrow.Arrow.fromdatetime(local_date_time, 'America/Aruba')\n\n data = {\n 'zoneKey': zone_key,\n 'datetime': zone_date_time.datetime,\n 'production': {\n 'oil': fossil['value'],\n 'wind': wind['value'],\n 'solar': solar['value'],\n },\n 'storage': {},\n 'source': 'webaruba.com',\n }\n\n return data\n\n\nif __name__ == '__main__':\n print(fetch_production())\n", "path": "parsers/AW.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\nimport arrow\nimport requests\nimport datetime\n\n\ndef fetch_production(zone_key='AW', session=None, target_datetime=None, logger=None):\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n r = session or requests.session()\n url = 'https://www.webaruba.com/renewable-energy-dashboard/app/rest/results.json'\n # User agent is mandatory or services answers 404\n headers = {'user-agent': 'electricitymap.org'}\n response = r.get(url, headers=headers)\n aruba_json = response.json()\n top_data = aruba_json['dashboard_top_data']\n\n # Values currenlty used from service\n fossil = top_data['Fossil']\n wind = top_data['Wind']\n solar = top_data['TotalSolar']\n\n # We're using Fossil data to get timestamp in correct time zone\n local_date_time = datetime.datetime.strptime(fossil['timestamp'], \"%Y-%m-%d %H:%M:%S.%f\")\n zone_date_time = arrow.Arrow.fromdatetime(local_date_time, 'America/Aruba')\n\n data = {\n 'zoneKey': zone_key,\n 'datetime': zone_date_time.datetime,\n 'production': {\n 'oil': float(fossil['value']),\n 'wind': float(wind['value']),\n 'solar': float(solar['value']),\n },\n 
'storage': {},\n 'source': 'webaruba.com',\n }\n\n return data\n\n\nif __name__ == '__main__':\n print(fetch_production())\n", "path": "parsers/AW.py"}]} | 928 | 145 |
gh_patches_debug_25582 | rasdani/github-patches | git_diff | cal-itp__benefits-211 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Send Content-Security-Policy header
The Content-Security-Policy (CSP) header (with the `frame-ancestors` directive) replaces the now deprecated `X-Frame-Options` header, to instruct the browser about appropriate actions to perform if a site is included inside an `<iframe>`.
See more at https://developer.mozilla.org/en-US/docs/Web/HTTP/CSP
We already have Django's built-in [Clickjacking/`X-Frame-Options` features enabled](https://github.com/cal-itp/benefits/blob/dev/benefits/settings.py#L52). Since this app should never be run from an `<iframe>`, let's create another Middleware that sets the CSP header like so:
```
Content-Security-Policy: default-src 'self'; frame-ancestors 'none';
```
--- END ISSUE ---
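*Editor's note (not part of the original issue):* one minimal way to emit that header is a tiny hand-rolled middleware; this is only an illustrative sketch with assumed names (the accepted patch further down instead adds `csp.middleware.CSPMiddleware` from the django-csp package):

```python
# Hypothetical addition to benefits/core/middleware.py


class ContentSecurityPolicy:
    """Attach a restrictive Content-Security-Policy header to every response."""

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        response = self.get_response(request)
        response["Content-Security-Policy"] = "default-src 'self'; frame-ancestors 'none';"
        return response
```

Registering it would then just mean adding `"benefits.core.middleware.ContentSecurityPolicy"` to the `MIDDLEWARE` list in `settings.py`.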
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `benefits/settings.py`
Content:
```
1 """
2 Django settings for benefits project.
3 """
4 import os
5
6 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
7 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
8
9 # SECURITY WARNING: keep the secret key used in production secret!
10 SECRET_KEY = os.environ["DJANGO_SECRET_KEY"]
11
12 # SECURITY WARNING: don't run with debug turned on in production!
13 DEBUG = os.environ.get("DJANGO_DEBUG", "False").lower() == "true"
14
15 ADMIN = os.environ.get("DJANGO_ADMIN", "False").lower() == "true"
16
17 ALLOWED_HOSTS = []
18
19 if DEBUG:
20 ALLOWED_HOSTS.extend(["*"])
21 else:
22 hosts = os.environ["DJANGO_ALLOWED_HOSTS"].split(",")
23 ALLOWED_HOSTS.extend(hosts)
24
25 # Application definition
26
27 INSTALLED_APPS = [
28 "django.contrib.sessions",
29 "django.contrib.staticfiles",
30 "benefits.core",
31 "benefits.enrollment",
32 "benefits.eligibility",
33 ]
34
35 if ADMIN:
36 INSTALLED_APPS.extend(
37 [
38 "django.contrib.admin",
39 "django.contrib.auth",
40 "django.contrib.contenttypes",
41 "django.contrib.messages",
42 ]
43 )
44
45 MIDDLEWARE = [
46 "django.middleware.security.SecurityMiddleware",
47 "django.contrib.sessions.middleware.SessionMiddleware",
48 "django.middleware.locale.LocaleMiddleware",
49 "benefits.core.middleware.Healthcheck",
50 "django.middleware.common.CommonMiddleware",
51 "django.middleware.csrf.CsrfViewMiddleware",
52 "django.middleware.clickjacking.XFrameOptionsMiddleware",
53 "benefits.core.middleware.DebugSession",
54 "benefits.core.middleware.ChangedLanguageEvent",
55 ]
56
57 if ADMIN:
58 MIDDLEWARE.extend(
59 [
60 "django.contrib.auth.middleware.AuthenticationMiddleware",
61 "django.contrib.messages.middleware.MessageMiddleware",
62 ]
63 )
64
65 CSRF_COOKIE_AGE = None
66 CSRF_COOKIE_SAMESITE = "Strict"
67 CSRF_COOKIE_HTTPONLY = True
68
69 SESSION_COOKIE_SAMESITE = "Strict"
70 SESSION_ENGINE = "django.contrib.sessions.backends.signed_cookies"
71 SESSION_EXPIRE_AT_BROWSER_CLOSE = True
72
73 if not DEBUG:
74 CSRF_COOKIE_SECURE = True
75 CSRF_FAILURE_VIEW = "benefits.core.views.csrf_failure"
76 SESSION_COOKIE_SECURE = True
77
78 ROOT_URLCONF = "benefits.urls"
79
80 template_ctx_processors = [
81 "django.template.context_processors.request",
82 "benefits.core.context_processors.analytics",
83 ]
84
85 if DEBUG:
86 template_ctx_processors.extend(
87 [
88 "django.template.context_processors.debug",
89 "benefits.core.context_processors.debug",
90 ]
91 )
92
93 if ADMIN:
94 template_ctx_processors.extend(
95 [
96 "django.contrib.auth.context_processors.auth",
97 "django.contrib.messages.context_processors.messages",
98 ]
99 )
100
101 TEMPLATES = [
102 {
103 "BACKEND": "django.template.backends.django.DjangoTemplates",
104 "DIRS": [os.path.join(BASE_DIR, "benefits", "templates")],
105 "APP_DIRS": True,
106 "OPTIONS": {
107 "context_processors": template_ctx_processors,
108 },
109 },
110 ]
111
112 WSGI_APPLICATION = "benefits.wsgi.application"
113
114 DATABASES = {
115 "default": {
116 "ENGINE": "django.db.backends.sqlite3",
117 "NAME": os.environ.get("DJANGO_DB", "django") + ".db",
118 }
119 }
120
121 # Password validation
122
123 AUTH_PASSWORD_VALIDATORS = []
124
125 if ADMIN:
126 AUTH_PASSWORD_VALIDATORS.extend(
127 [
128 {
129 "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
130 },
131 {
132 "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
133 },
134 {
135 "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
136 },
137 {
138 "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
139 },
140 ]
141 )
142
143 # Internationalization
144
145 LANGUAGE_CODE = "en"
146
147 LANGUAGE_COOKIE_HTTPONLY = True
148 LANGUAGE_COOKIE_SAMESITE = "Strict"
149 LANGUAGE_COOKIE_SECURE = True
150
151 LANGUAGES = [("en", "English"), ("es", "Español")]
152
153 LOCALE_PATHS = [os.path.join(BASE_DIR, "benefits", "locale")]
154
155 USE_I18N = True
156 USE_L10N = True
157
158 TIME_ZONE = "UTC"
159 USE_TZ = True
160
161 # Static files (CSS, JavaScript, Images)
162
163 STATIC_URL = "/static/"
164 STATICFILES_DIRS = [os.path.join(BASE_DIR, "benefits", "static")]
165 STATIC_ROOT = os.path.join(BASE_DIR, "static")
166
167 # Logging configuration
168
169 LOG_LEVEL = os.environ.get("DJANGO_LOG_LEVEL", "DEBUG" if DEBUG else "WARNING")
170 LOGGING = {
171 "version": 1,
172 "disable_existing_loggers": False,
173 "formatters": {
174 "default": {
175 "format": "[{asctime}] {levelname} {name}:{lineno} {message}",
176 "datefmt": "%d/%b/%Y %H:%M:%S",
177 "style": "{",
178 },
179 },
180 "handlers": {
181 "default": {"class": "logging.StreamHandler", "formatter": "default"},
182 },
183 "root": {
184 "handlers": ["default"],
185 "level": LOG_LEVEL,
186 },
187 "loggers": {"django": {"handlers": ["default"], "propagate": False}},
188 }
189
190 # Analytics configuration
191
192 ANALYTICS_KEY = os.environ.get("ANALYTICS_KEY")
193
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/benefits/settings.py b/benefits/settings.py
--- a/benefits/settings.py
+++ b/benefits/settings.py
@@ -50,6 +50,7 @@
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
+ "csp.middleware.CSPMiddleware",
"benefits.core.middleware.DebugSession",
"benefits.core.middleware.ChangedLanguageEvent",
]
@@ -188,3 +189,35 @@
# Analytics configuration
ANALYTICS_KEY = os.environ.get("ANALYTICS_KEY")
+
+# Content Security Policy
+# Configuration docs at https://django-csp.readthedocs.io/en/latest/configuration.html
+
+# In particular, note that the inner single-quotes are required!
+# https://django-csp.readthedocs.io/en/latest/configuration.html#policy-settings
+
+CSP_DEFAULT_SRC = ["'self'"]
+
+CSP_CONNECT_SRC = ["'self'", "https://api.amplitude.com/"]
+
+CSP_FONT_SRC = ["https://california.azureedge.net/cdt/statetemplate/", "https://fonts.gstatic.com/"]
+
+CSP_FRAME_ANCESTORS = ["'none'"]
+CSP_FRAME_SRC = ["'none'"]
+
+CSP_SCRIPT_SRC_ELEM = [
+ "'unsafe-inline'",
+ "https://california.azureedge.net/cdt/statetemplate/",
+ "https://cdn.amplitude.com/libs/",
+ "https://code.jquery.com/",
+ "*.littlepay.com",
+]
+
+CSP_STYLE_SRC = ["'unsafe-inline'"]
+
+CSP_STYLE_SRC_ELEM = [
+ "'self'",
+ "'unsafe-inline'",
+ "https://california.azureedge.net/cdt/statetemplate/",
+ "https://fonts.googleapis.com/css",
+]
| {"golden_diff": "diff --git a/benefits/settings.py b/benefits/settings.py\n--- a/benefits/settings.py\n+++ b/benefits/settings.py\n@@ -50,6 +50,7 @@\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n+ \"csp.middleware.CSPMiddleware\",\n \"benefits.core.middleware.DebugSession\",\n \"benefits.core.middleware.ChangedLanguageEvent\",\n ]\n@@ -188,3 +189,35 @@\n # Analytics configuration\n \n ANALYTICS_KEY = os.environ.get(\"ANALYTICS_KEY\")\n+\n+# Content Security Policy\n+# Configuration docs at https://django-csp.readthedocs.io/en/latest/configuration.html\n+\n+# In particular, note that the inner single-quotes are required!\n+# https://django-csp.readthedocs.io/en/latest/configuration.html#policy-settings\n+\n+CSP_DEFAULT_SRC = [\"'self'\"]\n+\n+CSP_CONNECT_SRC = [\"'self'\", \"https://api.amplitude.com/\"]\n+\n+CSP_FONT_SRC = [\"https://california.azureedge.net/cdt/statetemplate/\", \"https://fonts.gstatic.com/\"]\n+\n+CSP_FRAME_ANCESTORS = [\"'none'\"]\n+CSP_FRAME_SRC = [\"'none'\"]\n+\n+CSP_SCRIPT_SRC_ELEM = [\n+ \"'unsafe-inline'\",\n+ \"https://california.azureedge.net/cdt/statetemplate/\",\n+ \"https://cdn.amplitude.com/libs/\",\n+ \"https://code.jquery.com/\",\n+ \"*.littlepay.com\",\n+]\n+\n+CSP_STYLE_SRC = [\"'unsafe-inline'\"]\n+\n+CSP_STYLE_SRC_ELEM = [\n+ \"'self'\",\n+ \"'unsafe-inline'\",\n+ \"https://california.azureedge.net/cdt/statetemplate/\",\n+ \"https://fonts.googleapis.com/css\",\n+]\n", "issue": "Send Content-Security-Policy header\nThe Content-Security-Policy (CSP) header (with the `frame-ancestors` directive) replaces the now deprecated `X-Frame-Options` header, to instruct the browser about appropriate actions to perform if a site is included inside an `<iframe>`.\r\n\r\nSee more at https://developer.mozilla.org/en-US/docs/Web/HTTP/CSP\r\n\r\nWe already have Django's built-in [Clickjacking/`X-Frame-Options` features enabled](https://github.com/cal-itp/benefits/blob/dev/benefits/settings.py#L52). 
Since this app should never be run from an `<iframe>`, let's create another Middleware that sets the CSP header like so:\r\n\r\n```\r\nContent-Security-Policy: default-src 'self'; frame-ancestors 'none';\r\n```\n", "before_files": [{"content": "\"\"\"\nDjango settings for benefits project.\n\"\"\"\nimport os\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ[\"DJANGO_SECRET_KEY\"]\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = os.environ.get(\"DJANGO_DEBUG\", \"False\").lower() == \"true\"\n\nADMIN = os.environ.get(\"DJANGO_ADMIN\", \"False\").lower() == \"true\"\n\nALLOWED_HOSTS = []\n\nif DEBUG:\n ALLOWED_HOSTS.extend([\"*\"])\nelse:\n hosts = os.environ[\"DJANGO_ALLOWED_HOSTS\"].split(\",\")\n ALLOWED_HOSTS.extend(hosts)\n\n# Application definition\n\nINSTALLED_APPS = [\n \"django.contrib.sessions\",\n \"django.contrib.staticfiles\",\n \"benefits.core\",\n \"benefits.enrollment\",\n \"benefits.eligibility\",\n]\n\nif ADMIN:\n INSTALLED_APPS.extend(\n [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.messages\",\n ]\n )\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.locale.LocaleMiddleware\",\n \"benefits.core.middleware.Healthcheck\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"benefits.core.middleware.DebugSession\",\n \"benefits.core.middleware.ChangedLanguageEvent\",\n]\n\nif ADMIN:\n MIDDLEWARE.extend(\n [\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n ]\n )\n\nCSRF_COOKIE_AGE = None\nCSRF_COOKIE_SAMESITE = \"Strict\"\nCSRF_COOKIE_HTTPONLY = True\n\nSESSION_COOKIE_SAMESITE = \"Strict\"\nSESSION_ENGINE = \"django.contrib.sessions.backends.signed_cookies\"\nSESSION_EXPIRE_AT_BROWSER_CLOSE = True\n\nif not DEBUG:\n CSRF_COOKIE_SECURE = True\n CSRF_FAILURE_VIEW = \"benefits.core.views.csrf_failure\"\n SESSION_COOKIE_SECURE = True\n\nROOT_URLCONF = \"benefits.urls\"\n\ntemplate_ctx_processors = [\n \"django.template.context_processors.request\",\n \"benefits.core.context_processors.analytics\",\n]\n\nif DEBUG:\n template_ctx_processors.extend(\n [\n \"django.template.context_processors.debug\",\n \"benefits.core.context_processors.debug\",\n ]\n )\n\nif ADMIN:\n template_ctx_processors.extend(\n [\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ]\n )\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [os.path.join(BASE_DIR, \"benefits\", \"templates\")],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": template_ctx_processors,\n },\n },\n]\n\nWSGI_APPLICATION = \"benefits.wsgi.application\"\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n \"NAME\": os.environ.get(\"DJANGO_DB\", \"django\") + \".db\",\n }\n}\n\n# Password validation\n\nAUTH_PASSWORD_VALIDATORS = []\n\nif ADMIN:\n AUTH_PASSWORD_VALIDATORS.extend(\n [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": 
\"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n ]\n )\n\n# Internationalization\n\nLANGUAGE_CODE = \"en\"\n\nLANGUAGE_COOKIE_HTTPONLY = True\nLANGUAGE_COOKIE_SAMESITE = \"Strict\"\nLANGUAGE_COOKIE_SECURE = True\n\nLANGUAGES = [(\"en\", \"English\"), (\"es\", \"Espa\u00f1ol\")]\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, \"benefits\", \"locale\")]\n\nUSE_I18N = True\nUSE_L10N = True\n\nTIME_ZONE = \"UTC\"\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n\nSTATIC_URL = \"/static/\"\nSTATICFILES_DIRS = [os.path.join(BASE_DIR, \"benefits\", \"static\")]\nSTATIC_ROOT = os.path.join(BASE_DIR, \"static\")\n\n# Logging configuration\n\nLOG_LEVEL = os.environ.get(\"DJANGO_LOG_LEVEL\", \"DEBUG\" if DEBUG else \"WARNING\")\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"default\": {\n \"format\": \"[{asctime}] {levelname} {name}:{lineno} {message}\",\n \"datefmt\": \"%d/%b/%Y %H:%M:%S\",\n \"style\": \"{\",\n },\n },\n \"handlers\": {\n \"default\": {\"class\": \"logging.StreamHandler\", \"formatter\": \"default\"},\n },\n \"root\": {\n \"handlers\": [\"default\"],\n \"level\": LOG_LEVEL,\n },\n \"loggers\": {\"django\": {\"handlers\": [\"default\"], \"propagate\": False}},\n}\n\n# Analytics configuration\n\nANALYTICS_KEY = os.environ.get(\"ANALYTICS_KEY\")\n", "path": "benefits/settings.py"}], "after_files": [{"content": "\"\"\"\nDjango settings for benefits project.\n\"\"\"\nimport os\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.environ[\"DJANGO_SECRET_KEY\"]\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = os.environ.get(\"DJANGO_DEBUG\", \"False\").lower() == \"true\"\n\nADMIN = os.environ.get(\"DJANGO_ADMIN\", \"False\").lower() == \"true\"\n\nALLOWED_HOSTS = []\n\nif DEBUG:\n ALLOWED_HOSTS.extend([\"*\"])\nelse:\n hosts = os.environ[\"DJANGO_ALLOWED_HOSTS\"].split(\",\")\n ALLOWED_HOSTS.extend(hosts)\n\n# Application definition\n\nINSTALLED_APPS = [\n \"django.contrib.sessions\",\n \"django.contrib.staticfiles\",\n \"benefits.core\",\n \"benefits.enrollment\",\n \"benefits.eligibility\",\n]\n\nif ADMIN:\n INSTALLED_APPS.extend(\n [\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.messages\",\n ]\n )\n\nMIDDLEWARE = [\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.locale.LocaleMiddleware\",\n \"benefits.core.middleware.Healthcheck\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"csp.middleware.CSPMiddleware\",\n \"benefits.core.middleware.DebugSession\",\n \"benefits.core.middleware.ChangedLanguageEvent\",\n]\n\nif ADMIN:\n MIDDLEWARE.extend(\n [\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n ]\n )\n\nCSRF_COOKIE_HTTPONLY = True\n\nSESSION_COOKIE_AGE = 3600\nSESSION_COOKIE_SAMESITE = \"Strict\"\nSESSION_ENGINE = \"django.contrib.sessions.backends.signed_cookies\"\n\nif not DEBUG:\n 
CSRF_COOKIE_SECURE = True\n CSRF_FAILURE_VIEW = \"benefits.core.views.csrf_failure\"\n SESSION_COOKIE_SECURE = True\n\nROOT_URLCONF = \"benefits.urls\"\n\ntemplate_ctx_processors = [\n \"django.template.context_processors.request\",\n \"benefits.core.context_processors.analytics\",\n]\n\nif DEBUG:\n template_ctx_processors.extend(\n [\n \"django.template.context_processors.debug\",\n \"benefits.core.context_processors.debug\",\n ]\n )\n\nif ADMIN:\n template_ctx_processors.extend(\n [\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ]\n )\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [os.path.join(BASE_DIR, \"benefits\", \"templates\")],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": template_ctx_processors,\n },\n },\n]\n\nWSGI_APPLICATION = \"benefits.wsgi.application\"\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n \"NAME\": os.environ.get(\"DJANGO_DB\", \"django\") + \".db\",\n }\n}\n\n# Password validation\n\nAUTH_PASSWORD_VALIDATORS = []\n\nif ADMIN:\n AUTH_PASSWORD_VALIDATORS.extend(\n [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n ]\n )\n\n# Internationalization\n\nLANGUAGE_CODE = \"en\"\n\nLANGUAGE_COOKIE_HTTPONLY = True\nLANGUAGE_COOKIE_SAMESITE = \"Strict\"\nLANGUAGE_COOKIE_SECURE = True\n\nLANGUAGES = [(\"en\", \"English\"), (\"es\", \"Espa\u00f1ol\")]\n\nLOCALE_PATHS = [os.path.join(BASE_DIR, \"benefits\", \"locale\")]\n\nUSE_I18N = True\nUSE_L10N = True\n\nTIME_ZONE = \"UTC\"\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n\nSTATIC_URL = \"/static/\"\nSTATICFILES_DIRS = [os.path.join(BASE_DIR, \"benefits\", \"static\")]\nSTATIC_ROOT = os.path.join(BASE_DIR, \"static\")\n\n# Logging configuration\n\nLOG_LEVEL = os.environ.get(\"DJANGO_LOG_LEVEL\", \"DEBUG\" if DEBUG else \"WARNING\")\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"default\": {\n \"format\": \"[{asctime}] {levelname} {name}:{lineno} {message}\",\n \"datefmt\": \"%d/%b/%Y %H:%M:%S\",\n \"style\": \"{\",\n },\n },\n \"handlers\": {\n \"default\": {\"class\": \"logging.StreamHandler\", \"formatter\": \"default\"},\n },\n \"root\": {\n \"handlers\": [\"default\"],\n \"level\": LOG_LEVEL,\n },\n \"loggers\": {\"django\": {\"handlers\": [\"default\"], \"propagate\": False}},\n}\n\n# Analytics configuration\n\nANALYTICS_KEY = os.environ.get(\"ANALYTICS_KEY\")\n\n# Content Security Policy\n# Configuration docs at https://django-csp.readthedocs.io/en/latest/configuration.html\n\n# In particular, note that the inner single-quotes are required!\n# https://django-csp.readthedocs.io/en/latest/configuration.html#policy-settings\n\nCSP_DEFAULT_SRC = [\"'self'\"]\n\nCSP_CONNECT_SRC = [\"'self'\", \"https://api.amplitude.com/\"]\n\nCSP_FONT_SRC = [\"https://california.azureedge.net/cdt/statetemplate/\", \"https://fonts.gstatic.com/\"]\n\nCSP_FRAME_ANCESTORS = [\"'none'\"]\nCSP_FRAME_SRC = [\"'none'\"]\n\nCSP_SCRIPT_SRC_ELEM = [\n \"'unsafe-inline'\",\n \"https://california.azureedge.net/cdt/statetemplate/\",\n \"https://cdn.amplitude.com/libs/\",\n \"https://code.jquery.com/\",\n 
\"*.littlepay.com\",\n]\n\nCSP_STYLE_SRC = [\"'unsafe-inline'\"]\n\nCSP_STYLE_SRC_ELEM = [\n \"'self'\",\n \"'unsafe-inline'\",\n \"https://california.azureedge.net/cdt/statetemplate/\",\n \"https://fonts.googleapis.com/css\",\n]\n", "path": "benefits/settings.py"}]} | 2,046 | 397 |
gh_patches_debug_49143 | rasdani/github-patches | git_diff | vyperlang__vyper-2513 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
test fail with web3.py 5.21.0
### Version Information
* vyper Version (output of `vyper --version`): latest master (cff69d63)
* OS: macos
* Python Version (output of `python --version`): 3.9.6
### What's your issue about?
tests fail
tests/cli/vyper_json/test_parse_args_vyperjson.py::test_pretty_json - json.decoder.J...
FAILED tests/cli/vyper_json/test_parse_args_vyperjson.py::test_to_stdout - json.decoder.JSO...
FAILED tests/parser/features/test_assert.py::test_assert_reason[False] - AssertionError: as...
FAILED tests/parser/features/test_assert.py::test_assest_reason_revert
misses the string "execution reverted"
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 import os
4 import subprocess
5
6 from setuptools import find_packages, setup
7
8 __version__ = "0.3.0"
9
10 extras_require = {
11 "test": [
12 "pytest>=5.4,<6.0",
13 "pytest-cov>=2.10,<3.0",
14 "pytest-instafail>=0.4,<1.0",
15 "pytest-xdist>=1.32,<2.0",
16 "eth-tester[py-evm]>=0.5.0b1,<0.6",
17 "py-evm==0.4.0a4", # NOTE: temporarily pinned until we have support for py-evm 0.5.0a0+
18 "web3==5.12.3",
19 "tox>=3.15,<4.0",
20 "lark-parser==0.10.0",
21 "hypothesis[lark]>=5.37.1,<6.0",
22 ],
23 "lint": [
24 "black==21.9b0",
25 "flake8==3.9.2",
26 "flake8-bugbear==20.1.4",
27 "flake8-use-fstring==1.1",
28 "isort==5.9.3",
29 "mypy==0.910",
30 ],
31 "docs": ["recommonmark", "sphinx>=3.0,<4.0", "sphinx_rtd_theme>=0.5,<0.6"],
32 "dev": ["ipython", "pre-commit", "pyinstaller", "twine"],
33 }
34
35 extras_require["dev"] = (
36 extras_require["test"] + extras_require["lint"] + extras_require["docs"] + extras_require["dev"]
37 )
38
39 hash_file_rel_path = os.path.join("vyper", "vyper_git_version.txt")
40 hashfile = os.path.relpath(hash_file_rel_path)
41
42 try:
43 commithash = subprocess.check_output("git rev-parse HEAD".split())
44 commithash_str = commithash.decode("utf-8").strip()
45 with open(hashfile, "w") as fh:
46 fh.write(f"{__version__}\n{commithash_str}")
47 except subprocess.CalledProcessError:
48 pass
49
50 with open("README.md", "r") as f:
51 long_description = f.read()
52
53 setup(
54 name="vyper",
55 version=__version__,
56 description="Vyper: the Pythonic Programming Language for the EVM",
57 long_description=long_description,
58 long_description_content_type="text/markdown",
59 author="Vyper Team",
60 author_email="",
61 url="https://github.com/vyperlang/vyper",
62 license="Apache License 2.0",
63 keywords="ethereum evm smart contract language",
64 include_package_data=True,
65 packages=find_packages(exclude=("tests", "docs")),
66 python_requires=">=3.7,<3.10",
67 py_modules=["vyper"],
68 install_requires=[
69 "asttokens==2.0.4",
70 "pycryptodome>=3.5.1,<4",
71 "semantic-version==2.8.5",
72 "cached-property==1.5.2 ; python_version<'3.8'",
73 ],
74 setup_requires=["pytest-runner"],
75 tests_require=extras_require["test"],
76 extras_require=extras_require,
77 entry_points={
78 "console_scripts": [
79 "vyper=vyper.cli.vyper_compile:_parse_cli_args",
80 "vyper-serve=vyper.cli.vyper_serve:_parse_cli_args",
81 "vyper-lll=vyper.cli.vyper_lll:_parse_cli_args",
82 "vyper-json=vyper.cli.vyper_json:_parse_cli_args",
83 ]
84 },
85 classifiers=[
86 "Intended Audience :: Developers",
87 "License :: OSI Approved :: Apache Software License",
88 "Programming Language :: Python :: 3.7",
89 "Programming Language :: Python :: 3.8",
90 "Programming Language :: Python :: 3.9",
91 ],
92 data_files=[("", [hash_file_rel_path])],
93 )
94
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -15,7 +15,7 @@
"pytest-xdist>=1.32,<2.0",
"eth-tester[py-evm]>=0.5.0b1,<0.6",
"py-evm==0.4.0a4", # NOTE: temporarily pinned until we have support for py-evm 0.5.0a0+
- "web3==5.12.3",
+ "web3==5.21.0",
"tox>=3.15,<4.0",
"lark-parser==0.10.0",
"hypothesis[lark]>=5.37.1,<6.0",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -15,7 +15,7 @@\n \"pytest-xdist>=1.32,<2.0\",\n \"eth-tester[py-evm]>=0.5.0b1,<0.6\",\n \"py-evm==0.4.0a4\", # NOTE: temporarily pinned until we have support for py-evm 0.5.0a0+\n- \"web3==5.12.3\",\n+ \"web3==5.21.0\",\n \"tox>=3.15,<4.0\",\n \"lark-parser==0.10.0\",\n \"hypothesis[lark]>=5.37.1,<6.0\",\n", "issue": "test fail with web3.py 5.21.0\n### Version Information\r\n\r\n* vyper Version (output of `vyper --version`): latest master (cff69d63)\r\n* OS: macos\r\n* Python Version (output of `python --version`): 3.9.6\r\n\r\n### What's your issue about?\r\n\r\ntests fail\r\n\r\ntests/cli/vyper_json/test_parse_args_vyperjson.py::test_pretty_json - json.decoder.J...\r\nFAILED tests/cli/vyper_json/test_parse_args_vyperjson.py::test_to_stdout - json.decoder.JSO...\r\nFAILED tests/parser/features/test_assert.py::test_assert_reason[False] - AssertionError: as...\r\nFAILED tests/parser/features/test_assert.py::test_assest_reason_revert\r\n\r\nmisses the string \"execution reverted\"\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport os\nimport subprocess\n\nfrom setuptools import find_packages, setup\n\n__version__ = \"0.3.0\"\n\nextras_require = {\n \"test\": [\n \"pytest>=5.4,<6.0\",\n \"pytest-cov>=2.10,<3.0\",\n \"pytest-instafail>=0.4,<1.0\",\n \"pytest-xdist>=1.32,<2.0\",\n \"eth-tester[py-evm]>=0.5.0b1,<0.6\",\n \"py-evm==0.4.0a4\", # NOTE: temporarily pinned until we have support for py-evm 0.5.0a0+\n \"web3==5.12.3\",\n \"tox>=3.15,<4.0\",\n \"lark-parser==0.10.0\",\n \"hypothesis[lark]>=5.37.1,<6.0\",\n ],\n \"lint\": [\n \"black==21.9b0\",\n \"flake8==3.9.2\",\n \"flake8-bugbear==20.1.4\",\n \"flake8-use-fstring==1.1\",\n \"isort==5.9.3\",\n \"mypy==0.910\",\n ],\n \"docs\": [\"recommonmark\", \"sphinx>=3.0,<4.0\", \"sphinx_rtd_theme>=0.5,<0.6\"],\n \"dev\": [\"ipython\", \"pre-commit\", \"pyinstaller\", \"twine\"],\n}\n\nextras_require[\"dev\"] = (\n extras_require[\"test\"] + extras_require[\"lint\"] + extras_require[\"docs\"] + extras_require[\"dev\"]\n)\n\nhash_file_rel_path = os.path.join(\"vyper\", \"vyper_git_version.txt\")\nhashfile = os.path.relpath(hash_file_rel_path)\n\ntry:\n commithash = subprocess.check_output(\"git rev-parse HEAD\".split())\n commithash_str = commithash.decode(\"utf-8\").strip()\n with open(hashfile, \"w\") as fh:\n fh.write(f\"{__version__}\\n{commithash_str}\")\nexcept subprocess.CalledProcessError:\n pass\n\nwith open(\"README.md\", \"r\") as f:\n long_description = f.read()\n\nsetup(\n name=\"vyper\",\n version=__version__,\n description=\"Vyper: the Pythonic Programming Language for the EVM\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"Vyper Team\",\n author_email=\"\",\n url=\"https://github.com/vyperlang/vyper\",\n license=\"Apache License 2.0\",\n keywords=\"ethereum evm smart contract language\",\n include_package_data=True,\n packages=find_packages(exclude=(\"tests\", \"docs\")),\n python_requires=\">=3.7,<3.10\",\n py_modules=[\"vyper\"],\n install_requires=[\n \"asttokens==2.0.4\",\n \"pycryptodome>=3.5.1,<4\",\n \"semantic-version==2.8.5\",\n \"cached-property==1.5.2 ; python_version<'3.8'\",\n ],\n setup_requires=[\"pytest-runner\"],\n tests_require=extras_require[\"test\"],\n extras_require=extras_require,\n entry_points={\n \"console_scripts\": [\n \"vyper=vyper.cli.vyper_compile:_parse_cli_args\",\n \"vyper-serve=vyper.cli.vyper_serve:_parse_cli_args\",\n 
\"vyper-lll=vyper.cli.vyper_lll:_parse_cli_args\",\n \"vyper-json=vyper.cli.vyper_json:_parse_cli_args\",\n ]\n },\n classifiers=[\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n ],\n data_files=[(\"\", [hash_file_rel_path])],\n)\n", "path": "setup.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport os\nimport subprocess\n\nfrom setuptools import find_packages, setup\n\n__version__ = \"0.3.0\"\n\nextras_require = {\n \"test\": [\n \"pytest>=5.4,<6.0\",\n \"pytest-cov>=2.10,<3.0\",\n \"pytest-instafail>=0.4,<1.0\",\n \"pytest-xdist>=1.32,<2.0\",\n \"eth-tester[py-evm]>=0.5.0b1,<0.6\",\n \"py-evm==0.4.0a4\", # NOTE: temporarily pinned until we have support for py-evm 0.5.0a0+\n \"web3==5.21.0\",\n \"tox>=3.15,<4.0\",\n \"lark-parser==0.10.0\",\n \"hypothesis[lark]>=5.37.1,<6.0\",\n ],\n \"lint\": [\n \"black==21.9b0\",\n \"flake8==3.9.2\",\n \"flake8-bugbear==20.1.4\",\n \"flake8-use-fstring==1.1\",\n \"isort==5.9.3\",\n \"mypy==0.910\",\n ],\n \"docs\": [\"recommonmark\", \"sphinx>=3.0,<4.0\", \"sphinx_rtd_theme>=0.5,<0.6\"],\n \"dev\": [\"ipython\", \"pre-commit\", \"pyinstaller\", \"twine\"],\n}\n\nextras_require[\"dev\"] = (\n extras_require[\"test\"] + extras_require[\"lint\"] + extras_require[\"docs\"] + extras_require[\"dev\"]\n)\n\nhash_file_rel_path = os.path.join(\"vyper\", \"vyper_git_version.txt\")\nhashfile = os.path.relpath(hash_file_rel_path)\n\ntry:\n commithash = subprocess.check_output(\"git rev-parse HEAD\".split())\n commithash_str = commithash.decode(\"utf-8\").strip()\n with open(hashfile, \"w\") as fh:\n fh.write(f\"{__version__}\\n{commithash_str}\")\nexcept subprocess.CalledProcessError:\n pass\n\nwith open(\"README.md\", \"r\") as f:\n long_description = f.read()\n\nsetup(\n name=\"vyper\",\n version=__version__,\n description=\"Vyper: the Pythonic Programming Language for the EVM\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n author=\"Vyper Team\",\n author_email=\"\",\n url=\"https://github.com/vyperlang/vyper\",\n license=\"Apache License 2.0\",\n keywords=\"ethereum evm smart contract language\",\n include_package_data=True,\n packages=find_packages(exclude=(\"tests\", \"docs\")),\n python_requires=\">=3.7,<3.10\",\n py_modules=[\"vyper\"],\n install_requires=[\n \"asttokens==2.0.4\",\n \"pycryptodome>=3.5.1,<4\",\n \"semantic-version==2.8.5\",\n \"cached-property==1.5.2 ; python_version<'3.8'\",\n ],\n setup_requires=[\"pytest-runner\"],\n tests_require=extras_require[\"test\"],\n extras_require=extras_require,\n entry_points={\n \"console_scripts\": [\n \"vyper=vyper.cli.vyper_compile:_parse_cli_args\",\n \"vyper-serve=vyper.cli.vyper_serve:_parse_cli_args\",\n \"vyper-lll=vyper.cli.vyper_lll:_parse_cli_args\",\n \"vyper-json=vyper.cli.vyper_json:_parse_cli_args\",\n ]\n },\n classifiers=[\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n ],\n data_files=[(\"\", [hash_file_rel_path])],\n)\n", "path": "setup.py"}]} | 1,493 | 178 |
gh_patches_debug_10810 | rasdani/github-patches | git_diff | sunpy__sunpy-4430 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Wrong plot in goes hek m25 example
<!--
We know asking good questions takes effort, and we appreciate your time.
Thank you.
Please be aware that everyone has to follow our code of conduct:
https://github.com/sunpy/sunpy/blob/master/CODE_OF_CONDUCT.rst
Also that these comments are hidden when you submit this github issue.
Please have a search on our GitHub repository to see if a similar issue has already been posted.
If a similar issue is closed, have a quick look to see if you are satisfied by the resolution.
If not please go ahead and open an issue!
-->
### Description
<!-- Provide a general description of the bug. -->
The plot in timeseries example gallery of sunpy for goes and hek flare data looks very different when same code is run on master.
### Expected behavior
<!-- What did you expect to happen. -->
It should look like this
https://docs.sunpy.org/en/stable/_images/sphx_glr_goes_hek_m25_001.png
### Actual behavior
<!--
What actually happened.
Was the output confusing or poorly described?
-->

### Steps to Reproduce
<!--
Please include **code** that reproduces the issue whenever possible.
The best reproductions are self-contained scripts with minimal dependencies.
-->
https://docs.sunpy.org/en/stable/generated/gallery/time_series/goes_hek_m25.html#sphx-glr-generated-gallery-time-series-goes-hek-m25-py run this example.
### System Details
<!--
We at least need to know the sunpy version you are using.
We provide a short function (``sunpy.util.system_info()``) that will provide most of the below information.
This step is optional but strongly recommended.
-->
- SunPy Version: master, 2.0.1, 2.0.rc2 all are giving error
- Astropy Version: 4.0.1.post1
- Python Version: 3.8.5, 3.7.5 both were giving error
- OS information: Ubuntu 18.04 LTS
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/time_series/goes_hek_m25.py`
Content:
```
1 """
2 ==============================
3 Flare times on a GOES XRS plot
4 ==============================
5
6 How to plot flare times as provided by the HEK on a GOES XRS plot.
7 """
8 import matplotlib.pyplot as plt
9
10 from sunpy.net import Fido
11 from sunpy.net import attrs as a
12 from sunpy.net import hek
13 from sunpy.time import TimeRange, parse_time
14 from sunpy.timeseries import TimeSeries
15
16 ###############################################################################
17 # Let's first grab GOES XRS data for a particular time of interest
18 tr = TimeRange(['2011-06-07 04:00', '2011-06-07 12:00'])
19 results = Fido.search(a.Time(tr), a.Instrument.xrs)
20
21 ###############################################################################
22 # Then download the data and load it into a TimeSeries
23 files = Fido.fetch(results)
24 goes = TimeSeries(files)
25
26 ###############################################################################
27 # Next lets grab the HEK flare data for this time from the NOAA Space Weather
28 # Prediction Center (SWPC)
29 client = hek.HEKClient()
30 flares_hek = client.search(hek.attrs.Time(tr.start, tr.end),
31 hek.attrs.FL, hek.attrs.FRM.Name == 'SWPC')
32
33 ###############################################################################
34 # Lets plot everything together
35 fig, ax = plt.subplots()
36 goes.plot()
37 ax.axvline(parse_time(flares_hek[0].get('event_peaktime')).plot_date)
38 ax.axvspan(parse_time(flares_hek[0].get('event_starttime')).plot_date,
39 parse_time(flares_hek[0].get('event_endtime')).plot_date,
40 alpha=0.2, label=flares_hek[0].get('fl_goescls'))
41 ax.legend(loc=2)
42 ax.set_yscale('log')
43 plt.show()
44
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/time_series/goes_hek_m25.py b/examples/time_series/goes_hek_m25.py
--- a/examples/time_series/goes_hek_m25.py
+++ b/examples/time_series/goes_hek_m25.py
@@ -34,9 +34,9 @@
# Lets plot everything together
fig, ax = plt.subplots()
goes.plot()
-ax.axvline(parse_time(flares_hek[0].get('event_peaktime')).plot_date)
-ax.axvspan(parse_time(flares_hek[0].get('event_starttime')).plot_date,
- parse_time(flares_hek[0].get('event_endtime')).plot_date,
+ax.axvline(parse_time(flares_hek[0].get('event_peaktime')).datetime)
+ax.axvspan(parse_time(flares_hek[0].get('event_starttime')).datetime,
+ parse_time(flares_hek[0].get('event_endtime')).datetime,
alpha=0.2, label=flares_hek[0].get('fl_goescls'))
ax.legend(loc=2)
ax.set_yscale('log')
| {"golden_diff": "diff --git a/examples/time_series/goes_hek_m25.py b/examples/time_series/goes_hek_m25.py\n--- a/examples/time_series/goes_hek_m25.py\n+++ b/examples/time_series/goes_hek_m25.py\n@@ -34,9 +34,9 @@\n # Lets plot everything together\n fig, ax = plt.subplots()\n goes.plot()\n-ax.axvline(parse_time(flares_hek[0].get('event_peaktime')).plot_date)\n-ax.axvspan(parse_time(flares_hek[0].get('event_starttime')).plot_date,\n- parse_time(flares_hek[0].get('event_endtime')).plot_date,\n+ax.axvline(parse_time(flares_hek[0].get('event_peaktime')).datetime)\n+ax.axvspan(parse_time(flares_hek[0].get('event_starttime')).datetime,\n+ parse_time(flares_hek[0].get('event_endtime')).datetime,\n alpha=0.2, label=flares_hek[0].get('fl_goescls'))\n ax.legend(loc=2)\n ax.set_yscale('log')\n", "issue": "Wrong plot in goes hek m25 example\n<!--\r\nWe know asking good questions takes effort, and we appreciate your time.\r\nThank you.\r\n\r\nPlease be aware that everyone has to follow our code of conduct:\r\nhttps://github.com/sunpy/sunpy/blob/master/CODE_OF_CONDUCT.rst\r\n\r\nAlso that these comments are hidden when you submit this github issue.\r\n\r\nPlease have a search on our GitHub repository to see if a similar issue has already been posted.\r\nIf a similar issue is closed, have a quick look to see if you are satisfied by the resolution.\r\nIf not please go ahead and open an issue!\r\n-->\r\n\r\n### Description\r\n<!-- Provide a general description of the bug. -->\r\nThe plot in timeseries example gallery of sunpy for goes and hek flare data looks very different when same code is run on master.\r\n\r\n### Expected behavior\r\n<!-- What did you expect to happen. -->\r\nIt should look like this\r\nhttps://docs.sunpy.org/en/stable/_images/sphx_glr_goes_hek_m25_001.png\r\n\r\n### Actual behavior\r\n<!--\r\nWhat actually happened.\r\nWas the output confusing or poorly described?\r\n-->\r\n\r\n### Steps to Reproduce\r\n<!--\r\nPlease include **code** that reproduces the issue whenever possible.\r\nThe best reproductions are self-contained scripts with minimal dependencies.\r\n-->\r\n\r\nhttps://docs.sunpy.org/en/stable/generated/gallery/time_series/goes_hek_m25.html#sphx-glr-generated-gallery-time-series-goes-hek-m25-py run this example.\r\n\r\n### System Details\r\n<!--\r\nWe at least need to know the sunpy version you are using.\r\nWe provide a short function (``sunpy.util.system_info()``) that will provide most of the below information.\r\nThis step is optional but strongly recommended.\r\n-->\r\n- SunPy Version: master, 2.0.1, 2.0.rc2 all are giving error\r\n- Astropy Version: 4.0.1.post1\r\n- Python Version: 3.8.5, 3.7.5 both were giving error\r\n- OS information: Ubuntu 18.04 LTS\r\n\n", "before_files": [{"content": "\"\"\"\n==============================\nFlare times on a GOES XRS plot\n==============================\n\nHow to plot flare times as provided by the HEK on a GOES XRS plot.\n\"\"\"\nimport matplotlib.pyplot as plt\n\nfrom sunpy.net import Fido\nfrom sunpy.net import attrs as a\nfrom sunpy.net import hek\nfrom sunpy.time import TimeRange, parse_time\nfrom sunpy.timeseries import TimeSeries\n\n###############################################################################\n# Let's first grab GOES XRS data for a particular time of interest\ntr = TimeRange(['2011-06-07 04:00', '2011-06-07 12:00'])\nresults = Fido.search(a.Time(tr), a.Instrument.xrs)\n\n###############################################################################\n# Then download the data and load it into a 
TimeSeries\nfiles = Fido.fetch(results)\ngoes = TimeSeries(files)\n\n###############################################################################\n# Next lets grab the HEK flare data for this time from the NOAA Space Weather\n# Prediction Center (SWPC)\nclient = hek.HEKClient()\nflares_hek = client.search(hek.attrs.Time(tr.start, tr.end),\n hek.attrs.FL, hek.attrs.FRM.Name == 'SWPC')\n\n###############################################################################\n# Lets plot everything together\nfig, ax = plt.subplots()\ngoes.plot()\nax.axvline(parse_time(flares_hek[0].get('event_peaktime')).plot_date)\nax.axvspan(parse_time(flares_hek[0].get('event_starttime')).plot_date,\n parse_time(flares_hek[0].get('event_endtime')).plot_date,\n alpha=0.2, label=flares_hek[0].get('fl_goescls'))\nax.legend(loc=2)\nax.set_yscale('log')\nplt.show()\n", "path": "examples/time_series/goes_hek_m25.py"}], "after_files": [{"content": "\"\"\"\n==============================\nFlare times on a GOES XRS plot\n==============================\n\nHow to plot flare times as provided by the HEK on a GOES XRS plot.\n\"\"\"\nimport matplotlib.pyplot as plt\n\nfrom sunpy.net import Fido\nfrom sunpy.net import attrs as a\nfrom sunpy.net import hek\nfrom sunpy.time import TimeRange, parse_time\nfrom sunpy.timeseries import TimeSeries\n\n###############################################################################\n# Let's first grab GOES XRS data for a particular time of interest\ntr = TimeRange(['2011-06-07 04:00', '2011-06-07 12:00'])\nresults = Fido.search(a.Time(tr), a.Instrument.xrs)\n\n###############################################################################\n# Then download the data and load it into a TimeSeries\nfiles = Fido.fetch(results)\ngoes = TimeSeries(files)\n\n###############################################################################\n# Next lets grab the HEK flare data for this time from the NOAA Space Weather\n# Prediction Center (SWPC)\nclient = hek.HEKClient()\nflares_hek = client.search(hek.attrs.Time(tr.start, tr.end),\n hek.attrs.FL, hek.attrs.FRM.Name == 'SWPC')\n\n###############################################################################\n# Lets plot everything together\nfig, ax = plt.subplots()\ngoes.plot()\nax.axvline(parse_time(flares_hek[0].get('event_peaktime')).datetime)\nax.axvspan(parse_time(flares_hek[0].get('event_starttime')).datetime,\n parse_time(flares_hek[0].get('event_endtime')).datetime,\n alpha=0.2, label=flares_hek[0].get('fl_goescls'))\nax.legend(loc=2)\nax.set_yscale('log')\nplt.show()\n", "path": "examples/time_series/goes_hek_m25.py"}]} | 1,240 | 255 |
gh_patches_debug_13115 | rasdani/github-patches | git_diff | TheAlgorithms__Python-10394 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Improve our test coverage
### Feature description
Many of our existing algorithm files have little to no unit testing. This is problematic because this can easily let bugs slip through. We want some assurance that the code we currently have is correct and functional. We welcome all contributors to open PRs to help us add tests to our codebase.
### How to find low-coverage files
Go to the Actions tab in this repository and find the most recent **build** workflow run. Open the logs under "Run Tests" and scroll down until you find the section on code coverage:
```
---------- coverage: platform linux, python 3.12.0-final-0 -----------
Name Stmts Miss Cover Missing
-----------------------------------------------------------------------------------------------------------
quantum/q_fourier_transform.py 30 30 0% 14-93
scripts/validate_solutions.py 54 54 0% 2-94
strings/min_cost_string_conversion.py 78 75 4% 20-57, 61-75, 79-129
...
```
The "Cover" column tells you what percentage of the lines in that file are covered by tests. We want to increase this percentage for existing files. Find a file with low coverage percentage that you wish to write tests for, add doctests for each function, and open a PR with your changes. You do not need to have a perfect coverage percentage, but all functions should have doctests.
Some files will naturally be hard to write tests for. For example, the file may be poorly written because they lack any functions. Other files might be how-tos, meaning they simply demonstrate how to use an existing library's functions rather than implementing the algorithm themselves. Ignore these kinds of files, as they will need to be rewritten eventually. Furthermore, ignore files in the `web_programming` and `project_euler` directories. Web programming files are inherently hard to test and Project Euler files have their own validation workflow, so don't worry about their test coverage.
_**When you open your PR, put "Contributes to #9943" in the PR description.**_ Do not use the word "fixes", "resolves", or "closes". This issue is an ongoing one, and your PR will not single-handedly resolve this issue.
### How to add doctests
A doctest is a unit test that is contained within the documentation comment (docstring) for a function. Here is an example of what doctests look like within a docstring:
```py
def add(a: int, b: int) -> int:
"""
Adds two non-negative numbers.
>>> add(1, 1)
2
>>> add(2, 5)
7
>>> add(1, 0)
1
>>> add(-1, -1)
Traceback (most recent last):
...
ValueError: Numbers must be non-negative
"""
```
For every function in the file you choose, you should write doctests like the ones shown above in its docstring. If a function doesn't have a docstring, add one. Your doctests should be comprehensive but not excessive: you should write just enough tests to cover all basic cases as well as all edge cases (e.g., negative numbers, empty lists, etc).
Do not simply run a function on some example inputs and put its output as the expected output for a doctest. This assumes that the function is implemented correctly when it might not be. Verify independently that your doctests and their expected outputs are correct. **Your PR will not be merged if it has failing tests.** If you happen to discover a bug while writing doctests, please fix it.
_**Please read our [contributing guidelines](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md) before you contribute.**_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bit_manipulation/missing_number.py`
Content:
```
1 def find_missing_number(nums: list[int]) -> int:
2 """
3 Finds the missing number in a list of consecutive integers.
4
5 Args:
6 nums: A list of integers.
7
8 Returns:
9 The missing number.
10
11 Example:
12 >>> find_missing_number([0, 1, 3, 4])
13 2
14 >>> find_missing_number([1, 3, 4, 5, 6])
15 2
16 >>> find_missing_number([6, 5, 4, 2, 1])
17 3
18 >>> find_missing_number([6, 1, 5, 3, 4])
19 2
20 """
21 low = min(nums)
22 high = max(nums)
23 missing_number = high
24
25 for i in range(low, high):
26 missing_number ^= i ^ nums[i - low]
27
28 return missing_number
29
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bit_manipulation/missing_number.py b/bit_manipulation/missing_number.py
--- a/bit_manipulation/missing_number.py
+++ b/bit_manipulation/missing_number.py
@@ -11,6 +11,12 @@
Example:
>>> find_missing_number([0, 1, 3, 4])
2
+ >>> find_missing_number([4, 3, 1, 0])
+ 2
+ >>> find_missing_number([-4, -3, -1, 0])
+ -2
+ >>> find_missing_number([-2, 2, 1, 3, 0])
+ -1
>>> find_missing_number([1, 3, 4, 5, 6])
2
>>> find_missing_number([6, 5, 4, 2, 1])
@@ -26,3 +32,9 @@
missing_number ^= i ^ nums[i - low]
return missing_number
+
+
+if __name__ == "__main__":
+ import doctest
+
+ doctest.testmod()
| {"golden_diff": "diff --git a/bit_manipulation/missing_number.py b/bit_manipulation/missing_number.py\n--- a/bit_manipulation/missing_number.py\n+++ b/bit_manipulation/missing_number.py\n@@ -11,6 +11,12 @@\n Example:\n >>> find_missing_number([0, 1, 3, 4])\n 2\n+ >>> find_missing_number([4, 3, 1, 0])\n+ 2\n+ >>> find_missing_number([-4, -3, -1, 0])\n+ -2\n+ >>> find_missing_number([-2, 2, 1, 3, 0])\n+ -1\n >>> find_missing_number([1, 3, 4, 5, 6])\n 2\n >>> find_missing_number([6, 5, 4, 2, 1])\n@@ -26,3 +32,9 @@\n missing_number ^= i ^ nums[i - low]\n \n return missing_number\n+\n+\n+if __name__ == \"__main__\":\n+ import doctest\n+\n+ doctest.testmod()\n", "issue": "Improve our test coverage\n### Feature description\r\n\r\nMany of our existing algorithm files have little to no unit testing. This is problematic because this can easily let bugs slip through. We want some assurance that the code we currently have is correct and functional. We welcome all contributors to open PRs to help us add tests to our codebase.\r\n\r\n### How to find low-coverage files\r\n\r\nGo to the Actions tab in this repository and find the most recent **build** workflow run. Open the logs under \"Run Tests\" and scroll down until you find the section on code coverage:\r\n```\r\n---------- coverage: platform linux, python 3.12.0-final-0 -----------\r\nName Stmts Miss Cover Missing\r\n-----------------------------------------------------------------------------------------------------------\r\nquantum/q_fourier_transform.py 30 30 0% 14-93\r\nscripts/validate_solutions.py 54 54 0% 2-94\r\nstrings/min_cost_string_conversion.py 78 75 4% 20-57, 61-75, 79-129\r\n...\r\n```\r\nThe \"Cover\" column tells you what percentage of the lines in that file are covered by tests. We want to increase this percentage for existing files. Find a file with low coverage percentage that you wish to write tests for, add doctests for each function, and open a PR with your changes. You do not need to have a perfect coverage percentage, but all functions should have doctests.\r\n\r\nSome files will naturally be hard to write tests for. For example, the file may be poorly written because they lack any functions. Other files might be how-tos, meaning they simply demonstrate how to use an existing library's functions rather than implementing the algorithm themselves. Ignore these kinds of files, as they will need to be rewritten eventually. Furthermore, ignore files in the `web_programming` and `project_euler` directories. Web programming files are inherently hard to test and Project Euler files have their own validation workflow, so don't worry about their test coverage.\r\n\r\n_**When you open your PR, put \"Contributes to #9943\" in the PR description.**_ Do not use the word \"fixes\", \"resolves\", or \"closes\". This issue is an ongoing one, and your PR will not single-handedly resolve this issue.\r\n\r\n### How to add doctests\r\n\r\nA doctest is a unit test that is contained within the documentation comment (docstring) for a function. Here is an example of what doctests look like within a docstring:\r\n```py\r\ndef add(a: int, b: int) -> int:\r\n \"\"\"\r\n Adds two non-negative numbers.\r\n >>> add(1, 1)\r\n 2\r\n >>> add(2, 5)\r\n 7\r\n >>> add(1, 0)\r\n 1\r\n >>> add(-1, -1)\r\n Traceback (most recent last):\r\n ...\r\n ValueError: Numbers must be non-negative\r\n \"\"\"\r\n```\r\nFor every function in the file you choose, you should write doctests like the ones shown above in its docstring. 
If a function doesn't have a docstring, add one. Your doctests should be comprehensive but not excessive: you should write just enough tests to cover all basic cases as well as all edge cases (e.g., negative numbers, empty lists, etc).\r\n\r\nDo not simply run a function on some example inputs and put its output as the expected output for a doctest. This assumes that the function is implemented correctly when it might not be. Verify independently that your doctests and their expected outputs are correct. **Your PR will not be merged if it has failing tests.** If you happen to discover a bug while writing doctests, please fix it.\r\n\r\n_**Please read our [contributing guidelines](https://github.com/TheAlgorithms/Python/blob/master/CONTRIBUTING.md) before you contribute.**_\n", "before_files": [{"content": "def find_missing_number(nums: list[int]) -> int:\n \"\"\"\n Finds the missing number in a list of consecutive integers.\n\n Args:\n nums: A list of integers.\n\n Returns:\n The missing number.\n\n Example:\n >>> find_missing_number([0, 1, 3, 4])\n 2\n >>> find_missing_number([1, 3, 4, 5, 6])\n 2\n >>> find_missing_number([6, 5, 4, 2, 1])\n 3\n >>> find_missing_number([6, 1, 5, 3, 4])\n 2\n \"\"\"\n low = min(nums)\n high = max(nums)\n missing_number = high\n\n for i in range(low, high):\n missing_number ^= i ^ nums[i - low]\n\n return missing_number\n", "path": "bit_manipulation/missing_number.py"}], "after_files": [{"content": "def find_missing_number(nums: list[int]) -> int:\n \"\"\"\n Finds the missing number in a list of consecutive integers.\n\n Args:\n nums: A list of integers.\n\n Returns:\n The missing number.\n\n Example:\n >>> find_missing_number([0, 1, 3, 4])\n 2\n >>> find_missing_number([4, 3, 1, 0])\n 2\n >>> find_missing_number([-4, -3, -1, 0])\n -2\n >>> find_missing_number([-2, 2, 1, 3, 0])\n -1\n >>> find_missing_number([1, 3, 4, 5, 6])\n 2\n >>> find_missing_number([6, 5, 4, 2, 1])\n 3\n >>> find_missing_number([6, 1, 5, 3, 4])\n 2\n \"\"\"\n low = min(nums)\n high = max(nums)\n missing_number = high\n\n for i in range(low, high):\n missing_number ^= i ^ nums[i - low]\n\n return missing_number\n\n\nif __name__ == \"__main__\":\n import doctest\n\n doctest.testmod()\n", "path": "bit_manipulation/missing_number.py"}]} | 1,343 | 251 |
gh_patches_debug_26597 | rasdani/github-patches | git_diff | python-discord__site-577 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
SystemExit: 1
Sentry Issue: [SITE-1G](https://sentry.io/organizations/python-discord/issues/2623186847/?referrer=github_integration)
```
SystemExit: 1
(36 additional frame(s) were not displayed)
...
File "urllib3/connectionpool.py", line 1010, in _validate_conn
conn.connect()
File "urllib3/connection.py", line 353, in connect
conn = self._new_conn()
File "urllib3/connection.py", line 169, in _new_conn
conn = connection.create_connection(
File "urllib3/util/connection.py", line 86, in create_connection
sock.connect(sa)
File "gunicorn/workers/base.py", line 201, in handle_abort
sys.exit(1)
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pydis_site/constants.py`
Content:
```
1 import os
2
3 GIT_SHA = os.environ.get("GIT_SHA", "development")
4 GITHUB_TOKEN = os.environ.get("GITHUB_TOKEN")
5
```
Path: `pydis_site/apps/home/views/home.py`
Content:
```
1 import logging
2 from typing import Dict, List
3
4 import requests
5 from django.core.handlers.wsgi import WSGIRequest
6 from django.http import HttpResponse
7 from django.shortcuts import render
8 from django.utils import timezone
9 from django.views import View
10
11 from pydis_site.apps.home.models import RepositoryMetadata
12 from pydis_site.constants import GITHUB_TOKEN
13
14 log = logging.getLogger(__name__)
15
16
17 class HomeView(View):
18 """The main landing page for the website."""
19
20 github_api = "https://api.github.com/users/python-discord/repos?per_page=100"
21 repository_cache_ttl = 3600
22
23 # Which of our GitHub repos should be displayed on the front page, and in which order?
24 repos = [
25 "python-discord/site",
26 "python-discord/bot",
27 "python-discord/snekbox",
28 "python-discord/sir-lancebot",
29 "python-discord/metricity",
30 "python-discord/django-simple-bulma",
31 ]
32
33 def __init__(self):
34 """Clean up stale RepositoryMetadata."""
35 RepositoryMetadata.objects.exclude(repo_name__in=self.repos).delete()
36
37 # If no token is defined (for example in local development), then
38 # it does not make sense to pass the Authorization header. More
39 # specifically, GitHub will reject any requests from us due to the
40 # invalid header. We can make a limited number of anonymous requests
41 # though, which is useful for testing.
42 if GITHUB_TOKEN:
43 self.headers = {"Authorization": f"token {GITHUB_TOKEN}"}
44 else:
45 self.headers = {}
46
47 def _get_api_data(self) -> Dict[str, Dict[str, str]]:
48 """
49 Call the GitHub API and get information about our repos.
50
51 If we're unable to get that info for any reason, return an empty dict.
52 """
53 repo_dict = {}
54
55 # Fetch the data from the GitHub API
56 api_data: List[dict] = requests.get(self.github_api, headers=self.headers).json()
57
58 # Process the API data into our dict
59 for repo in api_data:
60 try:
61 full_name = repo["full_name"]
62
63 if full_name in self.repos:
64 repo_dict[full_name] = {
65 "full_name": repo["full_name"],
66 "description": repo["description"],
67 "language": repo["language"],
68 "forks_count": repo["forks_count"],
69 "stargazers_count": repo["stargazers_count"],
70 }
71 # Something is not right about the API data we got back from GitHub.
72 except (TypeError, ConnectionError, KeyError) as e:
73 log.error(
74 "Unable to parse the GitHub repository metadata from response!",
75 extra={
76 'api_data': api_data,
77 'error': e
78 }
79 )
80 continue
81
82 return repo_dict
83
84 def _get_repo_data(self) -> List[RepositoryMetadata]:
85 """Build a list of RepositoryMetadata objects that we can use to populate the front page."""
86 # First off, load the timestamp of the least recently updated entry.
87 last_update = (
88 RepositoryMetadata.objects.values_list("last_updated", flat=True)
89 .order_by("last_updated").first()
90 )
91
92 # If we did not retrieve any results here, we should import them!
93 if last_update is None:
94
95 # Try to get new data from the API. If it fails, we'll return an empty list.
96 # In this case, we simply don't display our projects on the site.
97 api_repositories = self._get_api_data()
98
99 # Create all the repodata records in the database.
100 return RepositoryMetadata.objects.bulk_create(
101 RepositoryMetadata(
102 repo_name=api_data["full_name"],
103 description=api_data["description"],
104 forks=api_data["forks_count"],
105 stargazers=api_data["stargazers_count"],
106 language=api_data["language"],
107 )
108 for api_data in api_repositories.values()
109 )
110
111 # If the data is stale, we should refresh it.
112 if (timezone.now() - last_update).seconds > self.repository_cache_ttl:
113 # Try to get new data from the API. If it fails, return the cached data.
114 api_repositories = self._get_api_data()
115
116 if not api_repositories:
117 return RepositoryMetadata.objects.all()
118
119 # Update or create all RepoData objects in self.repos
120 database_repositories = []
121 for api_data in api_repositories.values():
122 repo_data, _created = RepositoryMetadata.objects.update_or_create(
123 repo_name=api_data["full_name"],
124 defaults={
125 'repo_name': api_data["full_name"],
126 'description': api_data["description"],
127 'forks': api_data["forks_count"],
128 'stargazers': api_data["stargazers_count"],
129 'language': api_data["language"],
130 }
131 )
132 database_repositories.append(repo_data)
133 return database_repositories
134
135 # Otherwise, if the data is fresher than 2 minutes old, we should just return it.
136 else:
137 return RepositoryMetadata.objects.all()
138
139 def get(self, request: WSGIRequest) -> HttpResponse:
140 """Collect repo data and render the homepage view."""
141 repo_data = self._get_repo_data()
142 return render(request, "home/index.html", {"repo_data": repo_data})
143
144
145 def timeline(request: WSGIRequest) -> HttpResponse:
146 """Render timeline view."""
147 return render(request, 'home/timeline.html')
148
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pydis_site/apps/home/views/home.py b/pydis_site/apps/home/views/home.py
--- a/pydis_site/apps/home/views/home.py
+++ b/pydis_site/apps/home/views/home.py
@@ -9,7 +9,7 @@
from django.views import View
from pydis_site.apps.home.models import RepositoryMetadata
-from pydis_site.constants import GITHUB_TOKEN
+from pydis_site.constants import GITHUB_TOKEN, TIMEOUT_PERIOD
log = logging.getLogger(__name__)
@@ -51,9 +51,16 @@
If we're unable to get that info for any reason, return an empty dict.
"""
repo_dict = {}
-
- # Fetch the data from the GitHub API
- api_data: List[dict] = requests.get(self.github_api, headers=self.headers).json()
+ try:
+ # Fetch the data from the GitHub API
+ api_data: List[dict] = requests.get(
+ self.github_api,
+ headers=self.headers,
+ timeout=TIMEOUT_PERIOD
+ ).json()
+ except requests.exceptions.Timeout:
+ log.error("Request to fetch GitHub repository metadata for timed out!")
+ return repo_dict
# Process the API data into our dict
for repo in api_data:
diff --git a/pydis_site/constants.py b/pydis_site/constants.py
--- a/pydis_site/constants.py
+++ b/pydis_site/constants.py
@@ -2,3 +2,5 @@
GIT_SHA = os.environ.get("GIT_SHA", "development")
GITHUB_TOKEN = os.environ.get("GITHUB_TOKEN")
+# How long to wait for synchronous requests before timing out
+TIMEOUT_PERIOD = int(os.environ.get("TIMEOUT_PERIOD", 5))
| {"golden_diff": "diff --git a/pydis_site/apps/home/views/home.py b/pydis_site/apps/home/views/home.py\n--- a/pydis_site/apps/home/views/home.py\n+++ b/pydis_site/apps/home/views/home.py\n@@ -9,7 +9,7 @@\n from django.views import View\n \n from pydis_site.apps.home.models import RepositoryMetadata\n-from pydis_site.constants import GITHUB_TOKEN\n+from pydis_site.constants import GITHUB_TOKEN, TIMEOUT_PERIOD\n \n log = logging.getLogger(__name__)\n \n@@ -51,9 +51,16 @@\n If we're unable to get that info for any reason, return an empty dict.\n \"\"\"\n repo_dict = {}\n-\n- # Fetch the data from the GitHub API\n- api_data: List[dict] = requests.get(self.github_api, headers=self.headers).json()\n+ try:\n+ # Fetch the data from the GitHub API\n+ api_data: List[dict] = requests.get(\n+ self.github_api,\n+ headers=self.headers,\n+ timeout=TIMEOUT_PERIOD\n+ ).json()\n+ except requests.exceptions.Timeout:\n+ log.error(\"Request to fetch GitHub repository metadata for timed out!\")\n+ return repo_dict\n \n # Process the API data into our dict\n for repo in api_data:\ndiff --git a/pydis_site/constants.py b/pydis_site/constants.py\n--- a/pydis_site/constants.py\n+++ b/pydis_site/constants.py\n@@ -2,3 +2,5 @@\n \n GIT_SHA = os.environ.get(\"GIT_SHA\", \"development\")\n GITHUB_TOKEN = os.environ.get(\"GITHUB_TOKEN\")\n+# How long to wait for synchronous requests before timing out\n+TIMEOUT_PERIOD = int(os.environ.get(\"TIMEOUT_PERIOD\", 5))\n", "issue": "SystemExit: 1\nSentry Issue: [SITE-1G](https://sentry.io/organizations/python-discord/issues/2623186847/?referrer=github_integration)\n\n```\nSystemExit: 1\n(36 additional frame(s) were not displayed)\n...\n File \"urllib3/connectionpool.py\", line 1010, in _validate_conn\n conn.connect()\n File \"urllib3/connection.py\", line 353, in connect\n conn = self._new_conn()\n File \"urllib3/connection.py\", line 169, in _new_conn\n conn = connection.create_connection(\n File \"urllib3/util/connection.py\", line 86, in create_connection\n sock.connect(sa)\n File \"gunicorn/workers/base.py\", line 201, in handle_abort\n sys.exit(1)\n```\n", "before_files": [{"content": "import os\n\nGIT_SHA = os.environ.get(\"GIT_SHA\", \"development\")\nGITHUB_TOKEN = os.environ.get(\"GITHUB_TOKEN\")\n", "path": "pydis_site/constants.py"}, {"content": "import logging\nfrom typing import Dict, List\n\nimport requests\nfrom django.core.handlers.wsgi import WSGIRequest\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom django.utils import timezone\nfrom django.views import View\n\nfrom pydis_site.apps.home.models import RepositoryMetadata\nfrom pydis_site.constants import GITHUB_TOKEN\n\nlog = logging.getLogger(__name__)\n\n\nclass HomeView(View):\n \"\"\"The main landing page for the website.\"\"\"\n\n github_api = \"https://api.github.com/users/python-discord/repos?per_page=100\"\n repository_cache_ttl = 3600\n\n # Which of our GitHub repos should be displayed on the front page, and in which order?\n repos = [\n \"python-discord/site\",\n \"python-discord/bot\",\n \"python-discord/snekbox\",\n \"python-discord/sir-lancebot\",\n \"python-discord/metricity\",\n \"python-discord/django-simple-bulma\",\n ]\n\n def __init__(self):\n \"\"\"Clean up stale RepositoryMetadata.\"\"\"\n RepositoryMetadata.objects.exclude(repo_name__in=self.repos).delete()\n\n # If no token is defined (for example in local development), then\n # it does not make sense to pass the Authorization header. 
More\n # specifically, GitHub will reject any requests from us due to the\n # invalid header. We can make a limited number of anonymous requests\n # though, which is useful for testing.\n if GITHUB_TOKEN:\n self.headers = {\"Authorization\": f\"token {GITHUB_TOKEN}\"}\n else:\n self.headers = {}\n\n def _get_api_data(self) -> Dict[str, Dict[str, str]]:\n \"\"\"\n Call the GitHub API and get information about our repos.\n\n If we're unable to get that info for any reason, return an empty dict.\n \"\"\"\n repo_dict = {}\n\n # Fetch the data from the GitHub API\n api_data: List[dict] = requests.get(self.github_api, headers=self.headers).json()\n\n # Process the API data into our dict\n for repo in api_data:\n try:\n full_name = repo[\"full_name\"]\n\n if full_name in self.repos:\n repo_dict[full_name] = {\n \"full_name\": repo[\"full_name\"],\n \"description\": repo[\"description\"],\n \"language\": repo[\"language\"],\n \"forks_count\": repo[\"forks_count\"],\n \"stargazers_count\": repo[\"stargazers_count\"],\n }\n # Something is not right about the API data we got back from GitHub.\n except (TypeError, ConnectionError, KeyError) as e:\n log.error(\n \"Unable to parse the GitHub repository metadata from response!\",\n extra={\n 'api_data': api_data,\n 'error': e\n }\n )\n continue\n\n return repo_dict\n\n def _get_repo_data(self) -> List[RepositoryMetadata]:\n \"\"\"Build a list of RepositoryMetadata objects that we can use to populate the front page.\"\"\"\n # First off, load the timestamp of the least recently updated entry.\n last_update = (\n RepositoryMetadata.objects.values_list(\"last_updated\", flat=True)\n .order_by(\"last_updated\").first()\n )\n\n # If we did not retrieve any results here, we should import them!\n if last_update is None:\n\n # Try to get new data from the API. If it fails, we'll return an empty list.\n # In this case, we simply don't display our projects on the site.\n api_repositories = self._get_api_data()\n\n # Create all the repodata records in the database.\n return RepositoryMetadata.objects.bulk_create(\n RepositoryMetadata(\n repo_name=api_data[\"full_name\"],\n description=api_data[\"description\"],\n forks=api_data[\"forks_count\"],\n stargazers=api_data[\"stargazers_count\"],\n language=api_data[\"language\"],\n )\n for api_data in api_repositories.values()\n )\n\n # If the data is stale, we should refresh it.\n if (timezone.now() - last_update).seconds > self.repository_cache_ttl:\n # Try to get new data from the API. 
If it fails, return the cached data.\n api_repositories = self._get_api_data()\n\n if not api_repositories:\n return RepositoryMetadata.objects.all()\n\n # Update or create all RepoData objects in self.repos\n database_repositories = []\n for api_data in api_repositories.values():\n repo_data, _created = RepositoryMetadata.objects.update_or_create(\n repo_name=api_data[\"full_name\"],\n defaults={\n 'repo_name': api_data[\"full_name\"],\n 'description': api_data[\"description\"],\n 'forks': api_data[\"forks_count\"],\n 'stargazers': api_data[\"stargazers_count\"],\n 'language': api_data[\"language\"],\n }\n )\n database_repositories.append(repo_data)\n return database_repositories\n\n # Otherwise, if the data is fresher than 2 minutes old, we should just return it.\n else:\n return RepositoryMetadata.objects.all()\n\n def get(self, request: WSGIRequest) -> HttpResponse:\n \"\"\"Collect repo data and render the homepage view.\"\"\"\n repo_data = self._get_repo_data()\n return render(request, \"home/index.html\", {\"repo_data\": repo_data})\n\n\ndef timeline(request: WSGIRequest) -> HttpResponse:\n \"\"\"Render timeline view.\"\"\"\n return render(request, 'home/timeline.html')\n", "path": "pydis_site/apps/home/views/home.py"}], "after_files": [{"content": "import os\n\nGIT_SHA = os.environ.get(\"GIT_SHA\", \"development\")\nGITHUB_TOKEN = os.environ.get(\"GITHUB_TOKEN\")\n# How long to wait for synchronous requests before timing out\nTIMEOUT_PERIOD = int(os.environ.get(\"TIMEOUT_PERIOD\", 5))\n", "path": "pydis_site/constants.py"}, {"content": "import logging\nfrom typing import Dict, List\n\nimport requests\nfrom django.core.handlers.wsgi import WSGIRequest\nfrom django.http import HttpResponse\nfrom django.shortcuts import render\nfrom django.utils import timezone\nfrom django.views import View\n\nfrom pydis_site.apps.home.models import RepositoryMetadata\nfrom pydis_site.constants import GITHUB_TOKEN, TIMEOUT_PERIOD\n\nlog = logging.getLogger(__name__)\n\n\nclass HomeView(View):\n \"\"\"The main landing page for the website.\"\"\"\n\n github_api = \"https://api.github.com/users/python-discord/repos?per_page=100\"\n repository_cache_ttl = 3600\n\n # Which of our GitHub repos should be displayed on the front page, and in which order?\n repos = [\n \"python-discord/site\",\n \"python-discord/bot\",\n \"python-discord/snekbox\",\n \"python-discord/sir-lancebot\",\n \"python-discord/metricity\",\n \"python-discord/django-simple-bulma\",\n ]\n\n def __init__(self):\n \"\"\"Clean up stale RepositoryMetadata.\"\"\"\n RepositoryMetadata.objects.exclude(repo_name__in=self.repos).delete()\n\n # If no token is defined (for example in local development), then\n # it does not make sense to pass the Authorization header. More\n # specifically, GitHub will reject any requests from us due to the\n # invalid header. 
We can make a limited number of anonymous requests\n # though, which is useful for testing.\n if GITHUB_TOKEN:\n self.headers = {\"Authorization\": f\"token {GITHUB_TOKEN}\"}\n else:\n self.headers = {}\n\n def _get_api_data(self) -> Dict[str, Dict[str, str]]:\n \"\"\"\n Call the GitHub API and get information about our repos.\n\n If we're unable to get that info for any reason, return an empty dict.\n \"\"\"\n repo_dict = {}\n try:\n # Fetch the data from the GitHub API\n api_data: List[dict] = requests.get(\n self.github_api,\n headers=self.headers,\n timeout=TIMEOUT_PERIOD\n ).json()\n except requests.exceptions.Timeout:\n log.error(\"Request to fetch GitHub repository metadata for timed out!\")\n return repo_dict\n\n # Process the API data into our dict\n for repo in api_data:\n try:\n full_name = repo[\"full_name\"]\n\n if full_name in self.repos:\n repo_dict[full_name] = {\n \"full_name\": repo[\"full_name\"],\n \"description\": repo[\"description\"],\n \"language\": repo[\"language\"],\n \"forks_count\": repo[\"forks_count\"],\n \"stargazers_count\": repo[\"stargazers_count\"],\n }\n # Something is not right about the API data we got back from GitHub.\n except (TypeError, ConnectionError, KeyError) as e:\n log.error(\n \"Unable to parse the GitHub repository metadata from response!\",\n extra={\n 'api_data': api_data,\n 'error': e\n }\n )\n continue\n\n return repo_dict\n\n def _get_repo_data(self) -> List[RepositoryMetadata]:\n \"\"\"Build a list of RepositoryMetadata objects that we can use to populate the front page.\"\"\"\n # First off, load the timestamp of the least recently updated entry.\n last_update = (\n RepositoryMetadata.objects.values_list(\"last_updated\", flat=True)\n .order_by(\"last_updated\").first()\n )\n\n # If we did not retrieve any results here, we should import them!\n if last_update is None:\n\n # Try to get new data from the API. If it fails, we'll return an empty list.\n # In this case, we simply don't display our projects on the site.\n api_repositories = self._get_api_data()\n\n # Create all the repodata records in the database.\n return RepositoryMetadata.objects.bulk_create(\n RepositoryMetadata(\n repo_name=api_data[\"full_name\"],\n description=api_data[\"description\"],\n forks=api_data[\"forks_count\"],\n stargazers=api_data[\"stargazers_count\"],\n language=api_data[\"language\"],\n )\n for api_data in api_repositories.values()\n )\n\n # If the data is stale, we should refresh it.\n if (timezone.now() - last_update).seconds > self.repository_cache_ttl:\n # Try to get new data from the API. 
If it fails, return the cached data.\n api_repositories = self._get_api_data()\n\n if not api_repositories:\n return RepositoryMetadata.objects.all()\n\n # Update or create all RepoData objects in self.repos\n database_repositories = []\n for api_data in api_repositories.values():\n repo_data, _created = RepositoryMetadata.objects.update_or_create(\n repo_name=api_data[\"full_name\"],\n defaults={\n 'repo_name': api_data[\"full_name\"],\n 'description': api_data[\"description\"],\n 'forks': api_data[\"forks_count\"],\n 'stargazers': api_data[\"stargazers_count\"],\n 'language': api_data[\"language\"],\n }\n )\n database_repositories.append(repo_data)\n return database_repositories\n\n # Otherwise, if the data is fresher than 2 minutes old, we should just return it.\n else:\n return RepositoryMetadata.objects.all()\n\n def get(self, request: WSGIRequest) -> HttpResponse:\n \"\"\"Collect repo data and render the homepage view.\"\"\"\n repo_data = self._get_repo_data()\n return render(request, \"home/index.html\", {\"repo_data\": repo_data})\n\n\ndef timeline(request: WSGIRequest) -> HttpResponse:\n \"\"\"Render timeline view.\"\"\"\n return render(request, 'home/timeline.html')\n", "path": "pydis_site/apps/home/views/home.py"}]} | 2,027 | 370 |
gh_patches_debug_18480 | rasdani/github-patches | git_diff | litestar-org__litestar-1718 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Docs: template autoescaping behavior
### Summary
I appreciate this framework having a built-in choice between Jinja and Mako. The documentation however makes no mention of a significant difference in the Litestar behavior between the two -- that using the Jinja engine will autoescape for you, whereas Mako will not.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `litestar/contrib/mako.py`
Content:
```
1 from __future__ import annotations
2
3 from functools import partial
4 from typing import TYPE_CHECKING, Any, Callable
5
6 from litestar.exceptions import MissingDependencyException, TemplateNotFoundException
7 from litestar.template.base import (
8 TemplateEngineProtocol,
9 TemplateProtocol,
10 csrf_token,
11 url_for,
12 url_for_static_asset,
13 )
14
15 __all__ = ("MakoTemplate", "MakoTemplateEngine")
16
17
18 try:
19 import mako # noqa: F401
20 except ImportError as e:
21 raise MissingDependencyException("mako") from e
22
23
24 from mako.exceptions import TemplateLookupException as MakoTemplateNotFound
25 from mako.lookup import TemplateLookup
26
27 if TYPE_CHECKING:
28 from mako.template import Template as _MakoTemplate
29 from pydantic import DirectoryPath
30
31
32 class MakoTemplate(TemplateProtocol):
33 """Mako template, implementing ``TemplateProtocol``"""
34
35 def __init__(self, template: _MakoTemplate, template_callables: list[tuple[str, Callable[[dict[str, Any]], Any]]]):
36 """Initialize a template.
37
38 Args:
39 template: Base ``MakoTemplate`` used by the underlying mako-engine
40 template_callables: List of callables passed to the template
41 """
42 super().__init__()
43 self.template = template
44 self.template_callables = template_callables
45
46 def render(self, *args: Any, **kwargs: Any) -> str:
47 """Render a template.
48
49 Args:
50 args: Positional arguments passed to the engines ``render`` function
51 kwargs: Keyword arguments passed to the engines ``render`` function
52
53 Returns:
54 Rendered template as a string
55 """
56 for callable_key, template_callable in self.template_callables:
57 kwargs_copy = {**kwargs}
58 kwargs[callable_key] = partial(template_callable, kwargs_copy)
59
60 return str(self.template.render(*args, **kwargs))
61
62
63 class MakoTemplateEngine(TemplateEngineProtocol[MakoTemplate]):
64 """Mako based TemplateEngine."""
65
66 def __init__(self, directory: DirectoryPath | list[DirectoryPath]) -> None:
67 """Initialize template engine.
68
69 Args:
70 directory: Direct path or list of directory paths from which to serve templates.
71 """
72 super().__init__(directory=directory)
73 self.engine = TemplateLookup(directories=directory if isinstance(directory, (list, tuple)) else [directory])
74 self._template_callables: list[tuple[str, Callable[[dict[str, Any]], Any]]] = []
75 self.register_template_callable(key="url_for_static_asset", template_callable=url_for_static_asset) # type: ignore
76 self.register_template_callable(key="csrf_token", template_callable=csrf_token) # type: ignore
77 self.register_template_callable(key="url_for", template_callable=url_for) # type: ignore
78
79 def get_template(self, template_name: str) -> MakoTemplate:
80 """Retrieve a template by matching its name (dotted path) with files in the directory or directories provided.
81
82 Args:
83 template_name: A dotted path
84
85 Returns:
86 MakoTemplate instance
87
88 Raises:
89 TemplateNotFoundException: if no template is found.
90 """
91 try:
92 return MakoTemplate(
93 template=self.engine.get_template(template_name), template_callables=self._template_callables
94 )
95 except MakoTemplateNotFound as exc:
96 raise TemplateNotFoundException(template_name=template_name) from exc
97
98 def register_template_callable(self, key: str, template_callable: Callable[[dict[str, Any]], Any]) -> None:
99 """Register a callable on the template engine.
100
101 Args:
102 key: The callable key, i.e. the value to use inside the template to call the callable.
103 template_callable: A callable to register.
104
105 Returns:
106 None
107 """
108 self._template_callables.append((key, template_callable))
109
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/litestar/contrib/mako.py b/litestar/contrib/mako.py
--- a/litestar/contrib/mako.py
+++ b/litestar/contrib/mako.py
@@ -70,7 +70,9 @@
directory: Direct path or list of directory paths from which to serve templates.
"""
super().__init__(directory=directory)
- self.engine = TemplateLookup(directories=directory if isinstance(directory, (list, tuple)) else [directory])
+ self.engine = TemplateLookup(
+ directories=directory if isinstance(directory, (list, tuple)) else [directory], default_filters=["h"]
+ )
self._template_callables: list[tuple[str, Callable[[dict[str, Any]], Any]]] = []
self.register_template_callable(key="url_for_static_asset", template_callable=url_for_static_asset) # type: ignore
self.register_template_callable(key="csrf_token", template_callable=csrf_token) # type: ignore
| {"golden_diff": "diff --git a/litestar/contrib/mako.py b/litestar/contrib/mako.py\n--- a/litestar/contrib/mako.py\n+++ b/litestar/contrib/mako.py\n@@ -70,7 +70,9 @@\n directory: Direct path or list of directory paths from which to serve templates.\n \"\"\"\n super().__init__(directory=directory)\n- self.engine = TemplateLookup(directories=directory if isinstance(directory, (list, tuple)) else [directory])\n+ self.engine = TemplateLookup(\n+ directories=directory if isinstance(directory, (list, tuple)) else [directory], default_filters=[\"h\"]\n+ )\n self._template_callables: list[tuple[str, Callable[[dict[str, Any]], Any]]] = []\n self.register_template_callable(key=\"url_for_static_asset\", template_callable=url_for_static_asset) # type: ignore\n self.register_template_callable(key=\"csrf_token\", template_callable=csrf_token) # type: ignore\n", "issue": "Docs: template autoescaping behavior\n### Summary\r\n\r\nI appreciate this framework having a built-in choice between Jinja and Mako. The documentation however makes no mention of a significant difference in the Litestar behavior between the two -- that using the Jinja engine will autoescape for you, whereas Mako will not. \n", "before_files": [{"content": "from __future__ import annotations\n\nfrom functools import partial\nfrom typing import TYPE_CHECKING, Any, Callable\n\nfrom litestar.exceptions import MissingDependencyException, TemplateNotFoundException\nfrom litestar.template.base import (\n TemplateEngineProtocol,\n TemplateProtocol,\n csrf_token,\n url_for,\n url_for_static_asset,\n)\n\n__all__ = (\"MakoTemplate\", \"MakoTemplateEngine\")\n\n\ntry:\n import mako # noqa: F401\nexcept ImportError as e:\n raise MissingDependencyException(\"mako\") from e\n\n\nfrom mako.exceptions import TemplateLookupException as MakoTemplateNotFound\nfrom mako.lookup import TemplateLookup\n\nif TYPE_CHECKING:\n from mako.template import Template as _MakoTemplate\n from pydantic import DirectoryPath\n\n\nclass MakoTemplate(TemplateProtocol):\n \"\"\"Mako template, implementing ``TemplateProtocol``\"\"\"\n\n def __init__(self, template: _MakoTemplate, template_callables: list[tuple[str, Callable[[dict[str, Any]], Any]]]):\n \"\"\"Initialize a template.\n\n Args:\n template: Base ``MakoTemplate`` used by the underlying mako-engine\n template_callables: List of callables passed to the template\n \"\"\"\n super().__init__()\n self.template = template\n self.template_callables = template_callables\n\n def render(self, *args: Any, **kwargs: Any) -> str:\n \"\"\"Render a template.\n\n Args:\n args: Positional arguments passed to the engines ``render`` function\n kwargs: Keyword arguments passed to the engines ``render`` function\n\n Returns:\n Rendered template as a string\n \"\"\"\n for callable_key, template_callable in self.template_callables:\n kwargs_copy = {**kwargs}\n kwargs[callable_key] = partial(template_callable, kwargs_copy)\n\n return str(self.template.render(*args, **kwargs))\n\n\nclass MakoTemplateEngine(TemplateEngineProtocol[MakoTemplate]):\n \"\"\"Mako based TemplateEngine.\"\"\"\n\n def __init__(self, directory: DirectoryPath | list[DirectoryPath]) -> None:\n \"\"\"Initialize template engine.\n\n Args:\n directory: Direct path or list of directory paths from which to serve templates.\n \"\"\"\n super().__init__(directory=directory)\n self.engine = TemplateLookup(directories=directory if isinstance(directory, (list, tuple)) else [directory])\n self._template_callables: list[tuple[str, Callable[[dict[str, Any]], Any]]] = []\n 
self.register_template_callable(key=\"url_for_static_asset\", template_callable=url_for_static_asset) # type: ignore\n self.register_template_callable(key=\"csrf_token\", template_callable=csrf_token) # type: ignore\n self.register_template_callable(key=\"url_for\", template_callable=url_for) # type: ignore\n\n def get_template(self, template_name: str) -> MakoTemplate:\n \"\"\"Retrieve a template by matching its name (dotted path) with files in the directory or directories provided.\n\n Args:\n template_name: A dotted path\n\n Returns:\n MakoTemplate instance\n\n Raises:\n TemplateNotFoundException: if no template is found.\n \"\"\"\n try:\n return MakoTemplate(\n template=self.engine.get_template(template_name), template_callables=self._template_callables\n )\n except MakoTemplateNotFound as exc:\n raise TemplateNotFoundException(template_name=template_name) from exc\n\n def register_template_callable(self, key: str, template_callable: Callable[[dict[str, Any]], Any]) -> None:\n \"\"\"Register a callable on the template engine.\n\n Args:\n key: The callable key, i.e. the value to use inside the template to call the callable.\n template_callable: A callable to register.\n\n Returns:\n None\n \"\"\"\n self._template_callables.append((key, template_callable))\n", "path": "litestar/contrib/mako.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom functools import partial\nfrom typing import TYPE_CHECKING, Any, Callable\n\nfrom litestar.exceptions import MissingDependencyException, TemplateNotFoundException\nfrom litestar.template.base import (\n TemplateEngineProtocol,\n TemplateProtocol,\n csrf_token,\n url_for,\n url_for_static_asset,\n)\n\n__all__ = (\"MakoTemplate\", \"MakoTemplateEngine\")\n\n\ntry:\n import mako # noqa: F401\nexcept ImportError as e:\n raise MissingDependencyException(\"mako\") from e\n\n\nfrom mako.exceptions import TemplateLookupException as MakoTemplateNotFound\nfrom mako.lookup import TemplateLookup\n\nif TYPE_CHECKING:\n from mako.template import Template as _MakoTemplate\n from pydantic import DirectoryPath\n\n\nclass MakoTemplate(TemplateProtocol):\n \"\"\"Mako template, implementing ``TemplateProtocol``\"\"\"\n\n def __init__(self, template: _MakoTemplate, template_callables: list[tuple[str, Callable[[dict[str, Any]], Any]]]):\n \"\"\"Initialize a template.\n\n Args:\n template: Base ``MakoTemplate`` used by the underlying mako-engine\n template_callables: List of callables passed to the template\n \"\"\"\n super().__init__()\n self.template = template\n self.template_callables = template_callables\n\n def render(self, *args: Any, **kwargs: Any) -> str:\n \"\"\"Render a template.\n\n Args:\n args: Positional arguments passed to the engines ``render`` function\n kwargs: Keyword arguments passed to the engines ``render`` function\n\n Returns:\n Rendered template as a string\n \"\"\"\n for callable_key, template_callable in self.template_callables:\n kwargs_copy = {**kwargs}\n kwargs[callable_key] = partial(template_callable, kwargs_copy)\n\n return str(self.template.render(*args, **kwargs))\n\n\nclass MakoTemplateEngine(TemplateEngineProtocol[MakoTemplate]):\n \"\"\"Mako based TemplateEngine.\"\"\"\n\n def __init__(self, directory: DirectoryPath | list[DirectoryPath]) -> None:\n \"\"\"Initialize template engine.\n\n Args:\n directory: Direct path or list of directory paths from which to serve templates.\n \"\"\"\n super().__init__(directory=directory)\n self.engine = TemplateLookup(\n directories=directory if isinstance(directory, (list, 
tuple)) else [directory], default_filters=[\"h\"]\n )\n self._template_callables: list[tuple[str, Callable[[dict[str, Any]], Any]]] = []\n self.register_template_callable(key=\"url_for_static_asset\", template_callable=url_for_static_asset) # type: ignore\n self.register_template_callable(key=\"csrf_token\", template_callable=csrf_token) # type: ignore\n self.register_template_callable(key=\"url_for\", template_callable=url_for) # type: ignore\n\n def get_template(self, template_name: str) -> MakoTemplate:\n \"\"\"Retrieve a template by matching its name (dotted path) with files in the directory or directories provided.\n\n Args:\n template_name: A dotted path\n\n Returns:\n MakoTemplate instance\n\n Raises:\n TemplateNotFoundException: if no template is found.\n \"\"\"\n try:\n return MakoTemplate(\n template=self.engine.get_template(template_name), template_callables=self._template_callables\n )\n except MakoTemplateNotFound as exc:\n raise TemplateNotFoundException(template_name=template_name) from exc\n\n def register_template_callable(self, key: str, template_callable: Callable[[dict[str, Any]], Any]) -> None:\n \"\"\"Register a callable on the template engine.\n\n Args:\n key: The callable key, i.e. the value to use inside the template to call the callable.\n template_callable: A callable to register.\n\n Returns:\n None\n \"\"\"\n self._template_callables.append((key, template_callable))\n", "path": "litestar/contrib/mako.py"}]} | 1,375 | 213 |
gh_patches_debug_27060 | rasdani/github-patches | git_diff | svthalia__concrexit-1399 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Documents page broken
Steps to reproduce:
1. Go to https://thalia.nu/association/documents/
Sentry Issue: [CONCREXIT-4E](https://sentry.io/organizations/thalia/issues/2020405926/?referrer=github_integration)
```
FieldError: Cannot resolve keyword 'name_en' into field. Choices are: annualdocument, category, created, event, eventdocument, file, generalmeeting, id, last_updated, members_only, minutes, name
(5 additional frame(s) were not displayed)
...
File "documents/views.py", line 54, in get_context_data
"association_documents": AssociationDocument.objects.order_by(
File "django/db/models/manager.py", line 85, in manager_method
return getattr(self.get_queryset(), name)(*args, **kwargs)
File "django/db/models/query.py", line 1134, in order_by
obj.query.add_ordering(*field_names)
File "django/db/models/sql/query.py", line 1919, in add_ordering
self.names_to_path(item.split(LOOKUP_SEP), self.model._meta)
File "django/db/models/sql/query.py", line 1481, in names_to_path
raise FieldError("Cannot resolve keyword '%s' into field. "
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/documents/views.py`
Content:
```
1 """Views provided by the documents package"""
2 import os
3
4 from django.conf import settings
5 from django.core.exceptions import PermissionDenied
6 from django.http import Http404, HttpResponse
7 from django.shortcuts import redirect
8 from django.utils import timezone
9 from django.utils.text import slugify
10 from django.utils.translation import get_language
11 from django.views.generic import TemplateView, DetailView
12 from django_sendfile import sendfile
13
14 from documents.models import (
15 AnnualDocument,
16 AssociationDocument,
17 GeneralMeeting,
18 Document,
19 )
20 from utils.snippets import datetime_to_lectureyear
21
22
23 class DocumentsIndexView(TemplateView):
24 """
25 View that renders the documents index page
26 """
27
28 template_name = "documents/index.html"
29
30 def get_context_data(self, **kwargs) -> dict:
31 lecture_year = datetime_to_lectureyear(timezone.now())
32
33 years = {x: {} for x in reversed(range(1990, lecture_year + 1))}
34 for year in years:
35 years[year] = {
36 "documents": {"policy": None, "report": None, "financial": None},
37 "general_meetings": [],
38 }
39
40 for document in AnnualDocument.objects.filter(subcategory="policy"):
41 years[document.year]["documents"]["policy"] = document
42 for document in AnnualDocument.objects.filter(subcategory="report"):
43 years[document.year]["documents"]["report"] = document
44 for document in AnnualDocument.objects.filter(subcategory="financial"):
45 years[document.year]["documents"]["financial"] = document
46
47 for obj in GeneralMeeting.objects.all():
48 meeting_year = datetime_to_lectureyear(obj.datetime)
49 years[meeting_year]["general_meetings"].append(obj)
50
51 context = super().get_context_data(**kwargs)
52 context.update(
53 {
54 "association_documents": AssociationDocument.objects.order_by(
55 f"name_{get_language()}"
56 ).all(),
57 "years": list(years.items()),
58 }
59 )
60 return context
61
62
63 class DocumentDownloadView(DetailView):
64 """
65 View that allows you to download a specific document based on it's and your
66 permissions settings
67 """
68
69 model = Document
70
71 def get(self, request, *args, **kwargs) -> HttpResponse:
72 """
73 :return: either a 302 redirect to the login page or
74 a 200 with the document
75 """
76 response = super().get(request, *args, **kwargs)
77 document = response.context_data["document"]
78
79 if document.members_only and not request.user.is_authenticated:
80 return redirect("{}?next={}".format(settings.LOGIN_URL, request.path))
81 if document.members_only and not request.member.has_active_membership():
82 raise PermissionDenied
83
84 lang = request.GET.get("language")
85 try:
86 if lang == "en":
87 file = document.file_en
88 else: # Fall back on language detection
89 file = document.file
90 except ValueError as e:
91 raise Http404("This document does not exist.") from e
92
93 ext = os.path.splitext(file.path)[1]
94
95 return sendfile(
96 request,
97 file.path,
98 attachment=True,
99 attachment_filename=slugify(document.name) + ext,
100 )
101
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/website/documents/views.py b/website/documents/views.py
--- a/website/documents/views.py
+++ b/website/documents/views.py
@@ -7,7 +7,6 @@
from django.shortcuts import redirect
from django.utils import timezone
from django.utils.text import slugify
-from django.utils.translation import get_language
from django.views.generic import TemplateView, DetailView
from django_sendfile import sendfile
@@ -52,7 +51,7 @@
context.update(
{
"association_documents": AssociationDocument.objects.order_by(
- f"name_{get_language()}"
+ "name"
).all(),
"years": list(years.items()),
}
@@ -81,12 +80,8 @@
if document.members_only and not request.member.has_active_membership():
raise PermissionDenied
- lang = request.GET.get("language")
try:
- if lang == "en":
- file = document.file_en
- else: # Fall back on language detection
- file = document.file
+ file = document.file
except ValueError as e:
raise Http404("This document does not exist.") from e
| {"golden_diff": "diff --git a/website/documents/views.py b/website/documents/views.py\n--- a/website/documents/views.py\n+++ b/website/documents/views.py\n@@ -7,7 +7,6 @@\n from django.shortcuts import redirect\n from django.utils import timezone\n from django.utils.text import slugify\n-from django.utils.translation import get_language\n from django.views.generic import TemplateView, DetailView\n from django_sendfile import sendfile\n \n@@ -52,7 +51,7 @@\n context.update(\n {\n \"association_documents\": AssociationDocument.objects.order_by(\n- f\"name_{get_language()}\"\n+ \"name\"\n ).all(),\n \"years\": list(years.items()),\n }\n@@ -81,12 +80,8 @@\n if document.members_only and not request.member.has_active_membership():\n raise PermissionDenied\n \n- lang = request.GET.get(\"language\")\n try:\n- if lang == \"en\":\n- file = document.file_en\n- else: # Fall back on language detection\n- file = document.file\n+ file = document.file\n except ValueError as e:\n raise Http404(\"This document does not exist.\") from e\n", "issue": "Documents page broken\nSteps to reproduce:\n1. Go to https://thalia.nu/association/documents/\n\n\nSentry Issue: [CONCREXIT-4E](https://sentry.io/organizations/thalia/issues/2020405926/?referrer=github_integration)\n\n```\nFieldError: Cannot resolve keyword 'name_en' into field. Choices are: annualdocument, category, created, event, eventdocument, file, generalmeeting, id, last_updated, members_only, minutes, name\n(5 additional frame(s) were not displayed)\n...\n File \"documents/views.py\", line 54, in get_context_data\n \"association_documents\": AssociationDocument.objects.order_by(\n File \"django/db/models/manager.py\", line 85, in manager_method\n return getattr(self.get_queryset(), name)(*args, **kwargs)\n File \"django/db/models/query.py\", line 1134, in order_by\n obj.query.add_ordering(*field_names)\n File \"django/db/models/sql/query.py\", line 1919, in add_ordering\n self.names_to_path(item.split(LOOKUP_SEP), self.model._meta)\n File \"django/db/models/sql/query.py\", line 1481, in names_to_path\n raise FieldError(\"Cannot resolve keyword '%s' into field. 
\"\n```\n", "before_files": [{"content": "\"\"\"Views provided by the documents package\"\"\"\nimport os\n\nfrom django.conf import settings\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import Http404, HttpResponse\nfrom django.shortcuts import redirect\nfrom django.utils import timezone\nfrom django.utils.text import slugify\nfrom django.utils.translation import get_language\nfrom django.views.generic import TemplateView, DetailView\nfrom django_sendfile import sendfile\n\nfrom documents.models import (\n AnnualDocument,\n AssociationDocument,\n GeneralMeeting,\n Document,\n)\nfrom utils.snippets import datetime_to_lectureyear\n\n\nclass DocumentsIndexView(TemplateView):\n \"\"\"\n View that renders the documents index page\n \"\"\"\n\n template_name = \"documents/index.html\"\n\n def get_context_data(self, **kwargs) -> dict:\n lecture_year = datetime_to_lectureyear(timezone.now())\n\n years = {x: {} for x in reversed(range(1990, lecture_year + 1))}\n for year in years:\n years[year] = {\n \"documents\": {\"policy\": None, \"report\": None, \"financial\": None},\n \"general_meetings\": [],\n }\n\n for document in AnnualDocument.objects.filter(subcategory=\"policy\"):\n years[document.year][\"documents\"][\"policy\"] = document\n for document in AnnualDocument.objects.filter(subcategory=\"report\"):\n years[document.year][\"documents\"][\"report\"] = document\n for document in AnnualDocument.objects.filter(subcategory=\"financial\"):\n years[document.year][\"documents\"][\"financial\"] = document\n\n for obj in GeneralMeeting.objects.all():\n meeting_year = datetime_to_lectureyear(obj.datetime)\n years[meeting_year][\"general_meetings\"].append(obj)\n\n context = super().get_context_data(**kwargs)\n context.update(\n {\n \"association_documents\": AssociationDocument.objects.order_by(\n f\"name_{get_language()}\"\n ).all(),\n \"years\": list(years.items()),\n }\n )\n return context\n\n\nclass DocumentDownloadView(DetailView):\n \"\"\"\n View that allows you to download a specific document based on it's and your\n permissions settings\n \"\"\"\n\n model = Document\n\n def get(self, request, *args, **kwargs) -> HttpResponse:\n \"\"\"\n :return: either a 302 redirect to the login page or\n a 200 with the document\n \"\"\"\n response = super().get(request, *args, **kwargs)\n document = response.context_data[\"document\"]\n\n if document.members_only and not request.user.is_authenticated:\n return redirect(\"{}?next={}\".format(settings.LOGIN_URL, request.path))\n if document.members_only and not request.member.has_active_membership():\n raise PermissionDenied\n\n lang = request.GET.get(\"language\")\n try:\n if lang == \"en\":\n file = document.file_en\n else: # Fall back on language detection\n file = document.file\n except ValueError as e:\n raise Http404(\"This document does not exist.\") from e\n\n ext = os.path.splitext(file.path)[1]\n\n return sendfile(\n request,\n file.path,\n attachment=True,\n attachment_filename=slugify(document.name) + ext,\n )\n", "path": "website/documents/views.py"}], "after_files": [{"content": "\"\"\"Views provided by the documents package\"\"\"\nimport os\n\nfrom django.conf import settings\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import Http404, HttpResponse\nfrom django.shortcuts import redirect\nfrom django.utils import timezone\nfrom django.utils.text import slugify\nfrom django.views.generic import TemplateView, DetailView\nfrom django_sendfile import sendfile\n\nfrom documents.models import (\n 
AnnualDocument,\n AssociationDocument,\n GeneralMeeting,\n Document,\n)\nfrom utils.snippets import datetime_to_lectureyear\n\n\nclass DocumentsIndexView(TemplateView):\n \"\"\"\n View that renders the documents index page\n \"\"\"\n\n template_name = \"documents/index.html\"\n\n def get_context_data(self, **kwargs) -> dict:\n lecture_year = datetime_to_lectureyear(timezone.now())\n\n years = {x: {} for x in reversed(range(1990, lecture_year + 1))}\n for year in years:\n years[year] = {\n \"documents\": {\"policy\": None, \"report\": None, \"financial\": None},\n \"general_meetings\": [],\n }\n\n for document in AnnualDocument.objects.filter(subcategory=\"policy\"):\n years[document.year][\"documents\"][\"policy\"] = document\n for document in AnnualDocument.objects.filter(subcategory=\"report\"):\n years[document.year][\"documents\"][\"report\"] = document\n for document in AnnualDocument.objects.filter(subcategory=\"financial\"):\n years[document.year][\"documents\"][\"financial\"] = document\n\n for obj in GeneralMeeting.objects.all():\n meeting_year = datetime_to_lectureyear(obj.datetime)\n years[meeting_year][\"general_meetings\"].append(obj)\n\n context = super().get_context_data(**kwargs)\n context.update(\n {\n \"association_documents\": AssociationDocument.objects.order_by(\n \"name\"\n ).all(),\n \"years\": list(years.items()),\n }\n )\n return context\n\n\nclass DocumentDownloadView(DetailView):\n \"\"\"\n View that allows you to download a specific document based on it's and your\n permissions settings\n \"\"\"\n\n model = Document\n\n def get(self, request, *args, **kwargs) -> HttpResponse:\n \"\"\"\n :return: either a 302 redirect to the login page or\n a 200 with the document\n \"\"\"\n response = super().get(request, *args, **kwargs)\n document = response.context_data[\"document\"]\n\n if document.members_only and not request.user.is_authenticated:\n return redirect(\"{}?next={}\".format(settings.LOGIN_URL, request.path))\n if document.members_only and not request.member.has_active_membership():\n raise PermissionDenied\n\n try:\n file = document.file\n except ValueError as e:\n raise Http404(\"This document does not exist.\") from e\n\n ext = os.path.splitext(file.path)[1]\n\n return sendfile(\n request,\n file.path,\n attachment=True,\n attachment_filename=slugify(document.name) + ext,\n )\n", "path": "website/documents/views.py"}]} | 1,427 | 255 |
gh_patches_debug_18608 | rasdani/github-patches | git_diff | safe-global__safe-config-service-80 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Missing CORS headers
Since CORS headers are missing, resources from this application cannot be accessed by other domains
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/config/settings.py`
Content:
```
1 """
2 Django settings for safe_client_config_service project.
3
4 Generated by 'django-admin startproject' using Django 3.2.
5
6 For more information on this file, see
7 https://docs.djangoproject.com/en/3.2/topics/settings/
8
9 For the full list of settings and their values, see
10 https://docs.djangoproject.com/en/3.2/ref/settings/
11 """
12 import os
13 from distutils.util import strtobool
14 from pathlib import Path
15
16 # Build paths inside the project like this: BASE_DIR / 'subdir'.
17 BASE_DIR = Path(__file__).resolve().parent.parent
18
19 # Quick-start development settings - unsuitable for production
20 # See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
21
22 # SECURITY WARNING: keep the secret key used in production secret!
23 SECRET_KEY = os.getenv("SECRET_KEY", None)
24
25 # SECURITY WARNING: don't run with debug turned on in production!
26 DEBUG = bool(strtobool(os.getenv("DEBUG", "false")))
27
28 # https://docs.djangoproject.com/en/3.2/ref/settings/#std:setting-ALLOWED_HOSTS
29 allowed_hosts = os.getenv("DJANGO_ALLOWED_HOSTS", ".localhost,127.0.0.1,[::1]")
30 ALLOWED_HOSTS = [allowed_host.strip() for allowed_host in allowed_hosts.split(",")]
31
32 # Application definition
33
34 REST_FRAMEWORK = {
35 # https://www.django-rest-framework.org/api-guide/renderers/
36 "DEFAULT_RENDERER_CLASSES": [
37 "djangorestframework_camel_case.render.CamelCaseJSONRenderer",
38 ]
39 }
40
41 INSTALLED_APPS = [
42 "safe_apps.apps.AppsConfig",
43 "django.contrib.admin",
44 "django.contrib.auth",
45 "django.contrib.contenttypes",
46 "django.contrib.sessions",
47 "django.contrib.messages",
48 "django.contrib.staticfiles",
49 "rest_framework",
50 "drf_yasg",
51 ]
52
53 MIDDLEWARE = [
54 "config.middleware.LoggingMiddleware",
55 "django.middleware.security.SecurityMiddleware",
56 "django.contrib.sessions.middleware.SessionMiddleware",
57 "django.middleware.common.CommonMiddleware",
58 "django.middleware.csrf.CsrfViewMiddleware",
59 "django.contrib.auth.middleware.AuthenticationMiddleware",
60 "django.contrib.messages.middleware.MessageMiddleware",
61 "django.middleware.clickjacking.XFrameOptionsMiddleware",
62 ]
63
64 CACHES = {
65 "default": {
66 "BACKEND": "django.core.cache.backends.locmem.LocMemCache",
67 },
68 "safe-apps": {
69 "BACKEND": "django.core.cache.backends.locmem.LocMemCache",
70 },
71 }
72
73 LOGGING = {
74 "version": 1,
75 "disable_existing_loggers": False,
76 "formatters": {
77 "short": {"format": "%(asctime)s %(message)s"},
78 "verbose": {
79 "format": "%(asctime)s [%(levelname)s] [%(processName)s] %(message)s"
80 },
81 },
82 "handlers": {
83 "console": {
84 "class": "logging.StreamHandler",
85 "formatter": "verbose",
86 },
87 "console_short": {
88 "class": "logging.StreamHandler",
89 "formatter": "short",
90 },
91 },
92 "root": {
93 "handlers": ["console"],
94 "level": os.getenv("ROOT_LOG_LEVEL", "INFO"),
95 },
96 "loggers": {
97 "LoggingMiddleware": {
98 "handlers": ["console_short"],
99 "level": "INFO",
100 "propagate": False,
101 },
102 },
103 }
104
105 ROOT_URLCONF = "config.urls"
106
107 TEMPLATES = [
108 {
109 "BACKEND": "django.template.backends.django.DjangoTemplates",
110 "DIRS": [],
111 "APP_DIRS": True,
112 "OPTIONS": {
113 "context_processors": [
114 "django.template.context_processors.debug",
115 "django.template.context_processors.request",
116 "django.contrib.auth.context_processors.auth",
117 "django.contrib.messages.context_processors.messages",
118 ],
119 },
120 },
121 ]
122
123 WSGI_APPLICATION = "config.wsgi.application"
124
125 # Database
126 # https://docs.djangoproject.com/en/3.2/ref/settings/#databases
127
128 DATABASES = {
129 "default": {
130 "ENGINE": "django.db.backends.postgresql",
131 "NAME": os.getenv("POSTGRES_NAME", "postgres"),
132 "USER": os.getenv("POSTGRES_USER", "postgres"),
133 "PASSWORD": os.getenv("POSTGRES_PASSWORD", "postgres"),
134 "HOST": os.getenv("POSTGRES_HOST", "db"),
135 "PORT": os.getenv("POSTGRES_PORT", "5432"),
136 }
137 }
138
139 # Password validation
140 # https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
141
142 AUTH_PASSWORD_VALIDATORS = [
143 {
144 "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator",
145 },
146 {
147 "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator",
148 },
149 {
150 "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator",
151 },
152 {
153 "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator",
154 },
155 ]
156
157 # Internationalization
158 # https://docs.djangoproject.com/en/3.2/topics/i18n/
159
160 LANGUAGE_CODE = "en-us"
161
162 TIME_ZONE = "UTC"
163
164 USE_I18N = True
165
166 USE_L10N = True
167
168 USE_TZ = True
169
170 # Static files (CSS, JavaScript, Images)
171 # https://docs.djangoproject.com/en/3.2/howto/static-files/
172
173 STATIC_URL = "/static/"
174
175 STATIC_ROOT = "staticfiles"
176
177 # Default primary key field type
178 # https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
179
180 DEFAULT_AUTO_FIELD = "django.db.models.BigAutoField"
181
182 SWAGGER_SETTINGS = {
183 "DEFAULT_INFO": "config.swagger_info.SAFE_CONFIG_SERVICE_SWAGGER_INFO"
184 }
185
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/config/settings.py b/src/config/settings.py
--- a/src/config/settings.py
+++ b/src/config/settings.py
@@ -39,6 +39,7 @@
}
INSTALLED_APPS = [
+ "corsheaders",
"safe_apps.apps.AppsConfig",
"django.contrib.admin",
"django.contrib.auth",
@@ -52,6 +53,7 @@
MIDDLEWARE = [
"config.middleware.LoggingMiddleware",
+ "corsheaders.middleware.CorsMiddleware",
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
@@ -182,3 +184,6 @@
SWAGGER_SETTINGS = {
"DEFAULT_INFO": "config.swagger_info.SAFE_CONFIG_SERVICE_SWAGGER_INFO"
}
+
+CORS_ALLOW_ALL_ORIGINS = True
+CORS_URLS_REGEX = r"^/api/.*$"
| {"golden_diff": "diff --git a/src/config/settings.py b/src/config/settings.py\n--- a/src/config/settings.py\n+++ b/src/config/settings.py\n@@ -39,6 +39,7 @@\n }\n \n INSTALLED_APPS = [\n+ \"corsheaders\",\n \"safe_apps.apps.AppsConfig\",\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n@@ -52,6 +53,7 @@\n \n MIDDLEWARE = [\n \"config.middleware.LoggingMiddleware\",\n+ \"corsheaders.middleware.CorsMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n@@ -182,3 +184,6 @@\n SWAGGER_SETTINGS = {\n \"DEFAULT_INFO\": \"config.swagger_info.SAFE_CONFIG_SERVICE_SWAGGER_INFO\"\n }\n+\n+CORS_ALLOW_ALL_ORIGINS = True\n+CORS_URLS_REGEX = r\"^/api/.*$\"\n", "issue": "Missing CORS headers\nSince CORS headers are missing, resources from this application cannot be accessed by other domains\n", "before_files": [{"content": "\"\"\"\nDjango settings for safe_client_config_service project.\n\nGenerated by 'django-admin startproject' using Django 3.2.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/3.2/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/3.2/ref/settings/\n\"\"\"\nimport os\nfrom distutils.util import strtobool\nfrom pathlib import Path\n\n# Build paths inside the project like this: BASE_DIR / 'subdir'.\nBASE_DIR = Path(__file__).resolve().parent.parent\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.getenv(\"SECRET_KEY\", None)\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = bool(strtobool(os.getenv(\"DEBUG\", \"false\")))\n\n# https://docs.djangoproject.com/en/3.2/ref/settings/#std:setting-ALLOWED_HOSTS\nallowed_hosts = os.getenv(\"DJANGO_ALLOWED_HOSTS\", \".localhost,127.0.0.1,[::1]\")\nALLOWED_HOSTS = [allowed_host.strip() for allowed_host in allowed_hosts.split(\",\")]\n\n# Application definition\n\nREST_FRAMEWORK = {\n # https://www.django-rest-framework.org/api-guide/renderers/\n \"DEFAULT_RENDERER_CLASSES\": [\n \"djangorestframework_camel_case.render.CamelCaseJSONRenderer\",\n ]\n}\n\nINSTALLED_APPS = [\n \"safe_apps.apps.AppsConfig\",\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"rest_framework\",\n \"drf_yasg\",\n]\n\nMIDDLEWARE = [\n \"config.middleware.LoggingMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\nCACHES = {\n \"default\": {\n \"BACKEND\": \"django.core.cache.backends.locmem.LocMemCache\",\n },\n \"safe-apps\": {\n \"BACKEND\": \"django.core.cache.backends.locmem.LocMemCache\",\n },\n}\n\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"short\": {\"format\": \"%(asctime)s %(message)s\"},\n \"verbose\": {\n \"format\": \"%(asctime)s [%(levelname)s] [%(processName)s] %(message)s\"\n },\n },\n \"handlers\": {\n 
\"console\": {\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"verbose\",\n },\n \"console_short\": {\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"short\",\n },\n },\n \"root\": {\n \"handlers\": [\"console\"],\n \"level\": os.getenv(\"ROOT_LOG_LEVEL\", \"INFO\"),\n },\n \"loggers\": {\n \"LoggingMiddleware\": {\n \"handlers\": [\"console_short\"],\n \"level\": \"INFO\",\n \"propagate\": False,\n },\n },\n}\n\nROOT_URLCONF = \"config.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ],\n },\n },\n]\n\nWSGI_APPLICATION = \"config.wsgi.application\"\n\n# Database\n# https://docs.djangoproject.com/en/3.2/ref/settings/#databases\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql\",\n \"NAME\": os.getenv(\"POSTGRES_NAME\", \"postgres\"),\n \"USER\": os.getenv(\"POSTGRES_USER\", \"postgres\"),\n \"PASSWORD\": os.getenv(\"POSTGRES_PASSWORD\", \"postgres\"),\n \"HOST\": os.getenv(\"POSTGRES_HOST\", \"db\"),\n \"PORT\": os.getenv(\"POSTGRES_PORT\", \"5432\"),\n }\n}\n\n# Password validation\n# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.2/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.2/howto/static-files/\n\nSTATIC_URL = \"/static/\"\n\nSTATIC_ROOT = \"staticfiles\"\n\n# Default primary key field type\n# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field\n\nDEFAULT_AUTO_FIELD = \"django.db.models.BigAutoField\"\n\nSWAGGER_SETTINGS = {\n \"DEFAULT_INFO\": \"config.swagger_info.SAFE_CONFIG_SERVICE_SWAGGER_INFO\"\n}\n", "path": "src/config/settings.py"}], "after_files": [{"content": "\"\"\"\nDjango settings for safe_client_config_service project.\n\nGenerated by 'django-admin startproject' using Django 3.2.\n\nFor more information on this file, see\nhttps://docs.djangoproject.com/en/3.2/topics/settings/\n\nFor the full list of settings and their values, see\nhttps://docs.djangoproject.com/en/3.2/ref/settings/\n\"\"\"\nimport os\nfrom distutils.util import strtobool\nfrom pathlib import Path\n\n# Build paths inside the project like this: BASE_DIR / 'subdir'.\nBASE_DIR = Path(__file__).resolve().parent.parent\n\n# Quick-start development settings - unsuitable for production\n# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/\n\n# SECURITY WARNING: keep the secret key used in production secret!\nSECRET_KEY = os.getenv(\"SECRET_KEY\", None)\n\n# SECURITY WARNING: don't run with debug turned on in production!\nDEBUG = bool(strtobool(os.getenv(\"DEBUG\", \"false\")))\n\n# 
https://docs.djangoproject.com/en/3.2/ref/settings/#std:setting-ALLOWED_HOSTS\nallowed_hosts = os.getenv(\"DJANGO_ALLOWED_HOSTS\", \".localhost,127.0.0.1,[::1]\")\nALLOWED_HOSTS = [allowed_host.strip() for allowed_host in allowed_hosts.split(\",\")]\n\n# Application definition\n\nREST_FRAMEWORK = {\n # https://www.django-rest-framework.org/api-guide/renderers/\n \"DEFAULT_RENDERER_CLASSES\": [\n \"djangorestframework_camel_case.render.CamelCaseJSONRenderer\",\n ]\n}\n\nINSTALLED_APPS = [\n \"corsheaders\",\n \"safe_apps.apps.AppsConfig\",\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"rest_framework\",\n \"drf_yasg\",\n]\n\nMIDDLEWARE = [\n \"config.middleware.LoggingMiddleware\",\n \"corsheaders.middleware.CorsMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\nCACHES = {\n \"default\": {\n \"BACKEND\": \"django.core.cache.backends.locmem.LocMemCache\",\n },\n \"safe-apps\": {\n \"BACKEND\": \"django.core.cache.backends.locmem.LocMemCache\",\n },\n}\n\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"short\": {\"format\": \"%(asctime)s %(message)s\"},\n \"verbose\": {\n \"format\": \"%(asctime)s [%(levelname)s] [%(processName)s] %(message)s\"\n },\n },\n \"handlers\": {\n \"console\": {\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"verbose\",\n },\n \"console_short\": {\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"short\",\n },\n },\n \"root\": {\n \"handlers\": [\"console\"],\n \"level\": os.getenv(\"ROOT_LOG_LEVEL\", \"INFO\"),\n },\n \"loggers\": {\n \"LoggingMiddleware\": {\n \"handlers\": [\"console_short\"],\n \"level\": \"INFO\",\n \"propagate\": False,\n },\n },\n}\n\nROOT_URLCONF = \"config.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ],\n },\n },\n]\n\nWSGI_APPLICATION = \"config.wsgi.application\"\n\n# Database\n# https://docs.djangoproject.com/en/3.2/ref/settings/#databases\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql\",\n \"NAME\": os.getenv(\"POSTGRES_NAME\", \"postgres\"),\n \"USER\": os.getenv(\"POSTGRES_USER\", \"postgres\"),\n \"PASSWORD\": os.getenv(\"POSTGRES_PASSWORD\", \"postgres\"),\n \"HOST\": os.getenv(\"POSTGRES_HOST\", \"db\"),\n \"PORT\": os.getenv(\"POSTGRES_PORT\", \"5432\"),\n }\n}\n\n# Password validation\n# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": 
\"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\n# Internationalization\n# https://docs.djangoproject.com/en/3.2/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/3.2/howto/static-files/\n\nSTATIC_URL = \"/static/\"\n\nSTATIC_ROOT = \"staticfiles\"\n\n# Default primary key field type\n# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field\n\nDEFAULT_AUTO_FIELD = \"django.db.models.BigAutoField\"\n\nSWAGGER_SETTINGS = {\n \"DEFAULT_INFO\": \"config.swagger_info.SAFE_CONFIG_SERVICE_SWAGGER_INFO\"\n}\n\nCORS_ALLOW_ALL_ORIGINS = True\nCORS_URLS_REGEX = r\"^/api/.*$\"\n", "path": "src/config/settings.py"}]} | 1,959 | 198 |
gh_patches_debug_43076 | rasdani/github-patches | git_diff | pfnet__pytorch-pfn-extras-187 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`autoload=True` not working with models in the gpu
If we create the snapshot extension with `autoload=True`, the model will not correctly load its state.
`autoload=True` loads the state on the CPU, but the load is not executed until the first iteration, so it overwrites the device placement of the model. This requires us to call `start_extensions` manually and then do the device move for it to work.
```python
# model parameters will be moved to the CPU at the beginning of the first iteration because autoload is executed there
...
manager.extend(extensions.snapshot(autoload=True)
# move the model, but this will be overwriten later
model.cuda()
for batch in train_loader:
with manager.run_iteration(): # Snapshot load happens the first time this is executed
model(batch.cuda()) # Error! weights are in the cpu again
```
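A minimal workaround sketch, reusing the objects from the snippet above (the `start_extensions` call is the manual trigger the issue text refers to; the rest is assumed for illustration):

```python
# Hypothetical workaround: force extensions (and the snapshot autoload) to run
# while the weights are still on the CPU, *before* moving the model to the GPU.
manager.extend(extensions.snapshot(autoload=True))
manager.start_extensions()   # snapshot state is loaded here, onto the CPU copy of the weights
model.cuda()                 # the device move is no longer overwritten by a later autoload

for batch in train_loader:
    with manager.run_iteration():
        model(batch.cuda())  # weights remain on the GPU
```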
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pytorch_pfn_extras/nn/modules/lazy.py`
Content:
```
1 import inspect
2 from typing import Tuple
3 import warnings
4
5 import torch
6
7
8 class LazyInitializationMixin:
9
10 """A mixin for modules that lazily initialize buffers and parameters.
11
12 Unlike regular modules, subclasses of this module can initialize
13 buffers and parameters outside of the constructor (``__init__``).
14 This allows you to, for example, initialize parameters in ``forward``
15 method to determine the shape of the weight based on the initial input.
16
17 Be sure to run "dummy" forward once to initialize all parameters that
18 should be trained, before passing ``module.parameters()`` to an optimizer;
19 otherwise weights initialized after ``module.parameters()`` (e.g., in
20 ``forward`` function) will never be trained.
21
22 Note that lazy modules cannot validate if the shape is correct during
23 deserialization. Also note that the initial weights may become different
24 from the original (non-lazy) module even if the random seed is manually
25 configured, as the order of initialization is different from the original
26 one; especially, ``module.cuda()`` may cause the initialization to run on
27 a GPU.
28
29 The default value of lazy buffers and parameters are ``torch.Tensor([])``
30 and ``UninitializedParameter()``, respectively.
31 """
32
33 # Subclasses must override these fields and list names of all buffers /
34 # parameters that will be initialized lazily.
35 lazy_buffer_names: Tuple[str, ...] = ()
36 lazy_parameter_names: Tuple[str, ...] = ()
37
38 def __init__(self, *args, **kwargs):
39 self._lazy_ready = False
40
41 super().__init__(*args, **kwargs)
42
43 for name in self.lazy_buffer_names:
44 self.register_buffer(name, torch.Tensor([]))
45 for name in self.lazy_parameter_names:
46 self.register_parameter(name, UninitializedParameter())
47 self._register_load_state_dict_pre_hook(self._lazy_load_hook)
48 self._lazy_ready = True
49
50 @property
51 def lazy_parmeters_determined(self):
52 """Returns if all lazy parameters are determined.
53
54 Subclasses can perform parameters initialization after all lazy
55 parameters are determined. Note that this may be called during
56 ``__init__``.
57 """
58 return self._lazy_ready and all([
59 not isinstance(getattr(self, x), UninitializedParameter)
60 for x in self.lazy_parameter_names])
61
62 def state_dict(self, *args, **kwargs):
63 """Returns a dictionary containing a whole state of the module.
64
65 This function overrides the default behavior to exclude uninitialized
66 parameter from serialization. This is needed because we need to
67 discriminate lazy parameters (``UninitializedParameter()`) and
68 initialized empty parameters (``torch.nn.Parameter(torch.Tensor())``)
69 during deserialization.
70
71 See comments of ``_lazy_load_hook`` for details.
72 """
73 destination = super().state_dict(*args, **kwargs)
74 for name in self.lazy_parameter_names:
75 if isinstance(getattr(self, name), UninitializedParameter):
76 del destination[name]
77 return destination
78
79 def _lazy_load_hook(
80 self, state_dict, prefix, local_metadata, strict,
81 missing_keys, unexpected_keys, error_msgs):
82 """load_state_dict pre-hook function for lazy buffers and parameters.
83
84 The purpose of this hook is to adjust the current state and/or
85 ``state_dict`` being loaded so that a module instance serialized in
86 both un/initialized state can be deserialized onto both un/initialized
87 module instance.
88
89 See comment in ``torch.nn.Module._register_load_state_dict_pre_hook``
90 for the details of the hook specification.
91 """
92 for name in self.lazy_buffer_names:
93 # Avoid shape mismatch error when loading an initialized buffer
94 # onto an uninitialized module instance.
95 self.register_buffer(name, state_dict[prefix + name])
96
97 for name in self.lazy_parameter_names:
98 # The parameter may not exist in the loaded ``state_dict`` if the
99 # original module was serialized before initializing lazy
100 # parameters (see comments of ``state_dict``).
101 key = prefix + name
102 if key in state_dict:
103 # The model was serialized after initialization.
104 self.register_parameter(
105 name, torch.nn.Parameter(state_dict[key]))
106 else:
107 # The model was serialized before initialization.
108 param = UninitializedParameter()
109 self.register_parameter(name, param)
110 state_dict[key] = param
111
112
113 class UninitializedParameter(torch.nn.Parameter):
114
115 def __repr__(self):
116 return 'Uninitialized lazy parameter'
117
118 def share_memory_(self):
119 raise RuntimeError(
120 'Can\'t share memory on an unitialized parameter. '
121 'Run forward to initialize the network before calling '
122 '`module.share_memory()`.')
123
124 @property
125 def is_leaf(self):
126 # Hacky workaround to detect use of uninitialized lazy parameters.
127 # This overrides ``is_leaf`` attribute which should always be ``True``
128 # for parameters; optimizers check for this attribute and raise an
129 # error if non-leaf tensors are detected.
130 frame = inspect.currentframe()
131 if frame.f_back.f_globals['__package__'].startswith('torch.optim'):
132 warnings.warn('''
133 Use of uninitialized lazy parameter in Optimizer has been detected.
134 Maybe you forgot to run forward before passing `module.parameters()` to the optimizer?''') # NOQA
135 return True
136
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pytorch_pfn_extras/nn/modules/lazy.py b/pytorch_pfn_extras/nn/modules/lazy.py
--- a/pytorch_pfn_extras/nn/modules/lazy.py
+++ b/pytorch_pfn_extras/nn/modules/lazy.py
@@ -81,32 +81,45 @@
missing_keys, unexpected_keys, error_msgs):
"""load_state_dict pre-hook function for lazy buffers and parameters.
- The purpose of this hook is to adjust the current state and/or
- ``state_dict`` being loaded so that a module instance serialized in
- both un/initialized state can be deserialized onto both un/initialized
- module instance.
+ The purpose of this hook is to check the current state and/or
+ ``state_dict`` being loaded and ensure that both are states
+ are properly initialized.
See comment in ``torch.nn.Module._register_load_state_dict_pre_hook``
for the details of the hook specification.
"""
for name in self.lazy_buffer_names:
- # Avoid shape mismatch error when loading an initialized buffer
- # onto an uninitialized module instance.
- self.register_buffer(name, state_dict[prefix + name])
+ key = prefix + name
+ module_initialized = getattr(self, name).shape != (0,)
+ state_initialized = state_dict[key].shape != (0,)
+ if module_initialized and not state_initialized:
+ raise RuntimeError(
+ 'Can\'t load non-initialized buffers in already '
+ 'initialized modules')
+ elif not module_initialized and state_initialized:
+ # Here we need to avoid a tensor size mismatch
+ # this is a regular tensor without a materialize
+ # method, so we can just resize for the load logic to copy
+ # the contents later to the correct device the module
+ # was moved to
+ getattr(self, name).resize_(state_dict[key].size())
for name in self.lazy_parameter_names:
- # The parameter may not exist in the loaded ``state_dict`` if the
+ # The parameter does not exist in the loaded ``state_dict`` if the
# original module was serialized before initializing lazy
# parameters (see comments of ``state_dict``).
key = prefix + name
- if key in state_dict:
- # The model was serialized after initialization.
- self.register_parameter(
- name, torch.nn.Parameter(state_dict[key]))
- else:
- # The model was serialized before initialization.
+ module_initialized = not isinstance(
+ getattr(self, name), UninitializedParameter)
+ state_initialized = key in state_dict
+ if module_initialized and not state_initialized:
+ raise RuntimeError(
+ 'Can\'t load uninitialized parameters in already '
+ 'initialized modules')
+ elif not module_initialized and state_initialized:
+ getattr(self, name).materialize(state_dict[key].shape)
+ elif key not in state_dict and not module_initialized:
param = UninitializedParameter()
- self.register_parameter(name, param)
state_dict[key] = param
@@ -133,3 +146,25 @@
Use of uninitialized lazy parameter in Optimizer has been detected.
Maybe you forgot to run forward before passing `module.parameters()` to the optimizer?''') # NOQA
return True
+
+ def materialize(self, shape, device=None, dtype=None):
+ r"""Create a Parameter with the same properties of the uninitialized
+ one. Given a shape, it materializes a parameter in the same device
+ and with the same `dtype` as the current one or the specified ones in
+ the arguments.
+
+ Args:
+ shape : (tuple): the shape for the materialized tensor.
+ device (:class:`torch.device`): the desired device of the
+ parameters
+ and buffers in this module. Optional.
+ dtype (:class:`torch.dtype`): the desired floating point type of
+ the floating point parameters and buffers in this module.
+ Optional.
+ """
+ if device is None:
+ device = self.data.device
+ if dtype is None:
+ dtype = self.data.dtype
+ self.data = torch.empty(shape, device=device, dtype=dtype)
+ self.__class__ = torch.nn.Parameter
| {"golden_diff": "diff --git a/pytorch_pfn_extras/nn/modules/lazy.py b/pytorch_pfn_extras/nn/modules/lazy.py\n--- a/pytorch_pfn_extras/nn/modules/lazy.py\n+++ b/pytorch_pfn_extras/nn/modules/lazy.py\n@@ -81,32 +81,45 @@\n missing_keys, unexpected_keys, error_msgs):\n \"\"\"load_state_dict pre-hook function for lazy buffers and parameters.\n \n- The purpose of this hook is to adjust the current state and/or\n- ``state_dict`` being loaded so that a module instance serialized in\n- both un/initialized state can be deserialized onto both un/initialized\n- module instance.\n+ The purpose of this hook is to check the current state and/or\n+ ``state_dict`` being loaded and ensure that both are states\n+ are properly initialized.\n \n See comment in ``torch.nn.Module._register_load_state_dict_pre_hook``\n for the details of the hook specification.\n \"\"\"\n for name in self.lazy_buffer_names:\n- # Avoid shape mismatch error when loading an initialized buffer\n- # onto an uninitialized module instance.\n- self.register_buffer(name, state_dict[prefix + name])\n+ key = prefix + name\n+ module_initialized = getattr(self, name).shape != (0,)\n+ state_initialized = state_dict[key].shape != (0,)\n+ if module_initialized and not state_initialized:\n+ raise RuntimeError(\n+ 'Can\\'t load non-initialized buffers in already '\n+ 'initialized modules')\n+ elif not module_initialized and state_initialized:\n+ # Here we need to avoid a tensor size mismatch\n+ # this is a regular tensor without a materialize\n+ # method, so we can just resize for the load logic to copy\n+ # the contents later to the correct device the module\n+ # was moved to\n+ getattr(self, name).resize_(state_dict[key].size())\n \n for name in self.lazy_parameter_names:\n- # The parameter may not exist in the loaded ``state_dict`` if the\n+ # The parameter does not exist in the loaded ``state_dict`` if the\n # original module was serialized before initializing lazy\n # parameters (see comments of ``state_dict``).\n key = prefix + name\n- if key in state_dict:\n- # The model was serialized after initialization.\n- self.register_parameter(\n- name, torch.nn.Parameter(state_dict[key]))\n- else:\n- # The model was serialized before initialization.\n+ module_initialized = not isinstance(\n+ getattr(self, name), UninitializedParameter)\n+ state_initialized = key in state_dict\n+ if module_initialized and not state_initialized:\n+ raise RuntimeError(\n+ 'Can\\'t load uninitialized parameters in already '\n+ 'initialized modules')\n+ elif not module_initialized and state_initialized:\n+ getattr(self, name).materialize(state_dict[key].shape)\n+ elif key not in state_dict and not module_initialized:\n param = UninitializedParameter()\n- self.register_parameter(name, param)\n state_dict[key] = param\n \n \n@@ -133,3 +146,25 @@\n Use of uninitialized lazy parameter in Optimizer has been detected.\n Maybe you forgot to run forward before passing `module.parameters()` to the optimizer?''') # NOQA\n return True\n+\n+ def materialize(self, shape, device=None, dtype=None):\n+ r\"\"\"Create a Parameter with the same properties of the uninitialized\n+ one. Given a shape, it materializes a parameter in the same device\n+ and with the same `dtype` as the current one or the specified ones in\n+ the arguments.\n+\n+ Args:\n+ shape : (tuple): the shape for the materialized tensor.\n+ device (:class:`torch.device`): the desired device of the\n+ parameters\n+ and buffers in this module. 
Optional.\n+ dtype (:class:`torch.dtype`): the desired floating point type of\n+ the floating point parameters and buffers in this module.\n+ Optional.\n+ \"\"\"\n+ if device is None:\n+ device = self.data.device\n+ if dtype is None:\n+ dtype = self.data.dtype\n+ self.data = torch.empty(shape, device=device, dtype=dtype)\n+ self.__class__ = torch.nn.Parameter\n", "issue": "`autoload=True` not working with models in the gpu\nIf we create the snapshot extension with `autoload=True` the model will not correctly load its state.\r\n\r\n`autoload=true` loads the state in the CPU. but it is not executed until the first iteration and it will overwrite the device of the model. This requires us to call `start_extensions` manually and then do the device move for it to work\r\n\r\n```python\r\n# model parameters will be moved to cpu at the beginning of the first iteration due autoload being executed there\r\n...\r\nmanager.extend(extensions.snapshot(autoload=True)\r\n# move the model, but this will be overwriten later\r\nmodel.cuda()\r\n\r\nfor batch in train_loader:\r\n with manager.run_iteration(): # Snapshot load happens the first time this is executed\r\n model(batch.cuda()) # Error! weights are in the cpu again\r\n```\n", "before_files": [{"content": "import inspect\nfrom typing import Tuple\nimport warnings\n\nimport torch\n\n\nclass LazyInitializationMixin:\n\n \"\"\"A mixin for modules that lazily initialize buffers and parameters.\n\n Unlike regular modules, subclasses of this module can initialize\n buffers and parameters outside of the constructor (``__init__``).\n This allows you to, for example, initialize parameters in ``forward``\n method to determine the shape of the weight based on the initial input.\n\n Be sure to run \"dummy\" forward once to initialize all parameters that\n should be trained, before passing ``module.parameters()`` to an optimizer;\n otherwise weights initialized after ``module.parameters()`` (e.g., in\n ``forward`` function) will never be trained.\n\n Note that lazy modules cannot validate if the shape is correct during\n deserialization. Also note that the initial weights may become different\n from the original (non-lazy) module even if the random seed is manually\n configured, as the order of initialization is different from the original\n one; especially, ``module.cuda()`` may cause the initialization to run on\n a GPU.\n\n The default value of lazy buffers and parameters are ``torch.Tensor([])``\n and ``UninitializedParameter()``, respectively.\n \"\"\"\n\n # Subclasses must override these fields and list names of all buffers /\n # parameters that will be initialized lazily.\n lazy_buffer_names: Tuple[str, ...] = ()\n lazy_parameter_names: Tuple[str, ...] = ()\n\n def __init__(self, *args, **kwargs):\n self._lazy_ready = False\n\n super().__init__(*args, **kwargs)\n\n for name in self.lazy_buffer_names:\n self.register_buffer(name, torch.Tensor([]))\n for name in self.lazy_parameter_names:\n self.register_parameter(name, UninitializedParameter())\n self._register_load_state_dict_pre_hook(self._lazy_load_hook)\n self._lazy_ready = True\n\n @property\n def lazy_parmeters_determined(self):\n \"\"\"Returns if all lazy parameters are determined.\n\n Subclasses can perform parameters initialization after all lazy\n parameters are determined. 
Note that this may be called during\n ``__init__``.\n \"\"\"\n return self._lazy_ready and all([\n not isinstance(getattr(self, x), UninitializedParameter)\n for x in self.lazy_parameter_names])\n\n def state_dict(self, *args, **kwargs):\n \"\"\"Returns a dictionary containing a whole state of the module.\n\n This function overrides the default behavior to exclude uninitialized\n parameter from serialization. This is needed because we need to\n discriminate lazy parameters (``UninitializedParameter()`) and\n initialized empty parameters (``torch.nn.Parameter(torch.Tensor())``)\n during deserialization.\n\n See comments of ``_lazy_load_hook`` for details.\n \"\"\"\n destination = super().state_dict(*args, **kwargs)\n for name in self.lazy_parameter_names:\n if isinstance(getattr(self, name), UninitializedParameter):\n del destination[name]\n return destination\n\n def _lazy_load_hook(\n self, state_dict, prefix, local_metadata, strict,\n missing_keys, unexpected_keys, error_msgs):\n \"\"\"load_state_dict pre-hook function for lazy buffers and parameters.\n\n The purpose of this hook is to adjust the current state and/or\n ``state_dict`` being loaded so that a module instance serialized in\n both un/initialized state can be deserialized onto both un/initialized\n module instance.\n\n See comment in ``torch.nn.Module._register_load_state_dict_pre_hook``\n for the details of the hook specification.\n \"\"\"\n for name in self.lazy_buffer_names:\n # Avoid shape mismatch error when loading an initialized buffer\n # onto an uninitialized module instance.\n self.register_buffer(name, state_dict[prefix + name])\n\n for name in self.lazy_parameter_names:\n # The parameter may not exist in the loaded ``state_dict`` if the\n # original module was serialized before initializing lazy\n # parameters (see comments of ``state_dict``).\n key = prefix + name\n if key in state_dict:\n # The model was serialized after initialization.\n self.register_parameter(\n name, torch.nn.Parameter(state_dict[key]))\n else:\n # The model was serialized before initialization.\n param = UninitializedParameter()\n self.register_parameter(name, param)\n state_dict[key] = param\n\n\nclass UninitializedParameter(torch.nn.Parameter):\n\n def __repr__(self):\n return 'Uninitialized lazy parameter'\n\n def share_memory_(self):\n raise RuntimeError(\n 'Can\\'t share memory on an unitialized parameter. 
'\n 'Run forward to initialize the network before calling '\n '`module.share_memory()`.')\n\n @property\n def is_leaf(self):\n # Hacky workaround to detect use of uninitialized lazy parameters.\n # This overrides ``is_leaf`` attribute which should always be ``True``\n # for parameters; optimizers check for this attribute and raise an\n # error if non-leaf tensors are detected.\n frame = inspect.currentframe()\n if frame.f_back.f_globals['__package__'].startswith('torch.optim'):\n warnings.warn('''\n Use of uninitialized lazy parameter in Optimizer has been detected.\n Maybe you forgot to run forward before passing `module.parameters()` to the optimizer?''') # NOQA\n return True\n", "path": "pytorch_pfn_extras/nn/modules/lazy.py"}], "after_files": [{"content": "import inspect\nfrom typing import Tuple\nimport warnings\n\nimport torch\n\n\nclass LazyInitializationMixin:\n\n \"\"\"A mixin for modules that lazily initialize buffers and parameters.\n\n Unlike regular modules, subclasses of this module can initialize\n buffers and parameters outside of the constructor (``__init__``).\n This allows you to, for example, initialize parameters in ``forward``\n method to determine the shape of the weight based on the initial input.\n\n Be sure to run \"dummy\" forward once to initialize all parameters that\n should be trained, before passing ``module.parameters()`` to an optimizer;\n otherwise weights initialized after ``module.parameters()`` (e.g., in\n ``forward`` function) will never be trained.\n\n Note that lazy modules cannot validate if the shape is correct during\n deserialization. Also note that the initial weights may become different\n from the original (non-lazy) module even if the random seed is manually\n configured, as the order of initialization is different from the original\n one; especially, ``module.cuda()`` may cause the initialization to run on\n a GPU.\n\n The default value of lazy buffers and parameters are ``torch.Tensor([])``\n and ``UninitializedParameter()``, respectively.\n \"\"\"\n\n # Subclasses must override these fields and list names of all buffers /\n # parameters that will be initialized lazily.\n lazy_buffer_names: Tuple[str, ...] = ()\n lazy_parameter_names: Tuple[str, ...] = ()\n\n def __init__(self, *args, **kwargs):\n self._lazy_ready = False\n\n super().__init__(*args, **kwargs)\n\n for name in self.lazy_buffer_names:\n self.register_buffer(name, torch.Tensor([]))\n for name in self.lazy_parameter_names:\n self.register_parameter(name, UninitializedParameter())\n self._register_load_state_dict_pre_hook(self._lazy_load_hook)\n self._lazy_ready = True\n\n @property\n def lazy_parmeters_determined(self):\n \"\"\"Returns if all lazy parameters are determined.\n\n Subclasses can perform parameters initialization after all lazy\n parameters are determined. Note that this may be called during\n ``__init__``.\n \"\"\"\n return self._lazy_ready and all([\n not isinstance(getattr(self, x), UninitializedParameter)\n for x in self.lazy_parameter_names])\n\n def state_dict(self, *args, **kwargs):\n \"\"\"Returns a dictionary containing a whole state of the module.\n\n This function overrides the default behavior to exclude uninitialized\n parameter from serialization. 
This is needed because we need to\n discriminate lazy parameters (``UninitializedParameter()`) and\n initialized empty parameters (``torch.nn.Parameter(torch.Tensor())``)\n during deserialization.\n\n See comments of ``_lazy_load_hook`` for details.\n \"\"\"\n destination = super().state_dict(*args, **kwargs)\n for name in self.lazy_parameter_names:\n if isinstance(getattr(self, name), UninitializedParameter):\n del destination[name]\n return destination\n\n def _lazy_load_hook(\n self, state_dict, prefix, local_metadata, strict,\n missing_keys, unexpected_keys, error_msgs):\n \"\"\"load_state_dict pre-hook function for lazy buffers and parameters.\n\n The purpose of this hook is to check the current state and/or\n ``state_dict`` being loaded and ensure that both are states\n are properly initialized.\n\n See comment in ``torch.nn.Module._register_load_state_dict_pre_hook``\n for the details of the hook specification.\n \"\"\"\n for name in self.lazy_buffer_names:\n key = prefix + name\n module_initialized = getattr(self, name).shape != (0,)\n state_initialized = state_dict[key].shape != (0,)\n if module_initialized and not state_initialized:\n raise RuntimeError(\n 'Can\\'t load non-initialized buffers in already '\n 'initialized modules')\n elif not module_initialized and state_initialized:\n # Here we need to avoid a tensor size mismatch\n # this is a regular tensor without a materialize\n # method, so we can just resize for the load logic to copy\n # the contents later to the correct device the module\n # was moved to\n getattr(self, name).resize_(state_dict[key].size())\n\n for name in self.lazy_parameter_names:\n # The parameter does not exist in the loaded ``state_dict`` if the\n # original module was serialized before initializing lazy\n # parameters (see comments of ``state_dict``).\n key = prefix + name\n module_initialized = not isinstance(\n getattr(self, name), UninitializedParameter)\n state_initialized = key in state_dict\n if module_initialized and not state_initialized:\n raise RuntimeError(\n 'Can\\'t load uninitialized parameters in already '\n 'initialized modules')\n elif not module_initialized and state_initialized:\n getattr(self, name).materialize(state_dict[key].shape)\n elif key not in state_dict and not module_initialized:\n param = UninitializedParameter()\n state_dict[key] = param\n\n\nclass UninitializedParameter(torch.nn.Parameter):\n\n def __repr__(self):\n return 'Uninitialized lazy parameter'\n\n def share_memory_(self):\n raise RuntimeError(\n 'Can\\'t share memory on an unitialized parameter. '\n 'Run forward to initialize the network before calling '\n '`module.share_memory()`.')\n\n @property\n def is_leaf(self):\n # Hacky workaround to detect use of uninitialized lazy parameters.\n # This overrides ``is_leaf`` attribute which should always be ``True``\n # for parameters; optimizers check for this attribute and raise an\n # error if non-leaf tensors are detected.\n frame = inspect.currentframe()\n if frame.f_back.f_globals['__package__'].startswith('torch.optim'):\n warnings.warn('''\n Use of uninitialized lazy parameter in Optimizer has been detected.\n Maybe you forgot to run forward before passing `module.parameters()` to the optimizer?''') # NOQA\n return True\n\n def materialize(self, shape, device=None, dtype=None):\n r\"\"\"Create a Parameter with the same properties of the uninitialized\n one. 
Given a shape, it materializes a parameter in the same device\n and with the same `dtype` as the current one or the specified ones in\n the arguments.\n\n Args:\n shape : (tuple): the shape for the materialized tensor.\n device (:class:`torch.device`): the desired device of the\n parameters\n and buffers in this module. Optional.\n dtype (:class:`torch.dtype`): the desired floating point type of\n the floating point parameters and buffers in this module.\n Optional.\n \"\"\"\n if device is None:\n device = self.data.device\n if dtype is None:\n dtype = self.data.dtype\n self.data = torch.empty(shape, device=device, dtype=dtype)\n self.__class__ = torch.nn.Parameter\n", "path": "pytorch_pfn_extras/nn/modules/lazy.py"}]} | 1,886 | 943 |
gh_patches_debug_25656 | rasdani/github-patches | git_diff | roboflow__supervision-121 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Better handling of wrong data types of mask for sv.PolygonZone()
### Search before asking
- [X] I have searched the Supervision [issues](https://github.com/roboflow/supervision/issues) and found no similar feature requests.
### Description
<img width="898" alt="image" src="https://github.com/roboflow/supervision/assets/47161914/6eebe556-97f1-452b-8757-17062548ca12">
OpenCV errors out with an uninformative error when the dtype of the mask is wrong.
It would be good to add a check for dtype != np.int32 and raise an informative exception.
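A minimal sketch of the proposed check (hypothetical helper; the eventual fix might cast the array instead of raising):

```python
import numpy as np


def validate_polygon(polygon: np.ndarray) -> np.ndarray:
    # Fail fast with an informative message instead of the opaque OpenCV error.
    if polygon.dtype != np.int32:
        raise TypeError(
            f"polygon must have dtype np.int32, got {polygon.dtype}; "
            "convert it with np.array(coords).astype(np.int32)"
        )
    return polygon
```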
### Use case
I was trying to create a mask in CVAT; the exported segmentation had decimal places, so np.array(coords) did not convert to np.int32, and I was confused by the bad error message.
This is likely to happen to other users too.
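For example, an explicit cast on the exported coordinates avoids the error (the coordinate values below are made up; `sv` is the import alias used in the title):

```python
import numpy as np
import supervision as sv

coords = [[12.5, 40.2], [300.0, 41.7], [310.9, 280.4]]  # hypothetical CVAT export with decimals
polygon = np.array(coords).astype(np.int32)             # without this cast, OpenCV errors out
zone = sv.PolygonZone(polygon=polygon, frame_resolution_wh=(640, 480))
```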
### Additional
Happy to submit PR
### Are you willing to submit a PR?
- [X] Yes I'd like to help by submitting a PR!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `supervision/detection/tools/polygon_zone.py`
Content:
```
1 from dataclasses import replace
2 from typing import Optional, Tuple
3
4 import cv2
5 import numpy as np
6
7 from supervision import Detections
8 from supervision.detection.utils import clip_boxes, polygon_to_mask
9 from supervision.draw.color import Color
10 from supervision.draw.utils import draw_polygon, draw_text
11 from supervision.geometry.core import Position
12 from supervision.geometry.utils import get_polygon_center
13
14
15 class PolygonZone:
16 """
17 A class for defining a polygon-shaped zone within a frame for detecting objects.
18
19 Attributes:
20 polygon (np.ndarray): A numpy array defining the polygon vertices
21 frame_resolution_wh (Tuple[int, int]): The frame resolution (width, height)
22 triggering_position (Position): The position within the bounding box that triggers the zone (default: Position.BOTTOM_CENTER)
23 current_count (int): The current count of detected objects within the zone
24 mask (np.ndarray): The 2D bool mask for the polygon zone
25 """
26
27 def __init__(
28 self,
29 polygon: np.ndarray,
30 frame_resolution_wh: Tuple[int, int],
31 triggering_position: Position = Position.BOTTOM_CENTER,
32 ):
33 self.polygon = polygon
34 self.frame_resolution_wh = frame_resolution_wh
35 self.triggering_position = triggering_position
36 self.current_count = 0
37
38 width, height = frame_resolution_wh
39 self.mask = polygon_to_mask(
40 polygon=polygon, resolution_wh=(width + 1, height + 1)
41 )
42
43 def trigger(self, detections: Detections) -> np.ndarray:
44 """
45 Determines if the detections are within the polygon zone.
46
47 Parameters:
48 detections (Detections): The detections to be checked against the polygon zone
49
50 Returns:
51 np.ndarray: A boolean numpy array indicating if each detection is within the polygon zone
52 """
53
54 clipped_xyxy = clip_boxes(
55 boxes_xyxy=detections.xyxy, frame_resolution_wh=self.frame_resolution_wh
56 )
57 clipped_detections = replace(detections, xyxy=clipped_xyxy)
58 clipped_anchors = np.ceil(
59 clipped_detections.get_anchor_coordinates(anchor=self.triggering_position)
60 ).astype(int)
61 is_in_zone = self.mask[clipped_anchors[:, 1], clipped_anchors[:, 0]]
62 self.current_count = np.sum(is_in_zone)
63 return is_in_zone.astype(bool)
64
65
66 class PolygonZoneAnnotator:
67 """
68 A class for annotating a polygon-shaped zone within a frame with a count of detected objects.
69
70 Attributes:
71 zone (PolygonZone): The polygon zone to be annotated
72 color (Color): The color to draw the polygon lines
73 thickness (int): The thickness of the polygon lines, default is 2
74 text_color (Color): The color of the text on the polygon, default is black
75 text_scale (float): The scale of the text on the polygon, default is 0.5
76 text_thickness (int): The thickness of the text on the polygon, default is 1
77 text_padding (int): The padding around the text on the polygon, default is 10
78 font (int): The font type for the text on the polygon, default is cv2.FONT_HERSHEY_SIMPLEX
79 center (Tuple[int, int]): The center of the polygon for text placement
80 """
81
82 def __init__(
83 self,
84 zone: PolygonZone,
85 color: Color,
86 thickness: int = 2,
87 text_color: Color = Color.black(),
88 text_scale: float = 0.5,
89 text_thickness: int = 1,
90 text_padding: int = 10,
91 ):
92 self.zone = zone
93 self.color = color
94 self.thickness = thickness
95 self.text_color = text_color
96 self.text_scale = text_scale
97 self.text_thickness = text_thickness
98 self.text_padding = text_padding
99 self.font = cv2.FONT_HERSHEY_SIMPLEX
100 self.center = get_polygon_center(polygon=zone.polygon)
101
102 def annotate(self, scene: np.ndarray, label: Optional[str] = None) -> np.ndarray:
103 """
104 Annotates the polygon zone within a frame with a count of detected objects.
105
106 Parameters:
107 scene (np.ndarray): The image on which the polygon zone will be annotated
108 label (Optional[str]): An optional label for the count of detected objects within the polygon zone (default: None)
109
110 Returns:
111 np.ndarray: The image with the polygon zone and count of detected objects
112 """
113 annotated_frame = draw_polygon(
114 scene=scene,
115 polygon=self.zone.polygon,
116 color=self.color,
117 thickness=self.thickness,
118 )
119
120 annotated_frame = draw_text(
121 scene=annotated_frame,
122 text=str(self.zone.current_count) if label is None else label,
123 text_anchor=self.center,
124 background_color=self.color,
125 text_color=self.text_color,
126 text_scale=self.text_scale,
127 text_thickness=self.text_thickness,
128 text_padding=self.text_padding,
129 text_font=self.font,
130 )
131
132 return annotated_frame
133
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/supervision/detection/tools/polygon_zone.py b/supervision/detection/tools/polygon_zone.py
--- a/supervision/detection/tools/polygon_zone.py
+++ b/supervision/detection/tools/polygon_zone.py
@@ -17,7 +17,7 @@
A class for defining a polygon-shaped zone within a frame for detecting objects.
Attributes:
- polygon (np.ndarray): A numpy array defining the polygon vertices
+ polygon (np.ndarray): A polygon represented by a numpy array of shape `(N, 2)`, containing the `x`, `y` coordinates of the points.
frame_resolution_wh (Tuple[int, int]): The frame resolution (width, height)
triggering_position (Position): The position within the bounding box that triggers the zone (default: Position.BOTTOM_CENTER)
current_count (int): The current count of detected objects within the zone
@@ -30,7 +30,7 @@
frame_resolution_wh: Tuple[int, int],
triggering_position: Position = Position.BOTTOM_CENTER,
):
- self.polygon = polygon
+ self.polygon = polygon.astype(int)
self.frame_resolution_wh = frame_resolution_wh
self.triggering_position = triggering_position
self.current_count = 0
| {"golden_diff": "diff --git a/supervision/detection/tools/polygon_zone.py b/supervision/detection/tools/polygon_zone.py\n--- a/supervision/detection/tools/polygon_zone.py\n+++ b/supervision/detection/tools/polygon_zone.py\n@@ -17,7 +17,7 @@\n A class for defining a polygon-shaped zone within a frame for detecting objects.\n \n Attributes:\n- polygon (np.ndarray): A numpy array defining the polygon vertices\n+ polygon (np.ndarray): A polygon represented by a numpy array of shape `(N, 2)`, containing the `x`, `y` coordinates of the points.\n frame_resolution_wh (Tuple[int, int]): The frame resolution (width, height)\n triggering_position (Position): The position within the bounding box that triggers the zone (default: Position.BOTTOM_CENTER)\n current_count (int): The current count of detected objects within the zone\n@@ -30,7 +30,7 @@\n frame_resolution_wh: Tuple[int, int],\n triggering_position: Position = Position.BOTTOM_CENTER,\n ):\n- self.polygon = polygon\n+ self.polygon = polygon.astype(int)\n self.frame_resolution_wh = frame_resolution_wh\n self.triggering_position = triggering_position\n self.current_count = 0\n", "issue": "Better handling of wrong data types of mask for sv.PolygonZone()\n### Search before asking\n\n- [X] I have searched the Supervision [issues](https://github.com/roboflow/supervision/issues) and found no similar feature requests.\n\n\n### Description\n\n<img width=\"898\" alt=\"image\" src=\"https://github.com/roboflow/supervision/assets/47161914/6eebe556-97f1-452b-8757-17062548ca12\">\r\n\r\nOpenCV errors out with an uninformative error when the dtype of the mask is wrong. \r\n\r\nWould be good to add catch if dtype != np.int32 and raise an Exception.\n\n### Use case\n\nI was trying to create a mask in CVAT, the exported segmentation had decimal places, np.array(coords) did not convert to np.int32, and I was confused by the bad error message.\r\n\r\nThis is likely to happen to other users too .\n\n### Additional\n\nHappy to submit PR\n\n### Are you willing to submit a PR?\n\n- [X] Yes I'd like to help by submitting a PR!\n", "before_files": [{"content": "from dataclasses import replace\nfrom typing import Optional, Tuple\n\nimport cv2\nimport numpy as np\n\nfrom supervision import Detections\nfrom supervision.detection.utils import clip_boxes, polygon_to_mask\nfrom supervision.draw.color import Color\nfrom supervision.draw.utils import draw_polygon, draw_text\nfrom supervision.geometry.core import Position\nfrom supervision.geometry.utils import get_polygon_center\n\n\nclass PolygonZone:\n \"\"\"\n A class for defining a polygon-shaped zone within a frame for detecting objects.\n\n Attributes:\n polygon (np.ndarray): A numpy array defining the polygon vertices\n frame_resolution_wh (Tuple[int, int]): The frame resolution (width, height)\n triggering_position (Position): The position within the bounding box that triggers the zone (default: Position.BOTTOM_CENTER)\n current_count (int): The current count of detected objects within the zone\n mask (np.ndarray): The 2D bool mask for the polygon zone\n \"\"\"\n\n def __init__(\n self,\n polygon: np.ndarray,\n frame_resolution_wh: Tuple[int, int],\n triggering_position: Position = Position.BOTTOM_CENTER,\n ):\n self.polygon = polygon\n self.frame_resolution_wh = frame_resolution_wh\n self.triggering_position = triggering_position\n self.current_count = 0\n\n width, height = frame_resolution_wh\n self.mask = polygon_to_mask(\n polygon=polygon, resolution_wh=(width + 1, height + 1)\n )\n\n def trigger(self, detections: 
Detections) -> np.ndarray:\n \"\"\"\n Determines if the detections are within the polygon zone.\n\n Parameters:\n detections (Detections): The detections to be checked against the polygon zone\n\n Returns:\n np.ndarray: A boolean numpy array indicating if each detection is within the polygon zone\n \"\"\"\n\n clipped_xyxy = clip_boxes(\n boxes_xyxy=detections.xyxy, frame_resolution_wh=self.frame_resolution_wh\n )\n clipped_detections = replace(detections, xyxy=clipped_xyxy)\n clipped_anchors = np.ceil(\n clipped_detections.get_anchor_coordinates(anchor=self.triggering_position)\n ).astype(int)\n is_in_zone = self.mask[clipped_anchors[:, 1], clipped_anchors[:, 0]]\n self.current_count = np.sum(is_in_zone)\n return is_in_zone.astype(bool)\n\n\nclass PolygonZoneAnnotator:\n \"\"\"\n A class for annotating a polygon-shaped zone within a frame with a count of detected objects.\n\n Attributes:\n zone (PolygonZone): The polygon zone to be annotated\n color (Color): The color to draw the polygon lines\n thickness (int): The thickness of the polygon lines, default is 2\n text_color (Color): The color of the text on the polygon, default is black\n text_scale (float): The scale of the text on the polygon, default is 0.5\n text_thickness (int): The thickness of the text on the polygon, default is 1\n text_padding (int): The padding around the text on the polygon, default is 10\n font (int): The font type for the text on the polygon, default is cv2.FONT_HERSHEY_SIMPLEX\n center (Tuple[int, int]): The center of the polygon for text placement\n \"\"\"\n\n def __init__(\n self,\n zone: PolygonZone,\n color: Color,\n thickness: int = 2,\n text_color: Color = Color.black(),\n text_scale: float = 0.5,\n text_thickness: int = 1,\n text_padding: int = 10,\n ):\n self.zone = zone\n self.color = color\n self.thickness = thickness\n self.text_color = text_color\n self.text_scale = text_scale\n self.text_thickness = text_thickness\n self.text_padding = text_padding\n self.font = cv2.FONT_HERSHEY_SIMPLEX\n self.center = get_polygon_center(polygon=zone.polygon)\n\n def annotate(self, scene: np.ndarray, label: Optional[str] = None) -> np.ndarray:\n \"\"\"\n Annotates the polygon zone within a frame with a count of detected objects.\n\n Parameters:\n scene (np.ndarray): The image on which the polygon zone will be annotated\n label (Optional[str]): An optional label for the count of detected objects within the polygon zone (default: None)\n\n Returns:\n np.ndarray: The image with the polygon zone and count of detected objects\n \"\"\"\n annotated_frame = draw_polygon(\n scene=scene,\n polygon=self.zone.polygon,\n color=self.color,\n thickness=self.thickness,\n )\n\n annotated_frame = draw_text(\n scene=annotated_frame,\n text=str(self.zone.current_count) if label is None else label,\n text_anchor=self.center,\n background_color=self.color,\n text_color=self.text_color,\n text_scale=self.text_scale,\n text_thickness=self.text_thickness,\n text_padding=self.text_padding,\n text_font=self.font,\n )\n\n return annotated_frame\n", "path": "supervision/detection/tools/polygon_zone.py"}], "after_files": [{"content": "from dataclasses import replace\nfrom typing import Optional, Tuple\n\nimport cv2\nimport numpy as np\n\nfrom supervision import Detections\nfrom supervision.detection.utils import clip_boxes, polygon_to_mask\nfrom supervision.draw.color import Color\nfrom supervision.draw.utils import draw_polygon, draw_text\nfrom supervision.geometry.core import Position\nfrom supervision.geometry.utils import 
get_polygon_center\n\n\nclass PolygonZone:\n \"\"\"\n A class for defining a polygon-shaped zone within a frame for detecting objects.\n\n Attributes:\n polygon (np.ndarray): A polygon represented by a numpy array of shape `(N, 2)`, containing the `x`, `y` coordinates of the points.\n frame_resolution_wh (Tuple[int, int]): The frame resolution (width, height)\n triggering_position (Position): The position within the bounding box that triggers the zone (default: Position.BOTTOM_CENTER)\n current_count (int): The current count of detected objects within the zone\n mask (np.ndarray): The 2D bool mask for the polygon zone\n \"\"\"\n\n def __init__(\n self,\n polygon: np.ndarray,\n frame_resolution_wh: Tuple[int, int],\n triggering_position: Position = Position.BOTTOM_CENTER,\n ):\n self.polygon = polygon.astype(int)\n self.frame_resolution_wh = frame_resolution_wh\n self.triggering_position = triggering_position\n self.current_count = 0\n\n width, height = frame_resolution_wh\n self.mask = polygon_to_mask(\n polygon=polygon, resolution_wh=(width + 1, height + 1)\n )\n\n def trigger(self, detections: Detections) -> np.ndarray:\n \"\"\"\n Determines if the detections are within the polygon zone.\n\n Parameters:\n detections (Detections): The detections to be checked against the polygon zone\n\n Returns:\n np.ndarray: A boolean numpy array indicating if each detection is within the polygon zone\n \"\"\"\n\n clipped_xyxy = clip_boxes(\n boxes_xyxy=detections.xyxy, frame_resolution_wh=self.frame_resolution_wh\n )\n clipped_detections = replace(detections, xyxy=clipped_xyxy)\n clipped_anchors = np.ceil(\n clipped_detections.get_anchor_coordinates(anchor=self.triggering_position)\n ).astype(int)\n is_in_zone = self.mask[clipped_anchors[:, 1], clipped_anchors[:, 0]]\n self.current_count = np.sum(is_in_zone)\n return is_in_zone.astype(bool)\n\n\nclass PolygonZoneAnnotator:\n \"\"\"\n A class for annotating a polygon-shaped zone within a frame with a count of detected objects.\n\n Attributes:\n zone (PolygonZone): The polygon zone to be annotated\n color (Color): The color to draw the polygon lines\n thickness (int): The thickness of the polygon lines, default is 2\n text_color (Color): The color of the text on the polygon, default is black\n text_scale (float): The scale of the text on the polygon, default is 0.5\n text_thickness (int): The thickness of the text on the polygon, default is 1\n text_padding (int): The padding around the text on the polygon, default is 10\n font (int): The font type for the text on the polygon, default is cv2.FONT_HERSHEY_SIMPLEX\n center (Tuple[int, int]): The center of the polygon for text placement\n \"\"\"\n\n def __init__(\n self,\n zone: PolygonZone,\n color: Color,\n thickness: int = 2,\n text_color: Color = Color.black(),\n text_scale: float = 0.5,\n text_thickness: int = 1,\n text_padding: int = 10,\n ):\n self.zone = zone\n self.color = color\n self.thickness = thickness\n self.text_color = text_color\n self.text_scale = text_scale\n self.text_thickness = text_thickness\n self.text_padding = text_padding\n self.font = cv2.FONT_HERSHEY_SIMPLEX\n self.center = get_polygon_center(polygon=zone.polygon)\n\n def annotate(self, scene: np.ndarray, label: Optional[str] = None) -> np.ndarray:\n \"\"\"\n Annotates the polygon zone within a frame with a count of detected objects.\n\n Parameters:\n scene (np.ndarray): The image on which the polygon zone will be annotated\n label (Optional[str]): An optional label for the count of detected objects within the polygon zone 
(default: None)\n\n Returns:\n np.ndarray: The image with the polygon zone and count of detected objects\n \"\"\"\n annotated_frame = draw_polygon(\n scene=scene,\n polygon=self.zone.polygon,\n color=self.color,\n thickness=self.thickness,\n )\n\n annotated_frame = draw_text(\n scene=annotated_frame,\n text=str(self.zone.current_count) if label is None else label,\n text_anchor=self.center,\n background_color=self.color,\n text_color=self.text_color,\n text_scale=self.text_scale,\n text_thickness=self.text_thickness,\n text_padding=self.text_padding,\n text_font=self.font,\n )\n\n return annotated_frame\n", "path": "supervision/detection/tools/polygon_zone.py"}]} | 1,868 | 275 |
gh_patches_debug_17927 | rasdani/github-patches | git_diff | mathesar-foundation__mathesar-2583 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Installation issues when running on mac (silicon)
## Description
I'm facing these issues while trying to install using the latest `install.sh` script from master. I'm running macOS Big Sur on a Mac with an Apple M1 processor.
1. An error is thrown while the line `sudo docker compose --profile prod up -d --wait` runs in the install script. Any command with `sudo docker` throws an error.
```
2023/02/25 23:34:45 must use ASL logging (which requires CGO) if running as root
2023/02/25 23:34:45 must use ASL logging (which requires CGO) if running as root
2023/02/25 23:34:45 must use ASL logging (which requires CGO) if running as root
2023/02/25 23:34:45 must use ASL logging (which requires CGO) if running as root
[+] Running 0/0
⠋ watchtower Pulling 0.1s
⠋ db Pulling 0.1s
⠋ caddy-reverse-proxy Pulling 0.1s
⠋ service Pulling 0.1s
error getting credentials - err: exit status 1, out: ``
```
    This is because Docker cannot run as root (or with sudo privileges) on macOS.
    If possible, we should avoid `sudo` entirely when running on a Mac.
2. The images don't run after downloading because the platforms do not match.
```
⠙ caddy-reverse-proxy The requested image's platform (linux/amd64) does not match the detected host platform (linux/arm64/v8) and no specific platform was requested 0.0s
⠿ Container mathesar_service Waiting 19.1s
⠏ service The requested image's platform (linux/amd64) does not match the detected host platform (linux/arm64/v8) and no specific platform was requested 0.0s
container for service "service" exited (3)
```
   We should be publishing an ARM image along with the existing AMD64 image. I sent a mail regarding this.
3. Installation fails because wget is not installed by default. We need to check if it is present during installation.
4. Startup (i.e. `docker compose --profile prod up -d --wait`) fails because `SECRET_KEY` in the `.env` file is empty.
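A minimal sketch of one way to populate the empty key before starting the stack (the `.env` file and `SECRET_KEY` variable names come from the issue; how the file is rewritten is an assumption):

```python
# Fill in SECRET_KEY in .env so `docker compose --profile prod up -d --wait` can start.
import secrets
from pathlib import Path

env_path = Path(".env")
lines = env_path.read_text().splitlines()
new_key = secrets.token_urlsafe(50)
lines = [f"SECRET_KEY={new_key}" if ln.startswith("SECRET_KEY=") else ln for ln in lines]
env_path.write_text("\n".join(lines) + "\n")
```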
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `db/install.py`
Content:
```
1 from sqlalchemy import text
2 from sqlalchemy.exc import OperationalError
3
4 from db import engine
5 from db.types import install
6
7
8 def install_mathesar(
9 database_name, username, password, hostname, port, skip_confirm
10 ):
11 """Create database and install Mathesar on it."""
12 user_db_engine = engine.create_future_engine(
13 username, password, hostname, database_name, port
14 )
15 try:
16 user_db_engine.connect()
17 print(f"Installing Mathesar on preexisting PostgreSQL database {database_name} at host {hostname}...")
18 install.install_mathesar_on_database(user_db_engine)
19 user_db_engine.dispose()
20 except OperationalError:
21 database_created = _create_database(
22 database_name=database_name,
23 hostname=hostname,
24 username=username,
25 password=password,
26 port=port,
27 skip_confirm=skip_confirm
28 )
29 if database_created:
30 print(f"Installing Mathesar on PostgreSQL database {database_name} at host {hostname}...")
31 install.install_mathesar_on_database(user_db_engine)
32 user_db_engine.dispose()
33 else:
34 print(f"Skipping installing on DB with key {database_name}.")
35
36
37 def _create_database(database_name, hostname, username, password, port, skip_confirm=True):
38 if skip_confirm is True:
39 create_database = "y"
40 else:
41 create_database = input(
42 f"Create a new Database called {database_name}? (y/n) > "
43 )
44 if create_database.lower() in ["y", "yes"]:
45 # We need to connect to an existing database inorder to create a new Database.
46 # So we use the default Database `postgres` that comes with postgres.
47 # TODO Throw correct error when the default postgres database does not exists(which is very rare but still possible)
48 root_database = "postgres"
49 root_db_engine = engine.create_future_engine(
50 username, password, hostname, root_database, port,
51 )
52 with root_db_engine.connect() as conn:
53 conn.execution_options(isolation_level="AUTOCOMMIT")
54 conn.execute(text(f"CREATE DATABASE {database_name}"))
55 root_db_engine.dispose()
56 print(f"Created DB is {database_name}.")
57 return True
58 else:
59 print(f"Database {database_name} not created!")
60 return False
61
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/db/install.py b/db/install.py
--- a/db/install.py
+++ b/db/install.py
@@ -10,7 +10,8 @@
):
"""Create database and install Mathesar on it."""
user_db_engine = engine.create_future_engine(
- username, password, hostname, database_name, port
+ username, password, hostname, database_name, port,
+ connect_args={"connect_timeout": 10}
)
try:
user_db_engine.connect()
@@ -48,6 +49,7 @@
root_database = "postgres"
root_db_engine = engine.create_future_engine(
username, password, hostname, root_database, port,
+ connect_args={"connect_timeout": 10}
)
with root_db_engine.connect() as conn:
conn.execution_options(isolation_level="AUTOCOMMIT")
| {"golden_diff": "diff --git a/db/install.py b/db/install.py\n--- a/db/install.py\n+++ b/db/install.py\n@@ -10,7 +10,8 @@\n ):\n \"\"\"Create database and install Mathesar on it.\"\"\"\n user_db_engine = engine.create_future_engine(\n- username, password, hostname, database_name, port\n+ username, password, hostname, database_name, port,\n+ connect_args={\"connect_timeout\": 10}\n )\n try:\n user_db_engine.connect()\n@@ -48,6 +49,7 @@\n root_database = \"postgres\"\n root_db_engine = engine.create_future_engine(\n username, password, hostname, root_database, port,\n+ connect_args={\"connect_timeout\": 10}\n )\n with root_db_engine.connect() as conn:\n conn.execution_options(isolation_level=\"AUTOCOMMIT\")\n", "issue": "Installation issues when running on mac (silicon)\n## Description\r\nI'm facing these issues while trying to install using the latest `install.sh` script from master. I'm running 'macOS Big Sur', on a mac with Apple M1 processor.\r\n\r\n1. Error thrown, while the line `sudo docker compose --profile prod up -d --wait` runs in the install script. Any command with `sudo docker` throws an error.\r\n\r\n ```\r\n 2023/02/25 23:34:45 must use ASL logging (which requires CGO) if running as root\r\n 2023/02/25 23:34:45 must use ASL logging (which requires CGO) if running as root\r\n 2023/02/25 23:34:45 must use ASL logging (which requires CGO) if running as root\r\n 2023/02/25 23:34:45 must use ASL logging (which requires CGO) if running as root\r\n [+] Running 0/0\r\n \u280b watchtower Pulling 0.1s\r\n \u280b db Pulling 0.1s\r\n \u280b caddy-reverse-proxy Pulling 0.1s\r\n \u280b service Pulling 0.1s\r\n error getting credentials - err: exit status 1, out: ``\r\n ``` \r\n This is because docker cannot run as root (or with sudo privileges) in mac.\r\n If possible, we should avoid `sudo` generally, while running on a mac.\r\n\r\n2. The images don't run after downloading because the platforms do not match.\r\n ```\r\n \u2819 caddy-reverse-proxy The requested image's platform (linux/amd64) does not match the detected host platform (linux/arm64/v8) and no specific platform was requested 0.0s\r\n \u283f Container mathesar_service Waiting 19.1s\r\n \u280f service The requested image's platform (linux/amd64) does not match the detected host platform (linux/arm64/v8) and no specific platform was requested 0.0s\r\n container for service \"service\" exited (3)\r\n ```\r\n We should be publishing an arm image along with the existing amd image. I sent a mail regarding this.\r\n\r\n3. Installation fails because wget is not installed by default. We need to check if it is present during installation.\r\n\r\n4. Startup (i.e. 
`docker compose --profile prod up -d --wait`) fails because `SECRET_KEY` in `.env` file is empty.\n", "before_files": [{"content": "from sqlalchemy import text\nfrom sqlalchemy.exc import OperationalError\n\nfrom db import engine\nfrom db.types import install\n\n\ndef install_mathesar(\n database_name, username, password, hostname, port, skip_confirm\n):\n \"\"\"Create database and install Mathesar on it.\"\"\"\n user_db_engine = engine.create_future_engine(\n username, password, hostname, database_name, port\n )\n try:\n user_db_engine.connect()\n print(f\"Installing Mathesar on preexisting PostgreSQL database {database_name} at host {hostname}...\")\n install.install_mathesar_on_database(user_db_engine)\n user_db_engine.dispose()\n except OperationalError:\n database_created = _create_database(\n database_name=database_name,\n hostname=hostname,\n username=username,\n password=password,\n port=port,\n skip_confirm=skip_confirm\n )\n if database_created:\n print(f\"Installing Mathesar on PostgreSQL database {database_name} at host {hostname}...\")\n install.install_mathesar_on_database(user_db_engine)\n user_db_engine.dispose()\n else:\n print(f\"Skipping installing on DB with key {database_name}.\")\n\n\ndef _create_database(database_name, hostname, username, password, port, skip_confirm=True):\n if skip_confirm is True:\n create_database = \"y\"\n else:\n create_database = input(\n f\"Create a new Database called {database_name}? (y/n) > \"\n )\n if create_database.lower() in [\"y\", \"yes\"]:\n # We need to connect to an existing database inorder to create a new Database.\n # So we use the default Database `postgres` that comes with postgres.\n # TODO Throw correct error when the default postgres database does not exists(which is very rare but still possible)\n root_database = \"postgres\"\n root_db_engine = engine.create_future_engine(\n username, password, hostname, root_database, port,\n )\n with root_db_engine.connect() as conn:\n conn.execution_options(isolation_level=\"AUTOCOMMIT\")\n conn.execute(text(f\"CREATE DATABASE {database_name}\"))\n root_db_engine.dispose()\n print(f\"Created DB is {database_name}.\")\n return True\n else:\n print(f\"Database {database_name} not created!\")\n return False\n", "path": "db/install.py"}], "after_files": [{"content": "from sqlalchemy import text\nfrom sqlalchemy.exc import OperationalError\n\nfrom db import engine\nfrom db.types import install\n\n\ndef install_mathesar(\n database_name, username, password, hostname, port, skip_confirm\n):\n \"\"\"Create database and install Mathesar on it.\"\"\"\n user_db_engine = engine.create_future_engine(\n username, password, hostname, database_name, port,\n connect_args={\"connect_timeout\": 10}\n )\n try:\n user_db_engine.connect()\n print(f\"Installing Mathesar on preexisting PostgreSQL database {database_name} at host {hostname}...\")\n install.install_mathesar_on_database(user_db_engine)\n user_db_engine.dispose()\n except OperationalError:\n database_created = _create_database(\n database_name=database_name,\n hostname=hostname,\n username=username,\n password=password,\n port=port,\n skip_confirm=skip_confirm\n )\n if database_created:\n print(f\"Installing Mathesar on PostgreSQL database {database_name} at host {hostname}...\")\n install.install_mathesar_on_database(user_db_engine)\n user_db_engine.dispose()\n else:\n print(f\"Skipping installing on DB with key {database_name}.\")\n\n\ndef _create_database(database_name, hostname, username, password, port, skip_confirm=True):\n if skip_confirm 
is True:\n create_database = \"y\"\n else:\n create_database = input(\n f\"Create a new Database called {database_name}? (y/n) > \"\n )\n if create_database.lower() in [\"y\", \"yes\"]:\n # We need to connect to an existing database inorder to create a new Database.\n # So we use the default Database `postgres` that comes with postgres.\n # TODO Throw correct error when the default postgres database does not exists(which is very rare but still possible)\n root_database = \"postgres\"\n root_db_engine = engine.create_future_engine(\n username, password, hostname, root_database, port,\n connect_args={\"connect_timeout\": 10}\n )\n with root_db_engine.connect() as conn:\n conn.execution_options(isolation_level=\"AUTOCOMMIT\")\n conn.execute(text(f\"CREATE DATABASE {database_name}\"))\n root_db_engine.dispose()\n print(f\"Created DB is {database_name}.\")\n return True\n else:\n print(f\"Database {database_name} not created!\")\n return False\n", "path": "db/install.py"}]} | 1,439 | 185 |
gh_patches_debug_31573 | rasdani/github-patches | git_diff | litestar-org__litestar-1483 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
StaticFilesConfig and virtual directories
I'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem.
This is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.
https://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `starlite/cli/commands/core.py`
Content:
```
1 import inspect
2
3 from click import command, option
4 from rich.tree import Tree
5
6 from starlite import HTTPRoute, Starlite, WebSocketRoute
7 from starlite.cli.utils import StarliteCLIException, StarliteEnv, console, show_app_info
8 from starlite.utils.helpers import unwrap_partial
9
10
11 @command(name="info")
12 def info_command(app: Starlite) -> None:
13 """Show information about the detected Starlite app."""
14
15 show_app_info(app)
16
17
18 @command(name="run")
19 @option("-r", "--reload", help="Reload server on changes", default=False, is_flag=True)
20 @option("-p", "--port", help="Serve under this port", type=int, default=8000, show_default=True)
21 @option("--host", help="Server under this host", default="127.0.0.1", show_default=True)
22 @option("--debug", help="Run app in debug mode", is_flag=True)
23 def run_command(
24 reload: bool,
25 port: int,
26 host: str,
27 debug: bool,
28 env: StarliteEnv,
29 app: Starlite,
30 ) -> None:
31 """Run a Starlite app.
32
33 The app can be either passed as a module path in the form of <module name>.<submodule>:<app instance or factory>,
34 set as an environment variable STARLITE_APP with the same format or automatically discovered from one of these
35 canonical paths: app.py, asgi.py, application.py or app/__init__.py. When autodiscovering application factories,
36 functions with the name ``create_app`` are considered, or functions that are annotated as returning a ``Starlite``
37 instance.
38 """
39
40 try:
41 import uvicorn
42 except ImportError:
43 raise StarliteCLIException("Uvicorn needs to be installed to run an app") # pylint: disable=W0707
44
45 if debug or env.debug:
46 app.debug = True
47
48 show_app_info(app)
49
50 console.rule("[yellow]Starting server process", align="left")
51
52 uvicorn.run(
53 env.app_path,
54 reload=env.reload or reload,
55 host=env.host or host,
56 port=env.port or port,
57 factory=env.is_app_factory,
58 )
59
60
61 @command(name="routes")
62 def routes_command(app: Starlite) -> None: # pragma: no cover
63 """Display information about the application's routes."""
64
65 tree = Tree("", hide_root=True)
66
67 for route in sorted(app.routes, key=lambda r: r.path):
68 if isinstance(route, HTTPRoute):
69 branch = tree.add(f"[green]{route.path}[/green] (HTTP)")
70 for handler in route.route_handlers:
71 handler_info = [
72 f"[blue]{handler.name or handler.handler_name}[/blue]",
73 ]
74
75 if inspect.iscoroutinefunction(unwrap_partial(handler.fn.value)):
76 handler_info.append("[magenta]async[/magenta]")
77 else:
78 handler_info.append("[yellow]sync[/yellow]")
79
80 handler_info.append(f'[cyan]{", ".join(sorted(handler.http_methods))}[/cyan]')
81
82 if len(handler.paths) > 1:
83 for path in handler.paths:
84 branch.add(" ".join([f"[green]{path}[green]", *handler_info]))
85 else:
86 branch.add(" ".join(handler_info))
87
88 else:
89 if isinstance(route, WebSocketRoute):
90 route_type = "WS"
91 else:
92 route_type = "ASGI"
93 branch = tree.add(f"[green]{route.path}[/green] ({route_type})")
94 branch.add(f"[blue]{route.route_handler.name or route.route_handler.handler_name}[/blue]")
95
96 console.print(tree)
97
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/starlite/cli/commands/core.py b/starlite/cli/commands/core.py
--- a/starlite/cli/commands/core.py
+++ b/starlite/cli/commands/core.py
@@ -1,4 +1,6 @@
import inspect
+import subprocess
+from typing import Any, Dict, List
from click import command, option
from rich.tree import Tree
@@ -8,6 +10,18 @@
from starlite.utils.helpers import unwrap_partial
+def _convert_uvicorn_args(args: Dict[str, Any]) -> List[str]:
+ process_args = []
+ for arg, value in args.items():
+ if isinstance(value, bool):
+ if value:
+ process_args.append(f"--{arg}")
+ else:
+ process_args.append(f"--{arg}={value}")
+
+ return process_args
+
+
@command(name="info")
def info_command(app: Starlite) -> None:
"""Show information about the detected Starlite app."""
@@ -38,24 +52,24 @@
"""
try:
- import uvicorn
+ import uvicorn # noqa: F401
except ImportError:
raise StarliteCLIException("Uvicorn needs to be installed to run an app") # pylint: disable=W0707
if debug or env.debug:
app.debug = True
- show_app_info(app)
+ # invoke uvicorn in a subprocess to be able to use the --reload flag. see
+ # https://github.com/litestar-org/litestar/issues/1191 and https://github.com/encode/uvicorn/issues/1045
- console.rule("[yellow]Starting server process", align="left")
+ process_args = {
+ "reload": env.reload or reload,
+ "host": env.host or host,
+ "port": env.port or port,
+ "factory": env.is_app_factory,
+ }
- uvicorn.run(
- env.app_path,
- reload=env.reload or reload,
- host=env.host or host,
- port=env.port or port,
- factory=env.is_app_factory,
- )
+ subprocess.run(["uvicorn", env.app_path, *_convert_uvicorn_args(process_args)], check=True)
@command(name="routes")
| {"golden_diff": "diff --git a/starlite/cli/commands/core.py b/starlite/cli/commands/core.py\n--- a/starlite/cli/commands/core.py\n+++ b/starlite/cli/commands/core.py\n@@ -1,4 +1,6 @@\n import inspect\n+import subprocess\n+from typing import Any, Dict, List\n \n from click import command, option\n from rich.tree import Tree\n@@ -8,6 +10,18 @@\n from starlite.utils.helpers import unwrap_partial\n \n \n+def _convert_uvicorn_args(args: Dict[str, Any]) -> List[str]:\n+ process_args = []\n+ for arg, value in args.items():\n+ if isinstance(value, bool):\n+ if value:\n+ process_args.append(f\"--{arg}\")\n+ else:\n+ process_args.append(f\"--{arg}={value}\")\n+\n+ return process_args\n+\n+\n @command(name=\"info\")\n def info_command(app: Starlite) -> None:\n \"\"\"Show information about the detected Starlite app.\"\"\"\n@@ -38,24 +52,24 @@\n \"\"\"\n \n try:\n- import uvicorn\n+ import uvicorn # noqa: F401\n except ImportError:\n raise StarliteCLIException(\"Uvicorn needs to be installed to run an app\") # pylint: disable=W0707\n \n if debug or env.debug:\n app.debug = True\n \n- show_app_info(app)\n+ # invoke uvicorn in a subprocess to be able to use the --reload flag. see\n+ # https://github.com/litestar-org/litestar/issues/1191 and https://github.com/encode/uvicorn/issues/1045\n \n- console.rule(\"[yellow]Starting server process\", align=\"left\")\n+ process_args = {\n+ \"reload\": env.reload or reload,\n+ \"host\": env.host or host,\n+ \"port\": env.port or port,\n+ \"factory\": env.is_app_factory,\n+ }\n \n- uvicorn.run(\n- env.app_path,\n- reload=env.reload or reload,\n- host=env.host or host,\n- port=env.port or port,\n- factory=env.is_app_factory,\n- )\n+ subprocess.run([\"uvicorn\", env.app_path, *_convert_uvicorn_args(process_args)], check=True)\n \n \n @command(name=\"routes\")\n", "issue": "StaticFilesConfig and virtual directories\nI'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem. \r\n\r\nThis is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). 
I think this condition should be relaxed to support virtual filesystems.\r\n\r\nhttps://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32\n", "before_files": [{"content": "import inspect\n\nfrom click import command, option\nfrom rich.tree import Tree\n\nfrom starlite import HTTPRoute, Starlite, WebSocketRoute\nfrom starlite.cli.utils import StarliteCLIException, StarliteEnv, console, show_app_info\nfrom starlite.utils.helpers import unwrap_partial\n\n\n@command(name=\"info\")\ndef info_command(app: Starlite) -> None:\n \"\"\"Show information about the detected Starlite app.\"\"\"\n\n show_app_info(app)\n\n\n@command(name=\"run\")\n@option(\"-r\", \"--reload\", help=\"Reload server on changes\", default=False, is_flag=True)\n@option(\"-p\", \"--port\", help=\"Serve under this port\", type=int, default=8000, show_default=True)\n@option(\"--host\", help=\"Server under this host\", default=\"127.0.0.1\", show_default=True)\n@option(\"--debug\", help=\"Run app in debug mode\", is_flag=True)\ndef run_command(\n reload: bool,\n port: int,\n host: str,\n debug: bool,\n env: StarliteEnv,\n app: Starlite,\n) -> None:\n \"\"\"Run a Starlite app.\n\n The app can be either passed as a module path in the form of <module name>.<submodule>:<app instance or factory>,\n set as an environment variable STARLITE_APP with the same format or automatically discovered from one of these\n canonical paths: app.py, asgi.py, application.py or app/__init__.py. When autodiscovering application factories,\n functions with the name ``create_app`` are considered, or functions that are annotated as returning a ``Starlite``\n instance.\n \"\"\"\n\n try:\n import uvicorn\n except ImportError:\n raise StarliteCLIException(\"Uvicorn needs to be installed to run an app\") # pylint: disable=W0707\n\n if debug or env.debug:\n app.debug = True\n\n show_app_info(app)\n\n console.rule(\"[yellow]Starting server process\", align=\"left\")\n\n uvicorn.run(\n env.app_path,\n reload=env.reload or reload,\n host=env.host or host,\n port=env.port or port,\n factory=env.is_app_factory,\n )\n\n\n@command(name=\"routes\")\ndef routes_command(app: Starlite) -> None: # pragma: no cover\n \"\"\"Display information about the application's routes.\"\"\"\n\n tree = Tree(\"\", hide_root=True)\n\n for route in sorted(app.routes, key=lambda r: r.path):\n if isinstance(route, HTTPRoute):\n branch = tree.add(f\"[green]{route.path}[/green] (HTTP)\")\n for handler in route.route_handlers:\n handler_info = [\n f\"[blue]{handler.name or handler.handler_name}[/blue]\",\n ]\n\n if inspect.iscoroutinefunction(unwrap_partial(handler.fn.value)):\n handler_info.append(\"[magenta]async[/magenta]\")\n else:\n handler_info.append(\"[yellow]sync[/yellow]\")\n\n handler_info.append(f'[cyan]{\", \".join(sorted(handler.http_methods))}[/cyan]')\n\n if len(handler.paths) > 1:\n for path in handler.paths:\n branch.add(\" \".join([f\"[green]{path}[green]\", *handler_info]))\n else:\n branch.add(\" \".join(handler_info))\n\n else:\n if isinstance(route, WebSocketRoute):\n route_type = \"WS\"\n else:\n route_type = \"ASGI\"\n branch = tree.add(f\"[green]{route.path}[/green] ({route_type})\")\n branch.add(f\"[blue]{route.route_handler.name or route.route_handler.handler_name}[/blue]\")\n\n console.print(tree)\n", "path": "starlite/cli/commands/core.py"}], "after_files": [{"content": "import inspect\nimport subprocess\nfrom typing import Any, Dict, List\n\nfrom click import command, option\nfrom rich.tree import 
Tree\n\nfrom starlite import HTTPRoute, Starlite, WebSocketRoute\nfrom starlite.cli.utils import StarliteCLIException, StarliteEnv, console, show_app_info\nfrom starlite.utils.helpers import unwrap_partial\n\n\ndef _convert_uvicorn_args(args: Dict[str, Any]) -> List[str]:\n process_args = []\n for arg, value in args.items():\n if isinstance(value, bool):\n if value:\n process_args.append(f\"--{arg}\")\n else:\n process_args.append(f\"--{arg}={value}\")\n\n return process_args\n\n\n@command(name=\"info\")\ndef info_command(app: Starlite) -> None:\n \"\"\"Show information about the detected Starlite app.\"\"\"\n\n show_app_info(app)\n\n\n@command(name=\"run\")\n@option(\"-r\", \"--reload\", help=\"Reload server on changes\", default=False, is_flag=True)\n@option(\"-p\", \"--port\", help=\"Serve under this port\", type=int, default=8000, show_default=True)\n@option(\"--host\", help=\"Server under this host\", default=\"127.0.0.1\", show_default=True)\n@option(\"--debug\", help=\"Run app in debug mode\", is_flag=True)\ndef run_command(\n reload: bool,\n port: int,\n host: str,\n debug: bool,\n env: StarliteEnv,\n app: Starlite,\n) -> None:\n \"\"\"Run a Starlite app.\n\n The app can be either passed as a module path in the form of <module name>.<submodule>:<app instance or factory>,\n set as an environment variable STARLITE_APP with the same format or automatically discovered from one of these\n canonical paths: app.py, asgi.py, application.py or app/__init__.py. When autodiscovering application factories,\n functions with the name ``create_app`` are considered, or functions that are annotated as returning a ``Starlite``\n instance.\n \"\"\"\n\n try:\n import uvicorn # noqa: F401\n except ImportError:\n raise StarliteCLIException(\"Uvicorn needs to be installed to run an app\") # pylint: disable=W0707\n\n if debug or env.debug:\n app.debug = True\n\n # invoke uvicorn in a subprocess to be able to use the --reload flag. 
see\n # https://github.com/litestar-org/litestar/issues/1191 and https://github.com/encode/uvicorn/issues/1045\n\n process_args = {\n \"reload\": env.reload or reload,\n \"host\": env.host or host,\n \"port\": env.port or port,\n \"factory\": env.is_app_factory,\n }\n\n subprocess.run([\"uvicorn\", env.app_path, *_convert_uvicorn_args(process_args)], check=True)\n\n\n@command(name=\"routes\")\ndef routes_command(app: Starlite) -> None: # pragma: no cover\n \"\"\"Display information about the application's routes.\"\"\"\n\n tree = Tree(\"\", hide_root=True)\n\n for route in sorted(app.routes, key=lambda r: r.path):\n if isinstance(route, HTTPRoute):\n branch = tree.add(f\"[green]{route.path}[/green] (HTTP)\")\n for handler in route.route_handlers:\n handler_info = [\n f\"[blue]{handler.name or handler.handler_name}[/blue]\",\n ]\n\n if inspect.iscoroutinefunction(unwrap_partial(handler.fn.value)):\n handler_info.append(\"[magenta]async[/magenta]\")\n else:\n handler_info.append(\"[yellow]sync[/yellow]\")\n\n handler_info.append(f'[cyan]{\", \".join(sorted(handler.http_methods))}[/cyan]')\n\n if len(handler.paths) > 1:\n for path in handler.paths:\n branch.add(\" \".join([f\"[green]{path}[green]\", *handler_info]))\n else:\n branch.add(\" \".join(handler_info))\n\n else:\n if isinstance(route, WebSocketRoute):\n route_type = \"WS\"\n else:\n route_type = \"ASGI\"\n branch = tree.add(f\"[green]{route.path}[/green] ({route_type})\")\n branch.add(f\"[blue]{route.route_handler.name or route.route_handler.handler_name}[/blue]\")\n\n console.print(tree)\n", "path": "starlite/cli/commands/core.py"}]} | 1,404 | 513 |
gh_patches_debug_33762 | rasdani/github-patches | git_diff | sunpy__sunpy-3818 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Reduce import time for `sunpy.image.transform`
(I was getting annoyed by our import times, and was inspired by astropy/astropy#4598 to look deeper.)
This one's easy. Importing `sunpy.image.transform` takes 1.7 seconds on my machine (see below). 0.7 seconds is spent importing `skimage.transform`. We should defer that import to run-time.
(see #3445 for dealing with `pkg_resources`)
```
python -X importtime -c "import sunpy.image.transform" 2> transform.log
tuna transform.log
```

--- END ISSUE ---
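A deferred import along the lines suggested above keeps the module-level import cheap and only pays the scikit-image cost on first use. This is a minimal sketch, not sunpy's actual function:

```python
import warnings

def rotate(image, angle, use_scipy=False):
    """Rotate ``image`` by ``angle`` degrees, importing the heavy dependency lazily."""
    if not use_scipy:
        try:
            import skimage.transform  # deferred: the ~0.7 s import happens here, not at module load
        except ImportError:
            warnings.warn("scikit-image could not be imported, falling back to scipy",
                          ImportWarning)
            use_scipy = True
    if use_scipy:
        import scipy.ndimage
        return scipy.ndimage.rotate(image, angle, reshape=False)
    return skimage.transform.rotate(image, angle)
```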
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sunpy/image/transform.py`
Content:
```
1 """
2 Functions for geometrical image transformation and warping.
3 """
4 import warnings
5
6 import numpy as np
7 import scipy.ndimage.interpolation
8
9 from sunpy.util.exceptions import SunpyUserWarning
10
11 try:
12 import skimage.transform
13 scikit_image_not_found = False
14 except ImportError:
15 warnings.warn("scikit-image could not be imported. Image rotation will use scipy",
16 ImportWarning)
17 scikit_image_not_found = True
18
19
20 __all__ = ['affine_transform']
21
22
23 def affine_transform(image, rmatrix, order=3, scale=1.0, image_center=None,
24 recenter=False, missing=0.0, use_scipy=False):
25 """
26 Rotates, shifts and scales an image.
27
28 Will use `skimage.transform.warp` unless scikit-image can't be imported
29 then it will use`scipy.ndimage.interpolation.affine_transform`.
30
31 Parameters
32 ----------
33 image : `numpy.ndarray`
34 2D image to be rotated.
35 rmatrix : `numpy.ndarray` that is 2x2
36 Linear transformation rotation matrix.
37 order : `int` 0-5, optional
38 Interpolation order to be used, defaults to 3. When using scikit-image this parameter
39 is passed into `skimage.transform.warp` (e.g., 3 corresponds to bi-cubic interpolation).
40 When using scipy it is passed into
41 `scipy.ndimage.interpolation.affine_transform` where it controls the order of the spline.
42 scale : `float`
43 A scale factor for the image with the default being no scaling.
44 image_center : tuple, optional
45 The point in the image to rotate around (axis of rotation).
46 Defaults to the center of the array.
47 recenter : `bool` or array-like, optional
48 Move the axis of rotation to the center of the array or recenter coords.
49 Defaults to `True` i.e., recenter to the center of the array.
50 missing : `float`, optional
51 The value to replace any missing data after the transformation.
52 use_scipy : `bool`, optional
53 Force use of `scipy.ndimage.interpolation.affine_transform`.
54 Will set all "NaNs" in image to zero before doing the transform.
55 Defaults to `False`, unless scikit-image can't be imported.
56
57 Returns
58 -------
59 `numpy.ndarray`:
60 New rotated, scaled and translated image.
61
62 Notes
63 -----
64 This algorithm uses an affine transformation as opposed to a polynomial
65 geometrical transformation, which by default is `skimage.transform.warp`.
66 One can specify using `scipy.ndimage.interpolation.affine_transform` as
67 an alternative affine transformation. The two transformations use different
68 algorithms and thus do not give identical output.
69
70 When using for `skimage.transform.warp` with order >= 4 or using
71 `scipy.ndimage.interpolation.affine_transform` at all, "NaN" values will
72 replaced with zero prior to rotation. No attempt is made to retain the NaN
73 values.
74
75 Input arrays with integer data are cast to float 64 and can be re-cast using
76 `numpy.ndarray.astype` if desired.
77
78 Although this function is analogous to the IDL's ``rot`` function, it does not
79 use the same algorithm as the IDL ``rot`` function.
80 IDL's ``rot`` calls the `POLY_2D <https://www.harrisgeospatial.com/docs/poly_2d.html>`__
81 method to calculate the inverse mapping of original to target pixel
82 coordinates. This is a polynomial geometrical transformation.
83 Then optionally it uses a bicubic convolution interpolation
84 algorithm to map the original to target pixel values.
85 """
86 rmatrix = rmatrix / scale
87 array_center = (np.array(image.shape)[::-1]-1)/2.0
88
89 # Make sure the image center is an array and is where it's supposed to be
90 if image_center is not None:
91 image_center = np.asanyarray(image_center)
92 else:
93 image_center = array_center
94
95 # Determine center of rotation based on use (or not) of the recenter keyword
96 if recenter:
97 rot_center = array_center
98 else:
99 rot_center = image_center
100
101 displacement = np.dot(rmatrix, rot_center)
102 shift = image_center - displacement
103
104 if use_scipy or scikit_image_not_found:
105 if np.any(np.isnan(image)):
106 warnings.warn("Setting NaNs to 0 for SciPy rotation.", SunpyUserWarning)
107 # Transform the image using the scipy affine transform
108 rotated_image = scipy.ndimage.interpolation.affine_transform(
109 np.nan_to_num(image).T, rmatrix, offset=shift, order=order,
110 mode='constant', cval=missing).T
111 else:
112 # Make the rotation matrix 3x3 to include translation of the image
113 skmatrix = np.zeros((3, 3))
114 skmatrix[:2, :2] = rmatrix
115 skmatrix[2, 2] = 1.0
116 skmatrix[:2, 2] = shift
117 tform = skimage.transform.AffineTransform(skmatrix)
118
119 # Transform the image using the skimage function
120 if not np.issubdtype(image.dtype, np.float64):
121 warnings.warn("Input data has been cast to float64.", SunpyUserWarning)
122 adjusted_image = image.astype(np.float64)
123 else:
124 adjusted_image = image.copy()
125 if np.any(np.isnan(adjusted_image)) and order >= 4:
126 warnings.warn("Setting NaNs to 0 for higher-order scikit-image rotation.", SunpyUserWarning)
127 adjusted_image = np.nan_to_num(adjusted_image)
128
129 rotated_image = skimage.transform.warp(adjusted_image, tform, order=order,
130 mode='constant', cval=missing)
131
132 return rotated_image
133
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sunpy/image/transform.py b/sunpy/image/transform.py
--- a/sunpy/image/transform.py
+++ b/sunpy/image/transform.py
@@ -8,15 +8,6 @@
from sunpy.util.exceptions import SunpyUserWarning
-try:
- import skimage.transform
- scikit_image_not_found = False
-except ImportError:
- warnings.warn("scikit-image could not be imported. Image rotation will use scipy",
- ImportWarning)
- scikit_image_not_found = True
-
-
__all__ = ['affine_transform']
@@ -84,7 +75,7 @@
algorithm to map the original to target pixel values.
"""
rmatrix = rmatrix / scale
- array_center = (np.array(image.shape)[::-1]-1)/2.0
+ array_center = (np.array(image.shape)[::-1] - 1) / 2.0
# Make sure the image center is an array and is where it's supposed to be
if image_center is not None:
@@ -100,14 +91,20 @@
displacement = np.dot(rmatrix, rot_center)
shift = image_center - displacement
-
- if use_scipy or scikit_image_not_found:
+ if not use_scipy:
+ try:
+ import skimage.transform
+ except ImportError:
+ warnings.warn("scikit-image could not be imported. Image rotation will use scipy",
+ ImportWarning)
+ use_scipy = True
+ if use_scipy:
if np.any(np.isnan(image)):
warnings.warn("Setting NaNs to 0 for SciPy rotation.", SunpyUserWarning)
# Transform the image using the scipy affine transform
rotated_image = scipy.ndimage.interpolation.affine_transform(
- np.nan_to_num(image).T, rmatrix, offset=shift, order=order,
- mode='constant', cval=missing).T
+ np.nan_to_num(image).T, rmatrix, offset=shift, order=order,
+ mode='constant', cval=missing).T
else:
# Make the rotation matrix 3x3 to include translation of the image
skmatrix = np.zeros((3, 3))
| {"golden_diff": "diff --git a/sunpy/image/transform.py b/sunpy/image/transform.py\n--- a/sunpy/image/transform.py\n+++ b/sunpy/image/transform.py\n@@ -8,15 +8,6 @@\n \n from sunpy.util.exceptions import SunpyUserWarning\n \n-try:\n- import skimage.transform\n- scikit_image_not_found = False\n-except ImportError:\n- warnings.warn(\"scikit-image could not be imported. Image rotation will use scipy\",\n- ImportWarning)\n- scikit_image_not_found = True\n-\n-\n __all__ = ['affine_transform']\n \n \n@@ -84,7 +75,7 @@\n algorithm to map the original to target pixel values.\n \"\"\"\n rmatrix = rmatrix / scale\n- array_center = (np.array(image.shape)[::-1]-1)/2.0\n+ array_center = (np.array(image.shape)[::-1] - 1) / 2.0\n \n # Make sure the image center is an array and is where it's supposed to be\n if image_center is not None:\n@@ -100,14 +91,20 @@\n \n displacement = np.dot(rmatrix, rot_center)\n shift = image_center - displacement\n-\n- if use_scipy or scikit_image_not_found:\n+ if not use_scipy:\n+ try:\n+ import skimage.transform\n+ except ImportError:\n+ warnings.warn(\"scikit-image could not be imported. Image rotation will use scipy\",\n+ ImportWarning)\n+ use_scipy = True\n+ if use_scipy:\n if np.any(np.isnan(image)):\n warnings.warn(\"Setting NaNs to 0 for SciPy rotation.\", SunpyUserWarning)\n # Transform the image using the scipy affine transform\n rotated_image = scipy.ndimage.interpolation.affine_transform(\n- np.nan_to_num(image).T, rmatrix, offset=shift, order=order,\n- mode='constant', cval=missing).T\n+ np.nan_to_num(image).T, rmatrix, offset=shift, order=order,\n+ mode='constant', cval=missing).T\n else:\n # Make the rotation matrix 3x3 to include translation of the image\n skmatrix = np.zeros((3, 3))\n", "issue": "Reduce import time for `sunpy.image.transform`\n(I was getting annoyed by our import times, and was inspired by astropy/astropy#4598 to look deeper.)\r\n\r\nThis one's easy. Importing `sunpy.image.transform` takes 1.7 seconds on my machine (see below). 0.7 seconds is spent importing `skimage.transform`. We should defer that import to run-time.\r\n\r\n(see #3445 for dealing with `pkg_resources`)\r\n\r\n```\r\npython -X importtime -c \"import sunpy.image.transform\" 2> transform.log\r\ntuna transform.log\r\n```\r\n\n", "before_files": [{"content": "\"\"\"\nFunctions for geometrical image transformation and warping.\n\"\"\"\nimport warnings\n\nimport numpy as np\nimport scipy.ndimage.interpolation\n\nfrom sunpy.util.exceptions import SunpyUserWarning\n\ntry:\n import skimage.transform\n scikit_image_not_found = False\nexcept ImportError:\n warnings.warn(\"scikit-image could not be imported. Image rotation will use scipy\",\n ImportWarning)\n scikit_image_not_found = True\n\n\n__all__ = ['affine_transform']\n\n\ndef affine_transform(image, rmatrix, order=3, scale=1.0, image_center=None,\n recenter=False, missing=0.0, use_scipy=False):\n \"\"\"\n Rotates, shifts and scales an image.\n\n Will use `skimage.transform.warp` unless scikit-image can't be imported\n then it will use`scipy.ndimage.interpolation.affine_transform`.\n\n Parameters\n ----------\n image : `numpy.ndarray`\n 2D image to be rotated.\n rmatrix : `numpy.ndarray` that is 2x2\n Linear transformation rotation matrix.\n order : `int` 0-5, optional\n Interpolation order to be used, defaults to 3. 
When using scikit-image this parameter\n is passed into `skimage.transform.warp` (e.g., 3 corresponds to bi-cubic interpolation).\n When using scipy it is passed into\n `scipy.ndimage.interpolation.affine_transform` where it controls the order of the spline.\n scale : `float`\n A scale factor for the image with the default being no scaling.\n image_center : tuple, optional\n The point in the image to rotate around (axis of rotation).\n Defaults to the center of the array.\n recenter : `bool` or array-like, optional\n Move the axis of rotation to the center of the array or recenter coords.\n Defaults to `True` i.e., recenter to the center of the array.\n missing : `float`, optional\n The value to replace any missing data after the transformation.\n use_scipy : `bool`, optional\n Force use of `scipy.ndimage.interpolation.affine_transform`.\n Will set all \"NaNs\" in image to zero before doing the transform.\n Defaults to `False`, unless scikit-image can't be imported.\n\n Returns\n -------\n `numpy.ndarray`:\n New rotated, scaled and translated image.\n\n Notes\n -----\n This algorithm uses an affine transformation as opposed to a polynomial\n geometrical transformation, which by default is `skimage.transform.warp`.\n One can specify using `scipy.ndimage.interpolation.affine_transform` as\n an alternative affine transformation. The two transformations use different\n algorithms and thus do not give identical output.\n\n When using for `skimage.transform.warp` with order >= 4 or using\n `scipy.ndimage.interpolation.affine_transform` at all, \"NaN\" values will\n replaced with zero prior to rotation. No attempt is made to retain the NaN\n values.\n\n Input arrays with integer data are cast to float 64 and can be re-cast using\n `numpy.ndarray.astype` if desired.\n\n Although this function is analogous to the IDL's ``rot`` function, it does not\n use the same algorithm as the IDL ``rot`` function.\n IDL's ``rot`` calls the `POLY_2D <https://www.harrisgeospatial.com/docs/poly_2d.html>`__\n method to calculate the inverse mapping of original to target pixel\n coordinates. 
This is a polynomial geometrical transformation.\n Then optionally it uses a bicubic convolution interpolation\n algorithm to map the original to target pixel values.\n \"\"\"\n rmatrix = rmatrix / scale\n array_center = (np.array(image.shape)[::-1]-1)/2.0\n\n # Make sure the image center is an array and is where it's supposed to be\n if image_center is not None:\n image_center = np.asanyarray(image_center)\n else:\n image_center = array_center\n\n # Determine center of rotation based on use (or not) of the recenter keyword\n if recenter:\n rot_center = array_center\n else:\n rot_center = image_center\n\n displacement = np.dot(rmatrix, rot_center)\n shift = image_center - displacement\n\n if use_scipy or scikit_image_not_found:\n if np.any(np.isnan(image)):\n warnings.warn(\"Setting NaNs to 0 for SciPy rotation.\", SunpyUserWarning)\n # Transform the image using the scipy affine transform\n rotated_image = scipy.ndimage.interpolation.affine_transform(\n np.nan_to_num(image).T, rmatrix, offset=shift, order=order,\n mode='constant', cval=missing).T\n else:\n # Make the rotation matrix 3x3 to include translation of the image\n skmatrix = np.zeros((3, 3))\n skmatrix[:2, :2] = rmatrix\n skmatrix[2, 2] = 1.0\n skmatrix[:2, 2] = shift\n tform = skimage.transform.AffineTransform(skmatrix)\n\n # Transform the image using the skimage function\n if not np.issubdtype(image.dtype, np.float64):\n warnings.warn(\"Input data has been cast to float64.\", SunpyUserWarning)\n adjusted_image = image.astype(np.float64)\n else:\n adjusted_image = image.copy()\n if np.any(np.isnan(adjusted_image)) and order >= 4:\n warnings.warn(\"Setting NaNs to 0 for higher-order scikit-image rotation.\", SunpyUserWarning)\n adjusted_image = np.nan_to_num(adjusted_image)\n\n rotated_image = skimage.transform.warp(adjusted_image, tform, order=order,\n mode='constant', cval=missing)\n\n return rotated_image\n", "path": "sunpy/image/transform.py"}], "after_files": [{"content": "\"\"\"\nFunctions for geometrical image transformation and warping.\n\"\"\"\nimport warnings\n\nimport numpy as np\nimport scipy.ndimage.interpolation\n\nfrom sunpy.util.exceptions import SunpyUserWarning\n\n__all__ = ['affine_transform']\n\n\ndef affine_transform(image, rmatrix, order=3, scale=1.0, image_center=None,\n recenter=False, missing=0.0, use_scipy=False):\n \"\"\"\n Rotates, shifts and scales an image.\n\n Will use `skimage.transform.warp` unless scikit-image can't be imported\n then it will use`scipy.ndimage.interpolation.affine_transform`.\n\n Parameters\n ----------\n image : `numpy.ndarray`\n 2D image to be rotated.\n rmatrix : `numpy.ndarray` that is 2x2\n Linear transformation rotation matrix.\n order : `int` 0-5, optional\n Interpolation order to be used, defaults to 3. 
When using scikit-image this parameter\n is passed into `skimage.transform.warp` (e.g., 3 corresponds to bi-cubic interpolation).\n When using scipy it is passed into\n `scipy.ndimage.interpolation.affine_transform` where it controls the order of the spline.\n scale : `float`\n A scale factor for the image with the default being no scaling.\n image_center : tuple, optional\n The point in the image to rotate around (axis of rotation).\n Defaults to the center of the array.\n recenter : `bool` or array-like, optional\n Move the axis of rotation to the center of the array or recenter coords.\n Defaults to `True` i.e., recenter to the center of the array.\n missing : `float`, optional\n The value to replace any missing data after the transformation.\n use_scipy : `bool`, optional\n Force use of `scipy.ndimage.interpolation.affine_transform`.\n Will set all \"NaNs\" in image to zero before doing the transform.\n Defaults to `False`, unless scikit-image can't be imported.\n\n Returns\n -------\n `numpy.ndarray`:\n New rotated, scaled and translated image.\n\n Notes\n -----\n This algorithm uses an affine transformation as opposed to a polynomial\n geometrical transformation, which by default is `skimage.transform.warp`.\n One can specify using `scipy.ndimage.interpolation.affine_transform` as\n an alternative affine transformation. The two transformations use different\n algorithms and thus do not give identical output.\n\n When using for `skimage.transform.warp` with order >= 4 or using\n `scipy.ndimage.interpolation.affine_transform` at all, \"NaN\" values will\n replaced with zero prior to rotation. No attempt is made to retain the NaN\n values.\n\n Input arrays with integer data are cast to float 64 and can be re-cast using\n `numpy.ndarray.astype` if desired.\n\n Although this function is analogous to the IDL's ``rot`` function, it does not\n use the same algorithm as the IDL ``rot`` function.\n IDL's ``rot`` calls the `POLY_2D <https://www.harrisgeospatial.com/docs/poly_2d.html>`__\n method to calculate the inverse mapping of original to target pixel\n coordinates. This is a polynomial geometrical transformation.\n Then optionally it uses a bicubic convolution interpolation\n algorithm to map the original to target pixel values.\n \"\"\"\n rmatrix = rmatrix / scale\n array_center = (np.array(image.shape)[::-1] - 1) / 2.0\n\n # Make sure the image center is an array and is where it's supposed to be\n if image_center is not None:\n image_center = np.asanyarray(image_center)\n else:\n image_center = array_center\n\n # Determine center of rotation based on use (or not) of the recenter keyword\n if recenter:\n rot_center = array_center\n else:\n rot_center = image_center\n\n displacement = np.dot(rmatrix, rot_center)\n shift = image_center - displacement\n if not use_scipy:\n try:\n import skimage.transform\n except ImportError:\n warnings.warn(\"scikit-image could not be imported. 
Image rotation will use scipy\",\n ImportWarning)\n use_scipy = True\n if use_scipy:\n if np.any(np.isnan(image)):\n warnings.warn(\"Setting NaNs to 0 for SciPy rotation.\", SunpyUserWarning)\n # Transform the image using the scipy affine transform\n rotated_image = scipy.ndimage.interpolation.affine_transform(\n np.nan_to_num(image).T, rmatrix, offset=shift, order=order,\n mode='constant', cval=missing).T\n else:\n # Make the rotation matrix 3x3 to include translation of the image\n skmatrix = np.zeros((3, 3))\n skmatrix[:2, :2] = rmatrix\n skmatrix[2, 2] = 1.0\n skmatrix[:2, 2] = shift\n tform = skimage.transform.AffineTransform(skmatrix)\n\n # Transform the image using the skimage function\n if not np.issubdtype(image.dtype, np.float64):\n warnings.warn(\"Input data has been cast to float64.\", SunpyUserWarning)\n adjusted_image = image.astype(np.float64)\n else:\n adjusted_image = image.copy()\n if np.any(np.isnan(adjusted_image)) and order >= 4:\n warnings.warn(\"Setting NaNs to 0 for higher-order scikit-image rotation.\", SunpyUserWarning)\n adjusted_image = np.nan_to_num(adjusted_image)\n\n rotated_image = skimage.transform.warp(adjusted_image, tform, order=order,\n mode='constant', cval=missing)\n\n return rotated_image\n", "path": "sunpy/image/transform.py"}]} | 2,028 | 501 |
gh_patches_debug_67477 | rasdani/github-patches | git_diff | scverse__scanpy-721 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Give `external` higher billing in the docs?
At the moment external modules are kind of hidden in the docs. I think it'd be worth making them more visible (at least on the same page as everything else). I've been giving this a shot, but have hit the limit of my sphinx/ rst abilities.
Two ideas for how they could be more discoverable:
* They get their own heading under `api`
* They're mixed in with everything else (so everything stays organized by topic), but their names are prepended with `sce` while scanpy functions are prepended with `sc`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scanpy/external/__init__.py`
Content:
```
1 from . import tl
2 from . import pl
3 from . import pp
4
5 from .. import _exporting as exporting
6
7 import sys
8 from .. import utils
9 utils.annotate_doc_types(sys.modules[__name__], 'scanpy')
10 del sys, utils
11
12
13 __doc__ = """\
14 External API
15 ============
16
17
18 Import Scanpy's wrappers to external tools as::
19
20 import scanpy.external as sce
21
22 Preprocessing: PP
23 ------------------
24
25 Batch effect correction
26 ~~~~~~~~~~~~~~~~~~~~~~~
27
28 .. autosummary::
29 :toctree: .
30
31 pp.bbknn
32 pp.mnn_correct
33
34 Imputation
35 ~~~~~~~~~~
36
37 Note that the fundamental limitations of imputation are still under `debate
38 <https://github.com/theislab/scanpy/issues/189>`__.
39
40 .. autosummary::
41 :toctree: .
42
43 pp.dca
44 pp.magic
45
46
47 Tools: TL
48 ----------
49
50 Embeddings
51 ~~~~~~~~~~
52
53 .. autosummary::
54 :toctree: .
55
56 tl.phate
57 tl.palantir
58
59 Clustering and trajectory inference
60 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
61
62 .. autosummary::
63 :toctree: .
64
65 tl.phenograph
66
67 Gene scores, Cell cycle
68 ~~~~~~~~~~~~~~~~~~~~~~~
69
70 .. autosummary::
71 :toctree: .
72
73 tl.sandbag
74 tl.cyclone
75
76
77 Plotting: PL
78 ------------
79
80 .. autosummary::
81 :toctree: .
82
83 pl.phate
84 tl.palantir
85
86
87 Exporting
88 ---------
89
90 .. autosummary::
91 :toctree: .
92
93 exporting.spring_project
94 exporting.cellbrowser
95 """
96
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/scanpy/external/__init__.py b/scanpy/external/__init__.py
--- a/scanpy/external/__init__.py
+++ b/scanpy/external/__init__.py
@@ -19,6 +19,8 @@
import scanpy.external as sce
+If you'd like to see your tool included here, please open a `pull request <https://github.com/theislab/scanpy>`_!
+
Preprocessing: PP
------------------
| {"golden_diff": "diff --git a/scanpy/external/__init__.py b/scanpy/external/__init__.py\n--- a/scanpy/external/__init__.py\n+++ b/scanpy/external/__init__.py\n@@ -19,6 +19,8 @@\n \n import scanpy.external as sce\n \n+If you'd like to see your tool included here, please open a `pull request <https://github.com/theislab/scanpy>`_!\n+\n Preprocessing: PP\n ------------------\n", "issue": "Give `external` higher billing in the docs?\nAt the moment external modules are kind of hidden in the docs. I think it'd be worth making them more visible (at least on the same page as everything else). I've been giving this a shot, but have hit the limit of my sphinx/ rst abilities.\r\n\r\nTwo ideas for how they could be more discoverable:\r\n\r\n* They get their own heading under `api`\r\n* They're mixed in with everything else (so everything stays organized by topic), but their names are prepended with `sce` while scanpy functions are prepended with `sc`.\n", "before_files": [{"content": "from . import tl\nfrom . import pl\nfrom . import pp\n\nfrom .. import _exporting as exporting\n\nimport sys\nfrom .. import utils\nutils.annotate_doc_types(sys.modules[__name__], 'scanpy')\ndel sys, utils\n\n\n__doc__ = \"\"\"\\\nExternal API\n============\n\n\nImport Scanpy's wrappers to external tools as::\n\n import scanpy.external as sce\n\nPreprocessing: PP\n------------------\n\nBatch effect correction\n~~~~~~~~~~~~~~~~~~~~~~~\n\n.. autosummary::\n :toctree: .\n\n pp.bbknn\n pp.mnn_correct\n\nImputation\n~~~~~~~~~~\n\nNote that the fundamental limitations of imputation are still under `debate\n<https://github.com/theislab/scanpy/issues/189>`__.\n\n.. autosummary::\n :toctree: .\n\n pp.dca\n pp.magic\n\n\nTools: TL\n----------\n\nEmbeddings\n~~~~~~~~~~\n\n.. autosummary::\n :toctree: .\n\n tl.phate\n tl.palantir\n\nClustering and trajectory inference\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. autosummary::\n :toctree: .\n\n tl.phenograph\n\nGene scores, Cell cycle\n~~~~~~~~~~~~~~~~~~~~~~~\n\n.. autosummary::\n :toctree: .\n\n tl.sandbag\n tl.cyclone\n\n\nPlotting: PL\n------------\n\n.. autosummary::\n :toctree: .\n\n pl.phate\n tl.palantir\n\n\nExporting\n---------\n\n.. autosummary::\n :toctree: .\n\n exporting.spring_project\n exporting.cellbrowser\n\"\"\"\n", "path": "scanpy/external/__init__.py"}], "after_files": [{"content": "from . import tl\nfrom . import pl\nfrom . import pp\n\nfrom .. import _exporting as exporting\n\nimport sys\nfrom .. import utils\nutils.annotate_doc_types(sys.modules[__name__], 'scanpy')\ndel sys, utils\n\n\n__doc__ = \"\"\"\\\nExternal API\n============\n\n\nImport Scanpy's wrappers to external tools as::\n\n import scanpy.external as sce\n\nIf you'd like to see your tool included here, please open a `pull request <https://github.com/theislab/scanpy>`_!\n\nPreprocessing: PP\n------------------\n\nBatch effect correction\n~~~~~~~~~~~~~~~~~~~~~~~\n\n.. autosummary::\n :toctree: .\n\n pp.bbknn\n pp.mnn_correct\n\nImputation\n~~~~~~~~~~\n\nNote that the fundamental limitations of imputation are still under `debate\n<https://github.com/theislab/scanpy/issues/189>`__.\n\n.. autosummary::\n :toctree: .\n\n pp.dca\n pp.magic\n\n\nTools: TL\n----------\n\nEmbeddings\n~~~~~~~~~~\n\n.. autosummary::\n :toctree: .\n\n tl.phate\n tl.palantir\n\nClustering and trajectory inference\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. autosummary::\n :toctree: .\n\n tl.phenograph\n\nGene scores, Cell cycle\n~~~~~~~~~~~~~~~~~~~~~~~\n\n.. 
autosummary::\n :toctree: .\n\n tl.sandbag\n tl.cyclone\n\n\nPlotting: PL\n------------\n\n.. autosummary::\n :toctree: .\n\n pl.phate\n tl.palantir\n\n\nExporting\n---------\n\n.. autosummary::\n :toctree: .\n\n exporting.spring_project\n exporting.cellbrowser\n\"\"\"\n", "path": "scanpy/external/__init__.py"}]} | 941 | 109 |
gh_patches_debug_4507 | rasdani/github-patches | git_diff | gratipay__gratipay.com-1875 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Linking accounts with an OpenStreetMap account does not work if confirmation is required.
Log in with an OpenStreetMap account and log out.
Log in with a GitHub account and link it to the previous OpenStreetMap account.
This worked before #1857, but it fails after commit f963d20321e368de89f892b33ea4bce829ebc59d.
```
Internal server error, program!
Traceback (most recent call last):
File "/home/sim6/www.gittip.com/env/local/lib/python2.7/site-packages/algorithm.py", line 288, in run
new_state = function(**deps.as_kwargs)
File "/home/sim6/www.gittip.com/env/local/lib/python2.7/site-packages/aspen/algorithms/website.py", line 88, in get_response_for_resource
return {'response': resource.respond(request)}
File "/home/sim6/www.gittip.com/env/local/lib/python2.7/site-packages/aspen/resources/dynamic_resource.py", line 52, in respond
exec self.pages[1] in context
File "/home/sim6/www.gittip.com/www/on/openstreetmap/associate.spt", line 97, in
raise request.resource.respond(request)
File "/home/sim6/www.gittip.com/env/local/lib/python2.7/site-packages/aspen/resources/dynamic_resource.py", line 52, in respond
exec self.pages[1] in context
File "/home/sim6/www.gittip.com/www/on/confirm.html.spt", line 45, in
username = account.get_user_name()
AttributeError: 'OpenStreetMapAccount' object has no attribute 'get_user_name'
```
--- END ISSUE ---
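The traceback pins the gap down precisely: the confirmation page calls `account.get_user_name()`, which the other platform classes provide but `OpenStreetMapAccount` never grew. A minimal sketch of the missing accessor, assuming the username lives under `user_info['username']` as the lookup query in the file below suggests (the actual patch, shown further down, adds this plus a platform icon helper):

```python
from gittip.elsewhere import AccountElsewhere

class OpenStreetMapAccount(AccountElsewhere):
    platform = u'openstreetmap'

    def get_url(self):
        return self.user_info['html_url']

    def get_user_name(self):
        # Same key the elsewhere lookup uses: user_info->'username'
        return self.user_info['username']
```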
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gittip/elsewhere/openstreetmap.py`
Content:
```
1 import logging
2
3 import gittip
4 import requests
5 from aspen import json, log, Response
6 from aspen.http.request import PathPart
7 from aspen.utils import typecheck
8 from gittip.elsewhere import AccountElsewhere
9
10
11
12 class OpenStreetMapAccount(AccountElsewhere):
13 platform = u'openstreetmap'
14
15 def get_url(self):
16 return self.user_info['html_url']
17
18
19 def oauth_url(website, action, then=""):
20 """Return a URL to start oauth dancing with OpenStreetMap.
21
22 For GitHub we can pass action and then through a querystring. For OpenStreetMap
23 we can't, so we send people through a local URL first where we stash this
24 info in an in-memory cache (eep! needs refactoring to scale).
25
26 Not sure why website is here. Vestige from GitHub forebear?
27
28 """
29 then = then.encode('base64').strip()
30 return "/on/openstreetmap/redirect?action=%s&then=%s" % (action, then)
31
32
33 def get_user_info(db, username, osm_api_url):
34 """Get the given user's information from the DB or failing that, openstreetmap.
35
36 :param username:
37 A unicode string representing a username in OpenStreetMap.
38
39 :param osm_api_url:
40 URL of OpenStreetMap API.
41
42 :returns:
43 A dictionary containing OpenStreetMap specific information for the user.
44 """
45 typecheck(username, (unicode, PathPart))
46 rec = db.one("""
47 SELECT user_info FROM elsewhere
48 WHERE platform='openstreetmap'
49 AND user_info->'username' = %s
50 """, (username,))
51 if rec is not None:
52 user_info = rec
53 else:
54 osm_user = requests.get("%s/user/%s" % (osm_api_url, username))
55 if osm_user.status_code == 200:
56 log("User %s found in OpenStreetMap but not in gittip." % username)
57 user_info = None
58 elif osm_user.status_code == 404:
59 raise Response(404,
60 "OpenStreetMap identity '{0}' not found.".format(username))
61 else:
62 log("OpenStreetMap api responded with {0}: {1}".format(status, content),
63 level=logging.WARNING)
64 raise Response(502, "OpenStreetMap lookup failed with %d." % status)
65
66 return user_info
67
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gittip/elsewhere/openstreetmap.py b/gittip/elsewhere/openstreetmap.py
--- a/gittip/elsewhere/openstreetmap.py
+++ b/gittip/elsewhere/openstreetmap.py
@@ -15,6 +15,12 @@
def get_url(self):
return self.user_info['html_url']
+ def get_user_name(self):
+ return self.user_info['username']
+
+ def get_platform_icon(self):
+ return "/assets/icons/openstreetmap.12.png"
+
def oauth_url(website, action, then=""):
"""Return a URL to start oauth dancing with OpenStreetMap.
| {"golden_diff": "diff --git a/gittip/elsewhere/openstreetmap.py b/gittip/elsewhere/openstreetmap.py\n--- a/gittip/elsewhere/openstreetmap.py\n+++ b/gittip/elsewhere/openstreetmap.py\n@@ -15,6 +15,12 @@\n def get_url(self):\n return self.user_info['html_url']\n \n+ def get_user_name(self):\n+ return self.user_info['username']\n+\n+ def get_platform_icon(self):\n+ return \"/assets/icons/openstreetmap.12.png\"\n+\n \n def oauth_url(website, action, then=\"\"):\n \"\"\"Return a URL to start oauth dancing with OpenStreetMap.\n", "issue": "Linking accounts with an OpenStreetMap account does not work if confirmation is required.\nLog in with OpenStreetMap account and log out.\nLog in with GitHub account and link it with the previous OpenStreetMap account.\n\nBefore #1857 works, but not after commit f963d20321e368de89f892b33ea4bce829ebc59d\n\n```\nInternal server error, program!\n\nTraceback (most recent call last):\n File \"/home/sim6/www.gittip.com/env/local/lib/python2.7/site-packages/algorithm.py\", line 288, in run\n new_state = function(**deps.as_kwargs)\n File \"/home/sim6/www.gittip.com/env/local/lib/python2.7/site-packages/aspen/algorithms/website.py\", line 88, in get_response_for_resource\n return {'response': resource.respond(request)}\n File \"/home/sim6/www.gittip.com/env/local/lib/python2.7/site-packages/aspen/resources/dynamic_resource.py\", line 52, in respond\n exec self.pages[1] in context\n File \"/home/sim6/www.gittip.com/www/on/openstreetmap/associate.spt\", line 97, in \n raise request.resource.respond(request)\n File \"/home/sim6/www.gittip.com/env/local/lib/python2.7/site-packages/aspen/resources/dynamic_resource.py\", line 52, in respond\n exec self.pages[1] in context\n File \"/home/sim6/www.gittip.com/www/on/confirm.html.spt\", line 45, in \n username = account.get_user_name()\nAttributeError: 'OpenStreetMapAccount' object has no attribute 'get_user_name'\n```\n\n", "before_files": [{"content": "import logging\n\nimport gittip\nimport requests\nfrom aspen import json, log, Response\nfrom aspen.http.request import PathPart\nfrom aspen.utils import typecheck\nfrom gittip.elsewhere import AccountElsewhere\n\n\n\nclass OpenStreetMapAccount(AccountElsewhere):\n platform = u'openstreetmap'\n\n def get_url(self):\n return self.user_info['html_url']\n\n\ndef oauth_url(website, action, then=\"\"):\n \"\"\"Return a URL to start oauth dancing with OpenStreetMap.\n\n For GitHub we can pass action and then through a querystring. For OpenStreetMap\n we can't, so we send people through a local URL first where we stash this\n info in an in-memory cache (eep! needs refactoring to scale).\n\n Not sure why website is here. 
Vestige from GitHub forebear?\n\n \"\"\"\n then = then.encode('base64').strip()\n return \"/on/openstreetmap/redirect?action=%s&then=%s\" % (action, then)\n\n\ndef get_user_info(db, username, osm_api_url):\n \"\"\"Get the given user's information from the DB or failing that, openstreetmap.\n\n :param username:\n A unicode string representing a username in OpenStreetMap.\n\n :param osm_api_url:\n\tURL of OpenStreetMap API.\n\n :returns:\n A dictionary containing OpenStreetMap specific information for the user.\n \"\"\"\n typecheck(username, (unicode, PathPart))\n rec = db.one(\"\"\"\n SELECT user_info FROM elsewhere\n WHERE platform='openstreetmap'\n AND user_info->'username' = %s\n \"\"\", (username,))\n if rec is not None:\n user_info = rec\n else:\n osm_user = requests.get(\"%s/user/%s\" % (osm_api_url, username))\n if osm_user.status_code == 200:\n log(\"User %s found in OpenStreetMap but not in gittip.\" % username)\n user_info = None\n elif osm_user.status_code == 404:\n raise Response(404,\n \"OpenStreetMap identity '{0}' not found.\".format(username))\n else:\n log(\"OpenStreetMap api responded with {0}: {1}\".format(status, content),\n level=logging.WARNING)\n raise Response(502, \"OpenStreetMap lookup failed with %d.\" % status)\n\n return user_info\n", "path": "gittip/elsewhere/openstreetmap.py"}], "after_files": [{"content": "import logging\n\nimport gittip\nimport requests\nfrom aspen import json, log, Response\nfrom aspen.http.request import PathPart\nfrom aspen.utils import typecheck\nfrom gittip.elsewhere import AccountElsewhere\n\n\n\nclass OpenStreetMapAccount(AccountElsewhere):\n platform = u'openstreetmap'\n\n def get_url(self):\n return self.user_info['html_url']\n\n def get_user_name(self):\n return self.user_info['username']\n\n def get_platform_icon(self):\n return \"/assets/icons/openstreetmap.12.png\"\n\n\ndef oauth_url(website, action, then=\"\"):\n \"\"\"Return a URL to start oauth dancing with OpenStreetMap.\n\n For GitHub we can pass action and then through a querystring. For OpenStreetMap\n we can't, so we send people through a local URL first where we stash this\n info in an in-memory cache (eep! needs refactoring to scale).\n\n Not sure why website is here. 
Vestige from GitHub forebear?\n\n \"\"\"\n then = then.encode('base64').strip()\n return \"/on/openstreetmap/redirect?action=%s&then=%s\" % (action, then)\n\n\ndef get_user_info(db, username, osm_api_url):\n \"\"\"Get the given user's information from the DB or failing that, openstreetmap.\n\n :param username:\n A unicode string representing a username in OpenStreetMap.\n\n :param osm_api_url:\n\tURL of OpenStreetMap API.\n\n :returns:\n A dictionary containing OpenStreetMap specific information for the user.\n \"\"\"\n typecheck(username, (unicode, PathPart))\n rec = db.one(\"\"\"\n SELECT user_info FROM elsewhere\n WHERE platform='openstreetmap'\n AND user_info->'username' = %s\n \"\"\", (username,))\n if rec is not None:\n user_info = rec\n else:\n osm_user = requests.get(\"%s/user/%s\" % (osm_api_url, username))\n if osm_user.status_code == 200:\n log(\"User %s found in OpenStreetMap but not in gittip.\" % username)\n user_info = None\n elif osm_user.status_code == 404:\n raise Response(404,\n \"OpenStreetMap identity '{0}' not found.\".format(username))\n else:\n log(\"OpenStreetMap api responded with {0}: {1}\".format(status, content),\n level=logging.WARNING)\n raise Response(502, \"OpenStreetMap lookup failed with %d.\" % status)\n\n return user_info\n", "path": "gittip/elsewhere/openstreetmap.py"}]} | 1,309 | 148 |
gh_patches_debug_14384 | rasdani/github-patches | git_diff | zestedesavoir__zds-site-2915 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unable to search for certain information beyond the first page
**Scenario 1:**
- Go to the search page: `http://zestedesavoir.com/rechercher/`
- Type `&ab` into the search box
- Note that the results span several pages
- Click "next"
- **Poof: a 404 error**
**Scenario 2:**
- Go to the search page: `http://zestedesavoir.com/rechercher/`
- Type `#1` into the search box
- Note that the results span several pages
- Click "next"
- **Poof: nothing but emptiness**
--- END ISSUE ---
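For clarity before the files: a minimal sketch (Django-style, with invented variable names and an assumed `q`/`page` query layout — not code from the repository) of why a raw `&` or `#` in the search term breaks the "next page" links that `append_to_get` builds, and how percent-encoding the value avoids it.

```python
# Hypothetical illustration only; the real fix lives in append_to_get.py below.
from django.utils.http import urlquote

term = "&ab"

# Unquoted, the term is spliced into the query string as URL syntax, so the
# pagination link ends up with an empty search value (hence the 404 / empty page).
# A "#1" term is even worse: everything after the "#" becomes a URL fragment,
# so the page parameter never reaches the server at all.
broken = u"/rechercher/?q={0}&page=2".format(term)            # /rechercher/?q=&ab&page=2

# Percent-encoding keeps the term as data, which is what the patch does:
fixed = u"/rechercher/?q={0}&page=2".format(urlquote(term))   # /rechercher/?q=%26ab&page=2

print(broken)
print(fixed)
```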
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `zds/utils/templatetags/append_to_get.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 from django import template
4 from functools import wraps
5
6 register = template.Library()
7
8 """
9 Decorator to facilitate template tag creation.
10 """
11
12
13 def easy_tag(func):
14 """
15 Deal with the repetitive parts of parsing template tags :
16
17 - Wraps functions attributes;
18 - Raise `TemplateSyntaxError` if arguments are not well formatted.
19
20 :rtype: function
21 :param func: Function to wraps.
22 :type func: function
23 """
24
25 @wraps(func)
26 def inner(_, token):
27 split_arg = token.split_contents()
28 try:
29 return func(*split_arg)
30 except TypeError:
31 import inspect
32 args = inspect.getargspec(func).args[1:]
33
34 err_msg = 'Bad arguments for tag "{0}".\nThe tag "{0}" take {1} arguments ({2}).\n {3} were provided ({4}).'
35 fstring = err_msg.format(split_arg[0],
36 len(args),
37 ", ".join(args),
38 len(split_arg),
39 ", ".join(split_arg))
40 raise template.TemplateSyntaxError(fstring)
41 return inner
42
43
44 class AppendGetNode(template.Node):
45 """
46 Template node allowing to render an URL appending argument to current GET address.
47
48 Parse a string like `key1=var1,key2=var2` and generate a new URL with the provided parameters appended to current
49 parameters.
50 """
51
52 def __init__(self, arg_list):
53 """
54 Create a template node which append `arg_list` to GET URL.
55
56 :param str arg_list: the argument list to append.
57 """
58
59 self.__dict_pairs = {}
60 for pair in arg_list.split(','):
61 if pair:
62 try:
63 key, val = pair.split('=')
64 if not val:
65 raise template.TemplateSyntaxError(
66 "Bad argument format. Empty value for key '{}".format(key))
67 self.__dict_pairs[key] = template.Variable(val)
68 except ValueError:
69 raise template.TemplateSyntaxError(
70 "Bad argument format.\n'{}' must use the format 'key1=var1,key2=var2'".format(arg_list))
71
72 def render(self, context):
73 """
74 Render the new URL according to the current context.
75
76 :param context: Current context.
77 :return: New URL with arguments appended.
78 :rtype: str
79 """
80 get = context['request'].GET.copy()
81 path = context['request'].META['PATH_INFO']
82
83 for key in self.__dict_pairs:
84 get[key] = self.__dict_pairs[key].resolve(context)
85
86 if len(get) > 0:
87 list_arg = [u"{0}={1}".format(key, value) for key in get.keys() for value in get.getlist(key)]
88 path += u"?" + u"&".join(list_arg)
89
90 return path
91
92
93 @register.tag()
94 @easy_tag
95 def append_to_get(_, arg_list):
96 """Render an URL appending argument to current GET address.
97
98 :param _: Tag name (not used)
99 :param arg_list: Argument list like `key1=var1,key2=var2`
100 :return: Template node.
101 """
102 return AppendGetNode(arg_list)
103
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/zds/utils/templatetags/append_to_get.py b/zds/utils/templatetags/append_to_get.py
--- a/zds/utils/templatetags/append_to_get.py
+++ b/zds/utils/templatetags/append_to_get.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
from django import template
+from django.utils.http import urlquote
from functools import wraps
register = template.Library()
@@ -84,7 +85,7 @@
get[key] = self.__dict_pairs[key].resolve(context)
if len(get) > 0:
- list_arg = [u"{0}={1}".format(key, value) for key in get.keys() for value in get.getlist(key)]
+ list_arg = [u"{0}={1}".format(key, urlquote(value)) for key in get.keys() for value in get.getlist(key)]
path += u"?" + u"&".join(list_arg)
return path
| {"golden_diff": "diff --git a/zds/utils/templatetags/append_to_get.py b/zds/utils/templatetags/append_to_get.py\n--- a/zds/utils/templatetags/append_to_get.py\n+++ b/zds/utils/templatetags/append_to_get.py\n@@ -1,6 +1,7 @@\n # -*- coding: utf-8 -*-\n \n from django import template\n+from django.utils.http import urlquote\n from functools import wraps\n \n register = template.Library()\n@@ -84,7 +85,7 @@\n get[key] = self.__dict_pairs[key].resolve(context)\n \n if len(get) > 0:\n- list_arg = [u\"{0}={1}\".format(key, value) for key in get.keys() for value in get.getlist(key)]\n+ list_arg = [u\"{0}={1}\".format(key, urlquote(value)) for key in get.keys() for value in get.getlist(key)]\n path += u\"?\" + u\"&\".join(list_arg)\n \n return path\n", "issue": "Impossible de rechercher certainnes informations au del\u00e0 de la premi\u00e8re page\n**Sc\u00e9nario 1:**\n- Aller sur la page de recherche : `http://zestedesavoir.com/rechercher/`\n- Saisir dans la zone de recherche : `&ab`\n- Constatez qu'il y'a des r\u00e9sultats sur plusieurs pages\n- Cliquer sur suivant\n- **Pouf : une erreur 404**\n\n**Sc\u00e9nario 2:**\n- Aller sur la page de recherche : `http://zestedesavoir.com/rechercher/`\n- Saisir dans la zone de recherche : `#1`\n- Constatez qu'il y'a des r\u00e9sultats sur plusieurs pages\n- Cliquer sur suivant\n- **Pouf : le vide s'empare de nous**\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom django import template\nfrom functools import wraps\n\nregister = template.Library()\n\n\"\"\"\nDecorator to facilitate template tag creation.\n\"\"\"\n\n\ndef easy_tag(func):\n \"\"\"\n Deal with the repetitive parts of parsing template tags :\n\n - Wraps functions attributes;\n - Raise `TemplateSyntaxError` if arguments are not well formatted.\n\n :rtype: function\n :param func: Function to wraps.\n :type func: function\n \"\"\"\n\n @wraps(func)\n def inner(_, token):\n split_arg = token.split_contents()\n try:\n return func(*split_arg)\n except TypeError:\n import inspect\n args = inspect.getargspec(func).args[1:]\n\n err_msg = 'Bad arguments for tag \"{0}\".\\nThe tag \"{0}\" take {1} arguments ({2}).\\n {3} were provided ({4}).'\n fstring = err_msg.format(split_arg[0],\n len(args),\n \", \".join(args),\n len(split_arg),\n \", \".join(split_arg))\n raise template.TemplateSyntaxError(fstring)\n return inner\n\n\nclass AppendGetNode(template.Node):\n \"\"\"\n Template node allowing to render an URL appending argument to current GET address.\n\n Parse a string like `key1=var1,key2=var2` and generate a new URL with the provided parameters appended to current\n parameters.\n \"\"\"\n\n def __init__(self, arg_list):\n \"\"\"\n Create a template node which append `arg_list` to GET URL.\n\n :param str arg_list: the argument list to append.\n \"\"\"\n\n self.__dict_pairs = {}\n for pair in arg_list.split(','):\n if pair:\n try:\n key, val = pair.split('=')\n if not val:\n raise template.TemplateSyntaxError(\n \"Bad argument format. 
Empty value for key '{}\".format(key))\n self.__dict_pairs[key] = template.Variable(val)\n except ValueError:\n raise template.TemplateSyntaxError(\n \"Bad argument format.\\n'{}' must use the format 'key1=var1,key2=var2'\".format(arg_list))\n\n def render(self, context):\n \"\"\"\n Render the new URL according to the current context.\n\n :param context: Current context.\n :return: New URL with arguments appended.\n :rtype: str\n \"\"\"\n get = context['request'].GET.copy()\n path = context['request'].META['PATH_INFO']\n\n for key in self.__dict_pairs:\n get[key] = self.__dict_pairs[key].resolve(context)\n\n if len(get) > 0:\n list_arg = [u\"{0}={1}\".format(key, value) for key in get.keys() for value in get.getlist(key)]\n path += u\"?\" + u\"&\".join(list_arg)\n\n return path\n\n\[email protected]()\n@easy_tag\ndef append_to_get(_, arg_list):\n \"\"\"Render an URL appending argument to current GET address.\n\n :param _: Tag name (not used)\n :param arg_list: Argument list like `key1=var1,key2=var2`\n :return: Template node.\n \"\"\"\n return AppendGetNode(arg_list)\n", "path": "zds/utils/templatetags/append_to_get.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom django import template\nfrom django.utils.http import urlquote\nfrom functools import wraps\n\nregister = template.Library()\n\n\"\"\"\nDecorator to facilitate template tag creation.\n\"\"\"\n\n\ndef easy_tag(func):\n \"\"\"\n Deal with the repetitive parts of parsing template tags :\n\n - Wraps functions attributes;\n - Raise `TemplateSyntaxError` if arguments are not well formatted.\n\n :rtype: function\n :param func: Function to wraps.\n :type func: function\n \"\"\"\n\n @wraps(func)\n def inner(_, token):\n split_arg = token.split_contents()\n try:\n return func(*split_arg)\n except TypeError:\n import inspect\n args = inspect.getargspec(func).args[1:]\n\n err_msg = 'Bad arguments for tag \"{0}\".\\nThe tag \"{0}\" take {1} arguments ({2}).\\n {3} were provided ({4}).'\n fstring = err_msg.format(split_arg[0],\n len(args),\n \", \".join(args),\n len(split_arg),\n \", \".join(split_arg))\n raise template.TemplateSyntaxError(fstring)\n return inner\n\n\nclass AppendGetNode(template.Node):\n \"\"\"\n Template node allowing to render an URL appending argument to current GET address.\n\n Parse a string like `key1=var1,key2=var2` and generate a new URL with the provided parameters appended to current\n parameters.\n \"\"\"\n\n def __init__(self, arg_list):\n \"\"\"\n Create a template node which append `arg_list` to GET URL.\n\n :param str arg_list: the argument list to append.\n \"\"\"\n\n self.__dict_pairs = {}\n for pair in arg_list.split(','):\n if pair:\n try:\n key, val = pair.split('=')\n if not val:\n raise template.TemplateSyntaxError(\n \"Bad argument format. 
Empty value for key '{}\".format(key))\n self.__dict_pairs[key] = template.Variable(val)\n except ValueError:\n raise template.TemplateSyntaxError(\n \"Bad argument format.\\n'{}' must use the format 'key1=var1,key2=var2'\".format(arg_list))\n\n def render(self, context):\n \"\"\"\n Render the new URL according to the current context.\n\n :param context: Current context.\n :return: New URL with arguments appended.\n :rtype: str\n \"\"\"\n get = context['request'].GET.copy()\n path = context['request'].META['PATH_INFO']\n\n for key in self.__dict_pairs:\n get[key] = self.__dict_pairs[key].resolve(context)\n\n if len(get) > 0:\n list_arg = [u\"{0}={1}\".format(key, urlquote(value)) for key in get.keys() for value in get.getlist(key)]\n path += u\"?\" + u\"&\".join(list_arg)\n\n return path\n\n\[email protected]()\n@easy_tag\ndef append_to_get(_, arg_list):\n \"\"\"Render an URL appending argument to current GET address.\n\n :param _: Tag name (not used)\n :param arg_list: Argument list like `key1=var1,key2=var2`\n :return: Template node.\n \"\"\"\n return AppendGetNode(arg_list)\n", "path": "zds/utils/templatetags/append_to_get.py"}]} | 1,338 | 228 |
gh_patches_debug_30253 | rasdani/github-patches | git_diff | streamlink__streamlink-1663 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unable to validate result: <_sre.SRE_Match object; ... should be 'list' but is 'str'
### Checklist
- [x] This is a bug report.
- [ ] This is a feature request.
- [ ] This is a plugin (improvement) request.
- [ ] I have read the contribution guidelines.
### Description
Hi, something's failing when trying to fetch a video from INE on macOS 10.12.6:
```
$ streamlink -o ./streamlink.mp4 https://streaming.ine.com/play/97c49b6f-5cda-4e66-859d-627ba2e9e26e/lab-setup 720p --loglevel debug --http-cookie laravel_session=<removed>
[cli][debug] OS: macOS 10.12.6
[cli][debug] Python: 3.5.5
[cli][debug] Streamlink: 0.12.1
[cli][debug] Requests(2.18.1), Socks(1.6.7), Websocket(0.47.0)
[cli][info] Found matching plugin ine for URL https://streaming.ine.com/play/97c49b6f-5cda-4e66-859d-627ba2e9e26e/lab-setup
[plugin.ine][debug] Found video ID: 97c49b6f-5cda-4e66-859d-627ba2e9e26e
[plugin.ine][debug] Loading player JS: https://content.jwplatform.com/players/gdH3hfpy-p4NBeNN0.js?exp=1527771621&sig=<removed>
error: Unable to validate result: <_sre.SRE_Match object; span=(100223, 101420), match='jwConfig = {\n "aspectratio": "16:9",\n "autost> does not equal None or Unable to validate key 'playlist': Type of '//content.jwplatform.com/v2/media/gdH3hfpy?token=<removed>' should be 'list' but is 'str'
$
$ python --version
Python 3.5.5
$ streamlink --version-check
[cli][info] Your Streamlink version (0.12.1) is up to date!
$
```
Any ideas? Thanks!
--- END ISSUE ---
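For context, a rough sketch of the two shapes the player's `jwConfig["playlist"]` field can take (values abridged from the error above, so treat them as illustrative). The plugin's schema only accepts the first form, while this embed returns the second — a plain metadata URL — which is exactly what the validation error is complaining about.

```python
# Older embeds inline the media metadata directly in the player config:
playlist_inline = [
    {"sources": [{"file": "//cdn.example.com/media/master.m3u8", "type": "hls"}]}
]

# Newer JW Player embeds only carry a metadata URL as a string; the actual
# sources have to be fetched from it with a second HTTP request:
playlist_url = "//content.jwplatform.com/v2/media/gdH3hfpy?token=..."
```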
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/plugins/ine.py`
Content:
```
1 from __future__ import print_function
2
3 import json
4 import re
5
6 from streamlink.plugin import Plugin
7 from streamlink.plugin.api import http
8 from streamlink.plugin.api import validate
9 from streamlink.stream import HLSStream
10
11
12 class INE(Plugin):
13 url_re = re.compile(r"""https://streaming.ine.com/play\#?/
14 ([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})/?
15 (.*?)""", re.VERBOSE)
16 play_url = "https://streaming.ine.com/play/{vid}/watch"
17 js_re = re.compile(r'''script type="text/javascript" src="(https://content.jwplatform.com/players/.*?)"''')
18 jwplayer_re = re.compile(r'''jwConfig\s*=\s*(\{.*\});''', re.DOTALL)
19 setup_schema = validate.Schema(
20 validate.transform(jwplayer_re.search),
21 validate.any(
22 None,
23 validate.all(
24 validate.get(1),
25 validate.transform(json.loads),
26 {"playlist": [
27 {"sources": [{"file": validate.text,
28 "type": validate.text}]}
29 ]}
30 )
31 )
32 )
33
34 @classmethod
35 def can_handle_url(cls, url):
36 return cls.url_re.match(url) is not None
37
38 def _get_streams(self):
39 vid = self.url_re.match(self.url).group(1)
40 self.logger.debug("Found video ID: {0}", vid)
41
42 page = http.get(self.play_url.format(vid=vid))
43 js_url_m = self.js_re.search(page.text)
44 if js_url_m:
45 js_url = js_url_m.group(1)
46 self.logger.debug("Loading player JS: {0}", js_url)
47
48 res = http.get(js_url)
49 data = self.setup_schema.validate(res.text)
50 for source in data["playlist"][0]["sources"]:
51 if source["type"] == "hls":
52 return HLSStream.parse_variant_playlist(self.session, "https:" + source["file"])
53
54
55 __plugin__ = INE
56
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/streamlink/plugins/ine.py b/src/streamlink/plugins/ine.py
--- a/src/streamlink/plugins/ine.py
+++ b/src/streamlink/plugins/ine.py
@@ -6,7 +6,8 @@
from streamlink.plugin import Plugin
from streamlink.plugin.api import http
from streamlink.plugin.api import validate
-from streamlink.stream import HLSStream
+from streamlink.stream import HLSStream, HTTPStream
+from streamlink.utils import update_scheme
class INE(Plugin):
@@ -23,10 +24,8 @@
validate.all(
validate.get(1),
validate.transform(json.loads),
- {"playlist": [
- {"sources": [{"file": validate.text,
- "type": validate.text}]}
- ]}
+ {"playlist": str},
+ validate.get("playlist")
)
)
)
@@ -46,10 +45,15 @@
self.logger.debug("Loading player JS: {0}", js_url)
res = http.get(js_url)
- data = self.setup_schema.validate(res.text)
+ metadata_url = update_scheme(self.url, self.setup_schema.validate(res.text))
+ data = http.json(http.get(metadata_url))
+
for source in data["playlist"][0]["sources"]:
- if source["type"] == "hls":
- return HLSStream.parse_variant_playlist(self.session, "https:" + source["file"])
+ if source["type"] == "application/vnd.apple.mpegurl":
+ for s in HLSStream.parse_variant_playlist(self.session, source["file"]).items():
+ yield s
+ elif source["type"] == "video/mp4":
+ yield "{0}p".format(source["height"]), HTTPStream(self.session, source["file"])
__plugin__ = INE
| {"golden_diff": "diff --git a/src/streamlink/plugins/ine.py b/src/streamlink/plugins/ine.py\n--- a/src/streamlink/plugins/ine.py\n+++ b/src/streamlink/plugins/ine.py\n@@ -6,7 +6,8 @@\n from streamlink.plugin import Plugin\n from streamlink.plugin.api import http\n from streamlink.plugin.api import validate\n-from streamlink.stream import HLSStream\n+from streamlink.stream import HLSStream, HTTPStream\n+from streamlink.utils import update_scheme\n \n \n class INE(Plugin):\n@@ -23,10 +24,8 @@\n validate.all(\n validate.get(1),\n validate.transform(json.loads),\n- {\"playlist\": [\n- {\"sources\": [{\"file\": validate.text,\n- \"type\": validate.text}]}\n- ]}\n+ {\"playlist\": str},\n+ validate.get(\"playlist\")\n )\n )\n )\n@@ -46,10 +45,15 @@\n self.logger.debug(\"Loading player JS: {0}\", js_url)\n \n res = http.get(js_url)\n- data = self.setup_schema.validate(res.text)\n+ metadata_url = update_scheme(self.url, self.setup_schema.validate(res.text))\n+ data = http.json(http.get(metadata_url))\n+\n for source in data[\"playlist\"][0][\"sources\"]:\n- if source[\"type\"] == \"hls\":\n- return HLSStream.parse_variant_playlist(self.session, \"https:\" + source[\"file\"])\n+ if source[\"type\"] == \"application/vnd.apple.mpegurl\":\n+ for s in HLSStream.parse_variant_playlist(self.session, source[\"file\"]).items():\n+ yield s\n+ elif source[\"type\"] == \"video/mp4\":\n+ yield \"{0}p\".format(source[\"height\"]), HTTPStream(self.session, source[\"file\"])\n \n \n __plugin__ = INE\n", "issue": "Unable to validate result: <_sre.SRE_Match object; ... should be 'list' but is 'str'\n### Checklist\r\n\r\n- [x] This is a bug report.\r\n- [ ] This is a feature request.\r\n- [ ] This is a plugin (improvement) request.\r\n- [ ] I have read the contribution guidelines.\r\n\r\n### Description\r\n\r\nHi, something's failing when trying to fetch a video from INE, mac OS 10.12.6:\r\n\r\n```\r\n$ streamlink -o ./streamlink.mp4 https://streaming.ine.com/play/97c49b6f-5cda-4e66-859d-627ba2e9e26e/lab-setup 720p --loglevel debug --http-cookie laravel_session=<removed>\r\n[cli][debug] OS: macOS 10.12.6\r\n[cli][debug] Python: 3.5.5\r\n[cli][debug] Streamlink: 0.12.1\r\n[cli][debug] Requests(2.18.1), Socks(1.6.7), Websocket(0.47.0)\r\n[cli][info] Found matching plugin ine for URL https://streaming.ine.com/play/97c49b6f-5cda-4e66-859d-627ba2e9e26e/lab-setup\r\n[plugin.ine][debug] Found video ID: 97c49b6f-5cda-4e66-859d-627ba2e9e26e\r\n[plugin.ine][debug] Loading player JS: https://content.jwplatform.com/players/gdH3hfpy-p4NBeNN0.js?exp=1527771621&sig=<removed>\r\nerror: Unable to validate result: <_sre.SRE_Match object; span=(100223, 101420), match='jwConfig = {\\n \"aspectratio\": \"16:9\",\\n \"autost> does not equal None or Unable to validate key 'playlist': Type of '//content.jwplatform.com/v2/media/gdH3hfpy?token=<removed>' should be 'list' but is 'str'\r\n$ \r\n$ python --version\r\nPython 3.5.5\r\n$ streamlink --version-check\r\n[cli][info] Your Streamlink version (0.12.1) is up to date!\r\n$ \r\n```\r\nAny ideas? 
Thanks!\r\n\n", "before_files": [{"content": "from __future__ import print_function\n\nimport json\nimport re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream import HLSStream\n\n\nclass INE(Plugin):\n url_re = re.compile(r\"\"\"https://streaming.ine.com/play\\#?/\n ([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})/?\n (.*?)\"\"\", re.VERBOSE)\n play_url = \"https://streaming.ine.com/play/{vid}/watch\"\n js_re = re.compile(r'''script type=\"text/javascript\" src=\"(https://content.jwplatform.com/players/.*?)\"''')\n jwplayer_re = re.compile(r'''jwConfig\\s*=\\s*(\\{.*\\});''', re.DOTALL)\n setup_schema = validate.Schema(\n validate.transform(jwplayer_re.search),\n validate.any(\n None,\n validate.all(\n validate.get(1),\n validate.transform(json.loads),\n {\"playlist\": [\n {\"sources\": [{\"file\": validate.text,\n \"type\": validate.text}]}\n ]}\n )\n )\n )\n\n @classmethod\n def can_handle_url(cls, url):\n return cls.url_re.match(url) is not None\n\n def _get_streams(self):\n vid = self.url_re.match(self.url).group(1)\n self.logger.debug(\"Found video ID: {0}\", vid)\n\n page = http.get(self.play_url.format(vid=vid))\n js_url_m = self.js_re.search(page.text)\n if js_url_m:\n js_url = js_url_m.group(1)\n self.logger.debug(\"Loading player JS: {0}\", js_url)\n\n res = http.get(js_url)\n data = self.setup_schema.validate(res.text)\n for source in data[\"playlist\"][0][\"sources\"]:\n if source[\"type\"] == \"hls\":\n return HLSStream.parse_variant_playlist(self.session, \"https:\" + source[\"file\"])\n\n\n__plugin__ = INE\n", "path": "src/streamlink/plugins/ine.py"}], "after_files": [{"content": "from __future__ import print_function\n\nimport json\nimport re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream import HLSStream, HTTPStream\nfrom streamlink.utils import update_scheme\n\n\nclass INE(Plugin):\n url_re = re.compile(r\"\"\"https://streaming.ine.com/play\\#?/\n ([0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})/?\n (.*?)\"\"\", re.VERBOSE)\n play_url = \"https://streaming.ine.com/play/{vid}/watch\"\n js_re = re.compile(r'''script type=\"text/javascript\" src=\"(https://content.jwplatform.com/players/.*?)\"''')\n jwplayer_re = re.compile(r'''jwConfig\\s*=\\s*(\\{.*\\});''', re.DOTALL)\n setup_schema = validate.Schema(\n validate.transform(jwplayer_re.search),\n validate.any(\n None,\n validate.all(\n validate.get(1),\n validate.transform(json.loads),\n {\"playlist\": str},\n validate.get(\"playlist\")\n )\n )\n )\n\n @classmethod\n def can_handle_url(cls, url):\n return cls.url_re.match(url) is not None\n\n def _get_streams(self):\n vid = self.url_re.match(self.url).group(1)\n self.logger.debug(\"Found video ID: {0}\", vid)\n\n page = http.get(self.play_url.format(vid=vid))\n js_url_m = self.js_re.search(page.text)\n if js_url_m:\n js_url = js_url_m.group(1)\n self.logger.debug(\"Loading player JS: {0}\", js_url)\n\n res = http.get(js_url)\n metadata_url = update_scheme(self.url, self.setup_schema.validate(res.text))\n data = http.json(http.get(metadata_url))\n\n for source in data[\"playlist\"][0][\"sources\"]:\n if source[\"type\"] == \"application/vnd.apple.mpegurl\":\n for s in HLSStream.parse_variant_playlist(self.session, source[\"file\"]).items():\n yield s\n elif source[\"type\"] == \"video/mp4\":\n yield \"{0}p\".format(source[\"height\"]), 
HTTPStream(self.session, source[\"file\"])\n\n\n__plugin__ = INE\n", "path": "src/streamlink/plugins/ine.py"}]} | 1,409 | 391 |
gh_patches_debug_21861 | rasdani/github-patches | git_diff | facebookresearch__hydra-352 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Bug] system.exit(code) is not respected in Hydra
--- END ISSUE ---
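A minimal reproduction sketch (hypothetical task function, not taken from the repository) of the reported behaviour: when a wrapper swallows `SystemExit`, the interpreter exits with status 0 even though the task asked for a non-zero code.

```python
import sys

def task_function():
    sys.exit(3)  # the task requests exit status 3

try:
    task_function()
except SystemExit:
    pass  # swallowing SystemExit discards the requested exit code

# control reaches this point, so the process later exits with status 0
print("still running")
```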
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `hydra/main.py`
Content:
```
1 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2 import functools
3 import sys
4 from typing import Callable, Optional
5
6 from ._internal.utils import get_args_parser, run_hydra
7 from .types import TaskFunction
8
9 # TODO: change config_path to Optional[str]
10
11
12 def main(
13 config_path: str = "", strict: Optional[bool] = None
14 ) -> Callable[[TaskFunction], Callable[[], None]]:
15 """
16 :param config_path: the config path, can be a directory in which it's used as the config root
17 or a file to load
18 :param strict: strict mode, will throw an error if command line overrides are not changing an
19 existing key or
20 if the code is accessing a non existent key
21 """
22
23 def main_decorator(task_function: TaskFunction) -> Callable[[], None]:
24 @functools.wraps(task_function)
25 def decorated_main() -> None:
26 try:
27 run_hydra(
28 args_parser=get_args_parser(),
29 task_function=task_function,
30 config_path=config_path,
31 strict=strict,
32 )
33 except KeyboardInterrupt:
34 sys.exit(-1)
35 except SystemExit:
36 pass
37
38 return decorated_main
39
40 return main_decorator
41
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/hydra/main.py b/hydra/main.py
--- a/hydra/main.py
+++ b/hydra/main.py
@@ -1,6 +1,5 @@
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import functools
-import sys
from typing import Callable, Optional
from ._internal.utils import get_args_parser, run_hydra
@@ -23,17 +22,12 @@
def main_decorator(task_function: TaskFunction) -> Callable[[], None]:
@functools.wraps(task_function)
def decorated_main() -> None:
- try:
- run_hydra(
- args_parser=get_args_parser(),
- task_function=task_function,
- config_path=config_path,
- strict=strict,
- )
- except KeyboardInterrupt:
- sys.exit(-1)
- except SystemExit:
- pass
+ run_hydra(
+ args_parser=get_args_parser(),
+ task_function=task_function,
+ config_path=config_path,
+ strict=strict,
+ )
return decorated_main
| {"golden_diff": "diff --git a/hydra/main.py b/hydra/main.py\n--- a/hydra/main.py\n+++ b/hydra/main.py\n@@ -1,6 +1,5 @@\n # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n import functools\n-import sys\n from typing import Callable, Optional\n \n from ._internal.utils import get_args_parser, run_hydra\n@@ -23,17 +22,12 @@\n def main_decorator(task_function: TaskFunction) -> Callable[[], None]:\n @functools.wraps(task_function)\n def decorated_main() -> None:\n- try:\n- run_hydra(\n- args_parser=get_args_parser(),\n- task_function=task_function,\n- config_path=config_path,\n- strict=strict,\n- )\n- except KeyboardInterrupt:\n- sys.exit(-1)\n- except SystemExit:\n- pass\n+ run_hydra(\n+ args_parser=get_args_parser(),\n+ task_function=task_function,\n+ config_path=config_path,\n+ strict=strict,\n+ )\n \n return decorated_main\n", "issue": "[Bug] system.exit(code) is not respected in Hydra\n\n", "before_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport functools\nimport sys\nfrom typing import Callable, Optional\n\nfrom ._internal.utils import get_args_parser, run_hydra\nfrom .types import TaskFunction\n\n# TODO: change config_path to Optional[str]\n\n\ndef main(\n config_path: str = \"\", strict: Optional[bool] = None\n) -> Callable[[TaskFunction], Callable[[], None]]:\n \"\"\"\n :param config_path: the config path, can be a directory in which it's used as the config root\n or a file to load\n :param strict: strict mode, will throw an error if command line overrides are not changing an\n existing key or\n if the code is accessing a non existent key\n \"\"\"\n\n def main_decorator(task_function: TaskFunction) -> Callable[[], None]:\n @functools.wraps(task_function)\n def decorated_main() -> None:\n try:\n run_hydra(\n args_parser=get_args_parser(),\n task_function=task_function,\n config_path=config_path,\n strict=strict,\n )\n except KeyboardInterrupt:\n sys.exit(-1)\n except SystemExit:\n pass\n\n return decorated_main\n\n return main_decorator\n", "path": "hydra/main.py"}], "after_files": [{"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport functools\nfrom typing import Callable, Optional\n\nfrom ._internal.utils import get_args_parser, run_hydra\nfrom .types import TaskFunction\n\n# TODO: change config_path to Optional[str]\n\n\ndef main(\n config_path: str = \"\", strict: Optional[bool] = None\n) -> Callable[[TaskFunction], Callable[[], None]]:\n \"\"\"\n :param config_path: the config path, can be a directory in which it's used as the config root\n or a file to load\n :param strict: strict mode, will throw an error if command line overrides are not changing an\n existing key or\n if the code is accessing a non existent key\n \"\"\"\n\n def main_decorator(task_function: TaskFunction) -> Callable[[], None]:\n @functools.wraps(task_function)\n def decorated_main() -> None:\n run_hydra(\n args_parser=get_args_parser(),\n task_function=task_function,\n config_path=config_path,\n strict=strict,\n )\n\n return decorated_main\n\n return main_decorator\n", "path": "hydra/main.py"}]} | 614 | 244 |
gh_patches_debug_9471 | rasdani/github-patches | git_diff | vispy__vispy-1084 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
IPython WebGL Examples not working.
The IPython notebook examples are not working with the latest IPython (Jupyter) 4.0 release.
--- END ISSUE ---
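The incompatibility, paraphrased (the exact name and meaning of the extra argument are an assumption here): in IPython < 4 the handler registered with `on_msg` is called with two arguments, while IPython/Jupyter 4 passes a third one, so a handler with a fixed two-argument signature fails when invoked and the canvas never receives its events. A variadic signature absorbs both calling conventions:

```python
# Sketch of the callback-signature difference (argument names assumed):
#   IPython 3.x:        handler(widget, content)
#   IPython/Jupyter 4:  handler(widget, content, buffers)
def events_received(widget, content, *args):  # *args soaks up the extra argument
    print(content.get("msg_type"))
```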
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `vispy/app/backends/ipython/_widget.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright (c) 2014, 2015, Vispy Development Team.
3 # Distributed under the (new) BSD License. See LICENSE.txt for more info.
4
5 try:
6 from IPython.html.widgets import DOMWidget
7 from IPython.utils.traitlets import Unicode, Int, Bool
8 except Exception as exp:
9 # Init dummy objects needed to import this module withour errors.
10 # These are all overwritten with imports from IPython (on success)
11 DOMWidget = object
12 Unicode = Int = Float = Bool = lambda *args, **kwargs: None
13 available, testable, why_not, which = False, False, str(exp), None
14 else:
15 available, testable, why_not, which = True, False, None, None
16 from vispy.app.backends._ipynb_util import create_glir_message
17 from vispy.app import Timer
18
19
20 # ---------------------------------------------------------- IPython Widget ---
21 def _stop_timers(canvas):
22 """Stop all timers in a canvas."""
23 for attr in dir(canvas):
24 try:
25 attr_obj = getattr(canvas, attr)
26 except NotImplementedError:
27 # This try/except is needed because canvas.position raises
28 # an error (it is not implemented in this backend).
29 attr_obj = None
30 if isinstance(attr_obj, Timer):
31 attr_obj.stop()
32
33
34 class VispyWidget(DOMWidget):
35 _view_name = Unicode("VispyView", sync=True)
36 _view_module = Unicode('/nbextensions/vispy/webgl-backend.js', sync=True)
37
38 #height/width of the widget is managed by IPython.
39 #it's a string and can be anything valid in CSS.
40 #here we only manage the size of the viewport.
41 width = Int(sync=True)
42 height = Int(sync=True)
43 resizable = Bool(value=True, sync=True)
44
45 def __init__(self, **kwargs):
46 super(VispyWidget, self).__init__(**kwargs)
47 self.on_msg(self.events_received)
48 self.canvas = None
49 self.canvas_backend = None
50 self.gen_event = None
51
52 def set_canvas(self, canvas):
53 self.width, self.height = canvas._backend._default_size
54 self.canvas = canvas
55 self.canvas_backend = self.canvas._backend
56 self.canvas_backend.set_widget(self)
57 self.gen_event = self.canvas_backend._gen_event
58 #setup the backend widget then.
59
60 def events_received(self, _, msg):
61 if msg['msg_type'] == 'init':
62 self.canvas_backend._reinit_widget()
63 elif msg['msg_type'] == 'events':
64 events = msg['contents']
65 for ev in events:
66 self.gen_event(ev)
67 elif msg['msg_type'] == 'status':
68 if msg['contents'] == 'removed':
69 # Stop all timers associated to the widget.
70 _stop_timers(self.canvas_backend._vispy_canvas)
71
72 def send_glir_commands(self, commands):
73 # TODO: check whether binary websocket is available (ipython >= 3)
74 # Until IPython 3.0 is released, use base64.
75 array_serialization = 'base64'
76 # array_serialization = 'binary'
77 if array_serialization == 'base64':
78 msg = create_glir_message(commands, 'base64')
79 msg['array_serialization'] = 'base64'
80 self.send(msg)
81 elif array_serialization == 'binary':
82 msg = create_glir_message(commands, 'binary')
83 msg['array_serialization'] = 'binary'
84 # Remove the buffers from the JSON message: they will be sent
85 # independently via binary WebSocket.
86 buffers = msg.pop('buffers')
87 self.comm.send({"method": "custom", "content": msg},
88 buffers=buffers)
89
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/vispy/app/backends/ipython/_widget.py b/vispy/app/backends/ipython/_widget.py
--- a/vispy/app/backends/ipython/_widget.py
+++ b/vispy/app/backends/ipython/_widget.py
@@ -57,7 +57,10 @@
self.gen_event = self.canvas_backend._gen_event
#setup the backend widget then.
- def events_received(self, _, msg):
+ # In IPython < 4, these callbacks are given two arguments; in
+ # IPython/jupyter 4, they take 3. events_received is variadic to
+ # accommodate both cases.
+ def events_received(self, _, msg, *args):
if msg['msg_type'] == 'init':
self.canvas_backend._reinit_widget()
elif msg['msg_type'] == 'events':
| {"golden_diff": "diff --git a/vispy/app/backends/ipython/_widget.py b/vispy/app/backends/ipython/_widget.py\n--- a/vispy/app/backends/ipython/_widget.py\n+++ b/vispy/app/backends/ipython/_widget.py\n@@ -57,7 +57,10 @@\n self.gen_event = self.canvas_backend._gen_event\n #setup the backend widget then.\n \n- def events_received(self, _, msg):\n+ # In IPython < 4, these callbacks are given two arguments; in\n+ # IPython/jupyter 4, they take 3. events_received is variadic to\n+ # accommodate both cases.\n+ def events_received(self, _, msg, *args):\n if msg['msg_type'] == 'init':\n self.canvas_backend._reinit_widget()\n elif msg['msg_type'] == 'events':\n", "issue": "IPython WebGL Examples not working.\nThe IPython notebook examples are not working with the latest IPython(Jupyter) 4.0 release.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2014, 2015, Vispy Development Team.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n\ntry:\n from IPython.html.widgets import DOMWidget\n from IPython.utils.traitlets import Unicode, Int, Bool\nexcept Exception as exp:\n # Init dummy objects needed to import this module withour errors.\n # These are all overwritten with imports from IPython (on success)\n DOMWidget = object\n Unicode = Int = Float = Bool = lambda *args, **kwargs: None\n available, testable, why_not, which = False, False, str(exp), None\nelse:\n available, testable, why_not, which = True, False, None, None\nfrom vispy.app.backends._ipynb_util import create_glir_message\nfrom vispy.app import Timer\n\n\n# ---------------------------------------------------------- IPython Widget ---\ndef _stop_timers(canvas):\n \"\"\"Stop all timers in a canvas.\"\"\"\n for attr in dir(canvas):\n try:\n attr_obj = getattr(canvas, attr)\n except NotImplementedError:\n # This try/except is needed because canvas.position raises\n # an error (it is not implemented in this backend).\n attr_obj = None\n if isinstance(attr_obj, Timer):\n attr_obj.stop()\n\n\nclass VispyWidget(DOMWidget):\n _view_name = Unicode(\"VispyView\", sync=True)\n _view_module = Unicode('/nbextensions/vispy/webgl-backend.js', sync=True)\n\n #height/width of the widget is managed by IPython.\n #it's a string and can be anything valid in CSS.\n #here we only manage the size of the viewport.\n width = Int(sync=True)\n height = Int(sync=True)\n resizable = Bool(value=True, sync=True)\n\n def __init__(self, **kwargs):\n super(VispyWidget, self).__init__(**kwargs)\n self.on_msg(self.events_received)\n self.canvas = None\n self.canvas_backend = None\n self.gen_event = None\n\n def set_canvas(self, canvas):\n self.width, self.height = canvas._backend._default_size\n self.canvas = canvas\n self.canvas_backend = self.canvas._backend\n self.canvas_backend.set_widget(self)\n self.gen_event = self.canvas_backend._gen_event\n #setup the backend widget then.\n\n def events_received(self, _, msg):\n if msg['msg_type'] == 'init':\n self.canvas_backend._reinit_widget()\n elif msg['msg_type'] == 'events':\n events = msg['contents']\n for ev in events:\n self.gen_event(ev)\n elif msg['msg_type'] == 'status':\n if msg['contents'] == 'removed':\n # Stop all timers associated to the widget.\n _stop_timers(self.canvas_backend._vispy_canvas)\n\n def send_glir_commands(self, commands):\n # TODO: check whether binary websocket is available (ipython >= 3)\n # Until IPython 3.0 is released, use base64.\n array_serialization = 'base64'\n # array_serialization = 'binary'\n if array_serialization == 'base64':\n msg = 
create_glir_message(commands, 'base64')\n msg['array_serialization'] = 'base64'\n self.send(msg)\n elif array_serialization == 'binary':\n msg = create_glir_message(commands, 'binary')\n msg['array_serialization'] = 'binary'\n # Remove the buffers from the JSON message: they will be sent\n # independently via binary WebSocket.\n buffers = msg.pop('buffers')\n self.comm.send({\"method\": \"custom\", \"content\": msg},\n buffers=buffers)\n", "path": "vispy/app/backends/ipython/_widget.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2014, 2015, Vispy Development Team.\n# Distributed under the (new) BSD License. See LICENSE.txt for more info.\n\ntry:\n from IPython.html.widgets import DOMWidget\n from IPython.utils.traitlets import Unicode, Int, Bool\nexcept Exception as exp:\n # Init dummy objects needed to import this module withour errors.\n # These are all overwritten with imports from IPython (on success)\n DOMWidget = object\n Unicode = Int = Float = Bool = lambda *args, **kwargs: None\n available, testable, why_not, which = False, False, str(exp), None\nelse:\n available, testable, why_not, which = True, False, None, None\nfrom vispy.app.backends._ipynb_util import create_glir_message\nfrom vispy.app import Timer\n\n\n# ---------------------------------------------------------- IPython Widget ---\ndef _stop_timers(canvas):\n \"\"\"Stop all timers in a canvas.\"\"\"\n for attr in dir(canvas):\n try:\n attr_obj = getattr(canvas, attr)\n except NotImplementedError:\n # This try/except is needed because canvas.position raises\n # an error (it is not implemented in this backend).\n attr_obj = None\n if isinstance(attr_obj, Timer):\n attr_obj.stop()\n\n\nclass VispyWidget(DOMWidget):\n _view_name = Unicode(\"VispyView\", sync=True)\n _view_module = Unicode('/nbextensions/vispy/webgl-backend.js', sync=True)\n\n #height/width of the widget is managed by IPython.\n #it's a string and can be anything valid in CSS.\n #here we only manage the size of the viewport.\n width = Int(sync=True)\n height = Int(sync=True)\n resizable = Bool(value=True, sync=True)\n\n def __init__(self, **kwargs):\n super(VispyWidget, self).__init__(**kwargs)\n self.on_msg(self.events_received)\n self.canvas = None\n self.canvas_backend = None\n self.gen_event = None\n\n def set_canvas(self, canvas):\n self.width, self.height = canvas._backend._default_size\n self.canvas = canvas\n self.canvas_backend = self.canvas._backend\n self.canvas_backend.set_widget(self)\n self.gen_event = self.canvas_backend._gen_event\n #setup the backend widget then.\n\n # In IPython < 4, these callbacks are given two arguments; in\n # IPython/jupyter 4, they take 3. 
events_received is variadic to\n # accommodate both cases.\n def events_received(self, _, msg, *args):\n if msg['msg_type'] == 'init':\n self.canvas_backend._reinit_widget()\n elif msg['msg_type'] == 'events':\n events = msg['contents']\n for ev in events:\n self.gen_event(ev)\n elif msg['msg_type'] == 'status':\n if msg['contents'] == 'removed':\n # Stop all timers associated to the widget.\n _stop_timers(self.canvas_backend._vispy_canvas)\n\n def send_glir_commands(self, commands):\n # TODO: check whether binary websocket is available (ipython >= 3)\n # Until IPython 3.0 is released, use base64.\n array_serialization = 'base64'\n # array_serialization = 'binary'\n if array_serialization == 'base64':\n msg = create_glir_message(commands, 'base64')\n msg['array_serialization'] = 'base64'\n self.send(msg)\n elif array_serialization == 'binary':\n msg = create_glir_message(commands, 'binary')\n msg['array_serialization'] = 'binary'\n # Remove the buffers from the JSON message: they will be sent\n # independently via binary WebSocket.\n buffers = msg.pop('buffers')\n self.comm.send({\"method\": \"custom\", \"content\": msg},\n buffers=buffers)\n", "path": "vispy/app/backends/ipython/_widget.py"}]} | 1,276 | 196 |
gh_patches_debug_29933 | rasdani/github-patches | git_diff | jazzband__pip-tools-1912 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The latest and the stable documentation may have been built using the development version
Shouldn't the latest released version be shown [there](https://pip-tools.readthedocs.io/)?
<img width="788" alt="Screenshot 2023-04-07 at 01 17 17" src="https://user-images.githubusercontent.com/7377671/230510654-fd15e934-4243-4ee3-85c6-bb8d55e656d4.png">
--- END ISSUE ---
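To make the symptom concrete, a small sketch (the version strings are invented) of the difference between deriving the version from the git checkout with `setuptools_scm` and reading the installed distribution's metadata. On Read the Docs the checkout is not on a release tag, so the former produces a `.devN` development version, which is what ends up in the built docs.

```python
# What setuptools_scm.get_version() typically returns on a non-tag commit:
#   "6.13.1.dev16+g1a2b3c4"   (a development version)

# Reading the installed package's own metadata instead:
from importlib.metadata import version

release = version("pip-tools")  # e.g. "6.12.3" -- the release actually installed
print(release)
```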
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 # https://www.sphinx-doc.org/en/master/usage/configuration.html
2 """Configuration file for the Sphinx documentation builder."""
3
4 from __future__ import annotations
5
6 from functools import partial
7 from pathlib import Path
8
9 from setuptools_scm import get_version
10
11 # -- Path setup --------------------------------------------------------------
12
13 PROJECT_ROOT_DIR = Path(__file__).parents[1].resolve()
14 get_scm_version = partial(get_version, root=PROJECT_ROOT_DIR)
15
16
17 # -- Project information -----------------------------------------------------
18
19 project = "pip-tools"
20 author = f"{project} Contributors"
21 copyright = f"The {author}"
22
23 # The short X.Y version
24 version = ".".join(
25 get_scm_version(
26 local_scheme="no-local-version",
27 ).split(
28 "."
29 )[:3],
30 )
31
32 # The full version, including alpha/beta/rc tags
33 release = get_scm_version()
34
35
36 # -- General configuration ---------------------------------------------------
37
38 # Add any Sphinx extension module names here, as strings. They can be
39 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
40 # ones.
41 extensions = ["myst_parser"]
42
43
44 # -- Options for HTML output -------------------------------------------------
45
46 # The theme to use for HTML and HTML Help pages. See the documentation for
47 # a list of builtin themes.
48 #
49 html_theme = "furo"
50
51
52 # -------------------------------------------------------------------------
53 default_role = "any"
54 nitpicky = True
55
56 linkcheck_ignore = [
57 r"^https://matrix\.to/#",
58 ]
59
60 suppress_warnings = ["myst.xref_missing"]
61
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -3,15 +3,17 @@
from __future__ import annotations
-from functools import partial
+from importlib.metadata import version as get_version
from pathlib import Path
-from setuptools_scm import get_version
+from sphinx.util import logging
+from sphinx.util.console import bold
+
+logger = logging.getLogger(__name__)
# -- Path setup --------------------------------------------------------------
PROJECT_ROOT_DIR = Path(__file__).parents[1].resolve()
-get_scm_version = partial(get_version, root=PROJECT_ROOT_DIR)
# -- Project information -----------------------------------------------------
@@ -20,18 +22,14 @@
author = f"{project} Contributors"
copyright = f"The {author}"
-# The short X.Y version
-version = ".".join(
- get_scm_version(
- local_scheme="no-local-version",
- ).split(
- "."
- )[:3],
-)
-
# The full version, including alpha/beta/rc tags
-release = get_scm_version()
+release = get_version(project)
+
+# The short X.Y version
+version = ".".join(release.split(".")[:3])
+logger.info(bold("%s version: %s"), project, version)
+logger.info(bold("%s release: %s"), project, release)
# -- General configuration ---------------------------------------------------
@@ -47,6 +45,7 @@
# a list of builtin themes.
#
html_theme = "furo"
+html_title = f"<nobr>{project}</nobr> documentation v{release}"
# -------------------------------------------------------------------------
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -3,15 +3,17 @@\n \n from __future__ import annotations\n \n-from functools import partial\n+from importlib.metadata import version as get_version\n from pathlib import Path\n \n-from setuptools_scm import get_version\n+from sphinx.util import logging\n+from sphinx.util.console import bold\n+\n+logger = logging.getLogger(__name__)\n \n # -- Path setup --------------------------------------------------------------\n \n PROJECT_ROOT_DIR = Path(__file__).parents[1].resolve()\n-get_scm_version = partial(get_version, root=PROJECT_ROOT_DIR)\n \n \n # -- Project information -----------------------------------------------------\n@@ -20,18 +22,14 @@\n author = f\"{project} Contributors\"\n copyright = f\"The {author}\"\n \n-# The short X.Y version\n-version = \".\".join(\n- get_scm_version(\n- local_scheme=\"no-local-version\",\n- ).split(\n- \".\"\n- )[:3],\n-)\n-\n # The full version, including alpha/beta/rc tags\n-release = get_scm_version()\n+release = get_version(project)\n+\n+# The short X.Y version\n+version = \".\".join(release.split(\".\")[:3])\n \n+logger.info(bold(\"%s version: %s\"), project, version)\n+logger.info(bold(\"%s release: %s\"), project, release)\n \n # -- General configuration ---------------------------------------------------\n \n@@ -47,6 +45,7 @@\n # a list of builtin themes.\n #\n html_theme = \"furo\"\n+html_title = f\"<nobr>{project}</nobr> documentation v{release}\"\n \n \n # -------------------------------------------------------------------------\n", "issue": "The latest and the stable documentation may have been built using the development version\nShouldn't be [there](https://pip-tools.readthedocs.io/) the latest released version?\r\n\r\n<img width=\"788\" alt=\"Screenshot 2023-04-07 at 01 17 17\" src=\"https://user-images.githubusercontent.com/7377671/230510654-fd15e934-4243-4ee3-85c6-bb8d55e656d4.png\">\r\n\r\n\n", "before_files": [{"content": "# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\"\"\"Configuration file for the Sphinx documentation builder.\"\"\"\n\nfrom __future__ import annotations\n\nfrom functools import partial\nfrom pathlib import Path\n\nfrom setuptools_scm import get_version\n\n# -- Path setup --------------------------------------------------------------\n\nPROJECT_ROOT_DIR = Path(__file__).parents[1].resolve()\nget_scm_version = partial(get_version, root=PROJECT_ROOT_DIR)\n\n\n# -- Project information -----------------------------------------------------\n\nproject = \"pip-tools\"\nauthor = f\"{project} Contributors\"\ncopyright = f\"The {author}\"\n\n# The short X.Y version\nversion = \".\".join(\n get_scm_version(\n local_scheme=\"no-local-version\",\n ).split(\n \".\"\n )[:3],\n)\n\n# The full version, including alpha/beta/rc tags\nrelease = get_scm_version()\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\"myst_parser\"]\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. 
See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"furo\"\n\n\n# -------------------------------------------------------------------------\ndefault_role = \"any\"\nnitpicky = True\n\nlinkcheck_ignore = [\n r\"^https://matrix\\.to/#\",\n]\n\nsuppress_warnings = [\"myst.xref_missing\"]\n", "path": "docs/conf.py"}], "after_files": [{"content": "# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\"\"\"Configuration file for the Sphinx documentation builder.\"\"\"\n\nfrom __future__ import annotations\n\nfrom importlib.metadata import version as get_version\nfrom pathlib import Path\n\nfrom sphinx.util import logging\nfrom sphinx.util.console import bold\n\nlogger = logging.getLogger(__name__)\n\n# -- Path setup --------------------------------------------------------------\n\nPROJECT_ROOT_DIR = Path(__file__).parents[1].resolve()\n\n\n# -- Project information -----------------------------------------------------\n\nproject = \"pip-tools\"\nauthor = f\"{project} Contributors\"\ncopyright = f\"The {author}\"\n\n# The full version, including alpha/beta/rc tags\nrelease = get_version(project)\n\n# The short X.Y version\nversion = \".\".join(release.split(\".\")[:3])\n\nlogger.info(bold(\"%s version: %s\"), project, version)\nlogger.info(bold(\"%s release: %s\"), project, release)\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\"myst_parser\"]\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"furo\"\nhtml_title = f\"<nobr>{project}</nobr> documentation v{release}\"\n\n\n# -------------------------------------------------------------------------\ndefault_role = \"any\"\nnitpicky = True\n\nlinkcheck_ignore = [\n r\"^https://matrix\\.to/#\",\n]\n\nsuppress_warnings = [\"myst.xref_missing\"]\n", "path": "docs/conf.py"}]} | 823 | 358 |
gh_patches_debug_27526 | rasdani/github-patches | git_diff | pantsbuild__pants-11274 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Target Hitting Recursion Limit During Pants Setup (with workaround)
# Description of Problem
We’re in the process of migrating from 1.25 to 2.1.0, and hit an issue trying to run a test on a specific target. The target is large and causes the maximum recursion limit to be exceeded.
I tried hacking on `sys.setrecursionlimit` and found that, for our use case, 1021 was the smallest value that would allow the test to succeed.
We can try breaking that target up, but the app it is testing is kind of a monolith, so I don’t know how successful that would be.
Could Pants expose a configurable runtime limit to handle this?
This error happens during Pants setup, before our pytest run starts.
# Workaround
In one of our plugin's `register.py` we added `sys.setrecursionlimit(1021)` and this resolved our problem.
--- END ISSUE ---
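Spelled out, the workaround is a one-liner in plugin code (the plugin module is whichever custom plugin Pants already loads; the exact path is ours and therefore an assumption):

```python
# In a custom plugin's register.py, which Pants imports during setup:
import sys

# 1021 was reported above as the smallest value that let the failing test pass.
sys.setrecursionlimit(1021)
```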
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/python/pants/bin/pants_loader.py`
Content:
```
1 # Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 import importlib
5 import locale
6 import os
7 import warnings
8 from textwrap import dedent
9
10
11 class PantsLoader:
12 """Loads and executes entrypoints."""
13
14 ENTRYPOINT_ENV_VAR = "PANTS_ENTRYPOINT"
15 DEFAULT_ENTRYPOINT = "pants.bin.pants_exe:main"
16
17 ENCODING_IGNORE_ENV_VAR = "PANTS_IGNORE_UNRECOGNIZED_ENCODING"
18
19 class InvalidLocaleError(Exception):
20 """Raised when a valid locale can't be found."""
21
22 @staticmethod
23 def setup_warnings():
24 # We want to present warnings to the user, set this up before importing any of our own code,
25 # to ensure all deprecation warnings are seen, including module deprecations.
26 # The "default" action displays a warning for a particular file and line number exactly once.
27 # See https://docs.python.org/3/library/warnings.html#the-warnings-filter for the complete list.
28 #
29 # However, we do turn off deprecation warnings for libraries that Pants uses for which we do
30 # not have a fixed upstream version, typically because the library is no longer maintained.
31 warnings.simplefilter("default", category=DeprecationWarning)
32 # TODO: Eric-Arellano has emailed the author to see if he is willing to accept a PR fixing the
33 # deprecation warnings and to release the fix. If he says yes, remove this once fixed.
34 warnings.filterwarnings("ignore", category=DeprecationWarning, module="ansicolors")
35 # Silence this ubiquitous warning. Several of our 3rd party deps incur this.
36 warnings.filterwarnings(
37 "ignore",
38 category=DeprecationWarning,
39 message="Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated",
40 )
41
42 @classmethod
43 def ensure_locale(cls):
44 # Sanity check for locale, See https://github.com/pantsbuild/pants/issues/2465.
45 # This check is done early to give good feedback to user on how to fix the problem. Other
46 # libraries called by Pants may fail with more obscure errors.
47 encoding = locale.getpreferredencoding()
48 if (
49 encoding.lower() != "utf-8"
50 and os.environ.get(cls.ENCODING_IGNORE_ENV_VAR, None) is None
51 ):
52 raise cls.InvalidLocaleError(
53 dedent(
54 """
55 Your system's preferred encoding is `{}`, but Pants requires `UTF-8`.
56 Specifically, Python's `locale.getpreferredencoding()` must resolve to `UTF-8`.
57
58 Fix it by setting the LC_* and LANG environment settings. Example:
59 LC_ALL=en_US.UTF-8
60 LANG=en_US.UTF-8
61 Or, bypass it by setting the below environment variable.
62 {}=1
63 Note: we cannot guarantee consistent behavior with this bypass enabled.
64 """.format(
65 encoding, cls.ENCODING_IGNORE_ENV_VAR
66 )
67 )
68 )
69
70 @staticmethod
71 def determine_entrypoint(env_var, default):
72 return os.environ.pop(env_var, default)
73
74 @staticmethod
75 def load_and_execute(entrypoint):
76 assert ":" in entrypoint, "ERROR: entrypoint must be of the form `module.path:callable`"
77 module_path, func_name = entrypoint.split(":", 1)
78 module = importlib.import_module(module_path)
79 entrypoint_main = getattr(module, func_name)
80 assert callable(entrypoint_main), "ERROR: entrypoint `{}` is not callable".format(
81 entrypoint
82 )
83 entrypoint_main()
84
85 @classmethod
86 def run(cls):
87 cls.setup_warnings()
88 cls.ensure_locale()
89 entrypoint = cls.determine_entrypoint(cls.ENTRYPOINT_ENV_VAR, cls.DEFAULT_ENTRYPOINT)
90 cls.load_and_execute(entrypoint)
91
92
93 def main():
94 PantsLoader.run()
95
96
97 if __name__ == "__main__":
98 main()
99
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/python/pants/bin/pants_loader.py b/src/python/pants/bin/pants_loader.py
--- a/src/python/pants/bin/pants_loader.py
+++ b/src/python/pants/bin/pants_loader.py
@@ -4,6 +4,7 @@
import importlib
import locale
import os
+import sys
import warnings
from textwrap import dedent
@@ -14,6 +15,8 @@
ENTRYPOINT_ENV_VAR = "PANTS_ENTRYPOINT"
DEFAULT_ENTRYPOINT = "pants.bin.pants_exe:main"
+ RECURSION_LIMIT_ENV_VAR = "PANTS_RECURSION_LIMIT"
+
ENCODING_IGNORE_ENV_VAR = "PANTS_IGNORE_UNRECOGNIZED_ENCODING"
class InvalidLocaleError(Exception):
@@ -67,6 +70,10 @@
)
)
+ @classmethod
+ def set_recursion_limit(cls):
+ sys.setrecursionlimit(int(os.environ.get(cls.RECURSION_LIMIT_ENV_VAR, "10000")))
+
@staticmethod
def determine_entrypoint(env_var, default):
return os.environ.pop(env_var, default)
@@ -86,6 +93,7 @@
def run(cls):
cls.setup_warnings()
cls.ensure_locale()
+ cls.set_recursion_limit()
entrypoint = cls.determine_entrypoint(cls.ENTRYPOINT_ENV_VAR, cls.DEFAULT_ENTRYPOINT)
cls.load_and_execute(entrypoint)
| {"golden_diff": "diff --git a/src/python/pants/bin/pants_loader.py b/src/python/pants/bin/pants_loader.py\n--- a/src/python/pants/bin/pants_loader.py\n+++ b/src/python/pants/bin/pants_loader.py\n@@ -4,6 +4,7 @@\n import importlib\n import locale\n import os\n+import sys\n import warnings\n from textwrap import dedent\n \n@@ -14,6 +15,8 @@\n ENTRYPOINT_ENV_VAR = \"PANTS_ENTRYPOINT\"\n DEFAULT_ENTRYPOINT = \"pants.bin.pants_exe:main\"\n \n+ RECURSION_LIMIT_ENV_VAR = \"PANTS_RECURSION_LIMIT\"\n+\n ENCODING_IGNORE_ENV_VAR = \"PANTS_IGNORE_UNRECOGNIZED_ENCODING\"\n \n class InvalidLocaleError(Exception):\n@@ -67,6 +70,10 @@\n )\n )\n \n+ @classmethod\n+ def set_recursion_limit(cls):\n+ sys.setrecursionlimit(int(os.environ.get(cls.RECURSION_LIMIT_ENV_VAR, \"10000\")))\n+\n @staticmethod\n def determine_entrypoint(env_var, default):\n return os.environ.pop(env_var, default)\n@@ -86,6 +93,7 @@\n def run(cls):\n cls.setup_warnings()\n cls.ensure_locale()\n+ cls.set_recursion_limit()\n entrypoint = cls.determine_entrypoint(cls.ENTRYPOINT_ENV_VAR, cls.DEFAULT_ENTRYPOINT)\n cls.load_and_execute(entrypoint)\n", "issue": "Target Hitting Recursion Limit During Pants Setup (with workaround)\n# Description of Problem\r\nWe\u2019re in the process of migrating from 1.25 to 2.1.0., and hit an issue trying to run a test on specific target. The target is large and results in a max recursion limit exceeded.\r\n\r\nI tried hacking on `sys.setrecursionlimit` and found for our use case 1021 was the min that would allow the test to succeed.\r\n\r\nWe can try breaking that target up, but the app it is testing is kind of a monolith so i don\u2019t know how successful that would be.\r\n\r\nCan you make a runtime limit in pants to handle?\r\n\r\nThis error happens in the pants setup before our pytest is run.\r\n\r\n# Workaround\r\nIn one of our plugin's `register.py` we added `sys.setrecursionlimit(1021)` and this resolved our problem.\n", "before_files": [{"content": "# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nimport importlib\nimport locale\nimport os\nimport warnings\nfrom textwrap import dedent\n\n\nclass PantsLoader:\n \"\"\"Loads and executes entrypoints.\"\"\"\n\n ENTRYPOINT_ENV_VAR = \"PANTS_ENTRYPOINT\"\n DEFAULT_ENTRYPOINT = \"pants.bin.pants_exe:main\"\n\n ENCODING_IGNORE_ENV_VAR = \"PANTS_IGNORE_UNRECOGNIZED_ENCODING\"\n\n class InvalidLocaleError(Exception):\n \"\"\"Raised when a valid locale can't be found.\"\"\"\n\n @staticmethod\n def setup_warnings():\n # We want to present warnings to the user, set this up before importing any of our own code,\n # to ensure all deprecation warnings are seen, including module deprecations.\n # The \"default\" action displays a warning for a particular file and line number exactly once.\n # See https://docs.python.org/3/library/warnings.html#the-warnings-filter for the complete list.\n #\n # However, we do turn off deprecation warnings for libraries that Pants uses for which we do\n # not have a fixed upstream version, typically because the library is no longer maintained.\n warnings.simplefilter(\"default\", category=DeprecationWarning)\n # TODO: Eric-Arellano has emailed the author to see if he is willing to accept a PR fixing the\n # deprecation warnings and to release the fix. If he says yes, remove this once fixed.\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning, module=\"ansicolors\")\n # Silence this ubiquitous warning. 
Several of our 3rd party deps incur this.\n warnings.filterwarnings(\n \"ignore\",\n category=DeprecationWarning,\n message=\"Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated\",\n )\n\n @classmethod\n def ensure_locale(cls):\n # Sanity check for locale, See https://github.com/pantsbuild/pants/issues/2465.\n # This check is done early to give good feedback to user on how to fix the problem. Other\n # libraries called by Pants may fail with more obscure errors.\n encoding = locale.getpreferredencoding()\n if (\n encoding.lower() != \"utf-8\"\n and os.environ.get(cls.ENCODING_IGNORE_ENV_VAR, None) is None\n ):\n raise cls.InvalidLocaleError(\n dedent(\n \"\"\"\n Your system's preferred encoding is `{}`, but Pants requires `UTF-8`.\n Specifically, Python's `locale.getpreferredencoding()` must resolve to `UTF-8`.\n\n Fix it by setting the LC_* and LANG environment settings. Example:\n LC_ALL=en_US.UTF-8\n LANG=en_US.UTF-8\n Or, bypass it by setting the below environment variable.\n {}=1\n Note: we cannot guarantee consistent behavior with this bypass enabled.\n \"\"\".format(\n encoding, cls.ENCODING_IGNORE_ENV_VAR\n )\n )\n )\n\n @staticmethod\n def determine_entrypoint(env_var, default):\n return os.environ.pop(env_var, default)\n\n @staticmethod\n def load_and_execute(entrypoint):\n assert \":\" in entrypoint, \"ERROR: entrypoint must be of the form `module.path:callable`\"\n module_path, func_name = entrypoint.split(\":\", 1)\n module = importlib.import_module(module_path)\n entrypoint_main = getattr(module, func_name)\n assert callable(entrypoint_main), \"ERROR: entrypoint `{}` is not callable\".format(\n entrypoint\n )\n entrypoint_main()\n\n @classmethod\n def run(cls):\n cls.setup_warnings()\n cls.ensure_locale()\n entrypoint = cls.determine_entrypoint(cls.ENTRYPOINT_ENV_VAR, cls.DEFAULT_ENTRYPOINT)\n cls.load_and_execute(entrypoint)\n\n\ndef main():\n PantsLoader.run()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "src/python/pants/bin/pants_loader.py"}], "after_files": [{"content": "# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nimport importlib\nimport locale\nimport os\nimport sys\nimport warnings\nfrom textwrap import dedent\n\n\nclass PantsLoader:\n \"\"\"Loads and executes entrypoints.\"\"\"\n\n ENTRYPOINT_ENV_VAR = \"PANTS_ENTRYPOINT\"\n DEFAULT_ENTRYPOINT = \"pants.bin.pants_exe:main\"\n\n RECURSION_LIMIT_ENV_VAR = \"PANTS_RECURSION_LIMIT\"\n\n ENCODING_IGNORE_ENV_VAR = \"PANTS_IGNORE_UNRECOGNIZED_ENCODING\"\n\n class InvalidLocaleError(Exception):\n \"\"\"Raised when a valid locale can't be found.\"\"\"\n\n @staticmethod\n def setup_warnings():\n # We want to present warnings to the user, set this up before importing any of our own code,\n # to ensure all deprecation warnings are seen, including module deprecations.\n # The \"default\" action displays a warning for a particular file and line number exactly once.\n # See https://docs.python.org/3/library/warnings.html#the-warnings-filter for the complete list.\n #\n # However, we do turn off deprecation warnings for libraries that Pants uses for which we do\n # not have a fixed upstream version, typically because the library is no longer maintained.\n warnings.simplefilter(\"default\", category=DeprecationWarning)\n # TODO: Eric-Arellano has emailed the author to see if he is willing to accept a PR fixing the\n # deprecation warnings and to release the fix. 
If he says yes, remove this once fixed.\n warnings.filterwarnings(\"ignore\", category=DeprecationWarning, module=\"ansicolors\")\n # Silence this ubiquitous warning. Several of our 3rd party deps incur this.\n warnings.filterwarnings(\n \"ignore\",\n category=DeprecationWarning,\n message=\"Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated\",\n )\n\n @classmethod\n def ensure_locale(cls):\n # Sanity check for locale, See https://github.com/pantsbuild/pants/issues/2465.\n # This check is done early to give good feedback to user on how to fix the problem. Other\n # libraries called by Pants may fail with more obscure errors.\n encoding = locale.getpreferredencoding()\n if (\n encoding.lower() != \"utf-8\"\n and os.environ.get(cls.ENCODING_IGNORE_ENV_VAR, None) is None\n ):\n raise cls.InvalidLocaleError(\n dedent(\n \"\"\"\n Your system's preferred encoding is `{}`, but Pants requires `UTF-8`.\n Specifically, Python's `locale.getpreferredencoding()` must resolve to `UTF-8`.\n\n Fix it by setting the LC_* and LANG environment settings. Example:\n LC_ALL=en_US.UTF-8\n LANG=en_US.UTF-8\n Or, bypass it by setting the below environment variable.\n {}=1\n Note: we cannot guarantee consistent behavior with this bypass enabled.\n \"\"\".format(\n encoding, cls.ENCODING_IGNORE_ENV_VAR\n )\n )\n )\n\n @classmethod\n def set_recursion_limit(cls):\n sys.setrecursionlimit(int(os.environ.get(cls.RECURSION_LIMIT_ENV_VAR, \"10000\")))\n\n @staticmethod\n def determine_entrypoint(env_var, default):\n return os.environ.pop(env_var, default)\n\n @staticmethod\n def load_and_execute(entrypoint):\n assert \":\" in entrypoint, \"ERROR: entrypoint must be of the form `module.path:callable`\"\n module_path, func_name = entrypoint.split(\":\", 1)\n module = importlib.import_module(module_path)\n entrypoint_main = getattr(module, func_name)\n assert callable(entrypoint_main), \"ERROR: entrypoint `{}` is not callable\".format(\n entrypoint\n )\n entrypoint_main()\n\n @classmethod\n def run(cls):\n cls.setup_warnings()\n cls.ensure_locale()\n cls.set_recursion_limit()\n entrypoint = cls.determine_entrypoint(cls.ENTRYPOINT_ENV_VAR, cls.DEFAULT_ENTRYPOINT)\n cls.load_and_execute(entrypoint)\n\n\ndef main():\n PantsLoader.run()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "src/python/pants/bin/pants_loader.py"}]} | 1,495 | 313 |
gh_patches_debug_3638 | rasdani/github-patches | git_diff | ivy-llc__ivy-19452 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
rfftn
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/frontends/numpy/fft/discrete_fourier_transform.py`
Content:
```
1 import ivy
2 from ivy.functional.frontends.numpy.func_wrapper import to_ivy_arrays_and_back
3 from ivy.func_wrapper import with_unsupported_dtypes
4
5
6 _SWAP_DIRECTION_MAP = {
7 None: "forward",
8 "backward": "forward",
9 "ortho": "ortho",
10 "forward": "backward",
11 }
12
13
14 def _swap_direction(norm):
15 try:
16 return _SWAP_DIRECTION_MAP[norm]
17 except KeyError:
18 raise ValueError(
19 f'Invalid norm value {norm}; should be "backward", "ortho" or "forward".'
20 ) from None
21
22
23 @to_ivy_arrays_and_back
24 def ifft(a, n=None, axis=-1, norm=None):
25 a = ivy.array(a, dtype=ivy.complex128)
26 if norm is None:
27 norm = "backward"
28 return ivy.ifft(a, axis, norm=norm, n=n)
29
30
31 @to_ivy_arrays_and_back
32 @with_unsupported_dtypes({"1.25.1 and below": ("float16",)}, "numpy")
33 def ifftshift(x, axes=None):
34 x = ivy.asarray(x)
35
36 if axes is None:
37 axes = tuple(range(x.ndim))
38 shift = [-(dim // 2) for dim in x.shape]
39 elif isinstance(
40 axes,
41 (int, type(ivy.uint8), type(ivy.uint16), type(ivy.uint32), type(ivy.uint64)),
42 ):
43 shift = -(x.shape[axes] // 2)
44 else:
45 shift = [-(x.shape[ax] // 2) for ax in axes]
46
47 roll = ivy.roll(x, shift, axis=axes)
48
49 return roll
50
51
52 @to_ivy_arrays_and_back
53 def fft(a, n=None, axis=-1, norm=None):
54 return ivy.fft(ivy.astype(a, ivy.complex128), axis, norm=norm, n=n)
55
56
57 @to_ivy_arrays_and_back
58 @with_unsupported_dtypes({"1.25.1 and below": ("float16",)}, "numpy")
59 def fftshift(x, axes=None):
60 x = ivy.asarray(x)
61
62 if axes is None:
63 axes = tuple(range(x.ndim))
64 shift = [(dim // 2) for dim in x.shape]
65 elif isinstance(
66 axes,
67 (int, type(ivy.uint8), type(ivy.uint16), type(ivy.uint32), type(ivy.uint64)),
68 ):
69 shift = x.shape[axes] // 2
70 else:
71 shift = [(x.shape[ax] // 2) for ax in axes]
72
73 roll = ivy.roll(x, shift, axis=axes)
74
75 return roll
76
77
78 @with_unsupported_dtypes({"1.25.1 and below": ("float16",)}, "numpy")
79 @to_ivy_arrays_and_back
80 def rfft(a, n=None, axis=-1, norm=None):
81 if norm is None:
82 norm = "backward"
83 a = ivy.array(a, dtype=ivy.float64)
84 return ivy.dft(a, axis=axis, inverse=False, onesided=True, dft_length=n, norm=norm)
85
86
87 @with_unsupported_dtypes({"1.25.1 and below": ("float16",)}, "numpy")
88 @to_ivy_arrays_and_back
89 def ihfft(a, n=None, axis=-1, norm=None):
90 if n is None:
91 n = a.shape[axis]
92 norm = _swap_direction(norm)
93 output = ivy.conj(rfft(a, n, axis, norm=norm).ivy_array)
94 return output
95
96
97 @with_unsupported_dtypes({"1.25.1 and below": ("int",)}, "numpy")
98 @to_ivy_arrays_and_back
99 def fftfreq(n, d=1.0):
100 if not isinstance(
101 n, (int, type(ivy.int8), type(ivy.int16), type(ivy.int32), type(ivy.int64))
102 ):
103 raise ValueError("n should be an integer")
104
105 N = (n - 1) // 2 + 1
106 val = 1.0 / (n * d)
107 results = ivy.empty(tuple([n]), dtype=int)
108
109 p1 = ivy.arange(0, N, dtype=int)
110 results[:N] = p1
111 p2 = ivy.arange(-(n // 2), 0, dtype=int)
112 results[N:] = p2
113
114 return results * val
115
116
117 @to_ivy_arrays_and_back
118 def rfftfreq(n, d=1.0):
119 if not isinstance(
120 n, (int, type(ivy.int8), type(ivy.int16), type(ivy.int32), type(ivy.int64))
121 ):
122 raise ValueError("n should be an integer")
123
124 val = 1.0 / (n * d)
125 N = n // 2 + 1
126 results = ivy.arange(0, N, dtype=int)
127 return results * val
128
129
130 @with_unsupported_dtypes({"1.24.3 and below": ("float16",)}, "numpy")
131 @to_ivy_arrays_and_back
132 def ifftn(a, s=None, axes=None, norm=None):
133 a = ivy.asarray(a, dtype=ivy.complex128)
134 a = ivy.ifftn(a, s=s, axes=axes, norm=norm)
135 return a
136
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ivy/functional/frontends/numpy/fft/discrete_fourier_transform.py b/ivy/functional/frontends/numpy/fft/discrete_fourier_transform.py
--- a/ivy/functional/frontends/numpy/fft/discrete_fourier_transform.py
+++ b/ivy/functional/frontends/numpy/fft/discrete_fourier_transform.py
@@ -133,3 +133,10 @@
a = ivy.asarray(a, dtype=ivy.complex128)
a = ivy.ifftn(a, s=s, axes=axes, norm=norm)
return a
+
+
+@with_unsupported_dtypes({"1.24.3 and below": ("float16",)}, "numpy")
+@to_ivy_arrays_and_back
+def rfftn(a, s=None, axes=None, norm=None):
+ a = ivy.asarray(a, dtype=ivy.complex128)
+ return ivy.rfftn(a, s=s, axes=axes, norm=norm)
| {"golden_diff": "diff --git a/ivy/functional/frontends/numpy/fft/discrete_fourier_transform.py b/ivy/functional/frontends/numpy/fft/discrete_fourier_transform.py\n--- a/ivy/functional/frontends/numpy/fft/discrete_fourier_transform.py\n+++ b/ivy/functional/frontends/numpy/fft/discrete_fourier_transform.py\n@@ -133,3 +133,10 @@\n a = ivy.asarray(a, dtype=ivy.complex128)\n a = ivy.ifftn(a, s=s, axes=axes, norm=norm)\n return a\n+\n+\n+@with_unsupported_dtypes({\"1.24.3 and below\": (\"float16\",)}, \"numpy\")\n+@to_ivy_arrays_and_back\n+def rfftn(a, s=None, axes=None, norm=None):\n+ a = ivy.asarray(a, dtype=ivy.complex128)\n+ return ivy.rfftn(a, s=s, axes=axes, norm=norm)\n", "issue": "rfftn\n\n", "before_files": [{"content": "import ivy\nfrom ivy.functional.frontends.numpy.func_wrapper import to_ivy_arrays_and_back\nfrom ivy.func_wrapper import with_unsupported_dtypes\n\n\n_SWAP_DIRECTION_MAP = {\n None: \"forward\",\n \"backward\": \"forward\",\n \"ortho\": \"ortho\",\n \"forward\": \"backward\",\n}\n\n\ndef _swap_direction(norm):\n try:\n return _SWAP_DIRECTION_MAP[norm]\n except KeyError:\n raise ValueError(\n f'Invalid norm value {norm}; should be \"backward\", \"ortho\" or \"forward\".'\n ) from None\n\n\n@to_ivy_arrays_and_back\ndef ifft(a, n=None, axis=-1, norm=None):\n a = ivy.array(a, dtype=ivy.complex128)\n if norm is None:\n norm = \"backward\"\n return ivy.ifft(a, axis, norm=norm, n=n)\n\n\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes({\"1.25.1 and below\": (\"float16\",)}, \"numpy\")\ndef ifftshift(x, axes=None):\n x = ivy.asarray(x)\n\n if axes is None:\n axes = tuple(range(x.ndim))\n shift = [-(dim // 2) for dim in x.shape]\n elif isinstance(\n axes,\n (int, type(ivy.uint8), type(ivy.uint16), type(ivy.uint32), type(ivy.uint64)),\n ):\n shift = -(x.shape[axes] // 2)\n else:\n shift = [-(x.shape[ax] // 2) for ax in axes]\n\n roll = ivy.roll(x, shift, axis=axes)\n\n return roll\n\n\n@to_ivy_arrays_and_back\ndef fft(a, n=None, axis=-1, norm=None):\n return ivy.fft(ivy.astype(a, ivy.complex128), axis, norm=norm, n=n)\n\n\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes({\"1.25.1 and below\": (\"float16\",)}, \"numpy\")\ndef fftshift(x, axes=None):\n x = ivy.asarray(x)\n\n if axes is None:\n axes = tuple(range(x.ndim))\n shift = [(dim // 2) for dim in x.shape]\n elif isinstance(\n axes,\n (int, type(ivy.uint8), type(ivy.uint16), type(ivy.uint32), type(ivy.uint64)),\n ):\n shift = x.shape[axes] // 2\n else:\n shift = [(x.shape[ax] // 2) for ax in axes]\n\n roll = ivy.roll(x, shift, axis=axes)\n\n return roll\n\n\n@with_unsupported_dtypes({\"1.25.1 and below\": (\"float16\",)}, \"numpy\")\n@to_ivy_arrays_and_back\ndef rfft(a, n=None, axis=-1, norm=None):\n if norm is None:\n norm = \"backward\"\n a = ivy.array(a, dtype=ivy.float64)\n return ivy.dft(a, axis=axis, inverse=False, onesided=True, dft_length=n, norm=norm)\n\n\n@with_unsupported_dtypes({\"1.25.1 and below\": (\"float16\",)}, \"numpy\")\n@to_ivy_arrays_and_back\ndef ihfft(a, n=None, axis=-1, norm=None):\n if n is None:\n n = a.shape[axis]\n norm = _swap_direction(norm)\n output = ivy.conj(rfft(a, n, axis, norm=norm).ivy_array)\n return output\n\n\n@with_unsupported_dtypes({\"1.25.1 and below\": (\"int\",)}, \"numpy\")\n@to_ivy_arrays_and_back\ndef fftfreq(n, d=1.0):\n if not isinstance(\n n, (int, type(ivy.int8), type(ivy.int16), type(ivy.int32), type(ivy.int64))\n ):\n raise ValueError(\"n should be an integer\")\n\n N = (n - 1) // 2 + 1\n val = 1.0 / (n * d)\n results = ivy.empty(tuple([n]), dtype=int)\n\n p1 = 
ivy.arange(0, N, dtype=int)\n results[:N] = p1\n p2 = ivy.arange(-(n // 2), 0, dtype=int)\n results[N:] = p2\n\n return results * val\n\n\n@to_ivy_arrays_and_back\ndef rfftfreq(n, d=1.0):\n if not isinstance(\n n, (int, type(ivy.int8), type(ivy.int16), type(ivy.int32), type(ivy.int64))\n ):\n raise ValueError(\"n should be an integer\")\n\n val = 1.0 / (n * d)\n N = n // 2 + 1\n results = ivy.arange(0, N, dtype=int)\n return results * val\n\n\n@with_unsupported_dtypes({\"1.24.3 and below\": (\"float16\",)}, \"numpy\")\n@to_ivy_arrays_and_back\ndef ifftn(a, s=None, axes=None, norm=None):\n a = ivy.asarray(a, dtype=ivy.complex128)\n a = ivy.ifftn(a, s=s, axes=axes, norm=norm)\n return a\n", "path": "ivy/functional/frontends/numpy/fft/discrete_fourier_transform.py"}], "after_files": [{"content": "import ivy\nfrom ivy.functional.frontends.numpy.func_wrapper import to_ivy_arrays_and_back\nfrom ivy.func_wrapper import with_unsupported_dtypes\n\n\n_SWAP_DIRECTION_MAP = {\n None: \"forward\",\n \"backward\": \"forward\",\n \"ortho\": \"ortho\",\n \"forward\": \"backward\",\n}\n\n\ndef _swap_direction(norm):\n try:\n return _SWAP_DIRECTION_MAP[norm]\n except KeyError:\n raise ValueError(\n f'Invalid norm value {norm}; should be \"backward\", \"ortho\" or \"forward\".'\n ) from None\n\n\n@to_ivy_arrays_and_back\ndef ifft(a, n=None, axis=-1, norm=None):\n a = ivy.array(a, dtype=ivy.complex128)\n if norm is None:\n norm = \"backward\"\n return ivy.ifft(a, axis, norm=norm, n=n)\n\n\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes({\"1.25.1 and below\": (\"float16\",)}, \"numpy\")\ndef ifftshift(x, axes=None):\n x = ivy.asarray(x)\n\n if axes is None:\n axes = tuple(range(x.ndim))\n shift = [-(dim // 2) for dim in x.shape]\n elif isinstance(\n axes,\n (int, type(ivy.uint8), type(ivy.uint16), type(ivy.uint32), type(ivy.uint64)),\n ):\n shift = -(x.shape[axes] // 2)\n else:\n shift = [-(x.shape[ax] // 2) for ax in axes]\n\n roll = ivy.roll(x, shift, axis=axes)\n\n return roll\n\n\n@to_ivy_arrays_and_back\ndef fft(a, n=None, axis=-1, norm=None):\n return ivy.fft(ivy.astype(a, ivy.complex128), axis, norm=norm, n=n)\n\n\n@to_ivy_arrays_and_back\n@with_unsupported_dtypes({\"1.25.1 and below\": (\"float16\",)}, \"numpy\")\ndef fftshift(x, axes=None):\n x = ivy.asarray(x)\n\n if axes is None:\n axes = tuple(range(x.ndim))\n shift = [(dim // 2) for dim in x.shape]\n elif isinstance(\n axes,\n (int, type(ivy.uint8), type(ivy.uint16), type(ivy.uint32), type(ivy.uint64)),\n ):\n shift = x.shape[axes] // 2\n else:\n shift = [(x.shape[ax] // 2) for ax in axes]\n\n roll = ivy.roll(x, shift, axis=axes)\n\n return roll\n\n\n@with_unsupported_dtypes({\"1.25.1 and below\": (\"float16\",)}, \"numpy\")\n@to_ivy_arrays_and_back\ndef rfft(a, n=None, axis=-1, norm=None):\n if norm is None:\n norm = \"backward\"\n a = ivy.array(a, dtype=ivy.float64)\n return ivy.dft(a, axis=axis, inverse=False, onesided=True, dft_length=n, norm=norm)\n\n\n@with_unsupported_dtypes({\"1.25.1 and below\": (\"float16\",)}, \"numpy\")\n@to_ivy_arrays_and_back\ndef ihfft(a, n=None, axis=-1, norm=None):\n if n is None:\n n = a.shape[axis]\n norm = _swap_direction(norm)\n output = ivy.conj(rfft(a, n, axis, norm=norm).ivy_array)\n return output\n\n\n@with_unsupported_dtypes({\"1.25.1 and below\": (\"int\",)}, \"numpy\")\n@to_ivy_arrays_and_back\ndef fftfreq(n, d=1.0):\n if not isinstance(\n n, (int, type(ivy.int8), type(ivy.int16), type(ivy.int32), type(ivy.int64))\n ):\n raise ValueError(\"n should be an integer\")\n\n N = (n - 1) // 2 + 1\n val = 
1.0 / (n * d)\n results = ivy.empty(tuple([n]), dtype=int)\n\n p1 = ivy.arange(0, N, dtype=int)\n results[:N] = p1\n p2 = ivy.arange(-(n // 2), 0, dtype=int)\n results[N:] = p2\n\n return results * val\n\n\n@to_ivy_arrays_and_back\ndef rfftfreq(n, d=1.0):\n if not isinstance(\n n, (int, type(ivy.int8), type(ivy.int16), type(ivy.int32), type(ivy.int64))\n ):\n raise ValueError(\"n should be an integer\")\n\n val = 1.0 / (n * d)\n N = n // 2 + 1\n results = ivy.arange(0, N, dtype=int)\n return results * val\n\n\n@with_unsupported_dtypes({\"1.24.3 and below\": (\"float16\",)}, \"numpy\")\n@to_ivy_arrays_and_back\ndef ifftn(a, s=None, axes=None, norm=None):\n a = ivy.asarray(a, dtype=ivy.complex128)\n a = ivy.ifftn(a, s=s, axes=axes, norm=norm)\n return a\n\n\n@with_unsupported_dtypes({\"1.24.3 and below\": (\"float16\",)}, \"numpy\")\n@to_ivy_arrays_and_back\ndef rfftn(a, s=None, axes=None, norm=None):\n a = ivy.asarray(a, dtype=ivy.complex128)\n return ivy.rfftn(a, s=s, axes=axes, norm=norm)\n", "path": "ivy/functional/frontends/numpy/fft/discrete_fourier_transform.py"}]} | 1,781 | 225 |
gh_patches_debug_29782 | rasdani/github-patches | git_diff | cupy__cupy-7693 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix `cupyx.scipy.sparse.csgraph` module to work with the latest RAPIDS cuGraph
Follow the API change introduced in RAPIDS 22.12.
https://github.com/cupy/cupy/pull/7647#discussion_r1244820097
cc/ @pentschev
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cupyx/scipy/sparse/csgraph/_traversal.py`
Content:
```
1 import cupy
2 import cupyx.scipy.sparse
3 try:
4 import pylibcugraph
5 pylibcugraph_available = True
6 except ModuleNotFoundError:
7 pylibcugraph_available = False
8
9
10 def connected_components(csgraph, directed=True, connection='weak',
11 return_labels=True):
12 """Analyzes the connected components of a sparse graph
13
14 Args:
15 csgraph (cupy.ndarray of cupyx.scipy.sparse.csr_matrix): The adjacency
16 matrix representing connectivity among nodes.
17 directed (bool): If ``True``, it operates on a directed graph. If
18 ``False``, it operates on an undirected graph.
19 connection (str): ``'weak'`` or ``'strong'``. For directed graphs, the
20 type of connection to use. Nodes i and j are "strongly" connected
21 only when a path exists both from i to j and from j to i.
22 If ``directed`` is ``False``, this argument is ignored.
23 return_labels (bool): If ``True``, it returns the labels for each of
24 the connected components.
25
26 Returns:
27 tuple of int and cupy.ndarray, or int:
28 If ``return_labels`` == ``True``, returns a tuple ``(n, labels)``,
29 where ``n`` is the number of connected components and ``labels`` is
30 labels of each connected components. Otherwise, returns ``n``.
31
32 .. seealso:: :func:`scipy.sparse.csgraph.connected_components`
33 """
34 if not pylibcugraph_available:
35 raise RuntimeError('pylibcugraph is not available')
36
37 connection = connection.lower()
38 if connection not in ('weak', 'strong'):
39 raise ValueError("connection must be 'weak' or 'strong'")
40
41 if not directed:
42 connection = 'weak'
43
44 if csgraph.ndim != 2:
45 raise ValueError('graph should have two dimensions')
46
47 if not cupyx.scipy.sparse.isspmatrix_csr(csgraph):
48 csgraph = cupyx.scipy.sparse.csr_matrix(csgraph)
49 m, m1 = csgraph.shape
50 if m != m1:
51 raise ValueError('graph should be a square array')
52 if csgraph.nnz == 0:
53 return m, cupy.arange(m, dtype=csgraph.indices.dtype)
54 labels = cupy.empty(m, dtype=csgraph.indices.dtype)
55
56 if connection == 'strong':
57 pylibcugraph.strongly_connected_components(
58 offsets=csgraph.indptr, indices=csgraph.indices, weights=None,
59 num_verts=m, num_edges=csgraph.nnz, labels=labels)
60 else:
61 csgraph += csgraph.T
62 if not cupyx.scipy.sparse.isspmatrix_csr(csgraph):
63 csgraph = cupyx.scipy.sparse.csr_matrix(csgraph)
64 pylibcugraph.weakly_connected_components(
65 offsets=csgraph.indptr, indices=csgraph.indices, weights=None,
66 num_verts=m, num_edges=csgraph.nnz, labels=labels)
67 # Note: In the case of weak connection, cuGraph creates labels with a
68 # start number of 1, so decrement the label number.
69 labels -= 1
70
71 count = cupy.zeros((1,), dtype=csgraph.indices.dtype)
72 root_labels = cupy.empty((m,), dtype=csgraph.indices.dtype)
73 _cupy_count_components(labels, count, root_labels, size=m)
74 n = int(count[0])
75 if not return_labels:
76 return n
77 _cupy_adjust_labels(n, cupy.sort(root_labels[:n]), labels)
78 return n, labels
79
80
81 _cupy_count_components = cupy.ElementwiseKernel(
82 '',
83 'raw I labels, raw int32 count, raw int32 root_labels',
84 '''
85 int j = i;
86 while (j != labels[j]) { j = labels[j]; }
87 if (j != i) {
88 labels[i] = j;
89 } else {
90 int k = atomicAdd(&count[0], 1);
91 root_labels[k] = i;
92 }
93 ''',
94 '_cupy_count_components')
95
96
97 _cupy_adjust_labels = cupy.ElementwiseKernel(
98 'int32 n_root_labels, raw I root_labels',
99 'I labels',
100 '''
101 int cur_label = labels;
102 int j_min = 0;
103 int j_max = n_root_labels - 1;
104 int j = (j_min + j_max) / 2;
105 while (j_min < j_max) {
106 if (cur_label == root_labels[j]) break;
107 if (cur_label < root_labels[j]) {
108 j_max = j - 1;
109 } else {
110 j_min = j + 1;
111 }
112 j = (j_min + j_max) / 2;
113 }
114 labels = j;
115 ''',
116 '_cupy_adjust_labels')
117
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cupyx/scipy/sparse/csgraph/_traversal.py b/cupyx/scipy/sparse/csgraph/_traversal.py
--- a/cupyx/scipy/sparse/csgraph/_traversal.py
+++ b/cupyx/scipy/sparse/csgraph/_traversal.py
@@ -51,9 +51,9 @@
raise ValueError('graph should be a square array')
if csgraph.nnz == 0:
return m, cupy.arange(m, dtype=csgraph.indices.dtype)
- labels = cupy.empty(m, dtype=csgraph.indices.dtype)
if connection == 'strong':
+ labels = cupy.empty(m, dtype=csgraph.indices.dtype)
pylibcugraph.strongly_connected_components(
offsets=csgraph.indptr, indices=csgraph.indices, weights=None,
num_verts=m, num_edges=csgraph.nnz, labels=labels)
@@ -61,12 +61,15 @@
csgraph += csgraph.T
if not cupyx.scipy.sparse.isspmatrix_csr(csgraph):
csgraph = cupyx.scipy.sparse.csr_matrix(csgraph)
- pylibcugraph.weakly_connected_components(
- offsets=csgraph.indptr, indices=csgraph.indices, weights=None,
- num_verts=m, num_edges=csgraph.nnz, labels=labels)
- # Note: In the case of weak connection, cuGraph creates labels with a
- # start number of 1, so decrement the label number.
- labels -= 1
+ _, labels = pylibcugraph.weakly_connected_components(
+ resource_handle=None,
+ graph=None,
+ indices=csgraph.indices,
+ offsets=csgraph.indptr,
+ weights=None,
+ labels=None,
+ do_expensive_check=False,
+ )
count = cupy.zeros((1,), dtype=csgraph.indices.dtype)
root_labels = cupy.empty((m,), dtype=csgraph.indices.dtype)
| {"golden_diff": "diff --git a/cupyx/scipy/sparse/csgraph/_traversal.py b/cupyx/scipy/sparse/csgraph/_traversal.py\n--- a/cupyx/scipy/sparse/csgraph/_traversal.py\n+++ b/cupyx/scipy/sparse/csgraph/_traversal.py\n@@ -51,9 +51,9 @@\n raise ValueError('graph should be a square array')\n if csgraph.nnz == 0:\n return m, cupy.arange(m, dtype=csgraph.indices.dtype)\n- labels = cupy.empty(m, dtype=csgraph.indices.dtype)\n \n if connection == 'strong':\n+ labels = cupy.empty(m, dtype=csgraph.indices.dtype)\n pylibcugraph.strongly_connected_components(\n offsets=csgraph.indptr, indices=csgraph.indices, weights=None,\n num_verts=m, num_edges=csgraph.nnz, labels=labels)\n@@ -61,12 +61,15 @@\n csgraph += csgraph.T\n if not cupyx.scipy.sparse.isspmatrix_csr(csgraph):\n csgraph = cupyx.scipy.sparse.csr_matrix(csgraph)\n- pylibcugraph.weakly_connected_components(\n- offsets=csgraph.indptr, indices=csgraph.indices, weights=None,\n- num_verts=m, num_edges=csgraph.nnz, labels=labels)\n- # Note: In the case of weak connection, cuGraph creates labels with a\n- # start number of 1, so decrement the label number.\n- labels -= 1\n+ _, labels = pylibcugraph.weakly_connected_components(\n+ resource_handle=None,\n+ graph=None,\n+ indices=csgraph.indices,\n+ offsets=csgraph.indptr,\n+ weights=None,\n+ labels=None,\n+ do_expensive_check=False,\n+ )\n \n count = cupy.zeros((1,), dtype=csgraph.indices.dtype)\n root_labels = cupy.empty((m,), dtype=csgraph.indices.dtype)\n", "issue": "Fix `cupyx.scipy.sparse.csgraph` module to work with the latest RAPIDS cuGraph\nFollow the API change introduced in RAPIDS 22.12.\r\nhttps://github.com/cupy/cupy/pull/7647#discussion_r1244820097\r\n\r\ncc/ @pentschev \n", "before_files": [{"content": "import cupy\nimport cupyx.scipy.sparse\ntry:\n import pylibcugraph\n pylibcugraph_available = True\nexcept ModuleNotFoundError:\n pylibcugraph_available = False\n\n\ndef connected_components(csgraph, directed=True, connection='weak',\n return_labels=True):\n \"\"\"Analyzes the connected components of a sparse graph\n\n Args:\n csgraph (cupy.ndarray of cupyx.scipy.sparse.csr_matrix): The adjacency\n matrix representing connectivity among nodes.\n directed (bool): If ``True``, it operates on a directed graph. If\n ``False``, it operates on an undirected graph.\n connection (str): ``'weak'`` or ``'strong'``. For directed graphs, the\n type of connection to use. Nodes i and j are \"strongly\" connected\n only when a path exists both from i to j and from j to i.\n If ``directed`` is ``False``, this argument is ignored.\n return_labels (bool): If ``True``, it returns the labels for each of\n the connected components.\n\n Returns:\n tuple of int and cupy.ndarray, or int:\n If ``return_labels`` == ``True``, returns a tuple ``(n, labels)``,\n where ``n`` is the number of connected components and ``labels`` is\n labels of each connected components. Otherwise, returns ``n``.\n\n .. 
seealso:: :func:`scipy.sparse.csgraph.connected_components`\n \"\"\"\n if not pylibcugraph_available:\n raise RuntimeError('pylibcugraph is not available')\n\n connection = connection.lower()\n if connection not in ('weak', 'strong'):\n raise ValueError(\"connection must be 'weak' or 'strong'\")\n\n if not directed:\n connection = 'weak'\n\n if csgraph.ndim != 2:\n raise ValueError('graph should have two dimensions')\n\n if not cupyx.scipy.sparse.isspmatrix_csr(csgraph):\n csgraph = cupyx.scipy.sparse.csr_matrix(csgraph)\n m, m1 = csgraph.shape\n if m != m1:\n raise ValueError('graph should be a square array')\n if csgraph.nnz == 0:\n return m, cupy.arange(m, dtype=csgraph.indices.dtype)\n labels = cupy.empty(m, dtype=csgraph.indices.dtype)\n\n if connection == 'strong':\n pylibcugraph.strongly_connected_components(\n offsets=csgraph.indptr, indices=csgraph.indices, weights=None,\n num_verts=m, num_edges=csgraph.nnz, labels=labels)\n else:\n csgraph += csgraph.T\n if not cupyx.scipy.sparse.isspmatrix_csr(csgraph):\n csgraph = cupyx.scipy.sparse.csr_matrix(csgraph)\n pylibcugraph.weakly_connected_components(\n offsets=csgraph.indptr, indices=csgraph.indices, weights=None,\n num_verts=m, num_edges=csgraph.nnz, labels=labels)\n # Note: In the case of weak connection, cuGraph creates labels with a\n # start number of 1, so decrement the label number.\n labels -= 1\n\n count = cupy.zeros((1,), dtype=csgraph.indices.dtype)\n root_labels = cupy.empty((m,), dtype=csgraph.indices.dtype)\n _cupy_count_components(labels, count, root_labels, size=m)\n n = int(count[0])\n if not return_labels:\n return n\n _cupy_adjust_labels(n, cupy.sort(root_labels[:n]), labels)\n return n, labels\n\n\n_cupy_count_components = cupy.ElementwiseKernel(\n '',\n 'raw I labels, raw int32 count, raw int32 root_labels',\n '''\n int j = i;\n while (j != labels[j]) { j = labels[j]; }\n if (j != i) {\n labels[i] = j;\n } else {\n int k = atomicAdd(&count[0], 1);\n root_labels[k] = i;\n }\n ''',\n '_cupy_count_components')\n\n\n_cupy_adjust_labels = cupy.ElementwiseKernel(\n 'int32 n_root_labels, raw I root_labels',\n 'I labels',\n '''\n int cur_label = labels;\n int j_min = 0;\n int j_max = n_root_labels - 1;\n int j = (j_min + j_max) / 2;\n while (j_min < j_max) {\n if (cur_label == root_labels[j]) break;\n if (cur_label < root_labels[j]) {\n j_max = j - 1;\n } else {\n j_min = j + 1;\n }\n j = (j_min + j_max) / 2;\n }\n labels = j;\n ''',\n '_cupy_adjust_labels')\n", "path": "cupyx/scipy/sparse/csgraph/_traversal.py"}], "after_files": [{"content": "import cupy\nimport cupyx.scipy.sparse\ntry:\n import pylibcugraph\n pylibcugraph_available = True\nexcept ModuleNotFoundError:\n pylibcugraph_available = False\n\n\ndef connected_components(csgraph, directed=True, connection='weak',\n return_labels=True):\n \"\"\"Analyzes the connected components of a sparse graph\n\n Args:\n csgraph (cupy.ndarray of cupyx.scipy.sparse.csr_matrix): The adjacency\n matrix representing connectivity among nodes.\n directed (bool): If ``True``, it operates on a directed graph. If\n ``False``, it operates on an undirected graph.\n connection (str): ``'weak'`` or ``'strong'``. For directed graphs, the\n type of connection to use. 
Nodes i and j are \"strongly\" connected\n only when a path exists both from i to j and from j to i.\n If ``directed`` is ``False``, this argument is ignored.\n return_labels (bool): If ``True``, it returns the labels for each of\n the connected components.\n\n Returns:\n tuple of int and cupy.ndarray, or int:\n If ``return_labels`` == ``True``, returns a tuple ``(n, labels)``,\n where ``n`` is the number of connected components and ``labels`` is\n labels of each connected components. Otherwise, returns ``n``.\n\n .. seealso:: :func:`scipy.sparse.csgraph.connected_components`\n \"\"\"\n if not pylibcugraph_available:\n raise RuntimeError('pylibcugraph is not available')\n\n connection = connection.lower()\n if connection not in ('weak', 'strong'):\n raise ValueError(\"connection must be 'weak' or 'strong'\")\n\n if not directed:\n connection = 'weak'\n\n if csgraph.ndim != 2:\n raise ValueError('graph should have two dimensions')\n\n if not cupyx.scipy.sparse.isspmatrix_csr(csgraph):\n csgraph = cupyx.scipy.sparse.csr_matrix(csgraph)\n m, m1 = csgraph.shape\n if m != m1:\n raise ValueError('graph should be a square array')\n if csgraph.nnz == 0:\n return m, cupy.arange(m, dtype=csgraph.indices.dtype)\n\n if connection == 'strong':\n labels = cupy.empty(m, dtype=csgraph.indices.dtype)\n pylibcugraph.strongly_connected_components(\n offsets=csgraph.indptr, indices=csgraph.indices, weights=None,\n num_verts=m, num_edges=csgraph.nnz, labels=labels)\n else:\n csgraph += csgraph.T\n if not cupyx.scipy.sparse.isspmatrix_csr(csgraph):\n csgraph = cupyx.scipy.sparse.csr_matrix(csgraph)\n _, labels = pylibcugraph.weakly_connected_components(\n resource_handle=None,\n graph=None,\n indices=csgraph.indices,\n offsets=csgraph.indptr,\n weights=None,\n labels=None,\n do_expensive_check=False,\n )\n\n count = cupy.zeros((1,), dtype=csgraph.indices.dtype)\n root_labels = cupy.empty((m,), dtype=csgraph.indices.dtype)\n _cupy_count_components(labels, count, root_labels, size=m)\n n = int(count[0])\n if not return_labels:\n return n\n _cupy_adjust_labels(n, cupy.sort(root_labels[:n]), labels)\n return n, labels\n\n\n_cupy_count_components = cupy.ElementwiseKernel(\n '',\n 'raw I labels, raw int32 count, raw int32 root_labels',\n '''\n int j = i;\n while (j != labels[j]) { j = labels[j]; }\n if (j != i) {\n labels[i] = j;\n } else {\n int k = atomicAdd(&count[0], 1);\n root_labels[k] = i;\n }\n ''',\n '_cupy_count_components')\n\n\n_cupy_adjust_labels = cupy.ElementwiseKernel(\n 'int32 n_root_labels, raw I root_labels',\n 'I labels',\n '''\n int cur_label = labels;\n int j_min = 0;\n int j_max = n_root_labels - 1;\n int j = (j_min + j_max) / 2;\n while (j_min < j_max) {\n if (cur_label == root_labels[j]) break;\n if (cur_label < root_labels[j]) {\n j_max = j - 1;\n } else {\n j_min = j + 1;\n }\n j = (j_min + j_max) / 2;\n }\n labels = j;\n ''',\n '_cupy_adjust_labels')\n", "path": "cupyx/scipy/sparse/csgraph/_traversal.py"}]} | 1,634 | 439 |
gh_patches_debug_2892 | rasdani/github-patches | git_diff | joke2k__faker-435 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Published packages include docs/ as a module
The published wheel and sdist on PyPI for at least version 0.7.5 include `docs/__init__.py` as a top-level module in addition to `faker`. This conflicts with some other packages we use (PyICU) and seems like bad package hygiene, especially since the `docs` dir in this repository is definitely not a module. My guess is that a `__init__.py` made it in there on the maintainer's machine before running `setup.py` and it was erroneously discovered as a module.
We're going to republish the package to our own internal repository, but I think it would help the community to `git clean` as necessary and re-publish a new version, and consider adding necessary exclusions to the `setup.py` or `MANIFEST.in`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 # coding=utf-8
3
4 import os
5 import io
6
7 from setuptools import setup, find_packages
8
9 here = os.path.abspath(os.path.dirname(__file__))
10 README = io.open(os.path.join(here, 'README.rst'), encoding="utf8").read()
11
12
13 version = '0.7.5'
14
15 # this module can be zip-safe if the zipimporter implements iter_modules or if
16 # pkgutil.iter_importer_modules has registered a dispatch for the zipimporter.
17 try:
18 import pkgutil
19 import zipimport
20 zip_safe = hasattr(zipimport.zipimporter, "iter_modules") or \
21 zipimport.zipimporter in pkgutil.iter_importer_modules.registry.keys()
22 except (ImportError, AttributeError):
23 zip_safe = False
24
25 setup(
26 name='Faker',
27 version=version,
28 description="Faker is a Python package that generates fake data for you.",
29 long_description=README,
30 entry_points={
31 'console_scripts': ['faker=faker.cli:execute_from_command_line'],
32 },
33 classifiers=[
34 # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
35 'Development Status :: 3 - Alpha',
36 'Environment :: Console',
37 'Intended Audience :: Developers',
38 'Programming Language :: Python',
39 'Programming Language :: Python :: 2',
40 'Programming Language :: Python :: 2.7',
41 'Programming Language :: Python :: 3',
42 'Programming Language :: Python :: 3.4',
43 'Programming Language :: Python :: 3.5',
44 'Topic :: Software Development :: Libraries :: Python Modules',
45 'Topic :: Software Development :: Testing',
46 'Topic :: Utilities',
47 'License :: OSI Approved :: MIT License'
48 ],
49 keywords='faker fixtures data test mock generator',
50 author='joke2k',
51 author_email='[email protected]',
52 url='https://github.com/joke2k/faker',
53 license='MIT License',
54 packages=find_packages(),
55 platforms=["any"],
56 test_suite='faker.tests',
57 zip_safe=zip_safe,
58 install_requires=[
59 "python-dateutil>=2.4",
60 "six",
61 ],
62 extras_require={
63 ':python_version=="2.7"': [
64 'ipaddress',
65 ],
66 ':python_version=="3.0"': [
67 'importlib',
68 ],
69 ':python_version=="3.2"': [
70 'ipaddress',
71 ],
72 }
73 )
74
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -51,7 +51,7 @@
author_email='[email protected]',
url='https://github.com/joke2k/faker',
license='MIT License',
- packages=find_packages(),
+ packages=find_packages(exclude=("docs",)),
platforms=["any"],
test_suite='faker.tests',
zip_safe=zip_safe,
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -51,7 +51,7 @@\n author_email='[email protected]',\n url='https://github.com/joke2k/faker',\n license='MIT License',\n- packages=find_packages(),\n+ packages=find_packages(exclude=(\"docs\",)),\n platforms=[\"any\"],\n test_suite='faker.tests',\n zip_safe=zip_safe,\n", "issue": "Published packages include docs/ as a module\nThe published wheel and sdist on PyPI for at least version 0.7.5 include `docs/__init__.py` as a top-level module in addition to `faker`. This conflicts with some other packages we use (PyICU) and seems like bad package hygiene, especially since the `docs` dir in this repository is definitely not a module. My guess is that a `__init__.py` made it in there on the maintainer's machine before running `setup.py` and it was erroneously discovered as a module.\r\n\r\nWe're going to republish the package to our own internal repository, but I think it would help the community to `git clean` as necessary and re-publish a new version, and consider adding necessary exclusions to the `setup.py` or `MANIFEST.in`.\n", "before_files": [{"content": "#!/usr/bin/env python\n# coding=utf-8\n\nimport os\nimport io\n\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\nREADME = io.open(os.path.join(here, 'README.rst'), encoding=\"utf8\").read()\n\n\nversion = '0.7.5'\n\n# this module can be zip-safe if the zipimporter implements iter_modules or if\n# pkgutil.iter_importer_modules has registered a dispatch for the zipimporter.\ntry:\n import pkgutil\n import zipimport\n zip_safe = hasattr(zipimport.zipimporter, \"iter_modules\") or \\\n zipimport.zipimporter in pkgutil.iter_importer_modules.registry.keys()\nexcept (ImportError, AttributeError):\n zip_safe = False\n\nsetup(\n name='Faker',\n version=version,\n description=\"Faker is a Python package that generates fake data for you.\",\n long_description=README,\n entry_points={\n 'console_scripts': ['faker=faker.cli:execute_from_command_line'],\n },\n classifiers=[\n # See https://pypi.python.org/pypi?%3Aaction=list_classifiers\n 'Development Status :: 3 - Alpha',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Software Development :: Testing',\n 'Topic :: Utilities',\n 'License :: OSI Approved :: MIT License'\n ],\n keywords='faker fixtures data test mock generator',\n author='joke2k',\n author_email='[email protected]',\n url='https://github.com/joke2k/faker',\n license='MIT License',\n packages=find_packages(),\n platforms=[\"any\"],\n test_suite='faker.tests',\n zip_safe=zip_safe,\n install_requires=[\n \"python-dateutil>=2.4\",\n \"six\",\n ],\n extras_require={\n ':python_version==\"2.7\"': [\n 'ipaddress',\n ],\n ':python_version==\"3.0\"': [\n 'importlib',\n ],\n ':python_version==\"3.2\"': [\n 'ipaddress',\n ],\n }\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# coding=utf-8\n\nimport os\nimport io\n\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\nREADME = io.open(os.path.join(here, 'README.rst'), encoding=\"utf8\").read()\n\n\nversion = '0.7.5'\n\n# this module can be zip-safe if 
the zipimporter implements iter_modules or if\n# pkgutil.iter_importer_modules has registered a dispatch for the zipimporter.\ntry:\n import pkgutil\n import zipimport\n zip_safe = hasattr(zipimport.zipimporter, \"iter_modules\") or \\\n zipimport.zipimporter in pkgutil.iter_importer_modules.registry.keys()\nexcept (ImportError, AttributeError):\n zip_safe = False\n\nsetup(\n name='Faker',\n version=version,\n description=\"Faker is a Python package that generates fake data for you.\",\n long_description=README,\n entry_points={\n 'console_scripts': ['faker=faker.cli:execute_from_command_line'],\n },\n classifiers=[\n # See https://pypi.python.org/pypi?%3Aaction=list_classifiers\n 'Development Status :: 3 - Alpha',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n 'Topic :: Software Development :: Testing',\n 'Topic :: Utilities',\n 'License :: OSI Approved :: MIT License'\n ],\n keywords='faker fixtures data test mock generator',\n author='joke2k',\n author_email='[email protected]',\n url='https://github.com/joke2k/faker',\n license='MIT License',\n packages=find_packages(exclude=(\"docs\",)),\n platforms=[\"any\"],\n test_suite='faker.tests',\n zip_safe=zip_safe,\n install_requires=[\n \"python-dateutil>=2.4\",\n \"six\",\n ],\n extras_require={\n ':python_version==\"2.7\"': [\n 'ipaddress',\n ],\n ':python_version==\"3.0\"': [\n 'importlib',\n ],\n ':python_version==\"3.2\"': [\n 'ipaddress',\n ],\n }\n)\n", "path": "setup.py"}]} | 1,098 | 99 |
gh_patches_debug_13673 | rasdani/github-patches | git_diff | meltano__meltano-6779 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
bug: exit code null on snowplow telemetry
In investigating https://github.com/meltano/internal-data/issues/26 I saw a large increase in bad events. Looks like all of the ones from 2.6.0 and 2.5.0 are from:
`$.exit_code: null found, integer expected`
And I dove in on each one and it's from `add` and `discover` events.
queried using:
```sql
select *
from "RAW"."SNOWPLOW"."EVENTS_BAD"
where date_trunc('week', uploaded_at) > '2022-08-22'
and jsontext ilike '%2.5.0%';
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/meltano/core/tracking/schemas.py`
Content:
```
1 """Meltano Iglu schemas metadata & utilities."""
2
3 from __future__ import annotations
4
5 from dataclasses import dataclass
6
7 DEFAULT_VENDOR = "com.meltano"
8
9
10 @dataclass
11 class IgluSchema:
12 """Dataclass to store the name, version, vendor, and URL for an Iglu schema."""
13
14 name: str
15 version: str
16 vendor: str = DEFAULT_VENDOR
17
18 @property
19 def url(self) -> str:
20 """Construct an iglu schema URL.
21
22 Returns:
23 The URL to the schema.
24 """
25 return f"iglu:{self.vendor}/{self.name}/jsonschema/{self.version}"
26
27
28 CliContextSchema = IgluSchema("cli_context", "1-1-0")
29 CliEventSchema = IgluSchema("cli_event", "1-0-1")
30 BlockEventSchema = IgluSchema("block_event", "1-0-0")
31 EnvironmentContextSchema = IgluSchema("environment_context", "1-0-0")
32 ExceptionContextSchema = IgluSchema("exception_context", "1-0-0")
33 ExitEventSchema = IgluSchema("exit_event", "1-0-0")
34 PluginsContextSchema = IgluSchema("plugins_context", "1-0-0")
35 ProjectContextSchema = IgluSchema("project_context", "1-1-0")
36 TelemetryStateChangeEventSchema = IgluSchema("telemetry_state_change_event", "1-0-0")
37
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/meltano/core/tracking/schemas.py b/src/meltano/core/tracking/schemas.py
--- a/src/meltano/core/tracking/schemas.py
+++ b/src/meltano/core/tracking/schemas.py
@@ -30,7 +30,7 @@
BlockEventSchema = IgluSchema("block_event", "1-0-0")
EnvironmentContextSchema = IgluSchema("environment_context", "1-0-0")
ExceptionContextSchema = IgluSchema("exception_context", "1-0-0")
-ExitEventSchema = IgluSchema("exit_event", "1-0-0")
+ExitEventSchema = IgluSchema("exit_event", "1-0-1")
PluginsContextSchema = IgluSchema("plugins_context", "1-0-0")
ProjectContextSchema = IgluSchema("project_context", "1-1-0")
TelemetryStateChangeEventSchema = IgluSchema("telemetry_state_change_event", "1-0-0")
| {"golden_diff": "diff --git a/src/meltano/core/tracking/schemas.py b/src/meltano/core/tracking/schemas.py\n--- a/src/meltano/core/tracking/schemas.py\n+++ b/src/meltano/core/tracking/schemas.py\n@@ -30,7 +30,7 @@\n BlockEventSchema = IgluSchema(\"block_event\", \"1-0-0\")\n EnvironmentContextSchema = IgluSchema(\"environment_context\", \"1-0-0\")\n ExceptionContextSchema = IgluSchema(\"exception_context\", \"1-0-0\")\n-ExitEventSchema = IgluSchema(\"exit_event\", \"1-0-0\")\n+ExitEventSchema = IgluSchema(\"exit_event\", \"1-0-1\")\n PluginsContextSchema = IgluSchema(\"plugins_context\", \"1-0-0\")\n ProjectContextSchema = IgluSchema(\"project_context\", \"1-1-0\")\n TelemetryStateChangeEventSchema = IgluSchema(\"telemetry_state_change_event\", \"1-0-0\")\n", "issue": "bug: exit code null on snowplow telemetry\nIn investigating https://github.com/meltano/internal-data/issues/26 I saw a large increase in bad events. Looks like all of the ones from 2.6.0 and 2.5.0 are from:\r\n\r\n`$.exit_code: null found, integer expected`\r\n\r\nAnd I dove in on each one and it's from `add` and `discover` events.\r\n\r\nqueried using:\r\n\r\n```sql\r\nselect *\r\nfrom \"RAW\".\"SNOWPLOW\".\"EVENTS_BAD\"\r\nwhere date_trunc('week', uploaded_at) > '2022-08-22'\r\nand jsontext ilike '%2.5.0%';\r\n```\n", "before_files": [{"content": "\"\"\"Meltano Iglu schemas metadata & utilities.\"\"\"\n\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\n\nDEFAULT_VENDOR = \"com.meltano\"\n\n\n@dataclass\nclass IgluSchema:\n \"\"\"Dataclass to store the name, version, vendor, and URL for an Iglu schema.\"\"\"\n\n name: str\n version: str\n vendor: str = DEFAULT_VENDOR\n\n @property\n def url(self) -> str:\n \"\"\"Construct an iglu schema URL.\n\n Returns:\n The URL to the schema.\n \"\"\"\n return f\"iglu:{self.vendor}/{self.name}/jsonschema/{self.version}\"\n\n\nCliContextSchema = IgluSchema(\"cli_context\", \"1-1-0\")\nCliEventSchema = IgluSchema(\"cli_event\", \"1-0-1\")\nBlockEventSchema = IgluSchema(\"block_event\", \"1-0-0\")\nEnvironmentContextSchema = IgluSchema(\"environment_context\", \"1-0-0\")\nExceptionContextSchema = IgluSchema(\"exception_context\", \"1-0-0\")\nExitEventSchema = IgluSchema(\"exit_event\", \"1-0-0\")\nPluginsContextSchema = IgluSchema(\"plugins_context\", \"1-0-0\")\nProjectContextSchema = IgluSchema(\"project_context\", \"1-1-0\")\nTelemetryStateChangeEventSchema = IgluSchema(\"telemetry_state_change_event\", \"1-0-0\")\n", "path": "src/meltano/core/tracking/schemas.py"}], "after_files": [{"content": "\"\"\"Meltano Iglu schemas metadata & utilities.\"\"\"\n\nfrom __future__ import annotations\n\nfrom dataclasses import dataclass\n\nDEFAULT_VENDOR = \"com.meltano\"\n\n\n@dataclass\nclass IgluSchema:\n \"\"\"Dataclass to store the name, version, vendor, and URL for an Iglu schema.\"\"\"\n\n name: str\n version: str\n vendor: str = DEFAULT_VENDOR\n\n @property\n def url(self) -> str:\n \"\"\"Construct an iglu schema URL.\n\n Returns:\n The URL to the schema.\n \"\"\"\n return f\"iglu:{self.vendor}/{self.name}/jsonschema/{self.version}\"\n\n\nCliContextSchema = IgluSchema(\"cli_context\", \"1-1-0\")\nCliEventSchema = IgluSchema(\"cli_event\", \"1-0-1\")\nBlockEventSchema = IgluSchema(\"block_event\", \"1-0-0\")\nEnvironmentContextSchema = IgluSchema(\"environment_context\", \"1-0-0\")\nExceptionContextSchema = IgluSchema(\"exception_context\", \"1-0-0\")\nExitEventSchema = IgluSchema(\"exit_event\", \"1-0-1\")\nPluginsContextSchema = IgluSchema(\"plugins_context\", 
\"1-0-0\")\nProjectContextSchema = IgluSchema(\"project_context\", \"1-1-0\")\nTelemetryStateChangeEventSchema = IgluSchema(\"telemetry_state_change_event\", \"1-0-0\")\n", "path": "src/meltano/core/tracking/schemas.py"}]} | 803 | 223 |
gh_patches_debug_5224 | rasdani/github-patches | git_diff | scoutapp__scout_apm_python-146 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PyYAML security alert
Our application is getting a GitHub security alert from PyYAML, and this is the only package that has it as a dependency in our graph. It looks like this package no longer uses that package, but it is still declared as a dependency.
If this assessment is correct, the dependency should be removed from `setup.py` and a new release uploaded to PyPI.
https://nvd.nist.gov/vuln/detail/CVE-2017-18342
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import os
2 import sys
3 from glob import glob
4
5 from setuptools import Extension, find_packages, setup
6
7 long_description = (
8 "Scout Application Performance Monitoring Agent - https://scoutapp.com"
9 )
10 if os.path.exists("README.md"):
11 long_description = open("README.md").read()
12
13 # Try to compile the extensions, except for platforms or versions
14 # where our extensions are not supported
15 compile_extensions = True
16
17 setup_args = {
18 "name": "scout_apm",
19 "version": "2.0.0",
20 "description": "Scout Application Performance Monitoring Agent",
21 "long_description": long_description,
22 "long_description_content_type": "text/markdown",
23 "url": "https://github.com/scoutapp/scout_apm_python",
24 "author": "Scout",
25 "author_email": "[email protected]",
26 "license": "MIT",
27 "zip_safe": False,
28 "python_requires": ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4",
29 "packages": find_packages("src"),
30 "package_dir": {"": "src"},
31 "py_modules": [os.splitext(os.basename(path))[0] for path in glob("src/*.py")],
32 "ext_modules": [
33 Extension("scout_apm.core.objtrace", ["src/scout_apm/core/ext/objtrace.c"])
34 ],
35 "entry_points": {
36 "console_scripts": [
37 "core-agent-manager = scout_apm.core.cli.core_agent_manager:main"
38 ]
39 },
40 "install_requires": ["psutil", "PyYAML", "requests"],
41 "keywords": "apm performance monitoring development",
42 "classifiers": [
43 "Development Status :: 5 - Production/Stable",
44 "Intended Audience :: Developers",
45 "Topic :: System :: Monitoring",
46 "License :: OSI Approved :: MIT License",
47 "Operating System :: MacOS",
48 "Operating System :: POSIX",
49 "Operating System :: POSIX :: Linux",
50 "Programming Language :: Python :: 2",
51 "Programming Language :: Python :: 2.7",
52 "Programming Language :: Python :: 3",
53 "Programming Language :: Python :: 3.4",
54 "Programming Language :: Python :: 3.5",
55 "Programming Language :: Python :: 3.6",
56 "Programming Language :: Python :: 3.7",
57 ],
58 }
59
60 if sys.version_info <= (3, 0):
61 compile_extensions = False
62
63 if sys.platform.startswith("java"):
64 compile_extensions = False
65
66 if "__pypy__" in sys.builtin_module_names:
67 compile_extensions = False
68
69 if not compile_extensions:
70 del setup_args["ext_modules"]
71
72 setup(**setup_args)
73
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -37,7 +37,7 @@
"core-agent-manager = scout_apm.core.cli.core_agent_manager:main"
]
},
- "install_requires": ["psutil", "PyYAML", "requests"],
+ "install_requires": ["psutil", "requests"],
"keywords": "apm performance monitoring development",
"classifiers": [
"Development Status :: 5 - Production/Stable",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -37,7 +37,7 @@\n \"core-agent-manager = scout_apm.core.cli.core_agent_manager:main\"\n ]\n },\n- \"install_requires\": [\"psutil\", \"PyYAML\", \"requests\"],\n+ \"install_requires\": [\"psutil\", \"requests\"],\n \"keywords\": \"apm performance monitoring development\",\n \"classifiers\": [\n \"Development Status :: 5 - Production/Stable\",\n", "issue": "PyYAML security alert\nOur application is getting a GitHub security alert from PyYAML, and this is the only package that has it as a dependency in our graph. It looks like this package no longer uses that package, but it is still declared as a dependency.\r\n\r\nIf this assessment is correct, the dependency should be removed from the `setup.py` and a new release upload to PyPI.\r\n\r\nhttps://nvd.nist.gov/vuln/detail/CVE-2017-18342\n", "before_files": [{"content": "import os\nimport sys\nfrom glob import glob\n\nfrom setuptools import Extension, find_packages, setup\n\nlong_description = (\n \"Scout Application Performance Monitoring Agent - https://scoutapp.com\"\n)\nif os.path.exists(\"README.md\"):\n long_description = open(\"README.md\").read()\n\n# Try to compile the extensions, except for platforms or versions\n# where our extensions are not supported\ncompile_extensions = True\n\nsetup_args = {\n \"name\": \"scout_apm\",\n \"version\": \"2.0.0\",\n \"description\": \"Scout Application Performance Monitoring Agent\",\n \"long_description\": long_description,\n \"long_description_content_type\": \"text/markdown\",\n \"url\": \"https://github.com/scoutapp/scout_apm_python\",\n \"author\": \"Scout\",\n \"author_email\": \"[email protected]\",\n \"license\": \"MIT\",\n \"zip_safe\": False,\n \"python_requires\": \">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4\",\n \"packages\": find_packages(\"src\"),\n \"package_dir\": {\"\": \"src\"},\n \"py_modules\": [os.splitext(os.basename(path))[0] for path in glob(\"src/*.py\")],\n \"ext_modules\": [\n Extension(\"scout_apm.core.objtrace\", [\"src/scout_apm/core/ext/objtrace.c\"])\n ],\n \"entry_points\": {\n \"console_scripts\": [\n \"core-agent-manager = scout_apm.core.cli.core_agent_manager:main\"\n ]\n },\n \"install_requires\": [\"psutil\", \"PyYAML\", \"requests\"],\n \"keywords\": \"apm performance monitoring development\",\n \"classifiers\": [\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"Topic :: System :: Monitoring\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: MacOS\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n}\n\nif sys.version_info <= (3, 0):\n compile_extensions = False\n\nif sys.platform.startswith(\"java\"):\n compile_extensions = False\n\nif \"__pypy__\" in sys.builtin_module_names:\n compile_extensions = False\n\nif not compile_extensions:\n del setup_args[\"ext_modules\"]\n\nsetup(**setup_args)\n", "path": "setup.py"}], "after_files": [{"content": "import os\nimport sys\nfrom glob import glob\n\nfrom setuptools import Extension, find_packages, setup\n\nlong_description = (\n \"Scout Application Performance Monitoring Agent - https://scoutapp.com\"\n)\nif 
os.path.exists(\"README.md\"):\n long_description = open(\"README.md\").read()\n\n# Try to compile the extensions, except for platforms or versions\n# where our extensions are not supported\ncompile_extensions = True\n\nsetup_args = {\n \"name\": \"scout_apm\",\n \"version\": \"2.0.0\",\n \"description\": \"Scout Application Performance Monitoring Agent\",\n \"long_description\": long_description,\n \"long_description_content_type\": \"text/markdown\",\n \"url\": \"https://github.com/scoutapp/scout_apm_python\",\n \"author\": \"Scout\",\n \"author_email\": \"[email protected]\",\n \"license\": \"MIT\",\n \"zip_safe\": False,\n \"python_requires\": \">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4\",\n \"packages\": find_packages(\"src\"),\n \"package_dir\": {\"\": \"src\"},\n \"py_modules\": [os.splitext(os.basename(path))[0] for path in glob(\"src/*.py\")],\n \"ext_modules\": [\n Extension(\"scout_apm.core.objtrace\", [\"src/scout_apm/core/ext/objtrace.c\"])\n ],\n \"entry_points\": {\n \"console_scripts\": [\n \"core-agent-manager = scout_apm.core.cli.core_agent_manager:main\"\n ]\n },\n \"install_requires\": [\"psutil\", \"requests\"],\n \"keywords\": \"apm performance monitoring development\",\n \"classifiers\": [\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"Topic :: System :: Monitoring\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: MacOS\",\n \"Operating System :: POSIX\",\n \"Operating System :: POSIX :: Linux\",\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.4\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n ],\n}\n\nif sys.version_info <= (3, 0):\n compile_extensions = False\n\nif sys.platform.startswith(\"java\"):\n compile_extensions = False\n\nif \"__pypy__\" in sys.builtin_module_names:\n compile_extensions = False\n\nif not compile_extensions:\n del setup_args[\"ext_modules\"]\n\nsetup(**setup_args)\n", "path": "setup.py"}]} | 1,092 | 113 |
gh_patches_debug_4897 | rasdani/github-patches | git_diff | bridgecrewio__checkov-592 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CKV_GCP_29 - Checks failed after GCP resource renamed
**Describe the bug**
Google has renamed the value `bucket_policy_only` to `uniform_bucket_level_access`.
When the new value is added to the configuration, the check CKV_GCP_29 (Ensure that Cloud Storage buckets have uniform bucket-level access enabled) still fails because it is still looking for the old value
**To Reproduce**
Steps to reproduce the behavior:
1. On terragoat, add the value `uniform_bucket_level_access = true` and the checks will still fail
**Expected behavior**
The check should pass.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/terraform/checks/resource/gcp/GoogleStorageBucketUniformAccess.py`
Content:
```
1 from checkov.common.models.enums import CheckCategories
2 from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
3
4
5 class GoogleStorageBucketUniformAccess(BaseResourceValueCheck):
6 def __init__(self):
7 name = "Ensure that Cloud Storage buckets have uniform bucket-level access enabled"
8 id = "CKV_GCP_29"
9 supported_resources = ['google_storage_bucket']
10 categories = [CheckCategories.GENERAL_SECURITY]
11 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
12
13 def get_inspected_key(self):
14 return 'bucket_policy_only/[0]'
15
16
17 check = GoogleStorageBucketUniformAccess()
18
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/checkov/terraform/checks/resource/gcp/GoogleStorageBucketUniformAccess.py b/checkov/terraform/checks/resource/gcp/GoogleStorageBucketUniformAccess.py
--- a/checkov/terraform/checks/resource/gcp/GoogleStorageBucketUniformAccess.py
+++ b/checkov/terraform/checks/resource/gcp/GoogleStorageBucketUniformAccess.py
@@ -11,7 +11,7 @@
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def get_inspected_key(self):
- return 'bucket_policy_only/[0]'
+ return 'uniform_bucket_level_access/[0]/bucket_policy_only/[0]'
check = GoogleStorageBucketUniformAccess()
| {"golden_diff": "diff --git a/checkov/terraform/checks/resource/gcp/GoogleStorageBucketUniformAccess.py b/checkov/terraform/checks/resource/gcp/GoogleStorageBucketUniformAccess.py\n--- a/checkov/terraform/checks/resource/gcp/GoogleStorageBucketUniformAccess.py\n+++ b/checkov/terraform/checks/resource/gcp/GoogleStorageBucketUniformAccess.py\n@@ -11,7 +11,7 @@\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n \n def get_inspected_key(self):\n- return 'bucket_policy_only/[0]'\n+ return 'uniform_bucket_level_access/[0]/bucket_policy_only/[0]'\n \n \n check = GoogleStorageBucketUniformAccess()\n", "issue": "CKV_GCP_29 - Checks failed after GCP resource renamed\n**Describe the bug**\r\nGoogle has renamed the value\r\n`bucket_policy_only ` to `uniform_bucket_level_access`.\r\n\r\nWhen adding the new value in the configuration the check CKV_GCP_29 ( Ensure that Cloud Storage buckets have uniform bucket-level access enabled ) still fails as it is still looking for the old value\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. On tearragoat, add the value `uniform_bucket_level_access = true` and the checks will still fail\r\n\r\n\r\n**Expected behavior**\r\nThe check should pass.\r\n\n", "before_files": [{"content": "from checkov.common.models.enums import CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n\n\nclass GoogleStorageBucketUniformAccess(BaseResourceValueCheck):\n def __init__(self):\n name = \"Ensure that Cloud Storage buckets have uniform bucket-level access enabled\"\n id = \"CKV_GCP_29\"\n supported_resources = ['google_storage_bucket']\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self):\n return 'bucket_policy_only/[0]'\n\n\ncheck = GoogleStorageBucketUniformAccess()\n", "path": "checkov/terraform/checks/resource/gcp/GoogleStorageBucketUniformAccess.py"}], "after_files": [{"content": "from checkov.common.models.enums import CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck\n\n\nclass GoogleStorageBucketUniformAccess(BaseResourceValueCheck):\n def __init__(self):\n name = \"Ensure that Cloud Storage buckets have uniform bucket-level access enabled\"\n id = \"CKV_GCP_29\"\n supported_resources = ['google_storage_bucket']\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def get_inspected_key(self):\n return 'uniform_bucket_level_access/[0]/bucket_policy_only/[0]'\n\n\ncheck = GoogleStorageBucketUniformAccess()\n", "path": "checkov/terraform/checks/resource/gcp/GoogleStorageBucketUniformAccess.py"}]} | 572 | 154 |
gh_patches_debug_20216 | rasdani/github-patches | git_diff | yt-dlp__yt-dlp-6171 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pornez.Net - Problem with iframe extractor.
### DO NOT REMOVE OR SKIP THE ISSUE TEMPLATE
- [X] I understand that I will be **blocked** if I remove or skip any mandatory\* field
### Checklist
- [X] I'm reporting a broken site
- [X] I've verified that I'm running yt-dlp version **2023.01.06** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
- [X] I've checked that all provided URLs are playable in a browser with the same IP and same login details
- [X] I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/yt-dlp/yt-dlp/wiki/FAQ#video-url-contains-an-ampersand--and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)
- [X] I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues **including closed ones**. DO NOT post duplicates
- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)
- [ ] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required
### Region
belgium
### Provide a description that is worded well enough to be understood
Unable to download videos from pornez.net site.
The issue comes from "common.py" and "pornez.py", and the same error occurs on other videos from the site.
If anyone can help..
Thanks.
### Provide verbose output that clearly demonstrates the problem
- [X] Run **your** yt-dlp command with **-vU** flag added (`yt-dlp -vU <your command line>`)
- [X] Copy the WHOLE output (starting with `[debug] Command-line config`) and insert it below
### Complete Verbose Output
```shell
[debug] Command-line config: ['-c', '--fixup', 'warn', '-f', 'mp4', '-o', '.\\VideOs\\%(title)s-%(id)s.%(ext)s', 'https://pornez.net/video364069/akyb-046-miku-akyb-046-miku/', '-vU']
[debug] Encodings: locale cp1252, fs utf-8, pref cp1252, out utf-8, error utf-8, screen utf-8
[debug] yt-dlp version 2023.01.06 [6becd25] (win_exe)
[debug] Python 3.8.10 (CPython AMD64 64bit) - Windows-10-10.0.19045-SP0 (OpenSSL 1.1.1k 25 Mar 2021)
[debug] exe versions: ffmpeg n5.1-27-g6f53f0d09e-20220829 (setts), ffprobe n5.1-27-g6f53f0d09e-20220829
[debug] Optional libraries: Cryptodome-3.16.0, brotli-1.0.9, certifi-2022.12.07, mutagen-1.46.0, sqlite3-2.6.0, websockets-10.4
[debug] Proxy map: {}
[debug] Loaded 1760 extractors
[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest
Latest version: 2023.01.06, Current version: 2023.01.06
yt-dlp is up to date (2023.01.06)
[Pornez] Extracting URL: https://pornez.net/video364069/akyb-046-miku-akyb-046-miku/
[Pornez] 364069: Downloading webpage
ERROR: [Pornez] 364069: Unable to extract iframe; please report this issue on https://github.com/yt-dlp/yt-dlp/issues?q= , filling out the appropriate issue template. Confirm you are on the latest version using yt-dlp -U
File "yt_dlp\extractor\common.py", line 680, in extract
File "yt_dlp\extractor\pornez.py", line 22, in _real_extract
File "yt_dlp\extractor\common.py", line 1264, in _html_search_regex
File "yt_dlp\extractor\common.py", line 1228, in _search_regex
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `yt_dlp/extractor/pornez.py`
Content:
```
1 from .common import InfoExtractor
2 from ..utils import int_or_none
3
4
5 class PornezIE(InfoExtractor):
6 _VALID_URL = r'https?://(?:www\.)?pornez\.net/video(?P<id>[0-9]+)/'
7 _TEST = {
8 'url': 'https://pornez.net/video344819/mistresst-funny_penis_names-wmv/',
9 'md5': '2e19a0a1cff3a5dbea0ef1b9e80bcbbc',
10 'info_dict': {
11 'id': '344819',
12 'ext': 'mp4',
13 'title': r'mistresst funny_penis_names wmv',
14 'thumbnail': r're:^https?://.*\.jpg$',
15 'age_limit': 18,
16 }
17 }
18
19 def _real_extract(self, url):
20 video_id = self._match_id(url)
21 webpage = self._download_webpage(url, video_id)
22 iframe_src = self._html_search_regex(
23 r'<iframe[^>]+src="(https?://pornez\.net/player/\?[^"]+)"', webpage, 'iframe', fatal=True)
24 title = self._html_search_meta(['name', 'twitter:title', 'og:title'], webpage, 'title', default=None)
25 if title is None:
26 title = self._search_regex(r'<h1>(.*?)</h1>', webpage, 'title', fatal=True)
27 thumbnail = self._html_search_meta(['thumbnailUrl'], webpage, 'title', default=None)
28 webpage = self._download_webpage(iframe_src, video_id)
29 entries = self._parse_html5_media_entries(iframe_src, webpage, video_id)[0]
30 for format in entries['formats']:
31 height = self._search_regex(r'_(\d+)\.m3u8', format['url'], 'height')
32 format['format_id'] = '%sp' % height
33 format['height'] = int_or_none(height)
34
35 entries.update({
36 'id': video_id,
37 'title': title,
38 'thumbnail': thumbnail,
39 'age_limit': 18
40 })
41 return entries
42
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/yt_dlp/extractor/pornez.py b/yt_dlp/extractor/pornez.py
--- a/yt_dlp/extractor/pornez.py
+++ b/yt_dlp/extractor/pornez.py
@@ -1,5 +1,5 @@
from .common import InfoExtractor
-from ..utils import int_or_none
+from ..utils import int_or_none, urljoin
class PornezIE(InfoExtractor):
@@ -20,7 +20,8 @@
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
iframe_src = self._html_search_regex(
- r'<iframe[^>]+src="(https?://pornez\.net/player/\?[^"]+)"', webpage, 'iframe', fatal=True)
+ r'<iframe[^>]+src="([^"]+)"', webpage, 'iframe', fatal=True)
+ iframe_src = urljoin('https://pornez.net', iframe_src)
title = self._html_search_meta(['name', 'twitter:title', 'og:title'], webpage, 'title', default=None)
if title is None:
title = self._search_regex(r'<h1>(.*?)</h1>', webpage, 'title', fatal=True)
| {"golden_diff": "diff --git a/yt_dlp/extractor/pornez.py b/yt_dlp/extractor/pornez.py\n--- a/yt_dlp/extractor/pornez.py\n+++ b/yt_dlp/extractor/pornez.py\n@@ -1,5 +1,5 @@\n from .common import InfoExtractor\n-from ..utils import int_or_none\n+from ..utils import int_or_none, urljoin\n \n \n class PornezIE(InfoExtractor):\n@@ -20,7 +20,8 @@\n video_id = self._match_id(url)\n webpage = self._download_webpage(url, video_id)\n iframe_src = self._html_search_regex(\n- r'<iframe[^>]+src=\"(https?://pornez\\.net/player/\\?[^\"]+)\"', webpage, 'iframe', fatal=True)\n+ r'<iframe[^>]+src=\"([^\"]+)\"', webpage, 'iframe', fatal=True)\n+ iframe_src = urljoin('https://pornez.net', iframe_src)\n title = self._html_search_meta(['name', 'twitter:title', 'og:title'], webpage, 'title', default=None)\n if title is None:\n title = self._search_regex(r'<h1>(.*?)</h1>', webpage, 'title', fatal=True)\n", "issue": "Pornez.Net - Problem with iframe extractor.\n### DO NOT REMOVE OR SKIP THE ISSUE TEMPLATE\n\n- [X] I understand that I will be **blocked** if I remove or skip any mandatory\\* field\n\n### Checklist\n\n- [X] I'm reporting a broken site\n- [X] I've verified that I'm running yt-dlp version **2023.01.06** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)\n- [X] I've checked that all provided URLs are playable in a browser with the same IP and same login details\n- [X] I've checked that all URLs and arguments with special characters are [properly quoted or escaped](https://github.com/yt-dlp/yt-dlp/wiki/FAQ#video-url-contains-an-ampersand--and-im-getting-some-strange-output-1-2839-or-v-is-not-recognized-as-an-internal-or-external-command)\n- [X] I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues **including closed ones**. DO NOT post duplicates\n- [X] I've read the [guidelines for opening an issue](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#opening-an-issue)\n- [ ] I've read about [sharing account credentials](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#are-you-willing-to-share-account-details-if-needed) and I'm willing to share it if required\n\n### Region\n\nbelgium\n\n### Provide a description that is worded well enough to be understood\n\nUnable to download videos from pornez.net site.\r\n\r\nthe issue provide from \"common.py\" and \"pornez.py\".. 
And this is the same error on other videos from the site.\r\n\r\nIf anyone can help..\r\n\r\nThanks.\n\n### Provide verbose output that clearly demonstrates the problem\n\n- [X] Run **your** yt-dlp command with **-vU** flag added (`yt-dlp -vU <your command line>`)\n- [X] Copy the WHOLE output (starting with `[debug] Command-line config`) and insert it below\n\n### Complete Verbose Output\n\n```shell\n[debug] Command-line config: ['-c', '--fixup', 'warn', '-f', 'mp4', '-o', '.\\\\VideOs\\\\%(title)s-%(id)s.%(ext)s', 'https://pornez.net/video364069/akyb-046-miku-akyb-046-miku/', '-vU']\r\n[debug] Encodings: locale cp1252, fs utf-8, pref cp1252, out utf-8, error utf-8, screen utf-8\r\n[debug] yt-dlp version 2023.01.06 [6becd25] (win_exe)\r\n[debug] Python 3.8.10 (CPython AMD64 64bit) - Windows-10-10.0.19045-SP0 (OpenSSL 1.1.1k 25 Mar 2021)\r\n[debug] exe versions: ffmpeg n5.1-27-g6f53f0d09e-20220829 (setts), ffprobe n5.1-27-g6f53f0d09e-20220829\r\n[debug] Optional libraries: Cryptodome-3.16.0, brotli-1.0.9, certifi-2022.12.07, mutagen-1.46.0, sqlite3-2.6.0, websockets-10.4\r\n[debug] Proxy map: {}\r\n[debug] Loaded 1760 extractors\r\n[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest\r\nLatest version: 2023.01.06, Current version: 2023.01.06\r\nyt-dlp is up to date (2023.01.06)\r\n[Pornez] Extracting URL: https://pornez.net/video364069/akyb-046-miku-akyb-046-miku/\r\n[Pornez] 364069: Downloading webpage\r\nERROR: [Pornez] 364069: Unable to extract iframe; please report this issue on https://github.com/yt-dlp/yt-dlp/issues?q= , filling out the appropriate issue template. Confirm you are on the latest version using yt-dlp -U\r\n File \"yt_dlp\\extractor\\common.py\", line 680, in extract\r\n File \"yt_dlp\\extractor\\pornez.py\", line 22, in _real_extract\r\n File \"yt_dlp\\extractor\\common.py\", line 1264, in _html_search_regex\r\n File \"yt_dlp\\extractor\\common.py\", line 1228, in _search_regex\n```\n\n", "before_files": [{"content": "from .common import InfoExtractor\nfrom ..utils import int_or_none\n\n\nclass PornezIE(InfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?pornez\\.net/video(?P<id>[0-9]+)/'\n _TEST = {\n 'url': 'https://pornez.net/video344819/mistresst-funny_penis_names-wmv/',\n 'md5': '2e19a0a1cff3a5dbea0ef1b9e80bcbbc',\n 'info_dict': {\n 'id': '344819',\n 'ext': 'mp4',\n 'title': r'mistresst funny_penis_names wmv',\n 'thumbnail': r're:^https?://.*\\.jpg$',\n 'age_limit': 18,\n }\n }\n\n def _real_extract(self, url):\n video_id = self._match_id(url)\n webpage = self._download_webpage(url, video_id)\n iframe_src = self._html_search_regex(\n r'<iframe[^>]+src=\"(https?://pornez\\.net/player/\\?[^\"]+)\"', webpage, 'iframe', fatal=True)\n title = self._html_search_meta(['name', 'twitter:title', 'og:title'], webpage, 'title', default=None)\n if title is None:\n title = self._search_regex(r'<h1>(.*?)</h1>', webpage, 'title', fatal=True)\n thumbnail = self._html_search_meta(['thumbnailUrl'], webpage, 'title', default=None)\n webpage = self._download_webpage(iframe_src, video_id)\n entries = self._parse_html5_media_entries(iframe_src, webpage, video_id)[0]\n for format in entries['formats']:\n height = self._search_regex(r'_(\\d+)\\.m3u8', format['url'], 'height')\n format['format_id'] = '%sp' % height\n format['height'] = int_or_none(height)\n\n entries.update({\n 'id': video_id,\n 'title': title,\n 'thumbnail': thumbnail,\n 'age_limit': 18\n })\n return entries\n", "path": "yt_dlp/extractor/pornez.py"}], "after_files": [{"content": "from 
.common import InfoExtractor\nfrom ..utils import int_or_none, urljoin\n\n\nclass PornezIE(InfoExtractor):\n _VALID_URL = r'https?://(?:www\\.)?pornez\\.net/video(?P<id>[0-9]+)/'\n _TEST = {\n 'url': 'https://pornez.net/video344819/mistresst-funny_penis_names-wmv/',\n 'md5': '2e19a0a1cff3a5dbea0ef1b9e80bcbbc',\n 'info_dict': {\n 'id': '344819',\n 'ext': 'mp4',\n 'title': r'mistresst funny_penis_names wmv',\n 'thumbnail': r're:^https?://.*\\.jpg$',\n 'age_limit': 18,\n }\n }\n\n def _real_extract(self, url):\n video_id = self._match_id(url)\n webpage = self._download_webpage(url, video_id)\n iframe_src = self._html_search_regex(\n r'<iframe[^>]+src=\"([^\"]+)\"', webpage, 'iframe', fatal=True)\n iframe_src = urljoin('https://pornez.net', iframe_src)\n title = self._html_search_meta(['name', 'twitter:title', 'og:title'], webpage, 'title', default=None)\n if title is None:\n title = self._search_regex(r'<h1>(.*?)</h1>', webpage, 'title', fatal=True)\n thumbnail = self._html_search_meta(['thumbnailUrl'], webpage, 'title', default=None)\n webpage = self._download_webpage(iframe_src, video_id)\n entries = self._parse_html5_media_entries(iframe_src, webpage, video_id)[0]\n for format in entries['formats']:\n height = self._search_regex(r'_(\\d+)\\.m3u8', format['url'], 'height')\n format['format_id'] = '%sp' % height\n format['height'] = int_or_none(height)\n\n entries.update({\n 'id': video_id,\n 'title': title,\n 'thumbnail': thumbnail,\n 'age_limit': 18\n })\n return entries\n", "path": "yt_dlp/extractor/pornez.py"}]} | 1,965 | 279 |
gh_patches_debug_20051 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-1111 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Harris Teeter
Mostly southeastern https://www.harristeeter.com/store/#/app/store-locator
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/harristeeter.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import scrapy
3 import json
4 import re
5
6 from locations.items import GeojsonPointItem
7
8 DAYS = ['Mo', 'Tu', 'We', 'Th', 'Fr', 'Sa', 'Su']
9
10
11 class HarristeeterSpider(scrapy.Spider):
12 name = "harristeeter"
13 allowed_domains = ["harristeeter.com"]
14 start_urls = (
15 'https://www.harristeeter.com/store/#/app/store-locator',
16 )
17
18 handle_httpstatus_list = [401]
19 custom_settings = {
20 'DEFAULT_REQUEST_HEADERS' : {
21 'Accept': 'application/json, text/plain, */*',
22 'Accept-Encoding': 'gzip, deflate, br',
23 'Connection': 'keep-alive',
24 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36',
25 }
26 }
27
28
29 def store_hours(self, store_hours):
30 res=''
31 for day in store_hours:
32 match = re.search(r'(\w*)(\s*-\s*(\w*))?\s*(\d{1,2})(:(\d{1,2}))?\s*(am|pm|mp)?\s*-\s*(\d{1,2})(:(\d{1,2}))?\s*(am|pm|mp)',day.replace('Midnight','12:00pm'))
33
34 if not match:
35 continue
36 res += match[1][:2]
37
38 try:
39 res += match[2].replace(' ','')[:3]+' '
40 except Exception:
41 res += ' '
42
43 if match[5]:
44 first_minutes = match[5]
45 else:
46 first_minutes = ':00'
47
48 if match[9]:
49 second_minutes = match[9]
50 else:
51 second_minutes = ':00'
52
53 res += str(int(match[4])+(12 if match[7] in ['pm','mp'] else 0)) +first_minutes+'-'
54 res += str(int(match[8])+(12 if match[10] in ['pm','mp'] else 0)) +second_minutes+';'
55
56 return res.rstrip(';').strip()
57
58 def parse(self, response):
59 yield scrapy.Request('https://www.harristeeter.com/api/checkLogin',
60 method='POST',
61 callback=self.check_login)
62
63
64 def check_login(self, response):
65
66 yield scrapy.Request(
67 'https://www.harristeeter.com/store/#/app/store-locator',
68 callback=self.get_store_locator)
69
70 def get_store_locator(self, response):
71
72 yield scrapy.Request(
73 'https://www.harristeeter.com/api/v1/stores/search?Address=98011&Radius=20000&AllStores=true',
74 callback=self.parse_shop
75 )
76
77 def parse_shop(self, response):
78 shops = json.loads(response.text)['Data']
79
80 for shop in shops:
81 props = {
82 'opening_hours': shop['StoreHours'].replace('Open 24 hrs', 'Mo-Su 0:00-24:00'),
83 'phone': shop['Telephone'],
84 'country': shop['Country'],
85 'ref': shop['Title'],
86 'addr_full': shop['Street'],
87 'postcode': shop.get('ZipCode'),
88 'city': shop.get('City'),
89 'state': shop.get('State'),
90 'lat': float(shop['Latitude']),
91 'lon': float(shop['Longitude']),
92 }
93
94 yield GeojsonPointItem(**props)
95
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/locations/spiders/harristeeter.py b/locations/spiders/harristeeter.py
--- a/locations/spiders/harristeeter.py
+++ b/locations/spiders/harristeeter.py
@@ -79,16 +79,17 @@
for shop in shops:
props = {
- 'opening_hours': shop['StoreHours'].replace('Open 24 hrs', 'Mo-Su 0:00-24:00'),
- 'phone': shop['Telephone'],
- 'country': shop['Country'],
- 'ref': shop['Title'],
+ 'ref': shop['StoreNumber'],
'addr_full': shop['Street'],
- 'postcode': shop.get('ZipCode'),
'city': shop.get('City'),
'state': shop.get('State'),
+ 'postcode': shop.get('PostalCode'),
+ 'country': shop['Country'],
+ 'name': shop['StoreName'],
+ 'phone': shop['Telephone'],
'lat': float(shop['Latitude']),
'lon': float(shop['Longitude']),
+ 'opening_hours': shop['StoreHours'].replace('Open 24 Hours', 'Mo-Su 0:00-24:00')
}
yield GeojsonPointItem(**props)
| {"golden_diff": "diff --git a/locations/spiders/harristeeter.py b/locations/spiders/harristeeter.py\n--- a/locations/spiders/harristeeter.py\n+++ b/locations/spiders/harristeeter.py\n@@ -79,16 +79,17 @@\n \n for shop in shops:\n props = {\n- 'opening_hours': shop['StoreHours'].replace('Open 24 hrs', 'Mo-Su 0:00-24:00'),\n- 'phone': shop['Telephone'],\n- 'country': shop['Country'],\n- 'ref': shop['Title'],\n+ 'ref': shop['StoreNumber'],\n 'addr_full': shop['Street'],\n- 'postcode': shop.get('ZipCode'),\n 'city': shop.get('City'),\n 'state': shop.get('State'),\n+ 'postcode': shop.get('PostalCode'),\n+ 'country': shop['Country'],\n+ 'name': shop['StoreName'],\n+ 'phone': shop['Telephone'],\n 'lat': float(shop['Latitude']),\n 'lon': float(shop['Longitude']),\n+ 'opening_hours': shop['StoreHours'].replace('Open 24 Hours', 'Mo-Su 0:00-24:00')\n }\n \n yield GeojsonPointItem(**props)\n", "issue": "Harris Teeter\nMostly southeastern https://www.harristeeter.com/store/#/app/store-locator\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\nimport re\n\nfrom locations.items import GeojsonPointItem\n\nDAYS = ['Mo', 'Tu', 'We', 'Th', 'Fr', 'Sa', 'Su']\n\n\nclass HarristeeterSpider(scrapy.Spider):\n name = \"harristeeter\"\n allowed_domains = [\"harristeeter.com\"]\n start_urls = (\n 'https://www.harristeeter.com/store/#/app/store-locator',\n )\n\n handle_httpstatus_list = [401]\n custom_settings = {\n 'DEFAULT_REQUEST_HEADERS' : {\n 'Accept': 'application/json, text/plain, */*',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Connection': 'keep-alive',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36',\n }\n }\n\n\n def store_hours(self, store_hours):\n res=''\n for day in store_hours:\n match = re.search(r'(\\w*)(\\s*-\\s*(\\w*))?\\s*(\\d{1,2})(:(\\d{1,2}))?\\s*(am|pm|mp)?\\s*-\\s*(\\d{1,2})(:(\\d{1,2}))?\\s*(am|pm|mp)',day.replace('Midnight','12:00pm'))\n\n if not match:\n continue\n res += match[1][:2]\n\n try:\n res += match[2].replace(' ','')[:3]+' '\n except Exception:\n res += ' '\n\n if match[5]:\n first_minutes = match[5]\n else:\n first_minutes = ':00'\n\n if match[9]:\n second_minutes = match[9]\n else:\n second_minutes = ':00'\n\n res += str(int(match[4])+(12 if match[7] in ['pm','mp'] else 0)) +first_minutes+'-'\n res += str(int(match[8])+(12 if match[10] in ['pm','mp'] else 0)) +second_minutes+';'\n\n return res.rstrip(';').strip()\n\n def parse(self, response):\n yield scrapy.Request('https://www.harristeeter.com/api/checkLogin',\n method='POST',\n callback=self.check_login)\n\n\n def check_login(self, response):\n\n yield scrapy.Request(\n 'https://www.harristeeter.com/store/#/app/store-locator',\n callback=self.get_store_locator)\n\n def get_store_locator(self, response):\n\n yield scrapy.Request(\n 'https://www.harristeeter.com/api/v1/stores/search?Address=98011&Radius=20000&AllStores=true',\n callback=self.parse_shop\n )\n\n def parse_shop(self, response):\n shops = json.loads(response.text)['Data']\n\n for shop in shops:\n props = {\n 'opening_hours': shop['StoreHours'].replace('Open 24 hrs', 'Mo-Su 0:00-24:00'),\n 'phone': shop['Telephone'],\n 'country': shop['Country'],\n 'ref': shop['Title'],\n 'addr_full': shop['Street'],\n 'postcode': shop.get('ZipCode'),\n 'city': shop.get('City'),\n 'state': shop.get('State'),\n 'lat': float(shop['Latitude']),\n 'lon': float(shop['Longitude']),\n }\n\n yield GeojsonPointItem(**props)\n", "path": 
"locations/spiders/harristeeter.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\nimport re\n\nfrom locations.items import GeojsonPointItem\n\nDAYS = ['Mo', 'Tu', 'We', 'Th', 'Fr', 'Sa', 'Su']\n\n\nclass HarristeeterSpider(scrapy.Spider):\n name = \"harristeeter\"\n allowed_domains = [\"harristeeter.com\"]\n start_urls = (\n 'https://www.harristeeter.com/store/#/app/store-locator',\n )\n\n handle_httpstatus_list = [401]\n custom_settings = {\n 'DEFAULT_REQUEST_HEADERS' : {\n 'Accept': 'application/json, text/plain, */*',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Connection': 'keep-alive',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36',\n }\n }\n\n\n def store_hours(self, store_hours):\n res=''\n for day in store_hours:\n match = re.search(r'(\\w*)(\\s*-\\s*(\\w*))?\\s*(\\d{1,2})(:(\\d{1,2}))?\\s*(am|pm|mp)?\\s*-\\s*(\\d{1,2})(:(\\d{1,2}))?\\s*(am|pm|mp)',day.replace('Midnight','12:00pm'))\n\n if not match:\n continue\n res += match[1][:2]\n\n try:\n res += match[2].replace(' ','')[:3]+' '\n except Exception:\n res += ' '\n\n if match[5]:\n first_minutes = match[5]\n else:\n first_minutes = ':00'\n\n if match[9]:\n second_minutes = match[9]\n else:\n second_minutes = ':00'\n\n res += str(int(match[4])+(12 if match[7] in ['pm','mp'] else 0)) +first_minutes+'-'\n res += str(int(match[8])+(12 if match[10] in ['pm','mp'] else 0)) +second_minutes+';'\n\n return res.rstrip(';').strip()\n\n def parse(self, response):\n yield scrapy.Request('https://www.harristeeter.com/api/checkLogin',\n method='POST',\n callback=self.check_login)\n\n\n def check_login(self, response):\n\n yield scrapy.Request(\n 'https://www.harristeeter.com/store/#/app/store-locator',\n callback=self.get_store_locator)\n\n def get_store_locator(self, response):\n\n yield scrapy.Request(\n 'https://www.harristeeter.com/api/v1/stores/search?Address=98011&Radius=20000&AllStores=true',\n callback=self.parse_shop\n )\n\n def parse_shop(self, response):\n shops = json.loads(response.text)['Data']\n\n for shop in shops:\n props = {\n 'ref': shop['StoreNumber'],\n 'addr_full': shop['Street'],\n 'city': shop.get('City'),\n 'state': shop.get('State'),\n 'postcode': shop.get('PostalCode'),\n 'country': shop['Country'],\n 'name': shop['StoreName'],\n 'phone': shop['Telephone'],\n 'lat': float(shop['Latitude']),\n 'lon': float(shop['Longitude']),\n 'opening_hours': shop['StoreHours'].replace('Open 24 Hours', 'Mo-Su 0:00-24:00')\n }\n\n yield GeojsonPointItem(**props)\n", "path": "locations/spiders/harristeeter.py"}]} | 1,283 | 289 |
gh_patches_debug_17150 | rasdani/github-patches | git_diff | Kinto__kinto-492 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
kinto start takes too much time.
It can sometimes take more than 2 seconds, as reported by @n1k0 on JS clients integration tests.
I investigated a bit and found out that, on my machine, loading the entry point already takes more than 1 second:
```
$ time python -c "from pkg_resources import load_entry_point; load_entry_point('kinto', 'console_scripts', 'kinto')"
python -c 0,96s user 0,16s system 99% cpu 1,132 total
```
In comparison, `pserve` takes 200msec:
```
$ time python -c "from pkg_resources import load_entry_point; load_entry_point('pyramid', 'console_scripts', 'pcreate')"
python -c 0,18s user 0,09s system 98% cpu 0,272 total
```
I realized that by moving `import requests` out of the `cliquet.initialization` imports ([PR](https://github.com/mozilla-services/cliquet/pull/674)) and removing `import pip` from `kinto.__main__`, I could cut this roughly in half:
```
$ time python -c "from pkg_resources import load_entry_point; load_entry_point('kinto', 'console_scripts', 'kinto')"
python -c 0,36s user 0,18s system 98% cpu 0,543 total
```
I knew this alone was not going to speed up the `kinto start` command much. I tracked it down and noticed that `__main__:main` was executed twice because of the `--reload` argument.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kinto/__main__.py`
Content:
```
1 from __future__ import print_function
2 import argparse
3 import os
4 import sys
5
6 from six.moves import input
7 from cliquet.scripts import cliquet
8 from pyramid.scripts import pserve
9 from pyramid.paster import bootstrap
10 from kinto import __version__
11 from kinto.config import init
12
13 CONFIG_FILE = 'config/kinto.ini'
14
15
16 def main(args=None):
17 """The main routine."""
18 if args is None:
19 args = sys.argv[1:]
20
21 parser = argparse.ArgumentParser(description="Kinto commands")
22 parser.add_argument('--ini',
23 help='Application configuration file',
24 dest='ini_file',
25 required=False,
26 default=CONFIG_FILE)
27 parser.add_argument('--backend',
28 help='Specify backend',
29 dest='backend',
30 required=False,
31 default=None)
32
33 parser.add_argument('-v', '--version',
34 action='version', version=__version__,
35 help='Print the Kinto version and exit.')
36
37 subparsers = parser.add_subparsers(title='subcommands',
38 description='valid subcommands',
39 help='init/start/migrate')
40
41 parser_init = subparsers.add_parser('init')
42 parser_init.set_defaults(which='init')
43
44 parser_migrate = subparsers.add_parser('migrate')
45 parser_migrate.set_defaults(which='migrate')
46
47 parser_start = subparsers.add_parser('start')
48 parser_start.set_defaults(which='start')
49
50 args = vars(parser.parse_args())
51 config_file = args['ini_file']
52
53 if args['which'] == 'init':
54 if os.path.exists(config_file):
55 print("%s already exist." % config_file, file=sys.stderr)
56 sys.exit(1)
57
58 backend = args['backend']
59 if not backend:
60 while True:
61 prompt = ("Select the backend you would like to use: "
62 "(1 - postgresql, 2 - redis, default - memory) ")
63 answer = input(prompt).strip()
64 try:
65 backends = {"1": "postgresql", "2": "redis", "": "memory"}
66 backend = backends[answer]
67 break
68 except KeyError:
69 pass
70
71 init(config_file, backend)
72
73 # Install postgresql libraries if necessary
74 if backend == "postgresql":
75 try:
76 import psycopg2 # NOQA
77 except ImportError:
78 import pip
79 pip.main(['install', "cliquet[postgresql]"])
80
81 elif args['which'] == 'migrate':
82 env = bootstrap(config_file)
83 cliquet.init_schema(env)
84
85 elif args['which'] == 'start':
86 pserve_argv = ['pserve', config_file, '--reload']
87 pserve.main(pserve_argv)
88
89
90 if __name__ == "__main__":
91 main()
92
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kinto/__main__.py b/kinto/__main__.py
--- a/kinto/__main__.py
+++ b/kinto/__main__.py
@@ -45,6 +45,11 @@
parser_migrate.set_defaults(which='migrate')
parser_start = subparsers.add_parser('start')
+ parser_start.add_argument('--reload',
+ action='store_true',
+ help='Restart when code or config changes',
+ required=False,
+ default=False)
parser_start.set_defaults(which='start')
args = vars(parser.parse_args())
@@ -83,7 +88,9 @@
cliquet.init_schema(env)
elif args['which'] == 'start':
- pserve_argv = ['pserve', config_file, '--reload']
+ pserve_argv = ['pserve', config_file]
+ if args['reload']:
+ pserve_argv.append('--reload')
pserve.main(pserve_argv)
| {"golden_diff": "diff --git a/kinto/__main__.py b/kinto/__main__.py\n--- a/kinto/__main__.py\n+++ b/kinto/__main__.py\n@@ -45,6 +45,11 @@\n parser_migrate.set_defaults(which='migrate')\n \n parser_start = subparsers.add_parser('start')\n+ parser_start.add_argument('--reload',\n+ action='store_true',\n+ help='Restart when code or config changes',\n+ required=False,\n+ default=False)\n parser_start.set_defaults(which='start')\n \n args = vars(parser.parse_args())\n@@ -83,7 +88,9 @@\n cliquet.init_schema(env)\n \n elif args['which'] == 'start':\n- pserve_argv = ['pserve', config_file, '--reload']\n+ pserve_argv = ['pserve', config_file]\n+ if args['reload']:\n+ pserve_argv.append('--reload')\n pserve.main(pserve_argv)\n", "issue": "kinto start takes too much time.\nIt can sometimes take more than 2 seconds, as reported by @n1k0 on JS clients integration tests.\n\nI investigated a bit and found out, that on my machine, loading the entrypoint takes more than 1 second already:\n\n```\n$ time python -c \"from pkg_resources import load_entry_point; load_entry_point('kinto', 'console_scripts', 'kinto')\"\npython -c 0,96s user 0,16s system 99% cpu 1,132 total\n```\n\nIn comparison, `pserve` takes 200msec: \n\n```\n$ time python -c \"from pkg_resources import load_entry_point; load_entry_point('pyramid', 'console_scripts', 'pcreate')\"\npython -c 0,18s user 0,09s system 98% cpu 0,272 total\n```\n\nI realized that moving `import requests` from `cliquet.initialization` imports [PR](https://github.com/mozilla-services/cliquet/pull/674), and remove `import pip` from `kinto.__main__` I could reduce by half:\n\n```\n$ time python -c \"from pkg_resources import load_entry_point; load_entry_point('kinto', 'console_scripts', 'kinto')\"\npython -c 0,36s user 0,18s system 98% cpu 0,543 total\n```\n\nI knew this was not going to speed up the `kinto start` command too much. 
I tracked down and noticed the `__main__:main` was executed twice because of `--reload` argument.\n\n", "before_files": [{"content": "from __future__ import print_function\nimport argparse\nimport os\nimport sys\n\nfrom six.moves import input\nfrom cliquet.scripts import cliquet\nfrom pyramid.scripts import pserve\nfrom pyramid.paster import bootstrap\nfrom kinto import __version__\nfrom kinto.config import init\n\nCONFIG_FILE = 'config/kinto.ini'\n\n\ndef main(args=None):\n \"\"\"The main routine.\"\"\"\n if args is None:\n args = sys.argv[1:]\n\n parser = argparse.ArgumentParser(description=\"Kinto commands\")\n parser.add_argument('--ini',\n help='Application configuration file',\n dest='ini_file',\n required=False,\n default=CONFIG_FILE)\n parser.add_argument('--backend',\n help='Specify backend',\n dest='backend',\n required=False,\n default=None)\n\n parser.add_argument('-v', '--version',\n action='version', version=__version__,\n help='Print the Kinto version and exit.')\n\n subparsers = parser.add_subparsers(title='subcommands',\n description='valid subcommands',\n help='init/start/migrate')\n\n parser_init = subparsers.add_parser('init')\n parser_init.set_defaults(which='init')\n\n parser_migrate = subparsers.add_parser('migrate')\n parser_migrate.set_defaults(which='migrate')\n\n parser_start = subparsers.add_parser('start')\n parser_start.set_defaults(which='start')\n\n args = vars(parser.parse_args())\n config_file = args['ini_file']\n\n if args['which'] == 'init':\n if os.path.exists(config_file):\n print(\"%s already exist.\" % config_file, file=sys.stderr)\n sys.exit(1)\n\n backend = args['backend']\n if not backend:\n while True:\n prompt = (\"Select the backend you would like to use: \"\n \"(1 - postgresql, 2 - redis, default - memory) \")\n answer = input(prompt).strip()\n try:\n backends = {\"1\": \"postgresql\", \"2\": \"redis\", \"\": \"memory\"}\n backend = backends[answer]\n break\n except KeyError:\n pass\n\n init(config_file, backend)\n\n # Install postgresql libraries if necessary\n if backend == \"postgresql\":\n try:\n import psycopg2 # NOQA\n except ImportError:\n import pip\n pip.main(['install', \"cliquet[postgresql]\"])\n\n elif args['which'] == 'migrate':\n env = bootstrap(config_file)\n cliquet.init_schema(env)\n\n elif args['which'] == 'start':\n pserve_argv = ['pserve', config_file, '--reload']\n pserve.main(pserve_argv)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "kinto/__main__.py"}], "after_files": [{"content": "from __future__ import print_function\nimport argparse\nimport os\nimport sys\n\nfrom six.moves import input\nfrom cliquet.scripts import cliquet\nfrom pyramid.scripts import pserve\nfrom pyramid.paster import bootstrap\nfrom kinto import __version__\nfrom kinto.config import init\n\nCONFIG_FILE = 'config/kinto.ini'\n\n\ndef main(args=None):\n \"\"\"The main routine.\"\"\"\n if args is None:\n args = sys.argv[1:]\n\n parser = argparse.ArgumentParser(description=\"Kinto commands\")\n parser.add_argument('--ini',\n help='Application configuration file',\n dest='ini_file',\n required=False,\n default=CONFIG_FILE)\n parser.add_argument('--backend',\n help='Specify backend',\n dest='backend',\n required=False,\n default=None)\n\n parser.add_argument('-v', '--version',\n action='version', version=__version__,\n help='Print the Kinto version and exit.')\n\n subparsers = parser.add_subparsers(title='subcommands',\n description='valid subcommands',\n help='init/start/migrate')\n\n parser_init = subparsers.add_parser('init')\n 
parser_init.set_defaults(which='init')\n\n parser_migrate = subparsers.add_parser('migrate')\n parser_migrate.set_defaults(which='migrate')\n\n parser_start = subparsers.add_parser('start')\n parser_start.add_argument('--reload',\n action='store_true',\n help='Restart when code or config changes',\n required=False,\n default=False)\n parser_start.set_defaults(which='start')\n\n args = vars(parser.parse_args())\n config_file = args['ini_file']\n\n if args['which'] == 'init':\n if os.path.exists(config_file):\n print(\"%s already exist.\" % config_file, file=sys.stderr)\n sys.exit(1)\n\n backend = args['backend']\n if not backend:\n while True:\n prompt = (\"Select the backend you would like to use: \"\n \"(1 - postgresql, 2 - redis, default - memory) \")\n answer = input(prompt).strip()\n try:\n backends = {\"1\": \"postgresql\", \"2\": \"redis\", \"\": \"memory\"}\n backend = backends[answer]\n break\n except KeyError:\n pass\n\n init(config_file, backend)\n\n # Install postgresql libraries if necessary\n if backend == \"postgresql\":\n try:\n import psycopg2 # NOQA\n except ImportError:\n import pip\n pip.main(['install', \"cliquet[postgresql]\"])\n\n elif args['which'] == 'migrate':\n env = bootstrap(config_file)\n cliquet.init_schema(env)\n\n elif args['which'] == 'start':\n pserve_argv = ['pserve', config_file]\n if args['reload']:\n pserve_argv.append('--reload')\n pserve.main(pserve_argv)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "kinto/__main__.py"}]} | 1,381 | 210 |
gh_patches_debug_28526 | rasdani/github-patches | git_diff | akvo__akvo-rsr-2553 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Search in EUTF akvo site
Partner team had a training and workshop with EUTF last week and discovered that search terms in EUTF akvo site returned unrelated results.
A search for "tombouctou" shows an SNV project on the EUTF Akvo page, which is confusing for the partner, as they expect to see only their own projects on their Akvo site.
<img width="1070" alt="screen shot 2017-02-06 at 15 56 41" src="https://cloud.githubusercontent.com/assets/21127166/22652066/45bdf606-ec85-11e6-9c05-25d421b329c1.png">
The partner expects to see only projects where they are one of the participating partners.
If the search does not match any of their projects, it should not return anything.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `akvo/rest/views/typeahead.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 """Akvo RSR is covered by the GNU Affero General Public License.
4 See more details in the license.txt file located at the root folder of the
5 Akvo RSR module. For additional details on the GNU license please
6 see < http://www.gnu.org/licenses/agpl.html >.
7 """
8
9 from akvo.rest.serializers import (TypeaheadCountrySerializer,
10 TypeaheadOrganisationSerializer,
11 TypeaheadProjectSerializer,
12 TypeaheadProjectUpdateSerializer)
13
14 from akvo.codelists.models import Country, Version
15 from akvo.rsr.models import Organisation, Project, ProjectUpdate
16
17 from django.conf import settings
18
19 from rest_framework.decorators import api_view
20 from rest_framework.response import Response
21
22
23 def rejig(queryset, serializer):
24 """Rearrange & add queryset count to the response data."""
25 return {
26 'count': queryset.count(),
27 'results': serializer.data
28 }
29
30
31 @api_view(['GET'])
32 def typeahead_country(request):
33 iati_version = Version.objects.get(code=settings.IATI_VERSION)
34 countries = Country.objects.filter(version=iati_version)
35 return Response(
36 rejig(countries, TypeaheadCountrySerializer(countries, many=True))
37 )
38
39
40 @api_view(['GET'])
41 def typeahead_organisation(request):
42 organisations = Organisation.objects.all()
43 return Response(
44 rejig(organisations, TypeaheadOrganisationSerializer(organisations,
45 many=True))
46 )
47
48
49 @api_view(['GET'])
50 def typeahead_user_organisations(request):
51 user = request.user
52 is_admin = user.is_active and (user.is_superuser or user.is_admin)
53 organisations = user.approved_organisations() if not is_admin else Organisation.objects.all()
54 return Response(
55 rejig(organisations, TypeaheadOrganisationSerializer(organisations,
56 many=True))
57 )
58
59
60 @api_view(['GET'])
61 def typeahead_project(request):
62 projects = Project.objects.all().exclude(title='')
63 return Response(
64 rejig(projects, TypeaheadProjectSerializer(projects, many=True))
65 )
66
67
68 @api_view(['GET'])
69 def typeahead_user_projects(request):
70 user = request.user
71 is_admin = user.is_active and (user.is_superuser or user.is_admin)
72 if is_admin:
73 projects = Project.objects.all()
74 else:
75 projects = user.approved_organisations().all_projects()
76 projects = projects.exclude(title='')
77 return Response(
78 rejig(projects, TypeaheadProjectSerializer(projects, many=True))
79 )
80
81
82 @api_view(['GET'])
83 def typeahead_impact_projects(request):
84 user = request.user
85 projects = Project.objects.all() if user.is_admin or user.is_superuser else user.my_projects()
86 projects = projects.published().filter(is_impact_project=True).order_by('title')
87
88 return Response(
89 rejig(projects, TypeaheadProjectSerializer(projects, many=True))
90 )
91
92
93 @api_view(['GET'])
94 def typeahead_projectupdate(request):
95 updates = ProjectUpdate.objects.all()
96 return Response(
97 rejig(updates, TypeaheadProjectUpdateSerializer(updates, many=True))
98 )
99
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/akvo/rest/views/typeahead.py b/akvo/rest/views/typeahead.py
--- a/akvo/rest/views/typeahead.py
+++ b/akvo/rest/views/typeahead.py
@@ -13,6 +13,7 @@
from akvo.codelists.models import Country, Version
from akvo.rsr.models import Organisation, Project, ProjectUpdate
+from akvo.rsr.views.project import _project_directory_coll
from django.conf import settings
@@ -59,7 +60,39 @@
@api_view(['GET'])
def typeahead_project(request):
- projects = Project.objects.all().exclude(title='')
+ """Return the typeaheads for projects.
+
+ Without any query parameters, it returns the info for all the projects in
+ the current context -- changes depending on whether we are on a partner
+ site, or the RSR site.
+
+ If a project query parameter with a project id is passed, the info for all
+ projects associated with partners for the specified project is returned.
+
+ NOTE: The unauthenticated user gets information about all the projects when
+ using this API endpoint. More permission checking will need to be added,
+ if the amount of data being returned is changed.
+
+ """
+ project_id = request.GET.get('project', None)
+ if project_id is None:
+ project = None
+
+ else:
+ try:
+ project = Project.objects.get(id=project_id)
+ except Project.DoesNotExist:
+ project = None
+
+ if project is None:
+ # Search bar - organization projects, published
+ projects = _project_directory_coll(request)
+
+ else:
+ # Project editor - all projects of partners for this project
+ projects = Project.objects.of_partners(project.partners.distinct()).distinct()
+
+ projects = projects.exclude(title='')
return Response(
rejig(projects, TypeaheadProjectSerializer(projects, many=True))
)
| {"golden_diff": "diff --git a/akvo/rest/views/typeahead.py b/akvo/rest/views/typeahead.py\n--- a/akvo/rest/views/typeahead.py\n+++ b/akvo/rest/views/typeahead.py\n@@ -13,6 +13,7 @@\n \n from akvo.codelists.models import Country, Version\n from akvo.rsr.models import Organisation, Project, ProjectUpdate\n+from akvo.rsr.views.project import _project_directory_coll\n \n from django.conf import settings\n \n@@ -59,7 +60,39 @@\n \n @api_view(['GET'])\n def typeahead_project(request):\n- projects = Project.objects.all().exclude(title='')\n+ \"\"\"Return the typeaheads for projects.\n+\n+ Without any query parameters, it returns the info for all the projects in\n+ the current context -- changes depending on whether we are on a partner\n+ site, or the RSR site.\n+\n+ If a project query parameter with a project id is passed, the info for all\n+ projects associated with partners for the specified project is returned.\n+\n+ NOTE: The unauthenticated user gets information about all the projects when\n+ using this API endpoint. More permission checking will need to be added,\n+ if the amount of data being returned is changed.\n+\n+ \"\"\"\n+ project_id = request.GET.get('project', None)\n+ if project_id is None:\n+ project = None\n+\n+ else:\n+ try:\n+ project = Project.objects.get(id=project_id)\n+ except Project.DoesNotExist:\n+ project = None\n+\n+ if project is None:\n+ # Search bar - organization projects, published\n+ projects = _project_directory_coll(request)\n+\n+ else:\n+ # Project editor - all projects of partners for this project\n+ projects = Project.objects.of_partners(project.partners.distinct()).distinct()\n+\n+ projects = projects.exclude(title='')\n return Response(\n rejig(projects, TypeaheadProjectSerializer(projects, many=True))\n )\n", "issue": "Search in EUTF akvo site\nPartner team had a training and workshop with EUTF last week and discovered that search terms in EUTF akvo site returned unrelated results.\r\n\r\nSearch for tombouctou shows up a project of SNV in EUTF akvo page, which is confusing for the partner as they expect to see their own projects only on their akvo site. \r\n\r\n<img width=\"1070\" alt=\"screen shot 2017-02-06 at 15 56 41\" src=\"https://cloud.githubusercontent.com/assets/21127166/22652066/45bdf606-ec85-11e6-9c05-25d421b329c1.png\">\r\n\r\nWhat the partner expects is to see just projects where they are one of the participating partners. \r\nIf the search does not match any of their projects, it should then not return anything. \n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Akvo RSR is covered by the GNU Affero General Public License.\nSee more details in the license.txt file located at the root folder of the\nAkvo RSR module. 
For additional details on the GNU license please\nsee < http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nfrom akvo.rest.serializers import (TypeaheadCountrySerializer,\n TypeaheadOrganisationSerializer,\n TypeaheadProjectSerializer,\n TypeaheadProjectUpdateSerializer)\n\nfrom akvo.codelists.models import Country, Version\nfrom akvo.rsr.models import Organisation, Project, ProjectUpdate\n\nfrom django.conf import settings\n\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n\n\ndef rejig(queryset, serializer):\n \"\"\"Rearrange & add queryset count to the response data.\"\"\"\n return {\n 'count': queryset.count(),\n 'results': serializer.data\n }\n\n\n@api_view(['GET'])\ndef typeahead_country(request):\n iati_version = Version.objects.get(code=settings.IATI_VERSION)\n countries = Country.objects.filter(version=iati_version)\n return Response(\n rejig(countries, TypeaheadCountrySerializer(countries, many=True))\n )\n\n\n@api_view(['GET'])\ndef typeahead_organisation(request):\n organisations = Organisation.objects.all()\n return Response(\n rejig(organisations, TypeaheadOrganisationSerializer(organisations,\n many=True))\n )\n\n\n@api_view(['GET'])\ndef typeahead_user_organisations(request):\n user = request.user\n is_admin = user.is_active and (user.is_superuser or user.is_admin)\n organisations = user.approved_organisations() if not is_admin else Organisation.objects.all()\n return Response(\n rejig(organisations, TypeaheadOrganisationSerializer(organisations,\n many=True))\n )\n\n\n@api_view(['GET'])\ndef typeahead_project(request):\n projects = Project.objects.all().exclude(title='')\n return Response(\n rejig(projects, TypeaheadProjectSerializer(projects, many=True))\n )\n\n\n@api_view(['GET'])\ndef typeahead_user_projects(request):\n user = request.user\n is_admin = user.is_active and (user.is_superuser or user.is_admin)\n if is_admin:\n projects = Project.objects.all()\n else:\n projects = user.approved_organisations().all_projects()\n projects = projects.exclude(title='')\n return Response(\n rejig(projects, TypeaheadProjectSerializer(projects, many=True))\n )\n\n\n@api_view(['GET'])\ndef typeahead_impact_projects(request):\n user = request.user\n projects = Project.objects.all() if user.is_admin or user.is_superuser else user.my_projects()\n projects = projects.published().filter(is_impact_project=True).order_by('title')\n\n return Response(\n rejig(projects, TypeaheadProjectSerializer(projects, many=True))\n )\n\n\n@api_view(['GET'])\ndef typeahead_projectupdate(request):\n updates = ProjectUpdate.objects.all()\n return Response(\n rejig(updates, TypeaheadProjectUpdateSerializer(updates, many=True))\n )\n", "path": "akvo/rest/views/typeahead.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Akvo RSR is covered by the GNU Affero General Public License.\nSee more details in the license.txt file located at the root folder of the\nAkvo RSR module. 
For additional details on the GNU license please\nsee < http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nfrom akvo.rest.serializers import (TypeaheadCountrySerializer,\n TypeaheadOrganisationSerializer,\n TypeaheadProjectSerializer,\n TypeaheadProjectUpdateSerializer)\n\nfrom akvo.codelists.models import Country, Version\nfrom akvo.rsr.models import Organisation, Project, ProjectUpdate\nfrom akvo.rsr.views.project import _project_directory_coll\n\nfrom django.conf import settings\n\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n\n\ndef rejig(queryset, serializer):\n \"\"\"Rearrange & add queryset count to the response data.\"\"\"\n return {\n 'count': queryset.count(),\n 'results': serializer.data\n }\n\n\n@api_view(['GET'])\ndef typeahead_country(request):\n iati_version = Version.objects.get(code=settings.IATI_VERSION)\n countries = Country.objects.filter(version=iati_version)\n return Response(\n rejig(countries, TypeaheadCountrySerializer(countries, many=True))\n )\n\n\n@api_view(['GET'])\ndef typeahead_organisation(request):\n organisations = Organisation.objects.all()\n return Response(\n rejig(organisations, TypeaheadOrganisationSerializer(organisations,\n many=True))\n )\n\n\n@api_view(['GET'])\ndef typeahead_user_organisations(request):\n user = request.user\n is_admin = user.is_active and (user.is_superuser or user.is_admin)\n organisations = user.approved_organisations() if not is_admin else Organisation.objects.all()\n return Response(\n rejig(organisations, TypeaheadOrganisationSerializer(organisations,\n many=True))\n )\n\n\n@api_view(['GET'])\ndef typeahead_project(request):\n \"\"\"Return the typeaheads for projects.\n\n Without any query parameters, it returns the info for all the projects in\n the current context -- changes depending on whether we are on a partner\n site, or the RSR site.\n\n If a project query parameter with a project id is passed, the info for all\n projects associated with partners for the specified project is returned.\n\n NOTE: The unauthenticated user gets information about all the projects when\n using this API endpoint. 
More permission checking will need to be added,\n if the amount of data being returned is changed.\n\n \"\"\"\n project_id = request.GET.get('project', None)\n if project_id is None:\n project = None\n\n else:\n try:\n project = Project.objects.get(id=project_id)\n except Project.DoesNotExist:\n project = None\n\n if project is None:\n # Search bar - organization projects, published\n projects = _project_directory_coll(request)\n\n else:\n # Project editor - all projects of partners for this project\n projects = Project.objects.of_partners(project.partners.distinct()).distinct()\n\n projects = projects.exclude(title='')\n return Response(\n rejig(projects, TypeaheadProjectSerializer(projects, many=True))\n )\n\n\n@api_view(['GET'])\ndef typeahead_user_projects(request):\n user = request.user\n is_admin = user.is_active and (user.is_superuser or user.is_admin)\n if is_admin:\n projects = Project.objects.all()\n else:\n projects = user.approved_organisations().all_projects()\n projects = projects.exclude(title='')\n return Response(\n rejig(projects, TypeaheadProjectSerializer(projects, many=True))\n )\n\n\n@api_view(['GET'])\ndef typeahead_impact_projects(request):\n user = request.user\n projects = Project.objects.all() if user.is_admin or user.is_superuser else user.my_projects()\n projects = projects.published().filter(is_impact_project=True).order_by('title')\n\n return Response(\n rejig(projects, TypeaheadProjectSerializer(projects, many=True))\n )\n\n\n@api_view(['GET'])\ndef typeahead_projectupdate(request):\n updates = ProjectUpdate.objects.all()\n return Response(\n rejig(updates, TypeaheadProjectUpdateSerializer(updates, many=True))\n )\n", "path": "akvo/rest/views/typeahead.py"}]} | 1,325 | 434 |
gh_patches_debug_33599 | rasdani/github-patches | git_diff | pyjanitor-devs__pyjanitor-1116 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Require a minimal pyspark version of v3.2.0 to cut duplicated code
Since [pyspark v3.2.0](https://github.com/apache/spark/blob/5d45a415f3a29898d92380380cfd82bfc7f579ea/python/pyspark/pandas/extensions.py#L28-L64), it has contained `CachedAccessor` and `_register_accessor`.
janitor currently requires a minimal pyspark version of v3.1.2.
Compared to v3.1.2, v3.2.0 is a minor version.
https://github.com/pyjanitor-devs/pyjanitor/blob/4867b20273e99cb2f4c32278123d5c23c2ccaa67/janitor/spark/backend.py#L9-L37
Note: The pyspark in the [setup.py](https://github.com/pyjanitor-devs/pyjanitor/blob/4867b20273e99cb2f4c32278123d5c23c2ccaa67/.requirements/spark.txt#L9) file requires v3.2.1 but ci ([environment-dev.yml](https://github.com/pyjanitor-devs/pyjanitor/blob/4867b20273e99cb2f4c32278123d5c23c2ccaa67/environment-dev.yml#L41)) requires v3.1.2.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `janitor/spark/backend.py`
Content:
```
1 """ Backend functions for pyspark."""
2
3 import warnings
4 from functools import wraps
5
6 from janitor.utils import import_message
7
8
9 class CachedAccessor:
10 """
11 Custom property-like object (descriptor) for caching accessors.
12
13 Parameters
14 ----------
15 name : str
16 The namespace this will be accessed under, e.g. `df.foo`
17 accessor : cls
18 The class with the extension methods.
19
20 NOTE
21 ----
22 Modified based on pandas.core.accessor.
23 """
24
25 def __init__(self, name, accessor):
26 self._name = name
27 self._accessor = accessor
28
29 def __get__(self, obj, cls):
30 if obj is None:
31 # we're accessing the attribute of the class, i.e., Dataset.geo
32 return self._accessor
33 accessor_obj = self._accessor(obj)
34 # Replace the property with the accessor object. Inspired by:
35 # http://www.pydanny.com/cached-property.html
36 setattr(obj, self._name, accessor_obj)
37 return accessor_obj
38
39
40 def _register_accessor(name, cls):
41 """
42 NOTE
43 ----
44 Modified based on pandas.core.accessor.
45 """
46
47 def decorator(accessor):
48 if hasattr(cls, name):
49 warnings.warn(
50 "registration of accessor {!r} under name {!r} for type "
51 "{!r} is overriding a preexisting attribute with the same "
52 "name.".format(accessor, name, cls),
53 UserWarning,
54 stacklevel=2,
55 )
56 setattr(cls, name, CachedAccessor(name, accessor))
57 return accessor
58
59 return decorator
60
61
62 def register_dataframe_accessor(name):
63 """
64 NOTE
65 ----
66 Modified based on pandas.core.accessor.
67
68 .. # noqa: DAR101 name
69 .. # noqa: DAR201
70 """
71 try:
72 from pyspark.sql import DataFrame
73 except ImportError:
74 import_message(
75 submodule="spark",
76 package="pyspark",
77 conda_channel="conda-forge",
78 pip_install=True,
79 )
80
81 return _register_accessor(name, DataFrame)
82
83
84 def register_dataframe_method(method):
85 """Register a function as a method attached to the Pyspark DataFrame.
86
87 NOTE
88 ----
89 Modified based on pandas_flavor.register.
90
91 .. # noqa: DAR101 method
92 .. # noqa: DAR201
93 """
94
95 def inner(*args, **kwargs):
96 class AccessorMethod:
97 def __init__(self, pyspark_obj):
98 self._obj = pyspark_obj
99
100 @wraps(method)
101 def __call__(self, *args, **kwargs):
102 return method(self._obj, *args, **kwargs)
103
104 register_dataframe_accessor(method.__name__)(AccessorMethod)
105
106 return method
107
108 return inner()
109
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/janitor/spark/backend.py b/janitor/spark/backend.py
--- a/janitor/spark/backend.py
+++ b/janitor/spark/backend.py
@@ -1,84 +1,20 @@
""" Backend functions for pyspark."""
-import warnings
from functools import wraps
-from janitor.utils import import_message
+try:
+ from pyspark.pandas.extensions import register_dataframe_accessor
-class CachedAccessor:
- """
- Custom property-like object (descriptor) for caching accessors.
-
- Parameters
- ----------
- name : str
- The namespace this will be accessed under, e.g. `df.foo`
- accessor : cls
- The class with the extension methods.
-
- NOTE
- ----
- Modified based on pandas.core.accessor.
- """
-
- def __init__(self, name, accessor):
- self._name = name
- self._accessor = accessor
-
- def __get__(self, obj, cls):
- if obj is None:
- # we're accessing the attribute of the class, i.e., Dataset.geo
- return self._accessor
- accessor_obj = self._accessor(obj)
- # Replace the property with the accessor object. Inspired by:
- # http://www.pydanny.com/cached-property.html
- setattr(obj, self._name, accessor_obj)
- return accessor_obj
-
-
-def _register_accessor(name, cls):
- """
- NOTE
- ----
- Modified based on pandas.core.accessor.
- """
-
- def decorator(accessor):
- if hasattr(cls, name):
- warnings.warn(
- "registration of accessor {!r} under name {!r} for type "
- "{!r} is overriding a preexisting attribute with the same "
- "name.".format(accessor, name, cls),
- UserWarning,
- stacklevel=2,
- )
- setattr(cls, name, CachedAccessor(name, accessor))
- return accessor
-
- return decorator
-
-
-def register_dataframe_accessor(name):
- """
- NOTE
- ----
- Modified based on pandas.core.accessor.
-
- .. # noqa: DAR101 name
- .. # noqa: DAR201
- """
- try:
- from pyspark.sql import DataFrame
- except ImportError:
- import_message(
- submodule="spark",
- package="pyspark",
- conda_channel="conda-forge",
- pip_install=True,
- )
+except ImportError:
+ from janitor.utils import import_message
- return _register_accessor(name, DataFrame)
+ import_message(
+ submodule="spark",
+ package="pyspark",
+ conda_channel="conda-forge",
+ pip_install=True,
+ )
def register_dataframe_method(method):
| {"golden_diff": "diff --git a/janitor/spark/backend.py b/janitor/spark/backend.py\n--- a/janitor/spark/backend.py\n+++ b/janitor/spark/backend.py\n@@ -1,84 +1,20 @@\n \"\"\" Backend functions for pyspark.\"\"\"\n \n-import warnings\n from functools import wraps\n \n-from janitor.utils import import_message\n \n+try:\n+ from pyspark.pandas.extensions import register_dataframe_accessor\n \n-class CachedAccessor:\n- \"\"\"\n- Custom property-like object (descriptor) for caching accessors.\n-\n- Parameters\n- ----------\n- name : str\n- The namespace this will be accessed under, e.g. `df.foo`\n- accessor : cls\n- The class with the extension methods.\n-\n- NOTE\n- ----\n- Modified based on pandas.core.accessor.\n- \"\"\"\n-\n- def __init__(self, name, accessor):\n- self._name = name\n- self._accessor = accessor\n-\n- def __get__(self, obj, cls):\n- if obj is None:\n- # we're accessing the attribute of the class, i.e., Dataset.geo\n- return self._accessor\n- accessor_obj = self._accessor(obj)\n- # Replace the property with the accessor object. Inspired by:\n- # http://www.pydanny.com/cached-property.html\n- setattr(obj, self._name, accessor_obj)\n- return accessor_obj\n-\n-\n-def _register_accessor(name, cls):\n- \"\"\"\n- NOTE\n- ----\n- Modified based on pandas.core.accessor.\n- \"\"\"\n-\n- def decorator(accessor):\n- if hasattr(cls, name):\n- warnings.warn(\n- \"registration of accessor {!r} under name {!r} for type \"\n- \"{!r} is overriding a preexisting attribute with the same \"\n- \"name.\".format(accessor, name, cls),\n- UserWarning,\n- stacklevel=2,\n- )\n- setattr(cls, name, CachedAccessor(name, accessor))\n- return accessor\n-\n- return decorator\n-\n-\n-def register_dataframe_accessor(name):\n- \"\"\"\n- NOTE\n- ----\n- Modified based on pandas.core.accessor.\n-\n- .. # noqa: DAR101 name\n- .. 
# noqa: DAR201\n- \"\"\"\n- try:\n- from pyspark.sql import DataFrame\n- except ImportError:\n- import_message(\n- submodule=\"spark\",\n- package=\"pyspark\",\n- conda_channel=\"conda-forge\",\n- pip_install=True,\n- )\n+except ImportError:\n+ from janitor.utils import import_message\n \n- return _register_accessor(name, DataFrame)\n+ import_message(\n+ submodule=\"spark\",\n+ package=\"pyspark\",\n+ conda_channel=\"conda-forge\",\n+ pip_install=True,\n+ )\n \n \n def register_dataframe_method(method):\n", "issue": "Require pyspark minimal version is v3.2.0 to cut duplicates codes\nSince [pyspark v3.2.0](https://github.com/apache/spark/blob/5d45a415f3a29898d92380380cfd82bfc7f579ea/python/pyspark/pandas/extensions.py#L28-L64), it has contained `CachedAccessor`, `_register_accessor`, `_register_accessor`\r\n\r\njanitor requires pyspark minimal version is v3.1.2 at present.\r\nCompared to v3.1.2, v3.2.0 is a minor version.\r\n\r\nhttps://github.com/pyjanitor-devs/pyjanitor/blob/4867b20273e99cb2f4c32278123d5c23c2ccaa67/janitor/spark/backend.py#L9-L37\r\n\r\nNote: The pyspark in the [setup.py](https://github.com/pyjanitor-devs/pyjanitor/blob/4867b20273e99cb2f4c32278123d5c23c2ccaa67/.requirements/spark.txt#L9) file requires v3.2.1 but ci ([environment-dev.yml](https://github.com/pyjanitor-devs/pyjanitor/blob/4867b20273e99cb2f4c32278123d5c23c2ccaa67/environment-dev.yml#L41)) requires v3.1.2.\n", "before_files": [{"content": "\"\"\" Backend functions for pyspark.\"\"\"\n\nimport warnings\nfrom functools import wraps\n\nfrom janitor.utils import import_message\n\n\nclass CachedAccessor:\n \"\"\"\n Custom property-like object (descriptor) for caching accessors.\n\n Parameters\n ----------\n name : str\n The namespace this will be accessed under, e.g. `df.foo`\n accessor : cls\n The class with the extension methods.\n\n NOTE\n ----\n Modified based on pandas.core.accessor.\n \"\"\"\n\n def __init__(self, name, accessor):\n self._name = name\n self._accessor = accessor\n\n def __get__(self, obj, cls):\n if obj is None:\n # we're accessing the attribute of the class, i.e., Dataset.geo\n return self._accessor\n accessor_obj = self._accessor(obj)\n # Replace the property with the accessor object. Inspired by:\n # http://www.pydanny.com/cached-property.html\n setattr(obj, self._name, accessor_obj)\n return accessor_obj\n\n\ndef _register_accessor(name, cls):\n \"\"\"\n NOTE\n ----\n Modified based on pandas.core.accessor.\n \"\"\"\n\n def decorator(accessor):\n if hasattr(cls, name):\n warnings.warn(\n \"registration of accessor {!r} under name {!r} for type \"\n \"{!r} is overriding a preexisting attribute with the same \"\n \"name.\".format(accessor, name, cls),\n UserWarning,\n stacklevel=2,\n )\n setattr(cls, name, CachedAccessor(name, accessor))\n return accessor\n\n return decorator\n\n\ndef register_dataframe_accessor(name):\n \"\"\"\n NOTE\n ----\n Modified based on pandas.core.accessor.\n\n .. # noqa: DAR101 name\n .. # noqa: DAR201\n \"\"\"\n try:\n from pyspark.sql import DataFrame\n except ImportError:\n import_message(\n submodule=\"spark\",\n package=\"pyspark\",\n conda_channel=\"conda-forge\",\n pip_install=True,\n )\n\n return _register_accessor(name, DataFrame)\n\n\ndef register_dataframe_method(method):\n \"\"\"Register a function as a method attached to the Pyspark DataFrame.\n\n NOTE\n ----\n Modified based on pandas_flavor.register.\n\n .. # noqa: DAR101 method\n .. 
# noqa: DAR201\n \"\"\"\n\n def inner(*args, **kwargs):\n class AccessorMethod:\n def __init__(self, pyspark_obj):\n self._obj = pyspark_obj\n\n @wraps(method)\n def __call__(self, *args, **kwargs):\n return method(self._obj, *args, **kwargs)\n\n register_dataframe_accessor(method.__name__)(AccessorMethod)\n\n return method\n\n return inner()\n", "path": "janitor/spark/backend.py"}], "after_files": [{"content": "\"\"\" Backend functions for pyspark.\"\"\"\n\nfrom functools import wraps\n\n\ntry:\n from pyspark.pandas.extensions import register_dataframe_accessor\n\nexcept ImportError:\n from janitor.utils import import_message\n\n import_message(\n submodule=\"spark\",\n package=\"pyspark\",\n conda_channel=\"conda-forge\",\n pip_install=True,\n )\n\n\ndef register_dataframe_method(method):\n \"\"\"Register a function as a method attached to the Pyspark DataFrame.\n\n NOTE\n ----\n Modified based on pandas_flavor.register.\n\n .. # noqa: DAR101 method\n .. # noqa: DAR201\n \"\"\"\n\n def inner(*args, **kwargs):\n class AccessorMethod:\n def __init__(self, pyspark_obj):\n self._obj = pyspark_obj\n\n @wraps(method)\n def __call__(self, *args, **kwargs):\n return method(self._obj, *args, **kwargs)\n\n register_dataframe_accessor(method.__name__)(AccessorMethod)\n\n return method\n\n return inner()\n", "path": "janitor/spark/backend.py"}]} | 1,453 | 636 |
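Aside on the patch above: the patched `register_dataframe_method` is meant to be used as a decorator, so a short usage sketch may help readers who only skim the diff. This is illustrative only — it assumes pyspark >= 3.2.0 (so `pyspark.pandas.extensions` is available), that the patched `janitor.spark.backend` module is importable, and the DataFrame contents and function name are made up.

```python
# Usage sketch under the assumptions stated above; pyspark starts a default
# SparkSession lazily when the pandas-on-Spark DataFrame is created.
import pyspark.pandas as ps

from janitor.spark.backend import register_dataframe_method


@register_dataframe_method
def ncols(df):
    """Return the number of columns of a pandas-on-Spark DataFrame."""
    return len(df.columns)


psdf = ps.DataFrame({"a": [1, 2, 3], "b": [4.0, 5.0, 6.0]})
assert psdf.ncols() == 2  # the plain function is now callable as a method
```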
gh_patches_debug_21302 | rasdani/github-patches | git_diff | TheAlgorithms__Python-9108 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Remove duplicate GCD implementation
### Feature description
[`greatest_common_divisor.py`](https://github.com/TheAlgorithms/Python/blob/master/maths/greatest_common_divisor.py) and [`euclidean_gcd.py`](https://github.com/TheAlgorithms/Python/blob/master/maths/euclidean_gcd.py) both have basically the same two implementations of the Euclidean algorithm for calculating the GCD of 2 numbers. Thus, one of them should probably be removed as a duplicate.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `maths/euclidean_gcd.py`
Content:
```
1 """ https://en.wikipedia.org/wiki/Euclidean_algorithm """
2
3
4 def euclidean_gcd(a: int, b: int) -> int:
5 """
6 Examples:
7 >>> euclidean_gcd(3, 5)
8 1
9
10 >>> euclidean_gcd(6, 3)
11 3
12 """
13 while b:
14 a, b = b, a % b
15 return a
16
17
18 def euclidean_gcd_recursive(a: int, b: int) -> int:
19 """
20 Recursive method for euclicedan gcd algorithm
21
22 Examples:
23 >>> euclidean_gcd_recursive(3, 5)
24 1
25
26 >>> euclidean_gcd_recursive(6, 3)
27 3
28 """
29 return a if b == 0 else euclidean_gcd_recursive(b, a % b)
30
31
32 def main():
33 print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
34 print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
35 print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
36 print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
37 print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
38
39 print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
40 print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
41 print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
42 print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
43 print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")
44
45
46 if __name__ == "__main__":
47 main()
48
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/maths/euclidean_gcd.py b/maths/euclidean_gcd.py
deleted file mode 100644
--- a/maths/euclidean_gcd.py
+++ /dev/null
@@ -1,47 +0,0 @@
-""" https://en.wikipedia.org/wiki/Euclidean_algorithm """
-
-
-def euclidean_gcd(a: int, b: int) -> int:
- """
- Examples:
- >>> euclidean_gcd(3, 5)
- 1
-
- >>> euclidean_gcd(6, 3)
- 3
- """
- while b:
- a, b = b, a % b
- return a
-
-
-def euclidean_gcd_recursive(a: int, b: int) -> int:
- """
- Recursive method for euclicedan gcd algorithm
-
- Examples:
- >>> euclidean_gcd_recursive(3, 5)
- 1
-
- >>> euclidean_gcd_recursive(6, 3)
- 3
- """
- return a if b == 0 else euclidean_gcd_recursive(b, a % b)
-
-
-def main():
- print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
- print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
- print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
- print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
- print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
-
- print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
- print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
- print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
- print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
- print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")
-
-
-if __name__ == "__main__":
- main()
| {"golden_diff": "diff --git a/maths/euclidean_gcd.py b/maths/euclidean_gcd.py\ndeleted file mode 100644\n--- a/maths/euclidean_gcd.py\n+++ /dev/null\n@@ -1,47 +0,0 @@\n-\"\"\" https://en.wikipedia.org/wiki/Euclidean_algorithm \"\"\"\n-\n-\n-def euclidean_gcd(a: int, b: int) -> int:\n- \"\"\"\n- Examples:\n- >>> euclidean_gcd(3, 5)\n- 1\n-\n- >>> euclidean_gcd(6, 3)\n- 3\n- \"\"\"\n- while b:\n- a, b = b, a % b\n- return a\n-\n-\n-def euclidean_gcd_recursive(a: int, b: int) -> int:\n- \"\"\"\n- Recursive method for euclicedan gcd algorithm\n-\n- Examples:\n- >>> euclidean_gcd_recursive(3, 5)\n- 1\n-\n- >>> euclidean_gcd_recursive(6, 3)\n- 3\n- \"\"\"\n- return a if b == 0 else euclidean_gcd_recursive(b, a % b)\n-\n-\n-def main():\n- print(f\"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}\")\n- print(f\"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}\")\n- print(f\"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}\")\n- print(f\"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}\")\n- print(f\"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}\")\n-\n- print(f\"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}\")\n- print(f\"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}\")\n- print(f\"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}\")\n- print(f\"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}\")\n- print(f\"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}\")\n-\n-\n-if __name__ == \"__main__\":\n- main()\n", "issue": "Remove duplicate GCD implementation\n### Feature description\n\n[`greatest_common_divisor.py`](https://github.com/TheAlgorithms/Python/blob/master/maths/greatest_common_divisor.py) and [`euclidean_gcd.py`](https://github.com/TheAlgorithms/Python/blob/master/maths/euclidean_gcd.py) both have basically the same two implementations of the Euclidean algorithm for calculating the GCD of 2 numbers. Thus, one of them should probably be removed as a duplicate.\n", "before_files": [{"content": "\"\"\" https://en.wikipedia.org/wiki/Euclidean_algorithm \"\"\"\n\n\ndef euclidean_gcd(a: int, b: int) -> int:\n \"\"\"\n Examples:\n >>> euclidean_gcd(3, 5)\n 1\n\n >>> euclidean_gcd(6, 3)\n 3\n \"\"\"\n while b:\n a, b = b, a % b\n return a\n\n\ndef euclidean_gcd_recursive(a: int, b: int) -> int:\n \"\"\"\n Recursive method for euclicedan gcd algorithm\n\n Examples:\n >>> euclidean_gcd_recursive(3, 5)\n 1\n\n >>> euclidean_gcd_recursive(6, 3)\n 3\n \"\"\"\n return a if b == 0 else euclidean_gcd_recursive(b, a % b)\n\n\ndef main():\n print(f\"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}\")\n print(f\"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}\")\n print(f\"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}\")\n print(f\"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}\")\n print(f\"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}\")\n\n print(f\"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}\")\n print(f\"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}\")\n print(f\"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}\")\n print(f\"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}\")\n print(f\"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}\")\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "maths/euclidean_gcd.py"}], "after_files": [{"content": null, "path": "maths/euclidean_gcd.py"}]} | 931 | 572 |
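Aside on the record above: for non-negative integers the deleted iterative `euclidean_gcd` agrees with Python's built-in `math.gcd`, which is one reason a single implementation in the repository suffices. The check below is an editorial sketch, not taken from the original files.

```python
# Sanity check over a small range of non-negative inputs.
import math


def euclidean_gcd(a: int, b: int) -> int:
    while b:
        a, b = b, a % b
    return a


assert all(
    euclidean_gcd(a, b) == math.gcd(a, b)
    for a in range(30)
    for b in range(30)
)
```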
gh_patches_debug_1750 | rasdani/github-patches | git_diff | locustio__locust-1839 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
OOM error with master/slaves setup (zeromq, windows)
Hi !
### Describe the bug
An out-of-memory error occurs with ZeroMQ trying to allocate a crazy amount of memory in decoded_allocator, sometimes up to several petabytes. This might very well be a ZeroMQ bug:
` OUT OF MEMORY (bundled\zeromq\src\decoder_allocators.cpp:89)`
I added some logs and recompiled pyzmq to check what's going on. Upon further investigation, _max_counters seems to take a crazy value at some point. See [zmq_logs.txt](https://github.com/locustio/locust/files/4618065/zmq_logs.txt)
As you can see, allocator instance 0x0000016A9270F700 is constructed with _max_counters=249, but before crash its value has changed to 1557249601288, which causes a malloc of several terabytes.
### Steps to reproduce
Sorry, I couldn't find a surefire way to reproduce this one. It seems kind of random. It sometimes happens before the test is even started, sometimes when the test is stopped. Sometimes it doesn't happen at all. It does seem to happen more often when stopping a test in the web UI. Simply run the attached ps1 script and do some stuff in the web UI.
### Environment
- OS: Windows 10.0.18362.778
- Python version: 3.6
- Locust version: 0.14.6
- Locust files : [test_locust.zip](https://github.com/locustio/locust/files/4618016/test_locust.zip)
I managed to reproduce the bug on two computers: my work computer and my personal computer. Both are on Windows 10 with the Python 3.6 that comes with VS2017, but my personal computer has a pristine Python environment where I just ran pip install locustio.
Am I doing something I'm not supposed to?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import ast
3 import os
4 import re
5 import sys
6
7 from setuptools import find_packages, setup
8
9 ROOT_PATH = os.path.abspath(os.path.dirname(__file__))
10
11 # parse version from locust/__init__.py
12 _version_re = re.compile(r"__version__\s+=\s+(.*)")
13 _init_file = os.path.join(ROOT_PATH, "locust", "__init__.py")
14 with open(_init_file, "rb") as f:
15 version = str(ast.literal_eval(_version_re.search(f.read().decode("utf-8")).group(1)))
16
17 setup(
18 name="locust",
19 version=version,
20 install_requires=[
21 "gevent>=20.9.0",
22 "flask>=2.0.0",
23 "Werkzeug>=2.0.0",
24 "requests>=2.9.1",
25 "msgpack>=0.6.2",
26 "pyzmq>=16.0.2",
27 "geventhttpclient>=1.4.4",
28 "ConfigArgParse>=1.0",
29 "psutil>=5.6.7",
30 "Flask-BasicAuth>=0.2.0",
31 "Flask-Cors>=3.0.10",
32 "roundrobin>=0.0.2",
33 ],
34 test_suite="locust.test",
35 tests_require=[
36 "cryptography",
37 "mock",
38 "pyquery",
39 ],
40 extras_require={
41 ":sys_platform == 'win32'": ["pywin32"],
42 },
43 )
44
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -23,7 +23,7 @@
"Werkzeug>=2.0.0",
"requests>=2.9.1",
"msgpack>=0.6.2",
- "pyzmq>=16.0.2",
+ "pyzmq>=22.2.1",
"geventhttpclient>=1.4.4",
"ConfigArgParse>=1.0",
"psutil>=5.6.7",
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -23,7 +23,7 @@\n \"Werkzeug>=2.0.0\",\n \"requests>=2.9.1\",\n \"msgpack>=0.6.2\",\n- \"pyzmq>=16.0.2\",\n+ \"pyzmq>=22.2.1\",\n \"geventhttpclient>=1.4.4\",\n \"ConfigArgParse>=1.0\",\n \"psutil>=5.6.7\",\n", "issue": "OOM error with master/slaves setup (zeromq, windows)\nHi !\r\n \r\n### Describe the bug\r\nAn out of memory error occurs with ZeroMQ trying to allocate a crazy amount of memory in decoded_allocator, sometime up to several petabytes. This might very well be a ZeroMQ bug : \r\n` OUT OF MEMORY (bundled\\zeromq\\src\\decoder_allocators.cpp:89)`\r\n \r\nI added some logs and recompiled pyzmq to check what's going on. Upon further investigation, _max_counters seems to take a crazy value at some point. See [zmq_logs.txt](https://github.com/locustio/locust/files/4618065/zmq_logs.txt)\r\nAs you can see, allocator instance 0x0000016A9270F700 is constructed with _max_counters=249, but before crash its value has changed to 1557249601288, which causes a malloc of several terabytes.\r\n \r\n \r\n### Steps to reproduce\r\nSorry, I couldn't find a surefire way to reproduce this one. It seems kind of random. It sometime happens before the test is even started, sometime when the test is stopped. Sometime it doesn't happen at all. It does seem to happen more often when stopping a test in the web UI. Simply run the ps1 attached and do some stuff in the web UI.\r\n \r\n### Environment\r\n \r\n- OS: Windows 10.0.18362.778\r\n- Python version: 3.6\r\n- Locust version: 0.14.6\r\n- Locust files : [test_locust.zip](https://github.com/locustio/locust/files/4618016/test_locust.zip)\r\n \r\nI managed to repro the bug on two computers : my work computer and my personal computer. 
Both are on Windows 10/Python 3.6 that comes with VS2017, but my personal computer has a pristine python environent, just ran pip install locustio.\r\n\r\nAm I doing something I'm not supposed to ?\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport ast\nimport os\nimport re\nimport sys\n\nfrom setuptools import find_packages, setup\n\nROOT_PATH = os.path.abspath(os.path.dirname(__file__))\n\n# parse version from locust/__init__.py\n_version_re = re.compile(r\"__version__\\s+=\\s+(.*)\")\n_init_file = os.path.join(ROOT_PATH, \"locust\", \"__init__.py\")\nwith open(_init_file, \"rb\") as f:\n version = str(ast.literal_eval(_version_re.search(f.read().decode(\"utf-8\")).group(1)))\n\nsetup(\n name=\"locust\",\n version=version,\n install_requires=[\n \"gevent>=20.9.0\",\n \"flask>=2.0.0\",\n \"Werkzeug>=2.0.0\",\n \"requests>=2.9.1\",\n \"msgpack>=0.6.2\",\n \"pyzmq>=16.0.2\",\n \"geventhttpclient>=1.4.4\",\n \"ConfigArgParse>=1.0\",\n \"psutil>=5.6.7\",\n \"Flask-BasicAuth>=0.2.0\",\n \"Flask-Cors>=3.0.10\",\n \"roundrobin>=0.0.2\",\n ],\n test_suite=\"locust.test\",\n tests_require=[\n \"cryptography\",\n \"mock\",\n \"pyquery\",\n ],\n extras_require={\n \":sys_platform == 'win32'\": [\"pywin32\"],\n },\n)\n", "path": "setup.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport ast\nimport os\nimport re\nimport sys\n\nfrom setuptools import find_packages, setup\n\nROOT_PATH = os.path.abspath(os.path.dirname(__file__))\n\n# parse version from locust/__init__.py\n_version_re = re.compile(r\"__version__\\s+=\\s+(.*)\")\n_init_file = os.path.join(ROOT_PATH, \"locust\", \"__init__.py\")\nwith open(_init_file, \"rb\") as f:\n version = str(ast.literal_eval(_version_re.search(f.read().decode(\"utf-8\")).group(1)))\n\nsetup(\n name=\"locust\",\n version=version,\n install_requires=[\n \"gevent>=20.9.0\",\n \"flask>=2.0.0\",\n \"Werkzeug>=2.0.0\",\n \"requests>=2.9.1\",\n \"msgpack>=0.6.2\",\n \"pyzmq>=22.2.1\",\n \"geventhttpclient>=1.4.4\",\n \"ConfigArgParse>=1.0\",\n \"psutil>=5.6.7\",\n \"Flask-BasicAuth>=0.2.0\",\n \"Flask-Cors>=3.0.10\",\n \"roundrobin>=0.0.2\",\n ],\n test_suite=\"locust.test\",\n tests_require=[\n \"cryptography\",\n \"mock\",\n \"pyquery\",\n ],\n extras_require={\n \":sys_platform == 'win32'\": [\"pywin32\"],\n },\n)\n", "path": "setup.py"}]} | 1,139 | 127 |
gh_patches_debug_13788 | rasdani/github-patches | git_diff | pyca__cryptography-1615 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Backend loading code produces a warning with the latest setuptools
The use of `load(require=False)` (specifically the `require` kwarg) is deprecated. /cc @dstufft
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cryptography/hazmat/backends/__init__.py`
Content:
```
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7 import pkg_resources
8
9 from cryptography.hazmat.backends.multibackend import MultiBackend
10
11
12 _available_backends_list = None
13
14
15 def _available_backends():
16 global _available_backends_list
17
18 if _available_backends_list is None:
19 _available_backends_list = [
20 backend.load(require=False)
21 for backend in pkg_resources.iter_entry_points(
22 "cryptography.backends"
23 )
24 ]
25
26 return _available_backends_list
27
28 _default_backend = None
29
30
31 def default_backend():
32 global _default_backend
33
34 if _default_backend is None:
35 _default_backend = MultiBackend(_available_backends())
36
37 return _default_backend
38
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cryptography/hazmat/backends/__init__.py b/src/cryptography/hazmat/backends/__init__.py
--- a/src/cryptography/hazmat/backends/__init__.py
+++ b/src/cryptography/hazmat/backends/__init__.py
@@ -17,8 +17,13 @@
if _available_backends_list is None:
_available_backends_list = [
- backend.load(require=False)
- for backend in pkg_resources.iter_entry_points(
+ # setuptools 11.3 deprecated support for the require parameter to
+ # load(), and introduced the new resolve() method instead.
+ # This can be removed if/when we can assume setuptools>=11.3. At
+ # some point we may wish to add a warning, to push people along,
+ # but at present this would result in too many warnings.
+ ep.resolve() if hasattr(ep, "resolve") else ep.load(require=False)
+ for ep in pkg_resources.iter_entry_points(
"cryptography.backends"
)
]
| {"golden_diff": "diff --git a/src/cryptography/hazmat/backends/__init__.py b/src/cryptography/hazmat/backends/__init__.py\n--- a/src/cryptography/hazmat/backends/__init__.py\n+++ b/src/cryptography/hazmat/backends/__init__.py\n@@ -17,8 +17,13 @@\n \n if _available_backends_list is None:\n _available_backends_list = [\n- backend.load(require=False)\n- for backend in pkg_resources.iter_entry_points(\n+ # setuptools 11.3 deprecated support for the require parameter to\n+ # load(), and introduced the new resolve() method instead.\n+ # This can be removed if/when we can assume setuptools>=11.3. At\n+ # some point we may wish to add a warning, to push people along,\n+ # but at present this would result in too many warnings.\n+ ep.resolve() if hasattr(ep, \"resolve\") else ep.load(require=False)\n+ for ep in pkg_resources.iter_entry_points(\n \"cryptography.backends\"\n )\n ]\n", "issue": "Backend loading code produces a warning with the latest setuptools\nThe use `load(require=False)` (specifically the `require` kwarg) is deprecated. /cc @dstufft \n\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport pkg_resources\n\nfrom cryptography.hazmat.backends.multibackend import MultiBackend\n\n\n_available_backends_list = None\n\n\ndef _available_backends():\n global _available_backends_list\n\n if _available_backends_list is None:\n _available_backends_list = [\n backend.load(require=False)\n for backend in pkg_resources.iter_entry_points(\n \"cryptography.backends\"\n )\n ]\n\n return _available_backends_list\n\n_default_backend = None\n\n\ndef default_backend():\n global _default_backend\n\n if _default_backend is None:\n _default_backend = MultiBackend(_available_backends())\n\n return _default_backend\n", "path": "src/cryptography/hazmat/backends/__init__.py"}], "after_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport pkg_resources\n\nfrom cryptography.hazmat.backends.multibackend import MultiBackend\n\n\n_available_backends_list = None\n\n\ndef _available_backends():\n global _available_backends_list\n\n if _available_backends_list is None:\n _available_backends_list = [\n # setuptools 11.3 deprecated support for the require parameter to\n # load(), and introduced the new resolve() method instead.\n # This can be removed if/when we can assume setuptools>=11.3. At\n # some point we may wish to add a warning, to push people along,\n # but at present this would result in too many warnings.\n ep.resolve() if hasattr(ep, \"resolve\") else ep.load(require=False)\n for ep in pkg_resources.iter_entry_points(\n \"cryptography.backends\"\n )\n ]\n\n return _available_backends_list\n\n_default_backend = None\n\n\ndef default_backend():\n global _default_backend\n\n if _default_backend is None:\n _default_backend = MultiBackend(_available_backends())\n\n return _default_backend\n", "path": "src/cryptography/hazmat/backends/__init__.py"}]} | 575 | 234 |
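Aside on the patch above: the same compatibility idiom can be written as a small standalone helper, which may make the intent easier to see than the inline conditional. This is a sketch; only the entry-point group name is taken from the record, the rest is illustrative.

```python
# Prefer EntryPoint.resolve() (setuptools >= 11.3) and fall back to the
# deprecated load(require=False) on older setuptools.
import pkg_resources


def load_entry_points(group):
    loaded = []
    for ep in pkg_resources.iter_entry_points(group):
        if hasattr(ep, "resolve"):
            loaded.append(ep.resolve())
        else:
            loaded.append(ep.load(require=False))
    return loaded


backends = load_entry_points("cryptography.backends")
```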
gh_patches_debug_580 | rasdani/github-patches | git_diff | pex-tool__pex-1191 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Release 2.1.26
On the docket:
+ [x] Pex requirement parsing is tripped up by files in the CWD with the same name as requirements' project names. #1188
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pex/version.py`
Content:
```
1 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
2 # Licensed under the Apache License, Version 2.0 (see LICENSE).
3
4 __version__ = "2.1.25"
5
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pex/version.py b/pex/version.py
--- a/pex/version.py
+++ b/pex/version.py
@@ -1,4 +1,4 @@
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
-__version__ = "2.1.25"
+__version__ = "2.1.26"
| {"golden_diff": "diff --git a/pex/version.py b/pex/version.py\n--- a/pex/version.py\n+++ b/pex/version.py\n@@ -1,4 +1,4 @@\n # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n # Licensed under the Apache License, Version 2.0 (see LICENSE).\n \n-__version__ = \"2.1.25\"\n+__version__ = \"2.1.26\"\n", "issue": "Release 2.1.26\nOn the docket:\r\n+ [x] Pex requirement parsing is tripped up by files in the CWD with the same name as requirements' project names. #1188\n", "before_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.25\"\n", "path": "pex/version.py"}], "after_files": [{"content": "# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\n__version__ = \"2.1.26\"\n", "path": "pex/version.py"}]} | 355 | 96 |
gh_patches_debug_2211 | rasdani/github-patches | git_diff | rasterio__rasterio-883 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Single int indexes param in sample method
According to the docs, the `indexes` param in the `sample` method can be a "list of ints or a single int".
However, passing a single int raises this exception: `IndexError: too many indices for array`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rasterio/sample.py`
Content:
```
1 # Workaround for issue #378. A pure Python generator.
2
3 def sample_gen(dataset, xy, indexes=None):
4 index = dataset.index
5 read = dataset.read
6 for x, y in xy:
7 r, c = index(x, y)
8 window = ((r, r+1), (c, c+1))
9 data = read(indexes, window=window, masked=False, boundless=True)
10 yield data[:,0,0]
11
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/rasterio/sample.py b/rasterio/sample.py
--- a/rasterio/sample.py
+++ b/rasterio/sample.py
@@ -3,6 +3,10 @@
def sample_gen(dataset, xy, indexes=None):
index = dataset.index
read = dataset.read
+
+ if isinstance(indexes, int):
+ indexes = [indexes]
+
for x, y in xy:
r, c = index(x, y)
window = ((r, r+1), (c, c+1))
| {"golden_diff": "diff --git a/rasterio/sample.py b/rasterio/sample.py\n--- a/rasterio/sample.py\n+++ b/rasterio/sample.py\n@@ -3,6 +3,10 @@\n def sample_gen(dataset, xy, indexes=None):\n index = dataset.index\n read = dataset.read\n+\n+ if isinstance(indexes, int):\n+ indexes = [indexes]\n+\n for x, y in xy:\n r, c = index(x, y)\n window = ((r, r+1), (c, c+1))\n", "issue": "Single int indexes param in sample method\nAccording to docs the `indexes` param in the `sample` method can be a \"list of ints or a single int\".\n\nHowever passing a single int raises this exception: `IndexError: too many indices for array`.\n\n", "before_files": [{"content": "# Workaround for issue #378. A pure Python generator.\n\ndef sample_gen(dataset, xy, indexes=None):\n index = dataset.index\n read = dataset.read\n for x, y in xy:\n r, c = index(x, y)\n window = ((r, r+1), (c, c+1))\n data = read(indexes, window=window, masked=False, boundless=True)\n yield data[:,0,0]\n", "path": "rasterio/sample.py"}], "after_files": [{"content": "# Workaround for issue #378. A pure Python generator.\n\ndef sample_gen(dataset, xy, indexes=None):\n index = dataset.index\n read = dataset.read\n\n if isinstance(indexes, int):\n indexes = [indexes]\n\n for x, y in xy:\n r, c = index(x, y)\n window = ((r, r+1), (c, c+1))\n data = read(indexes, window=window, masked=False, boundless=True)\n yield data[:,0,0]\n", "path": "rasterio/sample.py"}]} | 425 | 118 |
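Aside on the patch above: with `indexes` normalized to a list, a single band index and an explicit list behave the same way. The sketch below is illustrative only — the file path and coordinates are placeholders, and the raster is assumed to have at least two bands.

```python
import rasterio
from rasterio.sample import sample_gen

with rasterio.open("example.tif") as src:  # placeholder path
    xy = [(src.bounds.left + 10, src.bounds.top - 10)]
    single = list(sample_gen(src, xy, indexes=1))        # previously raised IndexError
    several = list(sample_gen(src, xy, indexes=[1, 2]))  # an explicit list still works
```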
gh_patches_debug_62161 | rasdani/github-patches | git_diff | Parsl__parsl-3431 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Radical Pilot test failure in CI
**Describe the bug**
Since around Friday (according to @WardLT ), Parsl CI has been failing with this radical failure:
```
parsl/tests/conftest.py:180:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
parsl/tests/configs/local_radical.py:4: in <module>
from parsl.executors.radical import RadicalPilotExecutor
parsl/executors/radical/__init__.py:1: in <module>
from parsl.executors.radical.executor import RadicalPilotExecutor
parsl/executors/radical/executor.py:20: in <module>
from .rpex_resources import ResourceConfig
parsl/executors/radical/rpex_resources.py:8: in <module>
import radical.pilot as rp
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
__copyright__ = "Copyright 2013-2014, http://radical.rutgers.edu/"
__license__ = "MIT"
# ------------------------------------------------------------------------------
# we *first* import radical.utils, so that the monkeypatching of the logger has
# a chance to kick in before the logging module is pulled by any other 3rd party
# module, and also to monkeypatch `os.fork()` for the `atfork` functionality
import radical.utils as _ru
# ------------------------------------------------------------------------------
# constants and types
from .states import *
from .constants import *
# ------------------------------------------------------------------------------
# import API
from .session import Session
from .proxy import Proxy
from .task_manager import TaskManager
from .task import Task
from .raptor_tasks import RaptorMaster, RaptorWorker
from .pytask import PythonTask
from .task_description import TaskDescription
from .task_description import TASK_EXECUTABLE
from .task_description import TASK_METH, TASK_METHOD
from .task_description import TASK_FUNC, TASK_FUNCTION
from .task_description import TASK_EXEC, TASK_EVAL
from .task_description import TASK_PROC, TASK_SHELL
from .task_description import RAPTOR_MASTER, RAPTOR_WORKER
from .task_description import AGENT_SERVICE
from .resource_config import ResourceConfig
from .pilot_manager import PilotManager
from .pilot import Pilot
from .pilot_description import PilotDescription
pythontask = PythonTask.pythontask
# ------------------------------------------------------------------------------
# make submodules available -- mostly for internal use
from . import utils
from . import tmgr
from . import pmgr
from . import agent
from .agent import Agent_0
from .agent import Agent_n
from .raptor import Master, Worker
# ------------------------------------------------------------------------------
#
# get version info
#
import os as _os
> version_short, version_detail, version_base, version_branch, \
sdist_name, sdist_path = _ru.get_version(_os.path.dirname(__file__))
E ValueError: not enough values to unpack (expected 6, got 5)
.venv/lib/python3.11/site-packages/radical/pilot/__init__.py:62: ValueError
```
cc @AymenFJA @andre-merzky
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from setuptools import setup, find_packages
2
3 with open('parsl/version.py') as f:
4 exec(f.read())
5
6 with open('requirements.txt') as f:
7 install_requires = f.readlines()
8
9 extras_require = {
10 'monitoring' : [
11 'sqlalchemy>=1.4,<2'
12 ],
13 'visualization' : [
14 'pydot',
15 'networkx>=2.5,<2.6',
16 'Flask>=1.0.2',
17 'flask_sqlalchemy',
18 'pandas<2.2',
19 'plotly',
20 'python-daemon'
21 ],
22 'aws' : ['boto3'],
23 'kubernetes' : ['kubernetes'],
24 'oauth_ssh' : ['oauth-ssh>=0.9'],
25 'docs' : [
26 'ipython<=8.6.0',
27 'nbsphinx',
28 'sphinx>=7.1,<7.2', # 7.2 requires python 3.9+
29 'sphinx_rtd_theme'
30 ],
31 'google_cloud' : ['google-auth', 'google-api-python-client'],
32 'gssapi' : ['python-gssapi'],
33 'azure' : ['azure<=4', 'msrestazure'],
34 'workqueue': ['work_queue'],
35 'flux': ['pyyaml', 'cffi', 'jsonschema'],
36 'proxystore': ['proxystore'],
37 'radical-pilot': ['radical.pilot==1.52.1'],
38 # Disabling psi-j since github direct links are not allowed by pypi
39 # 'psij': ['psi-j-parsl@git+https://github.com/ExaWorks/psi-j-parsl']
40 }
41 extras_require['all'] = sum(extras_require.values(), [])
42
43 setup(
44 name='parsl',
45 version=VERSION,
46 description='Simple data dependent workflows in Python',
47 long_description='Simple parallel workflows system for Python',
48 url='https://github.com/Parsl/parsl',
49 author='The Parsl Team',
50 author_email='[email protected]',
51 license='Apache 2.0',
52 download_url='https://github.com/Parsl/parsl/archive/{}.tar.gz'.format(VERSION),
53 include_package_data=True,
54 package_data={'parsl': ['py.typed']},
55 packages=find_packages(),
56 python_requires=">=3.8.0",
57 install_requires=install_requires,
58 scripts = ['parsl/executors/high_throughput/process_worker_pool.py',
59 'parsl/executors/workqueue/exec_parsl_function.py',
60 'parsl/executors/workqueue/parsl_coprocess.py',
61 ],
62
63 extras_require=extras_require,
64 classifiers=[
65 # Maturity
66 'Development Status :: 5 - Production/Stable',
67 # Intended audience
68 'Intended Audience :: Developers',
69 # Licence, must match with licence above
70 'License :: OSI Approved :: Apache Software License',
71 # Python versions supported
72 'Programming Language :: Python :: 3.8',
73 'Programming Language :: Python :: 3.9',
74 'Programming Language :: Python :: 3.10',
75 'Programming Language :: Python :: 3.11',
76 'Programming Language :: Python :: 3.12',
77 ],
78 keywords=['Workflows', 'Scientific computing'],
79 entry_points={'console_scripts':
80 [
81 'parsl-globus-auth=parsl.data_provider.globus:cli_run',
82 'parsl-visualize=parsl.monitoring.visualization.app:cli_run',
83 'parsl-perf=parsl.benchmark.perf:cli_run',
84 ]}
85 )
86
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -34,7 +34,7 @@
'workqueue': ['work_queue'],
'flux': ['pyyaml', 'cffi', 'jsonschema'],
'proxystore': ['proxystore'],
- 'radical-pilot': ['radical.pilot==1.52.1'],
+ 'radical-pilot': ['radical.pilot==1.52.1', 'radical.utils==1.52'],
# Disabling psi-j since github direct links are not allowed by pypi
# 'psij': ['psi-j-parsl@git+https://github.com/ExaWorks/psi-j-parsl']
}
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -34,7 +34,7 @@\n 'workqueue': ['work_queue'],\n 'flux': ['pyyaml', 'cffi', 'jsonschema'],\n 'proxystore': ['proxystore'],\n- 'radical-pilot': ['radical.pilot==1.52.1'],\n+ 'radical-pilot': ['radical.pilot==1.52.1', 'radical.utils==1.52'],\n # Disabling psi-j since github direct links are not allowed by pypi\n # 'psij': ['psi-j-parsl@git+https://github.com/ExaWorks/psi-j-parsl']\n }\n", "issue": "Radical Pilot test failure in CI\n**Describe the bug**\r\nSince around Friday (according to @WardLT ), Parsl CI has been failing with this radical failure:\r\n\r\n```\r\nparsl/tests/conftest.py:180: \r\n_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ \r\nparsl/tests/configs/local_radical.py:4: in <module>\r\n from parsl.executors.radical import RadicalPilotExecutor\r\nparsl/executors/radical/__init__.py:1: in <module>\r\n from parsl.executors.radical.executor import RadicalPilotExecutor\r\nparsl/executors/radical/executor.py:20: in <module>\r\n from .rpex_resources import ResourceConfig\r\nparsl/executors/radical/rpex_resources.py:8: in <module>\r\n import radical.pilot as rp\r\n_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ \r\n\r\n \r\n __copyright__ = \"Copyright 2013-2014, http://radical.rutgers.edu/\"\r\n __license__ = \"MIT\"\r\n \r\n # ------------------------------------------------------------------------------\r\n # we *first* import radical.utils, so that the monkeypatching of the logger has\r\n # a chance to kick in before the logging module is pulled by any other 3rd party\r\n # module, and also to monkeypatch `os.fork()` for the `atfork` functionality\r\n import radical.utils as _ru\r\n \r\n # ------------------------------------------------------------------------------\r\n # constants and types\r\n from .states import *\r\n from .constants import *\r\n \r\n \r\n # ------------------------------------------------------------------------------\r\n # import API\r\n from .session import Session\r\n from .proxy import Proxy\r\n \r\n from .task_manager import TaskManager\r\n from .task import Task\r\n from .raptor_tasks import RaptorMaster, RaptorWorker\r\n from .pytask import PythonTask\r\n from .task_description import TaskDescription\r\n from .task_description import TASK_EXECUTABLE\r\n from .task_description import TASK_METH, TASK_METHOD\r\n from .task_description import TASK_FUNC, TASK_FUNCTION\r\n from .task_description import TASK_EXEC, TASK_EVAL\r\n from .task_description import TASK_PROC, TASK_SHELL\r\n from .task_description import RAPTOR_MASTER, RAPTOR_WORKER\r\n from .task_description import AGENT_SERVICE\r\n from .resource_config import ResourceConfig\r\n \r\n from .pilot_manager import PilotManager\r\n from .pilot import Pilot\r\n from .pilot_description import PilotDescription\r\n \r\n pythontask = PythonTask.pythontask\r\n \r\n \r\n # ------------------------------------------------------------------------------\r\n # make submodules available -- mostly for internal use\r\n from . import utils\r\n from . import tmgr\r\n from . import pmgr\r\n from . 
import agent\r\n \r\n from .agent import Agent_0\r\n from .agent import Agent_n\r\n \r\n from .raptor import Master, Worker\r\n \r\n \r\n # ------------------------------------------------------------------------------\r\n #\r\n # get version info\r\n #\r\n import os as _os\r\n \r\n> version_short, version_detail, version_base, version_branch, \\\r\n sdist_name, sdist_path = _ru.get_version(_os.path.dirname(__file__))\r\nE ValueError: not enough values to unpack (expected 6, got 5)\r\n\r\n.venv/lib/python3.11/site-packages/radical/pilot/__init__.py:62: ValueError\r\n```\r\n\r\ncc @AymenFJA @andre-merzky\n", "before_files": [{"content": "from setuptools import setup, find_packages\n\nwith open('parsl/version.py') as f:\n exec(f.read())\n\nwith open('requirements.txt') as f:\n install_requires = f.readlines()\n\nextras_require = {\n 'monitoring' : [\n 'sqlalchemy>=1.4,<2'\n ],\n 'visualization' : [\n 'pydot',\n 'networkx>=2.5,<2.6',\n 'Flask>=1.0.2',\n 'flask_sqlalchemy',\n 'pandas<2.2',\n 'plotly',\n 'python-daemon'\n ],\n 'aws' : ['boto3'],\n 'kubernetes' : ['kubernetes'],\n 'oauth_ssh' : ['oauth-ssh>=0.9'],\n 'docs' : [\n 'ipython<=8.6.0',\n 'nbsphinx',\n 'sphinx>=7.1,<7.2', # 7.2 requires python 3.9+\n 'sphinx_rtd_theme'\n ],\n 'google_cloud' : ['google-auth', 'google-api-python-client'],\n 'gssapi' : ['python-gssapi'],\n 'azure' : ['azure<=4', 'msrestazure'],\n 'workqueue': ['work_queue'],\n 'flux': ['pyyaml', 'cffi', 'jsonschema'],\n 'proxystore': ['proxystore'],\n 'radical-pilot': ['radical.pilot==1.52.1'],\n # Disabling psi-j since github direct links are not allowed by pypi\n # 'psij': ['psi-j-parsl@git+https://github.com/ExaWorks/psi-j-parsl']\n}\nextras_require['all'] = sum(extras_require.values(), [])\n\nsetup(\n name='parsl',\n version=VERSION,\n description='Simple data dependent workflows in Python',\n long_description='Simple parallel workflows system for Python',\n url='https://github.com/Parsl/parsl',\n author='The Parsl Team',\n author_email='[email protected]',\n license='Apache 2.0',\n download_url='https://github.com/Parsl/parsl/archive/{}.tar.gz'.format(VERSION),\n include_package_data=True,\n package_data={'parsl': ['py.typed']},\n packages=find_packages(),\n python_requires=\">=3.8.0\",\n install_requires=install_requires,\n scripts = ['parsl/executors/high_throughput/process_worker_pool.py',\n 'parsl/executors/workqueue/exec_parsl_function.py',\n 'parsl/executors/workqueue/parsl_coprocess.py',\n ],\n\n extras_require=extras_require,\n classifiers=[\n # Maturity\n 'Development Status :: 5 - Production/Stable',\n # Intended audience\n 'Intended Audience :: Developers',\n # Licence, must match with licence above\n 'License :: OSI Approved :: Apache Software License',\n # Python versions supported\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n 'Programming Language :: Python :: 3.11',\n 'Programming Language :: Python :: 3.12',\n ],\n keywords=['Workflows', 'Scientific computing'],\n entry_points={'console_scripts':\n [\n 'parsl-globus-auth=parsl.data_provider.globus:cli_run',\n 'parsl-visualize=parsl.monitoring.visualization.app:cli_run',\n 'parsl-perf=parsl.benchmark.perf:cli_run',\n ]}\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import setup, find_packages\n\nwith open('parsl/version.py') as f:\n exec(f.read())\n\nwith open('requirements.txt') as f:\n install_requires = f.readlines()\n\nextras_require = {\n 'monitoring' : [\n 'sqlalchemy>=1.4,<2'\n ],\n 
'visualization' : [\n 'pydot',\n 'networkx>=2.5,<2.6',\n 'Flask>=1.0.2',\n 'flask_sqlalchemy',\n 'pandas<2.2',\n 'plotly',\n 'python-daemon'\n ],\n 'aws' : ['boto3'],\n 'kubernetes' : ['kubernetes'],\n 'oauth_ssh' : ['oauth-ssh>=0.9'],\n 'docs' : [\n 'ipython<=8.6.0',\n 'nbsphinx',\n 'sphinx>=7.1,<7.2', # 7.2 requires python 3.9+\n 'sphinx_rtd_theme'\n ],\n 'google_cloud' : ['google-auth', 'google-api-python-client'],\n 'gssapi' : ['python-gssapi'],\n 'azure' : ['azure<=4', 'msrestazure'],\n 'workqueue': ['work_queue'],\n 'flux': ['pyyaml', 'cffi', 'jsonschema'],\n 'proxystore': ['proxystore'],\n 'radical-pilot': ['radical.pilot==1.52.1', 'radical.utils==1.52'],\n # Disabling psi-j since github direct links are not allowed by pypi\n # 'psij': ['psi-j-parsl@git+https://github.com/ExaWorks/psi-j-parsl']\n}\nextras_require['all'] = sum(extras_require.values(), [])\n\nsetup(\n name='parsl',\n version=VERSION,\n description='Simple data dependent workflows in Python',\n long_description='Simple parallel workflows system for Python',\n url='https://github.com/Parsl/parsl',\n author='The Parsl Team',\n author_email='[email protected]',\n license='Apache 2.0',\n download_url='https://github.com/Parsl/parsl/archive/{}.tar.gz'.format(VERSION),\n include_package_data=True,\n package_data={'parsl': ['py.typed']},\n packages=find_packages(),\n python_requires=\">=3.8.0\",\n install_requires=install_requires,\n scripts = ['parsl/executors/high_throughput/process_worker_pool.py',\n 'parsl/executors/workqueue/exec_parsl_function.py',\n 'parsl/executors/workqueue/parsl_coprocess.py',\n ],\n\n extras_require=extras_require,\n classifiers=[\n # Maturity\n 'Development Status :: 5 - Production/Stable',\n # Intended audience\n 'Intended Audience :: Developers',\n # Licence, must match with licence above\n 'License :: OSI Approved :: Apache Software License',\n # Python versions supported\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n 'Programming Language :: Python :: 3.11',\n 'Programming Language :: Python :: 3.12',\n ],\n keywords=['Workflows', 'Scientific computing'],\n entry_points={'console_scripts':\n [\n 'parsl-globus-auth=parsl.data_provider.globus:cli_run',\n 'parsl-visualize=parsl.monitoring.visualization.app:cli_run',\n 'parsl-perf=parsl.benchmark.perf:cli_run',\n ]}\n)\n", "path": "setup.py"}]} | 2,013 | 166 |
gh_patches_debug_13896 | rasdani/github-patches | git_diff | ARM-DOE__ACT-553 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Accessor not available in dataset
I fetched the latest updates after the lazy_loading PR and ran pytest and am seeing a lot of errors with accessors not loading. Clean, QCFilter, and QCTests are no longer available in the datasets for some reason.
FAILED test_io.py::test_io_mfdataset - AttributeError: 'Dataset' object has no attribute 'clean'
FAILED test_io.py::test_io_write - AttributeError: 'Dataset' object has no attribute 'clean'
FAILED test_io.py::test_clean_cf_qc - AttributeError: 'Dataset' object has no attribute 'clean'
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `act/qc/__init__.py`
Content:
```
1 """
2 This module contains procedures for working with QC information
3 and for applying tests to data.
4
5 """
6
7 import lazy_loader as lazy
8
9 # We need to import clean first to register the accessor
10 from .clean import *
11
12 __getattr__, __dir__, __all__ = lazy.attach(
13 __name__,
14 submodules=[
15 'add_supplemental_qc',
16 'arm',
17 'bsrn_tests',
18 'comparison_tests',
19 'qcfilter',
20 'qctests',
21 'radiometer_tests',
22 'sp2',
23 ],
24 submod_attrs={
25 'arm': ['add_dqr_to_qc'],
26 'qcfilter': ['QCFilter'],
27 'qctests': ['QCTests'],
28 'radiometer_tests': ['fft_shading_test'],
29 'bsrn_tests': ['QCTests'],
30 'comparison_tests': ['QCTests'],
31 'add_supplemental_qc': ['read_yaml_supplemental_qc'],
32 'sp2': ['SP2ParticleCriteria', 'get_waveform_statistics'],
33 },
34 )
35
```
Path: `act/__init__.py`
Content:
```
1 """
2 ACT: The Atmospheric Community Toolkit
3 ======================================
4
5 """
6
7 import lazy_loader as lazy
8 # No more pandas warnings
9 from pandas.plotting import register_matplotlib_converters
10
11 from . import tests
12 from ._version import get_versions
13
14 register_matplotlib_converters()
15
16 # Import the lazy loaded modules
17 submodules = [
18 'corrections',
19 'discovery',
20 'io',
21 'qc',
22 'utils',
23 'retrievals',
24 'plotting',
25 ]
26 __getattr__, __dir__, _ = lazy.attach(__name__, submodules)
27
28 # Version for source builds
29 vdict = get_versions()
30 __version__ = vdict['version']
31
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/act/__init__.py b/act/__init__.py
--- a/act/__init__.py
+++ b/act/__init__.py
@@ -5,11 +5,13 @@
"""
import lazy_loader as lazy
+
# No more pandas warnings
from pandas.plotting import register_matplotlib_converters
from . import tests
from ._version import get_versions
+from .qc import QCFilter, QCTests, clean
register_matplotlib_converters()
diff --git a/act/qc/__init__.py b/act/qc/__init__.py
--- a/act/qc/__init__.py
+++ b/act/qc/__init__.py
@@ -8,6 +8,8 @@
# We need to import clean first to register the accessor
from .clean import *
+from .qcfilter import QCFilter
+from .qctests import QCTests
__getattr__, __dir__, __all__ = lazy.attach(
__name__,
| {"golden_diff": "diff --git a/act/__init__.py b/act/__init__.py\n--- a/act/__init__.py\n+++ b/act/__init__.py\n@@ -5,11 +5,13 @@\n \"\"\"\n \n import lazy_loader as lazy\n+\n # No more pandas warnings\n from pandas.plotting import register_matplotlib_converters\n \n from . import tests\n from ._version import get_versions\n+from .qc import QCFilter, QCTests, clean\n \n register_matplotlib_converters()\n \ndiff --git a/act/qc/__init__.py b/act/qc/__init__.py\n--- a/act/qc/__init__.py\n+++ b/act/qc/__init__.py\n@@ -8,6 +8,8 @@\n \n # We need to import clean first to register the accessor\n from .clean import *\n+from .qcfilter import QCFilter\n+from .qctests import QCTests\n \n __getattr__, __dir__, __all__ = lazy.attach(\n __name__,\n", "issue": "Accessor not available in dataset\nI fetched the latest updates after the lazy_loading PR and ran pytest and am seeing a lot of errors with accessors not loading. Clean, QCFilter, and QCTests are no longer available in the datasets for some reason.\r\n\r\nFAILED test_io.py::test_io_mfdataset - AttributeError: 'Dataset' object has no attribute 'clean'\r\nFAILED test_io.py::test_io_write - AttributeError: 'Dataset' object has no attribute 'clean'\r\nFAILED test_io.py::test_clean_cf_qc - AttributeError: 'Dataset' object has no attribute 'clean'\n", "before_files": [{"content": "\"\"\"\nThis module contains procedures for working with QC information\nand for applying tests to data.\n\n\"\"\"\n\nimport lazy_loader as lazy\n\n# We need to import clean first to register the accessor\nfrom .clean import *\n\n__getattr__, __dir__, __all__ = lazy.attach(\n __name__,\n submodules=[\n 'add_supplemental_qc',\n 'arm',\n 'bsrn_tests',\n 'comparison_tests',\n 'qcfilter',\n 'qctests',\n 'radiometer_tests',\n 'sp2',\n ],\n submod_attrs={\n 'arm': ['add_dqr_to_qc'],\n 'qcfilter': ['QCFilter'],\n 'qctests': ['QCTests'],\n 'radiometer_tests': ['fft_shading_test'],\n 'bsrn_tests': ['QCTests'],\n 'comparison_tests': ['QCTests'],\n 'add_supplemental_qc': ['read_yaml_supplemental_qc'],\n 'sp2': ['SP2ParticleCriteria', 'get_waveform_statistics'],\n },\n)\n", "path": "act/qc/__init__.py"}, {"content": "\"\"\"\nACT: The Atmospheric Community Toolkit\n======================================\n\n\"\"\"\n\nimport lazy_loader as lazy\n# No more pandas warnings\nfrom pandas.plotting import register_matplotlib_converters\n\nfrom . 
import tests\nfrom ._version import get_versions\n\nregister_matplotlib_converters()\n\n# Import the lazy loaded modules\nsubmodules = [\n 'corrections',\n 'discovery',\n 'io',\n 'qc',\n 'utils',\n 'retrievals',\n 'plotting',\n]\n__getattr__, __dir__, _ = lazy.attach(__name__, submodules)\n\n# Version for source builds\nvdict = get_versions()\n__version__ = vdict['version']\n", "path": "act/__init__.py"}], "after_files": [{"content": "\"\"\"\nThis module contains procedures for working with QC information\nand for applying tests to data.\n\n\"\"\"\n\nimport lazy_loader as lazy\n\n# We need to import clean first to register the accessor\nfrom .clean import *\nfrom .qcfilter import QCFilter\nfrom .qctests import QCTests\n\n__getattr__, __dir__, __all__ = lazy.attach(\n __name__,\n submodules=[\n 'add_supplemental_qc',\n 'arm',\n 'bsrn_tests',\n 'comparison_tests',\n 'qcfilter',\n 'qctests',\n 'radiometer_tests',\n 'sp2',\n ],\n submod_attrs={\n 'arm': ['add_dqr_to_qc'],\n 'qcfilter': ['QCFilter'],\n 'qctests': ['QCTests'],\n 'radiometer_tests': ['fft_shading_test'],\n 'bsrn_tests': ['QCTests'],\n 'comparison_tests': ['QCTests'],\n 'add_supplemental_qc': ['read_yaml_supplemental_qc'],\n 'sp2': ['SP2ParticleCriteria', 'get_waveform_statistics'],\n },\n)\n", "path": "act/qc/__init__.py"}, {"content": "\"\"\"\nACT: The Atmospheric Community Toolkit\n======================================\n\n\"\"\"\n\nimport lazy_loader as lazy\n\n# No more pandas warnings\nfrom pandas.plotting import register_matplotlib_converters\n\nfrom . import tests\nfrom ._version import get_versions\nfrom .qc import QCFilter, QCTests, clean\n\nregister_matplotlib_converters()\n\n# Import the lazy loaded modules\nsubmodules = [\n 'corrections',\n 'discovery',\n 'io',\n 'qc',\n 'utils',\n 'retrievals',\n 'plotting',\n]\n__getattr__, __dir__, _ = lazy.attach(__name__, submodules)\n\n# Version for source builds\nvdict = get_versions()\n__version__ = vdict['version']\n", "path": "act/__init__.py"}]} | 874 | 216 |
gh_patches_debug_7560 | rasdani/github-patches | git_diff | Qiskit__qiskit-1875 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Registers cannot be index with negative integers or slices
<!-- ⚠️ If you do not respect this template, your issue will be closed -->
<!-- ⚠️ Make sure to browse the opened and closed issues to confirm this idea does not exist. -->
### What is the expected enhancement?
It would be nice if the behavior mimicked Python lists more, e.g.
```python
q = QuantumRegister(5)
q[-1]
q[-3:-1]
etc.
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `qiskit/circuit/register.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Copyright 2017, IBM.
4 #
5 # This source code is licensed under the Apache License, Version 2.0 found in
6 # the LICENSE.txt file in the root directory of this source tree.
7
8 """
9 Base register reference object.
10 """
11 import re
12 import logging
13 import itertools
14
15 from qiskit.exceptions import QiskitError, QiskitIndexError
16
17 logger = logging.getLogger(__name__)
18
19
20 class Register:
21 """Implement a generic register."""
22
23 # Counter for the number of instances in this class.
24 instances_counter = itertools.count()
25 # Prefix to use for auto naming.
26 prefix = 'reg'
27
28 def __init__(self, size, name=None):
29 """Create a new generic register.
30 """
31
32 if name is None:
33 name = '%s%i' % (self.prefix, next(self.instances_counter))
34
35 if not isinstance(name, str):
36 raise QiskitError("The circuit name should be a string "
37 "(or None for autogenerate a name).")
38
39 test = re.compile('[a-z][a-zA-Z0-9_]*')
40 if test.match(name) is None:
41 raise QiskitError("%s is an invalid OPENQASM register name." % name)
42
43 self.name = name
44 self.size = size
45 if size <= 0:
46 raise QiskitError("register size must be positive")
47
48 def __repr__(self):
49 """Return the official string representing the register."""
50 return "%s(%d, '%s')" % (self.__class__.__qualname__,
51 self.size, self.name)
52
53 def __len__(self):
54 """Return register size"""
55 return self.size
56
57 def check_range(self, j):
58 """Check that j is a valid index into self."""
59 if isinstance(j, int):
60 if j < 0 or j >= self.size:
61 raise QiskitIndexError("register index out of range")
62 elif isinstance(j, slice):
63 if j.start < 0 or j.stop >= self.size or (j.step is not None and
64 j.step <= 0):
65 raise QiskitIndexError("register index slice out of range")
66
67 def __getitem__(self, key):
68 """
69 Arg:
70 key (int|slice|list): index of the bit/qubit to be retrieved.
71
72 Returns:
73 tuple[Register, int]: a tuple in the form `(self, key)` if key is int.
74 If key is a slice, return a `list((self,key))`.
75
76 Raises:
77 QiskitError: if the `key` is not an integer.
78 QiskitIndexError: if the `key` is not in the range
79 `(0, self.size)`.
80 """
81 if not isinstance(key, (int, slice, list)):
82 raise QiskitError("expected integer or slice index into register")
83 self.check_range(key)
84 if isinstance(key, slice):
85 return [(self, ind) for ind in range(*key.indices(len(self)))]
86 elif isinstance(key, list): # list of qubit indices
87 if max(key) < len(self):
88 return [(self, ind) for ind in key]
89 else:
90 raise QiskitError('register index out of range')
91 else:
92 return self, key
93
94 def __iter__(self):
95 """
96 Returns:
97 iterator: an iterator over the bits/qubits of the register, in the
98 form `tuple (Register, int)`.
99 """
100 return zip([self]*self.size, range(self.size))
101
102 def __eq__(self, other):
103 """Two Registers are the same if they are of the same type
104 (i.e. quantum/classical), and have the same name and size.
105
106 Args:
107 other (Register): other Register
108
109 Returns:
110 bool: are self and other equal.
111 """
112 res = False
113 if type(self) is type(other) and \
114 self.name == other.name and \
115 self.size == other.size:
116 res = True
117 return res
118
119 def __hash__(self):
120 """Make object hashable, based on the name and size to hash."""
121 return hash((type(self), self.name, self.size))
122
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/qiskit/circuit/register.py b/qiskit/circuit/register.py
--- a/qiskit/circuit/register.py
+++ b/qiskit/circuit/register.py
@@ -80,6 +80,8 @@
"""
if not isinstance(key, (int, slice, list)):
raise QiskitError("expected integer or slice index into register")
+ if isinstance(key, int) and key < 0:
+ key = self.size + key
self.check_range(key)
if isinstance(key, slice):
return [(self, ind) for ind in range(*key.indices(len(self)))]
| {"golden_diff": "diff --git a/qiskit/circuit/register.py b/qiskit/circuit/register.py\n--- a/qiskit/circuit/register.py\n+++ b/qiskit/circuit/register.py\n@@ -80,6 +80,8 @@\n \"\"\"\n if not isinstance(key, (int, slice, list)):\n raise QiskitError(\"expected integer or slice index into register\")\n+ if isinstance(key, int) and key < 0:\n+ key = self.size + key\n self.check_range(key)\n if isinstance(key, slice):\n return [(self, ind) for ind in range(*key.indices(len(self)))]\n", "issue": "Registers cannot be index with negative integers or slices\n<!-- \u26a0\ufe0f If you do not respect this template, your issue will be closed -->\r\n<!-- \u26a0\ufe0f Make sure to browse the opened and closed issues to confirm this idea does not exist. -->\r\n\r\n### What is the expected enhancement?\r\n\r\nIt would be nice if the behavior mimicked python lists more. e.g.\r\n\r\n```python\r\nq = QuantumRegister(5)\r\nq[-1]\r\nq[-3:-1]\r\netc.\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright 2017, IBM.\n#\n# This source code is licensed under the Apache License, Version 2.0 found in\n# the LICENSE.txt file in the root directory of this source tree.\n\n\"\"\"\nBase register reference object.\n\"\"\"\nimport re\nimport logging\nimport itertools\n\nfrom qiskit.exceptions import QiskitError, QiskitIndexError\n\nlogger = logging.getLogger(__name__)\n\n\nclass Register:\n \"\"\"Implement a generic register.\"\"\"\n\n # Counter for the number of instances in this class.\n instances_counter = itertools.count()\n # Prefix to use for auto naming.\n prefix = 'reg'\n\n def __init__(self, size, name=None):\n \"\"\"Create a new generic register.\n \"\"\"\n\n if name is None:\n name = '%s%i' % (self.prefix, next(self.instances_counter))\n\n if not isinstance(name, str):\n raise QiskitError(\"The circuit name should be a string \"\n \"(or None for autogenerate a name).\")\n\n test = re.compile('[a-z][a-zA-Z0-9_]*')\n if test.match(name) is None:\n raise QiskitError(\"%s is an invalid OPENQASM register name.\" % name)\n\n self.name = name\n self.size = size\n if size <= 0:\n raise QiskitError(\"register size must be positive\")\n\n def __repr__(self):\n \"\"\"Return the official string representing the register.\"\"\"\n return \"%s(%d, '%s')\" % (self.__class__.__qualname__,\n self.size, self.name)\n\n def __len__(self):\n \"\"\"Return register size\"\"\"\n return self.size\n\n def check_range(self, j):\n \"\"\"Check that j is a valid index into self.\"\"\"\n if isinstance(j, int):\n if j < 0 or j >= self.size:\n raise QiskitIndexError(\"register index out of range\")\n elif isinstance(j, slice):\n if j.start < 0 or j.stop >= self.size or (j.step is not None and\n j.step <= 0):\n raise QiskitIndexError(\"register index slice out of range\")\n\n def __getitem__(self, key):\n \"\"\"\n Arg:\n key (int|slice|list): index of the bit/qubit to be retrieved.\n\n Returns:\n tuple[Register, int]: a tuple in the form `(self, key)` if key is int.\n If key is a slice, return a `list((self,key))`.\n\n Raises:\n QiskitError: if the `key` is not an integer.\n QiskitIndexError: if the `key` is not in the range\n `(0, self.size)`.\n \"\"\"\n if not isinstance(key, (int, slice, list)):\n raise QiskitError(\"expected integer or slice index into register\")\n self.check_range(key)\n if isinstance(key, slice):\n return [(self, ind) for ind in range(*key.indices(len(self)))]\n elif isinstance(key, list): # list of qubit indices\n if max(key) < len(self):\n return [(self, ind) for ind in key]\n else:\n raise 
QiskitError('register index out of range')\n else:\n return self, key\n\n def __iter__(self):\n \"\"\"\n Returns:\n iterator: an iterator over the bits/qubits of the register, in the\n form `tuple (Register, int)`.\n \"\"\"\n return zip([self]*self.size, range(self.size))\n\n def __eq__(self, other):\n \"\"\"Two Registers are the same if they are of the same type\n (i.e. quantum/classical), and have the same name and size.\n\n Args:\n other (Register): other Register\n\n Returns:\n bool: are self and other equal.\n \"\"\"\n res = False\n if type(self) is type(other) and \\\n self.name == other.name and \\\n self.size == other.size:\n res = True\n return res\n\n def __hash__(self):\n \"\"\"Make object hashable, based on the name and size to hash.\"\"\"\n return hash((type(self), self.name, self.size))\n", "path": "qiskit/circuit/register.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Copyright 2017, IBM.\n#\n# This source code is licensed under the Apache License, Version 2.0 found in\n# the LICENSE.txt file in the root directory of this source tree.\n\n\"\"\"\nBase register reference object.\n\"\"\"\nimport re\nimport logging\nimport itertools\n\nfrom qiskit.exceptions import QiskitError, QiskitIndexError\n\nlogger = logging.getLogger(__name__)\n\n\nclass Register:\n \"\"\"Implement a generic register.\"\"\"\n\n # Counter for the number of instances in this class.\n instances_counter = itertools.count()\n # Prefix to use for auto naming.\n prefix = 'reg'\n\n def __init__(self, size, name=None):\n \"\"\"Create a new generic register.\n \"\"\"\n\n if name is None:\n name = '%s%i' % (self.prefix, next(self.instances_counter))\n\n if not isinstance(name, str):\n raise QiskitError(\"The circuit name should be a string \"\n \"(or None for autogenerate a name).\")\n\n test = re.compile('[a-z][a-zA-Z0-9_]*')\n if test.match(name) is None:\n raise QiskitError(\"%s is an invalid OPENQASM register name.\" % name)\n\n self.name = name\n self.size = size\n if size <= 0:\n raise QiskitError(\"register size must be positive\")\n\n def __repr__(self):\n \"\"\"Return the official string representing the register.\"\"\"\n return \"%s(%d, '%s')\" % (self.__class__.__qualname__,\n self.size, self.name)\n\n def __len__(self):\n \"\"\"Return register size\"\"\"\n return self.size\n\n def check_range(self, j):\n \"\"\"Check that j is a valid index into self.\"\"\"\n if isinstance(j, int):\n if j < 0 or j >= self.size:\n raise QiskitIndexError(\"register index out of range\")\n elif isinstance(j, slice):\n if j.start < 0 or j.stop >= self.size or (j.step is not None and\n j.step <= 0):\n raise QiskitIndexError(\"register index slice out of range\")\n\n def __getitem__(self, key):\n \"\"\"\n Arg:\n key (int|slice|list): index of the bit/qubit to be retrieved.\n\n Returns:\n tuple[Register, int]: a tuple in the form `(self, key)` if key is int.\n If key is a slice, return a `list((self,key))`.\n\n Raises:\n QiskitError: if the `key` is not an integer.\n QiskitIndexError: if the `key` is not in the range\n `(0, self.size)`.\n \"\"\"\n if not isinstance(key, (int, slice, list)):\n raise QiskitError(\"expected integer or slice index into register\")\n if isinstance(key, int) and key < 0:\n key = self.size + key\n self.check_range(key)\n if isinstance(key, slice):\n return [(self, ind) for ind in range(*key.indices(len(self)))]\n elif isinstance(key, list): # list of qubit indices\n if max(key) < len(self):\n return [(self, ind) for ind in key]\n else:\n raise QiskitError('register index out of 
range')\n else:\n return self, key\n\n def __iter__(self):\n \"\"\"\n Returns:\n iterator: an iterator over the bits/qubits of the register, in the\n form `tuple (Register, int)`.\n \"\"\"\n return zip([self]*self.size, range(self.size))\n\n def __eq__(self, other):\n \"\"\"Two Registers are the same if they are of the same type\n (i.e. quantum/classical), and have the same name and size.\n\n Args:\n other (Register): other Register\n\n Returns:\n bool: are self and other equal.\n \"\"\"\n res = False\n if type(self) is type(other) and \\\n self.name == other.name and \\\n self.size == other.size:\n res = True\n return res\n\n def __hash__(self):\n \"\"\"Make object hashable, based on the name and size to hash.\"\"\"\n return hash((type(self), self.name, self.size))\n", "path": "qiskit/circuit/register.py"}]} | 1,539 | 136 |
gh_patches_debug_29560 | rasdani/github-patches | git_diff | mlflow__mlflow-9384 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Enable `PT027`
### Summary
- Enable [PT027](https://beta.ruff.rs/docs/rules/pytest-unittest-raises-assertion/).
- Remove `unittest-assert-raises`.
```diff
diff --git a/pylintrc b/pylintrc
index 9148d110e..342dfc943 100644
--- a/pylintrc
+++ b/pylintrc
@@ -79,7 +79,6 @@ enable=signature-differs,
# Built-in rules
# --------------
# Custom rules
- unittest-assert-raises,
lazy-builtin-import,
useless-assignment,
diff --git a/pyproject.toml b/pyproject.toml
index 6c64df56e..120e8420c 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -32,6 +32,7 @@ select = [
"PT022",
"PT023",
"PT026",
+ "PT027",
"RUF010",
"UP004",
"UP008",
```
### Notes
- Make sure to open a PR from a **non-master** branch.
- Sign off the commit using the `-s` flag when making a commit:
```sh
git commit -s -m "..."
# ^^ make sure to use this
```
- Include `#{issue_number}` (e.g. `#123`) in the PR description when opening a PR.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pylint_plugins/__init__.py`
Content:
```
1 from pylint_plugins.unittest_assert_raises import UnittestAssertRaises
2 from pylint_plugins.import_checker import ImportChecker
3 from pylint_plugins.assign_checker import AssignChecker
4
5
6 def register(linter):
7 linter.register_checker(UnittestAssertRaises(linter))
8 linter.register_checker(ImportChecker(linter))
9 linter.register_checker(AssignChecker(linter))
10
```
Path: `pylint_plugins/errors.py`
Content:
```
1 from typing import NamedTuple, Dict, Tuple
2 from functools import reduce
3
4
5 class Message(NamedTuple):
6 id: str
7 name: str
8 message: str
9 reason: str
10
11 def to_dict(self) -> Dict[str, Tuple[str, str, str]]:
12 return {self.id: (self.message, self.name, self.reason)}
13
14
15 def to_msgs(*messages: Message) -> Dict[str, Tuple[str, str, str]]:
16 return reduce(lambda x, y: {**x, **y.to_dict()}, messages, {})
17
18
19 UNITTEST_PYTEST_RAISES = Message(
20 id="W0003",
21 name="unittest-assert-raises",
22 message="Use `pytest.raises` instead of `unittest.TestCase.assertRaises`.",
23 reason="To enforce 'pytest-raises-multiple-statements' Message.",
24 )
25
26
27 LAZY_BUILTIN_IMPORT = Message(
28 id="W0007",
29 name="lazy-builtin-import",
30 message="Import built-in module(s) (%s) at the top of the file.",
31 reason="There is no reason they should be imported inside a function.",
32 )
33
34 USELESS_ASSIGNMENT = Message(
35 id="W0008",
36 name="useless-assignment",
37 message="Useless assignment. Use immediate return instead.",
38 reason="For simplicity and readability",
39 )
40
```
Path: `pylint_plugins/unittest_assert_raises.py`
Content:
```
1 import astroid
2 from pylint.interfaces import IAstroidChecker
3 from pylint.checkers import BaseChecker
4
5 from pylint_plugins.errors import UNITTEST_PYTEST_RAISES, to_msgs
6
7
8 def _is_unittest_assert_raises(node: astroid.Call):
9 return isinstance(node.func, astroid.Attribute) and (
10 node.func.as_string() in ("self.assertRaises", "self.assertRaisesRegex")
11 )
12
13
14 class UnittestAssertRaises(BaseChecker):
15 __implements__ = IAstroidChecker
16
17 name = "unittest-assert-raises"
18 msgs = to_msgs(UNITTEST_PYTEST_RAISES)
19 priority = -1
20
21 def visit_call(self, node: astroid.Call):
22 if _is_unittest_assert_raises(node):
23 self.add_message(UNITTEST_PYTEST_RAISES.name, node=node)
24
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pylint_plugins/__init__.py b/pylint_plugins/__init__.py
--- a/pylint_plugins/__init__.py
+++ b/pylint_plugins/__init__.py
@@ -1,9 +1,7 @@
-from pylint_plugins.unittest_assert_raises import UnittestAssertRaises
from pylint_plugins.import_checker import ImportChecker
from pylint_plugins.assign_checker import AssignChecker
def register(linter):
- linter.register_checker(UnittestAssertRaises(linter))
linter.register_checker(ImportChecker(linter))
linter.register_checker(AssignChecker(linter))
diff --git a/pylint_plugins/errors.py b/pylint_plugins/errors.py
--- a/pylint_plugins/errors.py
+++ b/pylint_plugins/errors.py
@@ -16,14 +16,6 @@
return reduce(lambda x, y: {**x, **y.to_dict()}, messages, {})
-UNITTEST_PYTEST_RAISES = Message(
- id="W0003",
- name="unittest-assert-raises",
- message="Use `pytest.raises` instead of `unittest.TestCase.assertRaises`.",
- reason="To enforce 'pytest-raises-multiple-statements' Message.",
-)
-
-
LAZY_BUILTIN_IMPORT = Message(
id="W0007",
name="lazy-builtin-import",
diff --git a/pylint_plugins/unittest_assert_raises.py b/pylint_plugins/unittest_assert_raises.py
deleted file mode 100644
--- a/pylint_plugins/unittest_assert_raises.py
+++ /dev/null
@@ -1,23 +0,0 @@
-import astroid
-from pylint.interfaces import IAstroidChecker
-from pylint.checkers import BaseChecker
-
-from pylint_plugins.errors import UNITTEST_PYTEST_RAISES, to_msgs
-
-
-def _is_unittest_assert_raises(node: astroid.Call):
- return isinstance(node.func, astroid.Attribute) and (
- node.func.as_string() in ("self.assertRaises", "self.assertRaisesRegex")
- )
-
-
-class UnittestAssertRaises(BaseChecker):
- __implements__ = IAstroidChecker
-
- name = "unittest-assert-raises"
- msgs = to_msgs(UNITTEST_PYTEST_RAISES)
- priority = -1
-
- def visit_call(self, node: astroid.Call):
- if _is_unittest_assert_raises(node):
- self.add_message(UNITTEST_PYTEST_RAISES.name, node=node)
| {"golden_diff": "diff --git a/pylint_plugins/__init__.py b/pylint_plugins/__init__.py\n--- a/pylint_plugins/__init__.py\n+++ b/pylint_plugins/__init__.py\n@@ -1,9 +1,7 @@\n-from pylint_plugins.unittest_assert_raises import UnittestAssertRaises\n from pylint_plugins.import_checker import ImportChecker\n from pylint_plugins.assign_checker import AssignChecker\n \n \n def register(linter):\n- linter.register_checker(UnittestAssertRaises(linter))\n linter.register_checker(ImportChecker(linter))\n linter.register_checker(AssignChecker(linter))\ndiff --git a/pylint_plugins/errors.py b/pylint_plugins/errors.py\n--- a/pylint_plugins/errors.py\n+++ b/pylint_plugins/errors.py\n@@ -16,14 +16,6 @@\n return reduce(lambda x, y: {**x, **y.to_dict()}, messages, {})\n \n \n-UNITTEST_PYTEST_RAISES = Message(\n- id=\"W0003\",\n- name=\"unittest-assert-raises\",\n- message=\"Use `pytest.raises` instead of `unittest.TestCase.assertRaises`.\",\n- reason=\"To enforce 'pytest-raises-multiple-statements' Message.\",\n-)\n-\n-\n LAZY_BUILTIN_IMPORT = Message(\n id=\"W0007\",\n name=\"lazy-builtin-import\",\ndiff --git a/pylint_plugins/unittest_assert_raises.py b/pylint_plugins/unittest_assert_raises.py\ndeleted file mode 100644\n--- a/pylint_plugins/unittest_assert_raises.py\n+++ /dev/null\n@@ -1,23 +0,0 @@\n-import astroid\n-from pylint.interfaces import IAstroidChecker\n-from pylint.checkers import BaseChecker\n-\n-from pylint_plugins.errors import UNITTEST_PYTEST_RAISES, to_msgs\n-\n-\n-def _is_unittest_assert_raises(node: astroid.Call):\n- return isinstance(node.func, astroid.Attribute) and (\n- node.func.as_string() in (\"self.assertRaises\", \"self.assertRaisesRegex\")\n- )\n-\n-\n-class UnittestAssertRaises(BaseChecker):\n- __implements__ = IAstroidChecker\n-\n- name = \"unittest-assert-raises\"\n- msgs = to_msgs(UNITTEST_PYTEST_RAISES)\n- priority = -1\n-\n- def visit_call(self, node: astroid.Call):\n- if _is_unittest_assert_raises(node):\n- self.add_message(UNITTEST_PYTEST_RAISES.name, node=node)\n", "issue": "Enable `PT027`\n### Summary\n\n- Enable [PT027](https://beta.ruff.rs/docs/rules/pytest-unittest-raises-assertion/).\r\n- Remove `unittest-assert-raises`.\r\n\r\n```diff\r\ndiff --git a/pylintrc b/pylintrc\r\nindex 9148d110e..342dfc943 100644\r\n--- a/pylintrc\r\n+++ b/pylintrc\r\n@@ -79,7 +79,6 @@ enable=signature-differs,\r\n # Built-in rules\r\n # --------------\r\n # Custom rules\r\n- unittest-assert-raises,\r\n lazy-builtin-import,\r\n useless-assignment,\r\n \r\ndiff --git a/pyproject.toml b/pyproject.toml\r\nindex 6c64df56e..120e8420c 100644\r\n--- a/pyproject.toml\r\n+++ b/pyproject.toml\r\n@@ -32,6 +32,7 @@ select = [\r\n \"PT022\",\r\n \"PT023\",\r\n \"PT026\",\r\n+ \"PT027\",\r\n \"RUF010\",\r\n \"UP004\",\r\n \"UP008\",\r\n```\n\n### Notes\n\n- Make sure to open a PR from a **non-master** branch.\r\n- Sign off the commit using the `-s` flag when making a commit:\r\n\r\n ```sh\r\n git commit -s -m \"...\"\r\n # ^^ make sure to use this\r\n ```\r\n\r\n- Include `#{issue_number}` (e.g. 
`#123`) in the PR description when opening a PR.\r\n\n", "before_files": [{"content": "from pylint_plugins.unittest_assert_raises import UnittestAssertRaises\nfrom pylint_plugins.import_checker import ImportChecker\nfrom pylint_plugins.assign_checker import AssignChecker\n\n\ndef register(linter):\n linter.register_checker(UnittestAssertRaises(linter))\n linter.register_checker(ImportChecker(linter))\n linter.register_checker(AssignChecker(linter))\n", "path": "pylint_plugins/__init__.py"}, {"content": "from typing import NamedTuple, Dict, Tuple\nfrom functools import reduce\n\n\nclass Message(NamedTuple):\n id: str\n name: str\n message: str\n reason: str\n\n def to_dict(self) -> Dict[str, Tuple[str, str, str]]:\n return {self.id: (self.message, self.name, self.reason)}\n\n\ndef to_msgs(*messages: Message) -> Dict[str, Tuple[str, str, str]]:\n return reduce(lambda x, y: {**x, **y.to_dict()}, messages, {})\n\n\nUNITTEST_PYTEST_RAISES = Message(\n id=\"W0003\",\n name=\"unittest-assert-raises\",\n message=\"Use `pytest.raises` instead of `unittest.TestCase.assertRaises`.\",\n reason=\"To enforce 'pytest-raises-multiple-statements' Message.\",\n)\n\n\nLAZY_BUILTIN_IMPORT = Message(\n id=\"W0007\",\n name=\"lazy-builtin-import\",\n message=\"Import built-in module(s) (%s) at the top of the file.\",\n reason=\"There is no reason they should be imported inside a function.\",\n)\n\nUSELESS_ASSIGNMENT = Message(\n id=\"W0008\",\n name=\"useless-assignment\",\n message=\"Useless assignment. Use immediate return instead.\",\n reason=\"For simplicity and readability\",\n)\n", "path": "pylint_plugins/errors.py"}, {"content": "import astroid\nfrom pylint.interfaces import IAstroidChecker\nfrom pylint.checkers import BaseChecker\n\nfrom pylint_plugins.errors import UNITTEST_PYTEST_RAISES, to_msgs\n\n\ndef _is_unittest_assert_raises(node: astroid.Call):\n return isinstance(node.func, astroid.Attribute) and (\n node.func.as_string() in (\"self.assertRaises\", \"self.assertRaisesRegex\")\n )\n\n\nclass UnittestAssertRaises(BaseChecker):\n __implements__ = IAstroidChecker\n\n name = \"unittest-assert-raises\"\n msgs = to_msgs(UNITTEST_PYTEST_RAISES)\n priority = -1\n\n def visit_call(self, node: astroid.Call):\n if _is_unittest_assert_raises(node):\n self.add_message(UNITTEST_PYTEST_RAISES.name, node=node)\n", "path": "pylint_plugins/unittest_assert_raises.py"}], "after_files": [{"content": "from pylint_plugins.import_checker import ImportChecker\nfrom pylint_plugins.assign_checker import AssignChecker\n\n\ndef register(linter):\n linter.register_checker(ImportChecker(linter))\n linter.register_checker(AssignChecker(linter))\n", "path": "pylint_plugins/__init__.py"}, {"content": "from typing import NamedTuple, Dict, Tuple\nfrom functools import reduce\n\n\nclass Message(NamedTuple):\n id: str\n name: str\n message: str\n reason: str\n\n def to_dict(self) -> Dict[str, Tuple[str, str, str]]:\n return {self.id: (self.message, self.name, self.reason)}\n\n\ndef to_msgs(*messages: Message) -> Dict[str, Tuple[str, str, str]]:\n return reduce(lambda x, y: {**x, **y.to_dict()}, messages, {})\n\n\nLAZY_BUILTIN_IMPORT = Message(\n id=\"W0007\",\n name=\"lazy-builtin-import\",\n message=\"Import built-in module(s) (%s) at the top of the file.\",\n reason=\"There is no reason they should be imported inside a function.\",\n)\n\nUSELESS_ASSIGNMENT = Message(\n id=\"W0008\",\n name=\"useless-assignment\",\n message=\"Useless assignment. 
Use immediate return instead.\",\n reason=\"For simplicity and readability\",\n)\n", "path": "pylint_plugins/errors.py"}, {"content": null, "path": "pylint_plugins/unittest_assert_raises.py"}]} | 1,324 | 527 |
gh_patches_debug_42048 | rasdani/github-patches | git_diff | joke2k__faker-1243 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update python_requires in setup.py
https://github.com/joke2k/faker/blob/146f205b942d15c95160df35d3e431624697d079/setup.py#L65
Finnish IBAN should be 18 characters in length
* Faker version: 4.1.1
Finnish IBAN should be 18 characters in length. Currently the returned Finnish IBAN has 20 characters.
### Steps to reproduce
```
from faker import Faker
>>> fake = Faker('fi_FI')
>>> fin_iban = fake.iban()
>>> fin_iban
'FI807370583252728936'
>>> len(fin_iban)
20
```
### Expected behavior
```
>>> len(fin_iban)
18
```
### Actual behavior
```
>>> len(fin_iban)
20
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `faker/generator.py`
Content:
```
1 import random as random_module
2 import re
3
4 _re_token = re.compile(r'\{\{(\s?)(\w+)(\s?)\}\}')
5 random = random_module.Random()
6 mod_random = random # compat with name released in 0.8
7
8
9 class Generator:
10
11 __config = {}
12
13 def __init__(self, **config):
14 self.providers = []
15 self.__config = dict(
16 list(self.__config.items()) + list(config.items()))
17 self.__random = random
18
19 def add_provider(self, provider):
20
21 if isinstance(provider, type):
22 provider = provider(self)
23
24 self.providers.insert(0, provider)
25
26 for method_name in dir(provider):
27 # skip 'private' method
28 if method_name.startswith('_'):
29 continue
30
31 faker_function = getattr(provider, method_name)
32
33 if callable(faker_function):
34 # add all faker method to generator
35 self.set_formatter(method_name, faker_function)
36
37 def provider(self, name):
38 try:
39 lst = [p for p in self.get_providers()
40 if p.__provider__ == name.lower()]
41 return lst[0]
42 except IndexError:
43 return None
44
45 def get_providers(self):
46 """Returns added providers."""
47 return self.providers
48
49 @property
50 def random(self):
51 return self.__random
52
53 @random.setter
54 def random(self, value):
55 self.__random = value
56
57 def seed_instance(self, seed=None):
58 """Calls random.seed"""
59 if self.__random == random:
60 # create per-instance random obj when first time seed_instance() is
61 # called
62 self.__random = random_module.Random()
63 self.__random.seed(seed)
64 return self
65
66 @classmethod
67 def seed(cls, seed=None):
68 random.seed(seed)
69
70 def format(self, formatter, *args, **kwargs):
71 """
72 This is a secure way to make a fake from another Provider.
73 """
74 # TODO: data export?
75 return self.get_formatter(formatter)(*args, **kwargs)
76
77 def get_formatter(self, formatter):
78 try:
79 return getattr(self, formatter)
80 except AttributeError:
81 if 'locale' in self.__config:
82 msg = 'Unknown formatter "{}" with locale "{}"'.format(
83 formatter, self.__config['locale'],
84 )
85 else:
86 raise AttributeError('Unknown formatter "{}"'.format(
87 formatter,
88 ))
89 raise AttributeError(msg)
90
91 def set_formatter(self, name, method):
92 """
93 This method adds a provider method to generator.
94 Override this method to add some decoration or logging stuff.
95 """
96 setattr(self, name, method)
97
98 def parse(self, text):
99 """
100 Replaces tokens (like '{{ tokenName }}' or '{{tokenName}}')
101 with the result from the token method call.
102 """
103 return _re_token.sub(self.__format_token, text)
104
105 def __format_token(self, matches):
106 formatter = list(matches.groups())
107 formatter[1] = str(self.format(formatter[1]))
108 return ''.join(formatter)
109
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/faker/generator.py b/faker/generator.py
--- a/faker/generator.py
+++ b/faker/generator.py
@@ -1,14 +1,16 @@
import random as random_module
import re
-_re_token = re.compile(r'\{\{(\s?)(\w+)(\s?)\}\}')
+_re_token = re.compile(r'\{\{\s*(\w+)(:\s*\w+?)?\s*\}\}')
random = random_module.Random()
mod_random = random # compat with name released in 0.8
class Generator:
- __config = {}
+ __config = {
+ 'arguments': {},
+ }
def __init__(self, **config):
self.providers = []
@@ -71,7 +73,6 @@
"""
This is a secure way to make a fake from another Provider.
"""
- # TODO: data export?
return self.get_formatter(formatter)(*args, **kwargs)
def get_formatter(self, formatter):
@@ -95,14 +96,84 @@
"""
setattr(self, name, method)
+ def set_arguments(self, group, argument, value=None):
+ """
+ Creates an argument group, with an individual argument or a dictionary
+ of arguments. Used with the Generator.parse method.
+
+ generator.set_arguments('small', 'max_value', 10)
+ generator.set_arguments('small', {'min_value': 5, 'max_value': 10})
+ """
+ if group not in self.__config['arguments']:
+ self.__config['arguments'][group] = {}
+
+ if isinstance(argument, dict):
+ self.__config['arguments'][group] = argument
+ elif not isinstance(argument, str):
+ raise ValueError("Arguments must be either a string or dictionary")
+ else:
+ self.__config['arguments'][group][argument] = value
+
+ def get_arguments(self, group, argument=None):
+ """
+ Get the value of an argument configured within a argument group, or
+ the entire group as a dictionary.
+
+ generator.get_arguments('small', 'max_value')
+ generator.get_arguments('small')
+ """
+ if group in self.__config['arguments'] and argument:
+ result = self.__config['arguments'][group].get(argument)
+ else:
+ result = self.__config['arguments'].get(group)
+
+ return result
+
+ def del_arguments(self, group, argument=None):
+ """
+ Delete an argument from an argument group or the entire
+ argument group.
+
+ generator.del_arguments('small')
+ generator.del_arguments('small', 'max_value')
+ """
+ if group in self.__config['arguments']:
+ if argument:
+ result = self.__config['arguments'][group].pop(argument)
+ else:
+ result = self.__config['arguments'].pop(group)
+ else:
+ result = None
+
+ return result
+
def parse(self, text):
"""
Replaces tokens (like '{{ tokenName }}' or '{{tokenName}}')
- with the result from the token method call.
+ with the result from the token method call. Arguments can be
+ parsed by using an argument group. '{{ tokenName:group }}'
+
+ Example:
+
+ generator.set_arguments('red_rgb', {'hue': 'red', 'color_format': 'rgb'})
+ generator.set_arguments('small', 'max_value', 10)
+
+ generator.parse('{{ color:red_rgb }} - {{ pyint:small }}')
"""
return _re_token.sub(self.__format_token, text)
def __format_token(self, matches):
- formatter = list(matches.groups())
- formatter[1] = str(self.format(formatter[1]))
- return ''.join(formatter)
+ formatter, argument_group = list(matches.groups())
+ argument_group = argument_group.lstrip(":").strip() if argument_group else ''
+
+ if argument_group:
+ try:
+ arguments = self.__config['arguments'][argument_group]
+ except KeyError:
+ raise AttributeError('Unknown argument group "{}"'.format(argument_group))
+
+ formatted = str(self.format(formatter, **arguments))
+ else:
+ formatted = str(self.format(formatter))
+
+ return ''.join(formatted)
| {"golden_diff": "diff --git a/faker/generator.py b/faker/generator.py\n--- a/faker/generator.py\n+++ b/faker/generator.py\n@@ -1,14 +1,16 @@\n import random as random_module\n import re\n \n-_re_token = re.compile(r'\\{\\{(\\s?)(\\w+)(\\s?)\\}\\}')\n+_re_token = re.compile(r'\\{\\{\\s*(\\w+)(:\\s*\\w+?)?\\s*\\}\\}')\n random = random_module.Random()\n mod_random = random # compat with name released in 0.8\n \n \n class Generator:\n \n- __config = {}\n+ __config = {\n+ 'arguments': {},\n+ }\n \n def __init__(self, **config):\n self.providers = []\n@@ -71,7 +73,6 @@\n \"\"\"\n This is a secure way to make a fake from another Provider.\n \"\"\"\n- # TODO: data export?\n return self.get_formatter(formatter)(*args, **kwargs)\n \n def get_formatter(self, formatter):\n@@ -95,14 +96,84 @@\n \"\"\"\n setattr(self, name, method)\n \n+ def set_arguments(self, group, argument, value=None):\n+ \"\"\"\n+ Creates an argument group, with an individual argument or a dictionary\n+ of arguments. Used with the Generator.parse method.\n+\n+ generator.set_arguments('small', 'max_value', 10)\n+ generator.set_arguments('small', {'min_value': 5, 'max_value': 10})\n+ \"\"\"\n+ if group not in self.__config['arguments']:\n+ self.__config['arguments'][group] = {}\n+\n+ if isinstance(argument, dict):\n+ self.__config['arguments'][group] = argument\n+ elif not isinstance(argument, str):\n+ raise ValueError(\"Arguments must be either a string or dictionary\")\n+ else:\n+ self.__config['arguments'][group][argument] = value\n+\n+ def get_arguments(self, group, argument=None):\n+ \"\"\"\n+ Get the value of an argument configured within a argument group, or\n+ the entire group as a dictionary.\n+\n+ generator.get_arguments('small', 'max_value')\n+ generator.get_arguments('small')\n+ \"\"\"\n+ if group in self.__config['arguments'] and argument:\n+ result = self.__config['arguments'][group].get(argument)\n+ else:\n+ result = self.__config['arguments'].get(group)\n+\n+ return result\n+\n+ def del_arguments(self, group, argument=None):\n+ \"\"\"\n+ Delete an argument from an argument group or the entire\n+ argument group.\n+\n+ generator.del_arguments('small')\n+ generator.del_arguments('small', 'max_value')\n+ \"\"\"\n+ if group in self.__config['arguments']:\n+ if argument:\n+ result = self.__config['arguments'][group].pop(argument)\n+ else:\n+ result = self.__config['arguments'].pop(group)\n+ else:\n+ result = None\n+\n+ return result\n+\n def parse(self, text):\n \"\"\"\n Replaces tokens (like '{{ tokenName }}' or '{{tokenName}}')\n- with the result from the token method call.\n+ with the result from the token method call. Arguments can be\n+ parsed by using an argument group. 
'{{ tokenName:group }}'\n+\n+ Example:\n+\n+ generator.set_arguments('red_rgb', {'hue': 'red', 'color_format': 'rgb'})\n+ generator.set_arguments('small', 'max_value', 10)\n+\n+ generator.parse('{{ color:red_rgb }} - {{ pyint:small }}')\n \"\"\"\n return _re_token.sub(self.__format_token, text)\n \n def __format_token(self, matches):\n- formatter = list(matches.groups())\n- formatter[1] = str(self.format(formatter[1]))\n- return ''.join(formatter)\n+ formatter, argument_group = list(matches.groups())\n+ argument_group = argument_group.lstrip(\":\").strip() if argument_group else ''\n+\n+ if argument_group:\n+ try:\n+ arguments = self.__config['arguments'][argument_group]\n+ except KeyError:\n+ raise AttributeError('Unknown argument group \"{}\"'.format(argument_group))\n+\n+ formatted = str(self.format(formatter, **arguments))\n+ else:\n+ formatted = str(self.format(formatter))\n+\n+ return ''.join(formatted)\n", "issue": "Update python_requires in setup.py\nhttps://github.com/joke2k/faker/blob/146f205b942d15c95160df35d3e431624697d079/setup.py#L65\nFinnish IBAN should be 18 characters of length\n* Faker version: 4.1.1\r\n\r\nFinnish IBAN should be 18 characters of length. Currently returned Finnish IBAN has 20 characters.\r\n\r\n### Steps to reproduce\r\n\r\n```\r\nfrom faker import Faker\r\n>>> fake = Faker('fi_FI')\r\n>>> fin_iban = fake.iban()\r\n>>> fin_iban\r\n'FI807370583252728936'\r\n>>> len(fin_iban)\r\n20\r\n```\r\n\r\n### Expected behavior\r\n\r\n```\r\n>>> len(fin_iban)\r\n18\r\n```\r\n\r\n### Actual behavior\r\n\r\n```\r\n>>> len(fin_iban)\r\n20\r\n```\r\n\n", "before_files": [{"content": "import random as random_module\nimport re\n\n_re_token = re.compile(r'\\{\\{(\\s?)(\\w+)(\\s?)\\}\\}')\nrandom = random_module.Random()\nmod_random = random # compat with name released in 0.8\n\n\nclass Generator:\n\n __config = {}\n\n def __init__(self, **config):\n self.providers = []\n self.__config = dict(\n list(self.__config.items()) + list(config.items()))\n self.__random = random\n\n def add_provider(self, provider):\n\n if isinstance(provider, type):\n provider = provider(self)\n\n self.providers.insert(0, provider)\n\n for method_name in dir(provider):\n # skip 'private' method\n if method_name.startswith('_'):\n continue\n\n faker_function = getattr(provider, method_name)\n\n if callable(faker_function):\n # add all faker method to generator\n self.set_formatter(method_name, faker_function)\n\n def provider(self, name):\n try:\n lst = [p for p in self.get_providers()\n if p.__provider__ == name.lower()]\n return lst[0]\n except IndexError:\n return None\n\n def get_providers(self):\n \"\"\"Returns added providers.\"\"\"\n return self.providers\n\n @property\n def random(self):\n return self.__random\n\n @random.setter\n def random(self, value):\n self.__random = value\n\n def seed_instance(self, seed=None):\n \"\"\"Calls random.seed\"\"\"\n if self.__random == random:\n # create per-instance random obj when first time seed_instance() is\n # called\n self.__random = random_module.Random()\n self.__random.seed(seed)\n return self\n\n @classmethod\n def seed(cls, seed=None):\n random.seed(seed)\n\n def format(self, formatter, *args, **kwargs):\n \"\"\"\n This is a secure way to make a fake from another Provider.\n \"\"\"\n # TODO: data export?\n return self.get_formatter(formatter)(*args, **kwargs)\n\n def get_formatter(self, formatter):\n try:\n return getattr(self, formatter)\n except AttributeError:\n if 'locale' in self.__config:\n msg = 'Unknown formatter \"{}\" with locale 
\"{}\"'.format(\n formatter, self.__config['locale'],\n )\n else:\n raise AttributeError('Unknown formatter \"{}\"'.format(\n formatter,\n ))\n raise AttributeError(msg)\n\n def set_formatter(self, name, method):\n \"\"\"\n This method adds a provider method to generator.\n Override this method to add some decoration or logging stuff.\n \"\"\"\n setattr(self, name, method)\n\n def parse(self, text):\n \"\"\"\n Replaces tokens (like '{{ tokenName }}' or '{{tokenName}}')\n with the result from the token method call.\n \"\"\"\n return _re_token.sub(self.__format_token, text)\n\n def __format_token(self, matches):\n formatter = list(matches.groups())\n formatter[1] = str(self.format(formatter[1]))\n return ''.join(formatter)\n", "path": "faker/generator.py"}], "after_files": [{"content": "import random as random_module\nimport re\n\n_re_token = re.compile(r'\\{\\{\\s*(\\w+)(:\\s*\\w+?)?\\s*\\}\\}')\nrandom = random_module.Random()\nmod_random = random # compat with name released in 0.8\n\n\nclass Generator:\n\n __config = {\n 'arguments': {},\n }\n\n def __init__(self, **config):\n self.providers = []\n self.__config = dict(\n list(self.__config.items()) + list(config.items()))\n self.__random = random\n\n def add_provider(self, provider):\n\n if isinstance(provider, type):\n provider = provider(self)\n\n self.providers.insert(0, provider)\n\n for method_name in dir(provider):\n # skip 'private' method\n if method_name.startswith('_'):\n continue\n\n faker_function = getattr(provider, method_name)\n\n if callable(faker_function):\n # add all faker method to generator\n self.set_formatter(method_name, faker_function)\n\n def provider(self, name):\n try:\n lst = [p for p in self.get_providers()\n if p.__provider__ == name.lower()]\n return lst[0]\n except IndexError:\n return None\n\n def get_providers(self):\n \"\"\"Returns added providers.\"\"\"\n return self.providers\n\n @property\n def random(self):\n return self.__random\n\n @random.setter\n def random(self, value):\n self.__random = value\n\n def seed_instance(self, seed=None):\n \"\"\"Calls random.seed\"\"\"\n if self.__random == random:\n # create per-instance random obj when first time seed_instance() is\n # called\n self.__random = random_module.Random()\n self.__random.seed(seed)\n return self\n\n @classmethod\n def seed(cls, seed=None):\n random.seed(seed)\n\n def format(self, formatter, *args, **kwargs):\n \"\"\"\n This is a secure way to make a fake from another Provider.\n \"\"\"\n return self.get_formatter(formatter)(*args, **kwargs)\n\n def get_formatter(self, formatter):\n try:\n return getattr(self, formatter)\n except AttributeError:\n if 'locale' in self.__config:\n msg = 'Unknown formatter \"{}\" with locale \"{}\"'.format(\n formatter, self.__config['locale'],\n )\n else:\n raise AttributeError('Unknown formatter \"{}\"'.format(\n formatter,\n ))\n raise AttributeError(msg)\n\n def set_formatter(self, name, method):\n \"\"\"\n This method adds a provider method to generator.\n Override this method to add some decoration or logging stuff.\n \"\"\"\n setattr(self, name, method)\n\n def set_arguments(self, group, argument, value=None):\n \"\"\"\n Creates an argument group, with an individual argument or a dictionary\n of arguments. 
Used with the Generator.parse method.\n\n generator.set_arguments('small', 'max_value', 10)\n generator.set_arguments('small', {'min_value': 5, 'max_value': 10})\n \"\"\"\n if group not in self.__config['arguments']:\n self.__config['arguments'][group] = {}\n\n if isinstance(argument, dict):\n self.__config['arguments'][group] = argument\n elif not isinstance(argument, str):\n raise ValueError(\"Arguments must be either a string or dictionary\")\n else:\n self.__config['arguments'][group][argument] = value\n\n def get_arguments(self, group, argument=None):\n \"\"\"\n Get the value of an argument configured within a argument group, or\n the entire group as a dictionary.\n\n generator.get_arguments('small', 'max_value')\n generator.get_arguments('small')\n \"\"\"\n if group in self.__config['arguments'] and argument:\n result = self.__config['arguments'][group].get(argument)\n else:\n result = self.__config['arguments'].get(group)\n\n return result\n\n def del_arguments(self, group, argument=None):\n \"\"\"\n Delete an argument from an argument group or the entire\n argument group.\n\n generator.del_arguments('small')\n generator.del_arguments('small', 'max_value')\n \"\"\"\n if group in self.__config['arguments']:\n if argument:\n result = self.__config['arguments'][group].pop(argument)\n else:\n result = self.__config['arguments'].pop(group)\n else:\n result = None\n\n return result\n\n def parse(self, text):\n \"\"\"\n Replaces tokens (like '{{ tokenName }}' or '{{tokenName}}')\n with the result from the token method call. Arguments can be\n parsed by using an argument group. '{{ tokenName:group }}'\n\n Example:\n\n generator.set_arguments('red_rgb', {'hue': 'red', 'color_format': 'rgb'})\n generator.set_arguments('small', 'max_value', 10)\n\n generator.parse('{{ color:red_rgb }} - {{ pyint:small }}')\n \"\"\"\n return _re_token.sub(self.__format_token, text)\n\n def __format_token(self, matches):\n formatter, argument_group = list(matches.groups())\n argument_group = argument_group.lstrip(\":\").strip() if argument_group else ''\n\n if argument_group:\n try:\n arguments = self.__config['arguments'][argument_group]\n except KeyError:\n raise AttributeError('Unknown argument group \"{}\"'.format(argument_group))\n\n formatted = str(self.format(formatter, **arguments))\n else:\n formatted = str(self.format(formatter))\n\n return ''.join(formatted)\n", "path": "faker/generator.py"}]} | 1,352 | 960 |
gh_patches_debug_9667 | rasdani/github-patches | git_diff | comic__grand-challenge.org-3259 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unsupported upload interface type breaks later file uploads widgets
I have a reader study with a 3d-liver-model interface (.obj kind). The following likely also extends to archive items.
We don't support uploading these via the UI.
However, when one of these interfaces is present all subsequent file upload widgets are not loaded on page: https://grand-challenge.org/reader-studies/chris-test-reader-study-the-second-coming/display-sets/create-single/
I've since removed the display set with the interface to quickly do a workaround.
Not sure if this needs to be fixed or is so corner case that we can safely ignore it.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/grandchallenge/uploads/widgets.py`
Content:
```
1 from django.forms.widgets import HiddenInput, MultipleHiddenInput
2
3
4 class UserUploadWidgetMixin:
5 template_name = "uploads/widget.html"
6 input_type = None
7
8 def __init__(self, *args, allowed_file_types=None, **kwargs):
9 super().__init__(*args, **kwargs)
10 self.allowed_file_types = allowed_file_types
11
12 def get_context(self, *args, **kwargs):
13 context = super().get_context(*args, **kwargs)
14 context["widget"]["allowed_file_types"] = {
15 "id": f"{context['widget']['attrs']['id']}AllowedFileTypes",
16 "value": self.allowed_file_types,
17 }
18 return context
19
20 class Media:
21 css = {"all": ("vendored/uppy/uppy.min.css",)}
22 js = (
23 "vendored/uppy/uppy.min.js",
24 "js/user_upload.js",
25 )
26
27
28 class UserUploadSingleWidget(UserUploadWidgetMixin, HiddenInput):
29 pass
30
31
32 class UserUploadMultipleWidget(UserUploadWidgetMixin, MultipleHiddenInput):
33 def get_context(self, name, value, attrs):
34 context = super().get_context(name, value, attrs)
35 context["widget"]["attrs"]["multiple"] = True
36 return context
37
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/app/grandchallenge/uploads/widgets.py b/app/grandchallenge/uploads/widgets.py
--- a/app/grandchallenge/uploads/widgets.py
+++ b/app/grandchallenge/uploads/widgets.py
@@ -11,8 +11,10 @@
def get_context(self, *args, **kwargs):
context = super().get_context(*args, **kwargs)
+ widget_id = f'X_{context["widget"]["attrs"]["id"]}'
+ context["widget"]["attrs"]["id"] = widget_id
context["widget"]["allowed_file_types"] = {
- "id": f"{context['widget']['attrs']['id']}AllowedFileTypes",
+ "id": f"{widget_id}AllowedFileTypes",
"value": self.allowed_file_types,
}
return context
| {"golden_diff": "diff --git a/app/grandchallenge/uploads/widgets.py b/app/grandchallenge/uploads/widgets.py\n--- a/app/grandchallenge/uploads/widgets.py\n+++ b/app/grandchallenge/uploads/widgets.py\n@@ -11,8 +11,10 @@\n \n def get_context(self, *args, **kwargs):\n context = super().get_context(*args, **kwargs)\n+ widget_id = f'X_{context[\"widget\"][\"attrs\"][\"id\"]}'\n+ context[\"widget\"][\"attrs\"][\"id\"] = widget_id\n context[\"widget\"][\"allowed_file_types\"] = {\n- \"id\": f\"{context['widget']['attrs']['id']}AllowedFileTypes\",\n+ \"id\": f\"{widget_id}AllowedFileTypes\",\n \"value\": self.allowed_file_types,\n }\n return context\n", "issue": "Unsupported upload interface type breaks later file uploads widgets\nI have a reader study with a 3d-liver-model interface (.obj kind). The following likely also extends to archive items.\r\n\r\nWe don't support uploading these via the UI.\r\n\r\nHowever, when one of these interfaces is present all subsequent file upload widgets are not loaded on page: https://grand-challenge.org/reader-studies/chris-test-reader-study-the-second-coming/display-sets/create-single/\r\n\r\n\r\n\r\nI've since removed the display set with the interface to quickly do a workaround.\r\n\r\nNot sure if this needs to be fixed or is so corner case that we can safely ignore it.\n", "before_files": [{"content": "from django.forms.widgets import HiddenInput, MultipleHiddenInput\n\n\nclass UserUploadWidgetMixin:\n template_name = \"uploads/widget.html\"\n input_type = None\n\n def __init__(self, *args, allowed_file_types=None, **kwargs):\n super().__init__(*args, **kwargs)\n self.allowed_file_types = allowed_file_types\n\n def get_context(self, *args, **kwargs):\n context = super().get_context(*args, **kwargs)\n context[\"widget\"][\"allowed_file_types\"] = {\n \"id\": f\"{context['widget']['attrs']['id']}AllowedFileTypes\",\n \"value\": self.allowed_file_types,\n }\n return context\n\n class Media:\n css = {\"all\": (\"vendored/uppy/uppy.min.css\",)}\n js = (\n \"vendored/uppy/uppy.min.js\",\n \"js/user_upload.js\",\n )\n\n\nclass UserUploadSingleWidget(UserUploadWidgetMixin, HiddenInput):\n pass\n\n\nclass UserUploadMultipleWidget(UserUploadWidgetMixin, MultipleHiddenInput):\n def get_context(self, name, value, attrs):\n context = super().get_context(name, value, attrs)\n context[\"widget\"][\"attrs\"][\"multiple\"] = True\n return context\n", "path": "app/grandchallenge/uploads/widgets.py"}], "after_files": [{"content": "from django.forms.widgets import HiddenInput, MultipleHiddenInput\n\n\nclass UserUploadWidgetMixin:\n template_name = \"uploads/widget.html\"\n input_type = None\n\n def __init__(self, *args, allowed_file_types=None, **kwargs):\n super().__init__(*args, **kwargs)\n self.allowed_file_types = allowed_file_types\n\n def get_context(self, *args, **kwargs):\n context = super().get_context(*args, **kwargs)\n widget_id = f'X_{context[\"widget\"][\"attrs\"][\"id\"]}'\n context[\"widget\"][\"attrs\"][\"id\"] = widget_id\n context[\"widget\"][\"allowed_file_types\"] = {\n \"id\": f\"{widget_id}AllowedFileTypes\",\n \"value\": self.allowed_file_types,\n }\n return context\n\n class Media:\n css = {\"all\": (\"vendored/uppy/uppy.min.css\",)}\n js = (\n \"vendored/uppy/uppy.min.js\",\n \"js/user_upload.js\",\n )\n\n\nclass UserUploadSingleWidget(UserUploadWidgetMixin, HiddenInput):\n pass\n\n\nclass UserUploadMultipleWidget(UserUploadWidgetMixin, MultipleHiddenInput):\n def get_context(self, name, value, attrs):\n context = super().get_context(name, value, 
attrs)\n context[\"widget\"][\"attrs\"][\"multiple\"] = True\n return context\n", "path": "app/grandchallenge/uploads/widgets.py"}]} | 777 | 170 |
gh_patches_debug_24998 | rasdani/github-patches | git_diff | translate__pootle-6733 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Expire project cache when language mappings change
# Steps to reproduce:
- change language mappings for a project
# Results (Expected/Actual):
- expected is that on disk files that are now mapped should be immediately detected
# Environment (i.e. 'pootle --version', DB, OS, Browser):
2.9+
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pootle/apps/pootle_project/forms.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 from django import forms
10 from django.db import connection
11 from django.forms.models import BaseModelFormSet
12
13 from django_rq.queues import get_queue
14
15 from pootle.core.utils.db import useable_connection
16 from pootle.i18n.gettext import ugettext as _
17 from pootle_config.utils import ObjectConfig
18 from pootle_language.models import Language
19 from pootle_misc.forms import LiberalModelChoiceField
20 from pootle_project.models import Project
21 from pootle_translationproject.models import TranslationProject
22 from pootle_translationproject.signals import (tp_init_failed_async,
23 tp_inited_async)
24
25
26 def update_translation_project(tp, response_url):
27 """Wraps translation project initializing to allow it to be running
28 as RQ job.
29 """
30 try:
31 with useable_connection():
32 tp.init_from_templates()
33 except Exception as e:
34 tp_init_failed_async.send(sender=tp.__class__, instance=tp)
35 raise e
36 tp_inited_async.send(sender=tp.__class__,
37 instance=tp, response_url=response_url)
38
39
40 class TranslationProjectFormSet(BaseModelFormSet):
41
42 def __init__(self, *args, **kwargs):
43 self.response_url = kwargs.pop("response_url")
44 super(TranslationProjectFormSet, self).__init__(*args, **kwargs)
45 self.queryset = self.queryset.select_related("language", "project")
46
47 def save_new(self, form, commit=True):
48 return form.save(
49 response_url=self.response_url,
50 commit=commit)
51
52 def delete_existing(self, tp, commit=True):
53 config = ObjectConfig(tp.project)
54 mapping = config.get("pootle.core.lang_mapping", {})
55 if tp.language.code in mapping:
56 del mapping[tp.language.code]
57 config["pootle.core.lang_mapping"] = mapping
58 super(TranslationProjectFormSet, self).delete_existing(
59 tp, commit=commit)
60
61
62 class TranslationProjectForm(forms.ModelForm):
63
64 language = LiberalModelChoiceField(
65 label=_("Language"),
66 queryset=Language.objects.all(),
67 widget=forms.Select(
68 attrs={
69 'class': 'js-select2 select2-language'}))
70 project = forms.ModelChoiceField(
71 queryset=Project.objects.all(),
72 widget=forms.HiddenInput())
73
74 fs_code = forms.CharField(
75 label=_("Filesystem language code"),
76 required=False)
77
78 class Meta(object):
79 prefix = "existing_language"
80 model = TranslationProject
81 fields = ('language', 'project')
82
83 def __init__(self, *args, **kwargs):
84 """If this form is not bound, it must be called with an initial value
85 for Project.
86 """
87 super(TranslationProjectForm, self).__init__(*args, **kwargs)
88 if kwargs.get("instance"):
89 project_id = kwargs["instance"].project.pk
90 project = kwargs["instance"].project
91 language = kwargs["instance"].language
92 mappings = project.config.get("pootle.core.lang_mapping", {})
93 mappings = dict((v, k) for k, v in mappings.iteritems())
94 mapped = mappings.get(language.code)
95 self.fields["fs_code"].initial = mapped
96 else:
97 project_id = kwargs["initial"]["project"]
98 self.fields["language"].queryset = (
99 self.fields["language"].queryset.exclude(
100 translationproject__project_id=project_id))
101 self.fields["project"].queryset = self.fields[
102 "project"].queryset.filter(pk=project_id)
103
104 def clean(self):
105 project = self.cleaned_data.get("project")
106 language = self.cleaned_data.get("language")
107 if project and language:
108 mapped_code = self.cleaned_data["fs_code"]
109 mapping = project.config.get("pootle.core.lang_mapping", {})
110 if mapped_code:
111 tps = project.translationproject_set.all()
112 lang_codes = tps.values_list("language__code", flat=True)
113 bad_fs_code = (
114 (mapped_code in mapping.keys()
115 and not mapping.get(mapped_code) == language.code)
116 or mapped_code in lang_codes)
117 if bad_fs_code:
118 self.errors["fs_code"] = self.error_class(
119 [_("Unable to add mapped code '%(mapped_code)s' for "
120 "language '%(code)s'. Mapped filesystem codes must "
121 "be unique and cannot be in use with an existing "
122 "Translation Project")
123 % dict(mapped_code=mapped_code, code=language.code)])
124 if language.code in mapping.keys():
125 self.errors["language"] = self.error_class(
126 [_("Unable to add language '%s'. "
127 "Another language is already mapped to this code")
128 % language.code])
129
130 def save(self, response_url=None, commit=True):
131 tp = self.instance
132 initialize_from_templates = False
133 if tp.id is None:
134 initialize_from_templates = tp.can_be_inited_from_templates()
135 tp = super(TranslationProjectForm, self).save(commit)
136 project = tp.project
137 config = ObjectConfig(project)
138 mappings = config.get("pootle.core.lang_mapping", {})
139 mappings = dict((v, k) for k, v in mappings.iteritems())
140 if not self.cleaned_data["fs_code"]:
141 if tp.language.code in mappings:
142 del mappings[tp.language.code]
143 else:
144 mappings[tp.language.code] = self.cleaned_data["fs_code"]
145 config["pootle.core.lang_mapping"] = dict(
146 (v, k) for k, v in mappings.iteritems())
147 if initialize_from_templates:
148 def _enqueue_job():
149 queue = get_queue('default')
150 queue.enqueue(
151 update_translation_project,
152 tp,
153 response_url)
154 connection.on_commit(_enqueue_job)
155 return tp
156
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pootle/apps/pootle_project/forms.py b/pootle/apps/pootle_project/forms.py
--- a/pootle/apps/pootle_project/forms.py
+++ b/pootle/apps/pootle_project/forms.py
@@ -12,6 +12,7 @@
from django_rq.queues import get_queue
+from pootle.core.signals import update_revisions
from pootle.core.utils.db import useable_connection
from pootle.i18n.gettext import ugettext as _
from pootle_config.utils import ObjectConfig
@@ -140,8 +141,18 @@
if not self.cleaned_data["fs_code"]:
if tp.language.code in mappings:
del mappings[tp.language.code]
+ context = self.instance.project.directory
+ update_revisions.send(
+ context.__class__,
+ instance=context,
+ keys=["stats"])
else:
mappings[tp.language.code] = self.cleaned_data["fs_code"]
+ context = self.instance.project.directory
+ update_revisions.send(
+ context.__class__,
+ instance=context,
+ keys=["stats"])
config["pootle.core.lang_mapping"] = dict(
(v, k) for k, v in mappings.iteritems())
if initialize_from_templates:
| {"golden_diff": "diff --git a/pootle/apps/pootle_project/forms.py b/pootle/apps/pootle_project/forms.py\n--- a/pootle/apps/pootle_project/forms.py\n+++ b/pootle/apps/pootle_project/forms.py\n@@ -12,6 +12,7 @@\n \n from django_rq.queues import get_queue\n \n+from pootle.core.signals import update_revisions\n from pootle.core.utils.db import useable_connection\n from pootle.i18n.gettext import ugettext as _\n from pootle_config.utils import ObjectConfig\n@@ -140,8 +141,18 @@\n if not self.cleaned_data[\"fs_code\"]:\n if tp.language.code in mappings:\n del mappings[tp.language.code]\n+ context = self.instance.project.directory\n+ update_revisions.send(\n+ context.__class__,\n+ instance=context,\n+ keys=[\"stats\"])\n else:\n mappings[tp.language.code] = self.cleaned_data[\"fs_code\"]\n+ context = self.instance.project.directory\n+ update_revisions.send(\n+ context.__class__,\n+ instance=context,\n+ keys=[\"stats\"])\n config[\"pootle.core.lang_mapping\"] = dict(\n (v, k) for k, v in mappings.iteritems())\n if initialize_from_templates:\n", "issue": "Expire project cache when language mappings change\n# Steps to reproduce:\r\n\r\n- change language mappings for a project\r\n\r\n# Results (Expected/Actual):\r\n\r\n- expected is that on disk files that are now mapped should be immediately detected\r\n\r\n# Environment (i.e. 'pootle --version', DB, OS, Browser):\r\n\r\n2.9+\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django import forms\nfrom django.db import connection\nfrom django.forms.models import BaseModelFormSet\n\nfrom django_rq.queues import get_queue\n\nfrom pootle.core.utils.db import useable_connection\nfrom pootle.i18n.gettext import ugettext as _\nfrom pootle_config.utils import ObjectConfig\nfrom pootle_language.models import Language\nfrom pootle_misc.forms import LiberalModelChoiceField\nfrom pootle_project.models import Project\nfrom pootle_translationproject.models import TranslationProject\nfrom pootle_translationproject.signals import (tp_init_failed_async,\n tp_inited_async)\n\n\ndef update_translation_project(tp, response_url):\n \"\"\"Wraps translation project initializing to allow it to be running\n as RQ job.\n \"\"\"\n try:\n with useable_connection():\n tp.init_from_templates()\n except Exception as e:\n tp_init_failed_async.send(sender=tp.__class__, instance=tp)\n raise e\n tp_inited_async.send(sender=tp.__class__,\n instance=tp, response_url=response_url)\n\n\nclass TranslationProjectFormSet(BaseModelFormSet):\n\n def __init__(self, *args, **kwargs):\n self.response_url = kwargs.pop(\"response_url\")\n super(TranslationProjectFormSet, self).__init__(*args, **kwargs)\n self.queryset = self.queryset.select_related(\"language\", \"project\")\n\n def save_new(self, form, commit=True):\n return form.save(\n response_url=self.response_url,\n commit=commit)\n\n def delete_existing(self, tp, commit=True):\n config = ObjectConfig(tp.project)\n mapping = config.get(\"pootle.core.lang_mapping\", {})\n if tp.language.code in mapping:\n del mapping[tp.language.code]\n config[\"pootle.core.lang_mapping\"] = mapping\n super(TranslationProjectFormSet, self).delete_existing(\n tp, commit=commit)\n\n\nclass TranslationProjectForm(forms.ModelForm):\n\n language = LiberalModelChoiceField(\n 
label=_(\"Language\"),\n queryset=Language.objects.all(),\n widget=forms.Select(\n attrs={\n 'class': 'js-select2 select2-language'}))\n project = forms.ModelChoiceField(\n queryset=Project.objects.all(),\n widget=forms.HiddenInput())\n\n fs_code = forms.CharField(\n label=_(\"Filesystem language code\"),\n required=False)\n\n class Meta(object):\n prefix = \"existing_language\"\n model = TranslationProject\n fields = ('language', 'project')\n\n def __init__(self, *args, **kwargs):\n \"\"\"If this form is not bound, it must be called with an initial value\n for Project.\n \"\"\"\n super(TranslationProjectForm, self).__init__(*args, **kwargs)\n if kwargs.get(\"instance\"):\n project_id = kwargs[\"instance\"].project.pk\n project = kwargs[\"instance\"].project\n language = kwargs[\"instance\"].language\n mappings = project.config.get(\"pootle.core.lang_mapping\", {})\n mappings = dict((v, k) for k, v in mappings.iteritems())\n mapped = mappings.get(language.code)\n self.fields[\"fs_code\"].initial = mapped\n else:\n project_id = kwargs[\"initial\"][\"project\"]\n self.fields[\"language\"].queryset = (\n self.fields[\"language\"].queryset.exclude(\n translationproject__project_id=project_id))\n self.fields[\"project\"].queryset = self.fields[\n \"project\"].queryset.filter(pk=project_id)\n\n def clean(self):\n project = self.cleaned_data.get(\"project\")\n language = self.cleaned_data.get(\"language\")\n if project and language:\n mapped_code = self.cleaned_data[\"fs_code\"]\n mapping = project.config.get(\"pootle.core.lang_mapping\", {})\n if mapped_code:\n tps = project.translationproject_set.all()\n lang_codes = tps.values_list(\"language__code\", flat=True)\n bad_fs_code = (\n (mapped_code in mapping.keys()\n and not mapping.get(mapped_code) == language.code)\n or mapped_code in lang_codes)\n if bad_fs_code:\n self.errors[\"fs_code\"] = self.error_class(\n [_(\"Unable to add mapped code '%(mapped_code)s' for \"\n \"language '%(code)s'. Mapped filesystem codes must \"\n \"be unique and cannot be in use with an existing \"\n \"Translation Project\")\n % dict(mapped_code=mapped_code, code=language.code)])\n if language.code in mapping.keys():\n self.errors[\"language\"] = self.error_class(\n [_(\"Unable to add language '%s'. \"\n \"Another language is already mapped to this code\")\n % language.code])\n\n def save(self, response_url=None, commit=True):\n tp = self.instance\n initialize_from_templates = False\n if tp.id is None:\n initialize_from_templates = tp.can_be_inited_from_templates()\n tp = super(TranslationProjectForm, self).save(commit)\n project = tp.project\n config = ObjectConfig(project)\n mappings = config.get(\"pootle.core.lang_mapping\", {})\n mappings = dict((v, k) for k, v in mappings.iteritems())\n if not self.cleaned_data[\"fs_code\"]:\n if tp.language.code in mappings:\n del mappings[tp.language.code]\n else:\n mappings[tp.language.code] = self.cleaned_data[\"fs_code\"]\n config[\"pootle.core.lang_mapping\"] = dict(\n (v, k) for k, v in mappings.iteritems())\n if initialize_from_templates:\n def _enqueue_job():\n queue = get_queue('default')\n queue.enqueue(\n update_translation_project,\n tp,\n response_url)\n connection.on_commit(_enqueue_job)\n return tp\n", "path": "pootle/apps/pootle_project/forms.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. 
See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nfrom django import forms\nfrom django.db import connection\nfrom django.forms.models import BaseModelFormSet\n\nfrom django_rq.queues import get_queue\n\nfrom pootle.core.signals import update_revisions\nfrom pootle.core.utils.db import useable_connection\nfrom pootle.i18n.gettext import ugettext as _\nfrom pootle_config.utils import ObjectConfig\nfrom pootle_language.models import Language\nfrom pootle_misc.forms import LiberalModelChoiceField\nfrom pootle_project.models import Project\nfrom pootle_translationproject.models import TranslationProject\nfrom pootle_translationproject.signals import (tp_init_failed_async,\n tp_inited_async)\n\n\ndef update_translation_project(tp, response_url):\n \"\"\"Wraps translation project initializing to allow it to be running\n as RQ job.\n \"\"\"\n try:\n with useable_connection():\n tp.init_from_templates()\n except Exception as e:\n tp_init_failed_async.send(sender=tp.__class__, instance=tp)\n raise e\n tp_inited_async.send(sender=tp.__class__,\n instance=tp, response_url=response_url)\n\n\nclass TranslationProjectFormSet(BaseModelFormSet):\n\n def __init__(self, *args, **kwargs):\n self.response_url = kwargs.pop(\"response_url\")\n super(TranslationProjectFormSet, self).__init__(*args, **kwargs)\n self.queryset = self.queryset.select_related(\"language\", \"project\")\n\n def save_new(self, form, commit=True):\n return form.save(\n response_url=self.response_url,\n commit=commit)\n\n def delete_existing(self, tp, commit=True):\n config = ObjectConfig(tp.project)\n mapping = config.get(\"pootle.core.lang_mapping\", {})\n if tp.language.code in mapping:\n del mapping[tp.language.code]\n config[\"pootle.core.lang_mapping\"] = mapping\n super(TranslationProjectFormSet, self).delete_existing(\n tp, commit=commit)\n\n\nclass TranslationProjectForm(forms.ModelForm):\n\n language = LiberalModelChoiceField(\n label=_(\"Language\"),\n queryset=Language.objects.all(),\n widget=forms.Select(\n attrs={\n 'class': 'js-select2 select2-language'}))\n project = forms.ModelChoiceField(\n queryset=Project.objects.all(),\n widget=forms.HiddenInput())\n\n fs_code = forms.CharField(\n label=_(\"Filesystem language code\"),\n required=False)\n\n class Meta(object):\n prefix = \"existing_language\"\n model = TranslationProject\n fields = ('language', 'project')\n\n def __init__(self, *args, **kwargs):\n \"\"\"If this form is not bound, it must be called with an initial value\n for Project.\n \"\"\"\n super(TranslationProjectForm, self).__init__(*args, **kwargs)\n if kwargs.get(\"instance\"):\n project_id = kwargs[\"instance\"].project.pk\n project = kwargs[\"instance\"].project\n language = kwargs[\"instance\"].language\n mappings = project.config.get(\"pootle.core.lang_mapping\", {})\n mappings = dict((v, k) for k, v in mappings.iteritems())\n mapped = mappings.get(language.code)\n self.fields[\"fs_code\"].initial = mapped\n else:\n project_id = kwargs[\"initial\"][\"project\"]\n self.fields[\"language\"].queryset = (\n self.fields[\"language\"].queryset.exclude(\n translationproject__project_id=project_id))\n self.fields[\"project\"].queryset = self.fields[\n \"project\"].queryset.filter(pk=project_id)\n\n def clean(self):\n project = self.cleaned_data.get(\"project\")\n language = self.cleaned_data.get(\"language\")\n if project and language:\n mapped_code = self.cleaned_data[\"fs_code\"]\n mapping = project.config.get(\"pootle.core.lang_mapping\", {})\n 
if mapped_code:\n tps = project.translationproject_set.all()\n lang_codes = tps.values_list(\"language__code\", flat=True)\n bad_fs_code = (\n (mapped_code in mapping.keys()\n and not mapping.get(mapped_code) == language.code)\n or mapped_code in lang_codes)\n if bad_fs_code:\n self.errors[\"fs_code\"] = self.error_class(\n [_(\"Unable to add mapped code '%(mapped_code)s' for \"\n \"language '%(code)s'. Mapped filesystem codes must \"\n \"be unique and cannot be in use with an existing \"\n \"Translation Project\")\n % dict(mapped_code=mapped_code, code=language.code)])\n if language.code in mapping.keys():\n self.errors[\"language\"] = self.error_class(\n [_(\"Unable to add language '%s'. \"\n \"Another language is already mapped to this code\")\n % language.code])\n\n def save(self, response_url=None, commit=True):\n tp = self.instance\n initialize_from_templates = False\n if tp.id is None:\n initialize_from_templates = tp.can_be_inited_from_templates()\n tp = super(TranslationProjectForm, self).save(commit)\n project = tp.project\n config = ObjectConfig(project)\n mappings = config.get(\"pootle.core.lang_mapping\", {})\n mappings = dict((v, k) for k, v in mappings.iteritems())\n if not self.cleaned_data[\"fs_code\"]:\n if tp.language.code in mappings:\n del mappings[tp.language.code]\n context = self.instance.project.directory\n update_revisions.send(\n context.__class__,\n instance=context,\n keys=[\"stats\"])\n else:\n mappings[tp.language.code] = self.cleaned_data[\"fs_code\"]\n context = self.instance.project.directory\n update_revisions.send(\n context.__class__,\n instance=context,\n keys=[\"stats\"])\n config[\"pootle.core.lang_mapping\"] = dict(\n (v, k) for k, v in mappings.iteritems())\n if initialize_from_templates:\n def _enqueue_job():\n queue = get_queue('default')\n queue.enqueue(\n update_translation_project,\n tp,\n response_url)\n connection.on_commit(_enqueue_job)\n return tp\n", "path": "pootle/apps/pootle_project/forms.py"}]} | 1,971 | 281 |
gh_patches_debug_19141 | rasdani/github-patches | git_diff | pypi__warehouse-2849 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Blacklisting project does not purge the cache
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `warehouse/admin/utils.py`
Content:
```
1 # Licensed under the Apache License, Version 2.0 (the "License");
2 # you may not use this file except in compliance with the License.
3 # You may obtain a copy of the License at
4 #
5 # http://www.apache.org/licenses/LICENSE-2.0
6 #
7 # Unless required by applicable law or agreed to in writing, software
8 # distributed under the License is distributed on an "AS IS" BASIS,
9 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
10 # See the License for the specific language governing permissions and
11 # limitations under the License.
12
13 from packaging.utils import canonicalize_name
14 from pyramid.httpexceptions import HTTPSeeOther
15
16 from warehouse.packaging.models import (
17 Project, Release, Dependency, File, Role, JournalEntry, release_classifiers
18 )
19
20
21 def confirm_project(project, request):
22 confirm = request.POST.get("confirm")
23 project_name = project.normalized_name
24 if not confirm:
25 request.session.flash(
26 "Must confirm the request.",
27 queue="error",
28 )
29 raise HTTPSeeOther(
30 request.route_path(
31 'admin.project.detail',
32 project_name=project_name
33 )
34 )
35 if canonicalize_name(confirm) != project.normalized_name:
36 request.session.flash(
37 f"{confirm!r} is not the same as {project.normalized_name!r}",
38 queue="error",
39 )
40 raise HTTPSeeOther(
41 request.route_path(
42 'admin.project.detail',
43 project_name=project_name
44 )
45 )
46
47
48 def remove_project(project, request):
49 # TODO: We don't actually delete files from the data store. We should add
50 # some kind of garbage collection at some point.
51
52 request.db.add(
53 JournalEntry(
54 name=project.name,
55 action="remove",
56 submitted_by=request.user,
57 submitted_from=request.remote_addr,
58 )
59 )
60 request.db.query(Role).filter(Role.project == project).delete()
61 request.db.query(File).filter(File.name == project.name).delete()
62 (request.db.query(Dependency).filter(Dependency.name == project.name)
63 .delete())
64 (request.db.execute(release_classifiers.delete()
65 .where(release_classifiers.c.name ==
66 project.name)))
67 request.db.query(Release).filter(Release.name == project.name).delete()
68 request.db.query(Project).filter(Project.name == project.name).delete()
69
70 request.session.flash(
71 f"Successfully deleted the project {project.name!r}.",
72 queue="success",
73 )
74
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/warehouse/admin/utils.py b/warehouse/admin/utils.py
--- a/warehouse/admin/utils.py
+++ b/warehouse/admin/utils.py
@@ -64,8 +64,22 @@
(request.db.execute(release_classifiers.delete()
.where(release_classifiers.c.name ==
project.name)))
- request.db.query(Release).filter(Release.name == project.name).delete()
- request.db.query(Project).filter(Project.name == project.name).delete()
+
+ # Load the following objects into the session and individually delete them
+ # so they are included in `session.deleted` and their cache keys are purged
+
+ # Delete releases first, otherwise they will get cascade-deleted by the
+ # project deletion and won't be purged
+ for release in (
+ request.db.query(Release)
+ .filter(Release.name == project.name)
+ .all()):
+ request.db.delete(release)
+
+ # Finally, delete the project
+ request.db.delete(
+ request.db.query(Project).filter(Project.name == project.name).one()
+ )
request.session.flash(
f"Successfully deleted the project {project.name!r}.",
| {"golden_diff": "diff --git a/warehouse/admin/utils.py b/warehouse/admin/utils.py\n--- a/warehouse/admin/utils.py\n+++ b/warehouse/admin/utils.py\n@@ -64,8 +64,22 @@\n (request.db.execute(release_classifiers.delete()\n .where(release_classifiers.c.name ==\n project.name)))\n- request.db.query(Release).filter(Release.name == project.name).delete()\n- request.db.query(Project).filter(Project.name == project.name).delete()\n+\n+ # Load the following objects into the session and individually delete them\n+ # so they are included in `session.deleted` and their cache keys are purged\n+\n+ # Delete releases first, otherwise they will get cascade-deleted by the\n+ # project deletion and won't be purged\n+ for release in (\n+ request.db.query(Release)\n+ .filter(Release.name == project.name)\n+ .all()):\n+ request.db.delete(release)\n+\n+ # Finally, delete the project\n+ request.db.delete(\n+ request.db.query(Project).filter(Project.name == project.name).one()\n+ )\n \n request.session.flash(\n f\"Successfully deleted the project {project.name!r}.\",\n", "issue": "Blacklisting project does not purge the cache\n\n", "before_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom packaging.utils import canonicalize_name\nfrom pyramid.httpexceptions import HTTPSeeOther\n\nfrom warehouse.packaging.models import (\n Project, Release, Dependency, File, Role, JournalEntry, release_classifiers\n)\n\n\ndef confirm_project(project, request):\n confirm = request.POST.get(\"confirm\")\n project_name = project.normalized_name\n if not confirm:\n request.session.flash(\n \"Must confirm the request.\",\n queue=\"error\",\n )\n raise HTTPSeeOther(\n request.route_path(\n 'admin.project.detail',\n project_name=project_name\n )\n )\n if canonicalize_name(confirm) != project.normalized_name:\n request.session.flash(\n f\"{confirm!r} is not the same as {project.normalized_name!r}\",\n queue=\"error\",\n )\n raise HTTPSeeOther(\n request.route_path(\n 'admin.project.detail',\n project_name=project_name\n )\n )\n\n\ndef remove_project(project, request):\n # TODO: We don't actually delete files from the data store. 
We should add\n # some kind of garbage collection at some point.\n\n request.db.add(\n JournalEntry(\n name=project.name,\n action=\"remove\",\n submitted_by=request.user,\n submitted_from=request.remote_addr,\n )\n )\n request.db.query(Role).filter(Role.project == project).delete()\n request.db.query(File).filter(File.name == project.name).delete()\n (request.db.query(Dependency).filter(Dependency.name == project.name)\n .delete())\n (request.db.execute(release_classifiers.delete()\n .where(release_classifiers.c.name ==\n project.name)))\n request.db.query(Release).filter(Release.name == project.name).delete()\n request.db.query(Project).filter(Project.name == project.name).delete()\n\n request.session.flash(\n f\"Successfully deleted the project {project.name!r}.\",\n queue=\"success\",\n )\n", "path": "warehouse/admin/utils.py"}], "after_files": [{"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom packaging.utils import canonicalize_name\nfrom pyramid.httpexceptions import HTTPSeeOther\n\nfrom warehouse.packaging.models import (\n Project, Release, Dependency, File, Role, JournalEntry, release_classifiers\n)\n\n\ndef confirm_project(project, request):\n confirm = request.POST.get(\"confirm\")\n project_name = project.normalized_name\n if not confirm:\n request.session.flash(\n \"Must confirm the request.\",\n queue=\"error\",\n )\n raise HTTPSeeOther(\n request.route_path(\n 'admin.project.detail',\n project_name=project_name\n )\n )\n if canonicalize_name(confirm) != project.normalized_name:\n request.session.flash(\n f\"{confirm!r} is not the same as {project.normalized_name!r}\",\n queue=\"error\",\n )\n raise HTTPSeeOther(\n request.route_path(\n 'admin.project.detail',\n project_name=project_name\n )\n )\n\n\ndef remove_project(project, request):\n # TODO: We don't actually delete files from the data store. 
We should add\n # some kind of garbage collection at some point.\n\n request.db.add(\n JournalEntry(\n name=project.name,\n action=\"remove\",\n submitted_by=request.user,\n submitted_from=request.remote_addr,\n )\n )\n request.db.query(Role).filter(Role.project == project).delete()\n request.db.query(File).filter(File.name == project.name).delete()\n (request.db.query(Dependency).filter(Dependency.name == project.name)\n .delete())\n (request.db.execute(release_classifiers.delete()\n .where(release_classifiers.c.name ==\n project.name)))\n\n # Load the following objects into the session and individually delete them\n # so they are included in `session.deleted` and their cache keys are purged\n\n # Delete releases first, otherwise they will get cascade-deleted by the\n # project deletion and won't be purged\n for release in (\n request.db.query(Release)\n .filter(Release.name == project.name)\n .all()):\n request.db.delete(release)\n\n # Finally, delete the project\n request.db.delete(\n request.db.query(Project).filter(Project.name == project.name).one()\n )\n\n request.session.flash(\n f\"Successfully deleted the project {project.name!r}.\",\n queue=\"success\",\n )\n", "path": "warehouse/admin/utils.py"}]} | 929 | 263 |