Dataset columns:

problem_id: stringlengths (18 to 22)
source: stringclasses (1 value)
task_type: stringclasses (1 value)
in_source_id: stringlengths (13 to 58)
prompt: stringlengths (1.1k to 25.4k)
golden_diff: stringlengths (145 to 5.13k)
verification_info: stringlengths (582 to 39.1k)
num_tokens: int64 (271 to 4.1k)
num_tokens_diff: int64 (47 to 1.02k)
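The rows reproduced below follow this schema. A minimal loading sketch is given here, under assumptions: the dataset path is a placeholder taken from the `source` column value (rasdani/github-patches) and the split name is assumed to be "train"; substitute the actual Hub id or local path of this dump. The `verification_info` field is a JSON string, and the key names in the comments are those visible in the rows below.

```python
import json

from datasets import load_dataset

# Assumption: placeholder dataset path and split; replace with the real
# location of this dump if it differs.
ds = load_dataset("rasdani/github-patches", split="train")

row = ds[0]
print(row["problem_id"], row["task_type"], row["in_source_id"])

# verification_info is stored as a JSON string; in the rows below it carries
# the reference patch, the issue text, and the before/after file contents.
info = json.loads(row["verification_info"])
print(sorted(info.keys()))        # e.g. after_files, before_files, golden_diff, issue
print(info["golden_diff"][:200])  # first lines of the reference patch
```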
problem_id: gh_patches_debug_34765
source: rasdani/github-patches
task_type: git_diff
in_source_id: crytic__slither-1909
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [Bug] contract reports ether as locked when ether is sent in Yul The following contract reports ether as locked despite it being sent in a Yul block ``` contract FPLockedEther { receive() payable external {} function yulSendEther() external { bool success; assembly { success := call(gas(), caller(), balance(address()), 0,0,0,0) } } } ``` ``` Contract locking ether found: Contract FPLockedEther (locked-ether.sol#1-13) has payable functions: - FPLockedEther.receive() (locked-ether.sol#2-3) But does not have a function to withdraw the ether Reference: https://github.com/crytic/slither/wiki/Detector-Documentation#contracts-that-lock-ether ``` It could be that the IR is incorrect here as it should not be a `SOLIDITY_CALL` ``` Contract FPLockedEther Function FPLockedEther.receive() (*) Function FPLockedEther.yulSendEther() (*) Expression: success = call(uint256,uint256,uint256,uint256,uint256,uint256,uint256)(gas()(),caller()(),balance(uint256)(address()()),0,0,0,0) IRs: TMP_0(uint256) = SOLIDITY_CALL gas()() TMP_1(address) := msg.sender(address) TMP_2 = CONVERT this to address TMP_3(uint256) = SOLIDITY_CALL balance(uint256)(TMP_2) TMP_4(uint256) = SOLIDITY_CALL call(uint256,uint256,uint256,uint256,uint256,uint256,uint256)(TMP_0,TMP_1,TMP_3,0,0,0,0) success(bool) := TMP_4(uint256) ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `slither/detectors/attributes/locked_ether.py` Content: ``` 1 """ 2 Check if ethers are locked in the contract 3 """ 4 from typing import List 5 6 from slither.core.declarations.contract import Contract 7 from slither.detectors.abstract_detector import ( 8 AbstractDetector, 9 DetectorClassification, 10 DETECTOR_INFO, 11 ) 12 from slither.slithir.operations import ( 13 HighLevelCall, 14 LowLevelCall, 15 Send, 16 Transfer, 17 NewContract, 18 LibraryCall, 19 InternalCall, 20 ) 21 from slither.utils.output import Output 22 23 24 class LockedEther(AbstractDetector): # pylint: disable=too-many-nested-blocks 25 26 ARGUMENT = "locked-ether" 27 HELP = "Contracts that lock ether" 28 IMPACT = DetectorClassification.MEDIUM 29 CONFIDENCE = DetectorClassification.HIGH 30 31 WIKI = "https://github.com/crytic/slither/wiki/Detector-Documentation#contracts-that-lock-ether" 32 33 WIKI_TITLE = "Contracts that lock Ether" 34 WIKI_DESCRIPTION = "Contract with a `payable` function, but without a withdrawal capacity." 35 36 # region wiki_exploit_scenario 37 WIKI_EXPLOIT_SCENARIO = """ 38 ```solidity 39 pragma solidity 0.4.24; 40 contract Locked{ 41 function receive() payable public{ 42 } 43 } 44 ``` 45 Every Ether sent to `Locked` will be lost.""" 46 # endregion wiki_exploit_scenario 47 48 WIKI_RECOMMENDATION = "Remove the payable attribute or add a withdraw function." 
49 50 @staticmethod 51 def do_no_send_ether(contract: Contract) -> bool: 52 functions = contract.all_functions_called 53 to_explore = functions 54 explored = [] 55 while to_explore: # pylint: disable=too-many-nested-blocks 56 functions = to_explore 57 explored += to_explore 58 to_explore = [] 59 for function in functions: 60 calls = [c.name for c in function.internal_calls] 61 if "suicide(address)" in calls or "selfdestruct(address)" in calls: 62 return False 63 for node in function.nodes: 64 for ir in node.irs: 65 if isinstance( 66 ir, 67 (Send, Transfer, HighLevelCall, LowLevelCall, NewContract), 68 ): 69 if ir.call_value and ir.call_value != 0: 70 return False 71 if isinstance(ir, (LowLevelCall)): 72 if ir.function_name in ["delegatecall", "callcode"]: 73 return False 74 # If a new internal call or librarycall 75 # Add it to the list to explore 76 # InternalCall if to follow internal call in libraries 77 if isinstance(ir, (InternalCall, LibraryCall)): 78 if not ir.function in explored: 79 to_explore.append(ir.function) 80 81 return True 82 83 def _detect(self) -> List[Output]: 84 results = [] 85 86 for contract in self.compilation_unit.contracts_derived: 87 if contract.is_signature_only(): 88 continue 89 funcs_payable = [function for function in contract.functions if function.payable] 90 if funcs_payable: 91 if self.do_no_send_ether(contract): 92 info: DETECTOR_INFO = ["Contract locking ether found:\n"] 93 info += ["\tContract ", contract, " has payable functions:\n"] 94 for function in funcs_payable: 95 info += ["\t - ", function, "\n"] 96 info += "\tBut does not have a function to withdraw the ether\n" 97 98 json = self.generate_result(info) 99 100 results.append(json) 101 102 return results 103 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/slither/detectors/attributes/locked_ether.py b/slither/detectors/attributes/locked_ether.py --- a/slither/detectors/attributes/locked_ether.py +++ b/slither/detectors/attributes/locked_ether.py @@ -3,7 +3,7 @@ """ from typing import List -from slither.core.declarations.contract import Contract +from slither.core.declarations import Contract, SolidityFunction from slither.detectors.abstract_detector import ( AbstractDetector, DetectorClassification, @@ -17,7 +17,9 @@ NewContract, LibraryCall, InternalCall, + SolidityCall, ) +from slither.slithir.variables import Constant from slither.utils.output import Output @@ -68,8 +70,28 @@ ): if ir.call_value and ir.call_value != 0: return False - if isinstance(ir, (LowLevelCall)): - if ir.function_name in ["delegatecall", "callcode"]: + if isinstance(ir, (LowLevelCall)) and ir.function_name in [ + "delegatecall", + "callcode", + ]: + return False + if isinstance(ir, SolidityCall): + call_can_send_ether = ir.function in [ + SolidityFunction( + "delegatecall(uint256,uint256,uint256,uint256,uint256,uint256)" + ), + SolidityFunction( + "callcode(uint256,uint256,uint256,uint256,uint256,uint256,uint256)" + ), + SolidityFunction( + "call(uint256,uint256,uint256,uint256,uint256,uint256,uint256)" + ), + ] + nonzero_call_value = call_can_send_ether and ( + not isinstance(ir.arguments[2], Constant) + or ir.arguments[2].value != 0 + ) + if nonzero_call_value: return False # If a new internal call or librarycall # Add it to the list to explore
{"golden_diff": "diff --git a/slither/detectors/attributes/locked_ether.py b/slither/detectors/attributes/locked_ether.py\n--- a/slither/detectors/attributes/locked_ether.py\n+++ b/slither/detectors/attributes/locked_ether.py\n@@ -3,7 +3,7 @@\n \"\"\"\n from typing import List\n \n-from slither.core.declarations.contract import Contract\n+from slither.core.declarations import Contract, SolidityFunction\n from slither.detectors.abstract_detector import (\n AbstractDetector,\n DetectorClassification,\n@@ -17,7 +17,9 @@\n NewContract,\n LibraryCall,\n InternalCall,\n+ SolidityCall,\n )\n+from slither.slithir.variables import Constant\n from slither.utils.output import Output\n \n \n@@ -68,8 +70,28 @@\n ):\n if ir.call_value and ir.call_value != 0:\n return False\n- if isinstance(ir, (LowLevelCall)):\n- if ir.function_name in [\"delegatecall\", \"callcode\"]:\n+ if isinstance(ir, (LowLevelCall)) and ir.function_name in [\n+ \"delegatecall\",\n+ \"callcode\",\n+ ]:\n+ return False\n+ if isinstance(ir, SolidityCall):\n+ call_can_send_ether = ir.function in [\n+ SolidityFunction(\n+ \"delegatecall(uint256,uint256,uint256,uint256,uint256,uint256)\"\n+ ),\n+ SolidityFunction(\n+ \"callcode(uint256,uint256,uint256,uint256,uint256,uint256,uint256)\"\n+ ),\n+ SolidityFunction(\n+ \"call(uint256,uint256,uint256,uint256,uint256,uint256,uint256)\"\n+ ),\n+ ]\n+ nonzero_call_value = call_can_send_ether and (\n+ not isinstance(ir.arguments[2], Constant)\n+ or ir.arguments[2].value != 0\n+ )\n+ if nonzero_call_value:\n return False\n # If a new internal call or librarycall\n # Add it to the list to explore\n", "issue": "[Bug] contract reports ether as locked when ether is sent in Yul\nThe following contract reports ether as locked despite it being sent in a Yul block\r\n```\r\ncontract FPLockedEther {\r\n receive() payable external {}\r\n\r\n function yulSendEther() external {\r\n bool success;\r\n assembly {\r\n success := call(gas(), caller(), balance(address()), 0,0,0,0)\r\n }\r\n }\r\n}\r\n```\r\n```\r\nContract locking ether found:\r\n\tContract FPLockedEther (locked-ether.sol#1-13) has payable functions:\r\n\t - FPLockedEther.receive() (locked-ether.sol#2-3)\r\n\tBut does not have a function to withdraw the ether\r\nReference: https://github.com/crytic/slither/wiki/Detector-Documentation#contracts-that-lock-ether\r\n```\r\n\r\nIt could be that the IR is incorrect here as it should not be a `SOLIDITY_CALL`\r\n```\r\nContract FPLockedEther\r\n\tFunction FPLockedEther.receive() (*)\r\n\tFunction FPLockedEther.yulSendEther() (*)\r\n\t\tExpression: success = call(uint256,uint256,uint256,uint256,uint256,uint256,uint256)(gas()(),caller()(),balance(uint256)(address()()),0,0,0,0)\r\n\t\tIRs:\r\n\t\t\tTMP_0(uint256) = SOLIDITY_CALL gas()()\r\n\t\t\tTMP_1(address) := msg.sender(address)\r\n\t\t\tTMP_2 = CONVERT this to address\r\n\t\t\tTMP_3(uint256) = SOLIDITY_CALL balance(uint256)(TMP_2)\r\n\t\t\tTMP_4(uint256) = SOLIDITY_CALL call(uint256,uint256,uint256,uint256,uint256,uint256,uint256)(TMP_0,TMP_1,TMP_3,0,0,0,0)\r\n\t\t\tsuccess(bool) := TMP_4(uint256)\r\n```\n", "before_files": [{"content": "\"\"\"\n Check if ethers are locked in the contract\n\"\"\"\nfrom typing import List\n\nfrom slither.core.declarations.contract import Contract\nfrom slither.detectors.abstract_detector import (\n AbstractDetector,\n DetectorClassification,\n DETECTOR_INFO,\n)\nfrom slither.slithir.operations import (\n HighLevelCall,\n LowLevelCall,\n Send,\n Transfer,\n NewContract,\n LibraryCall,\n InternalCall,\n)\nfrom 
slither.utils.output import Output\n\n\nclass LockedEther(AbstractDetector): # pylint: disable=too-many-nested-blocks\n\n ARGUMENT = \"locked-ether\"\n HELP = \"Contracts that lock ether\"\n IMPACT = DetectorClassification.MEDIUM\n CONFIDENCE = DetectorClassification.HIGH\n\n WIKI = \"https://github.com/crytic/slither/wiki/Detector-Documentation#contracts-that-lock-ether\"\n\n WIKI_TITLE = \"Contracts that lock Ether\"\n WIKI_DESCRIPTION = \"Contract with a `payable` function, but without a withdrawal capacity.\"\n\n # region wiki_exploit_scenario\n WIKI_EXPLOIT_SCENARIO = \"\"\"\n```solidity\npragma solidity 0.4.24;\ncontract Locked{\n function receive() payable public{\n }\n}\n```\nEvery Ether sent to `Locked` will be lost.\"\"\"\n # endregion wiki_exploit_scenario\n\n WIKI_RECOMMENDATION = \"Remove the payable attribute or add a withdraw function.\"\n\n @staticmethod\n def do_no_send_ether(contract: Contract) -> bool:\n functions = contract.all_functions_called\n to_explore = functions\n explored = []\n while to_explore: # pylint: disable=too-many-nested-blocks\n functions = to_explore\n explored += to_explore\n to_explore = []\n for function in functions:\n calls = [c.name for c in function.internal_calls]\n if \"suicide(address)\" in calls or \"selfdestruct(address)\" in calls:\n return False\n for node in function.nodes:\n for ir in node.irs:\n if isinstance(\n ir,\n (Send, Transfer, HighLevelCall, LowLevelCall, NewContract),\n ):\n if ir.call_value and ir.call_value != 0:\n return False\n if isinstance(ir, (LowLevelCall)):\n if ir.function_name in [\"delegatecall\", \"callcode\"]:\n return False\n # If a new internal call or librarycall\n # Add it to the list to explore\n # InternalCall if to follow internal call in libraries\n if isinstance(ir, (InternalCall, LibraryCall)):\n if not ir.function in explored:\n to_explore.append(ir.function)\n\n return True\n\n def _detect(self) -> List[Output]:\n results = []\n\n for contract in self.compilation_unit.contracts_derived:\n if contract.is_signature_only():\n continue\n funcs_payable = [function for function in contract.functions if function.payable]\n if funcs_payable:\n if self.do_no_send_ether(contract):\n info: DETECTOR_INFO = [\"Contract locking ether found:\\n\"]\n info += [\"\\tContract \", contract, \" has payable functions:\\n\"]\n for function in funcs_payable:\n info += [\"\\t - \", function, \"\\n\"]\n info += \"\\tBut does not have a function to withdraw the ether\\n\"\n\n json = self.generate_result(info)\n\n results.append(json)\n\n return results\n", "path": "slither/detectors/attributes/locked_ether.py"}], "after_files": [{"content": "\"\"\"\n Check if ethers are locked in the contract\n\"\"\"\nfrom typing import List\n\nfrom slither.core.declarations import Contract, SolidityFunction\nfrom slither.detectors.abstract_detector import (\n AbstractDetector,\n DetectorClassification,\n DETECTOR_INFO,\n)\nfrom slither.slithir.operations import (\n HighLevelCall,\n LowLevelCall,\n Send,\n Transfer,\n NewContract,\n LibraryCall,\n InternalCall,\n SolidityCall,\n)\nfrom slither.slithir.variables import Constant\nfrom slither.utils.output import Output\n\n\nclass LockedEther(AbstractDetector): # pylint: disable=too-many-nested-blocks\n\n ARGUMENT = \"locked-ether\"\n HELP = \"Contracts that lock ether\"\n IMPACT = DetectorClassification.MEDIUM\n CONFIDENCE = DetectorClassification.HIGH\n\n WIKI = \"https://github.com/crytic/slither/wiki/Detector-Documentation#contracts-that-lock-ether\"\n\n WIKI_TITLE = \"Contracts that lock 
Ether\"\n WIKI_DESCRIPTION = \"Contract with a `payable` function, but without a withdrawal capacity.\"\n\n # region wiki_exploit_scenario\n WIKI_EXPLOIT_SCENARIO = \"\"\"\n```solidity\npragma solidity 0.4.24;\ncontract Locked{\n function receive() payable public{\n }\n}\n```\nEvery Ether sent to `Locked` will be lost.\"\"\"\n # endregion wiki_exploit_scenario\n\n WIKI_RECOMMENDATION = \"Remove the payable attribute or add a withdraw function.\"\n\n @staticmethod\n def do_no_send_ether(contract: Contract) -> bool:\n functions = contract.all_functions_called\n to_explore = functions\n explored = []\n while to_explore: # pylint: disable=too-many-nested-blocks\n functions = to_explore\n explored += to_explore\n to_explore = []\n for function in functions:\n calls = [c.name for c in function.internal_calls]\n if \"suicide(address)\" in calls or \"selfdestruct(address)\" in calls:\n return False\n for node in function.nodes:\n for ir in node.irs:\n if isinstance(\n ir,\n (Send, Transfer, HighLevelCall, LowLevelCall, NewContract),\n ):\n if ir.call_value and ir.call_value != 0:\n return False\n if isinstance(ir, (LowLevelCall)) and ir.function_name in [\n \"delegatecall\",\n \"callcode\",\n ]:\n return False\n if isinstance(ir, SolidityCall):\n call_can_send_ether = ir.function in [\n SolidityFunction(\n \"delegatecall(uint256,uint256,uint256,uint256,uint256,uint256)\"\n ),\n SolidityFunction(\n \"callcode(uint256,uint256,uint256,uint256,uint256,uint256,uint256)\"\n ),\n SolidityFunction(\n \"call(uint256,uint256,uint256,uint256,uint256,uint256,uint256)\"\n ),\n ]\n nonzero_call_value = call_can_send_ether and (\n not isinstance(ir.arguments[2], Constant)\n or ir.arguments[2].value != 0\n )\n if nonzero_call_value:\n return False\n # If a new internal call or librarycall\n # Add it to the list to explore\n # InternalCall if to follow internal call in libraries\n if isinstance(ir, (InternalCall, LibraryCall)):\n if not ir.function in explored:\n to_explore.append(ir.function)\n\n return True\n\n def _detect(self) -> List[Output]:\n results = []\n\n for contract in self.compilation_unit.contracts_derived:\n if contract.is_signature_only():\n continue\n funcs_payable = [function for function in contract.functions if function.payable]\n if funcs_payable:\n if self.do_no_send_ether(contract):\n info: DETECTOR_INFO = [\"Contract locking ether found:\\n\"]\n info += [\"\\tContract \", contract, \" has payable functions:\\n\"]\n for function in funcs_payable:\n info += [\"\\t - \", function, \"\\n\"]\n info += \"\\tBut does not have a function to withdraw the ether\\n\"\n\n json = self.generate_result(info)\n\n results.append(json)\n\n return results\n", "path": "slither/detectors/attributes/locked_ether.py"}]}
num_tokens: 1,643
num_tokens_diff: 485
problem_id: gh_patches_debug_22330
source: rasdani/github-patches
task_type: git_diff
in_source_id: comic__grand-challenge.org-1744
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Markdown preview fails CSRF validation checks Caused by the name change of the CSRF cookie. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `app/grandchallenge/core/widgets.py` Content: ``` 1 from django import forms 2 from markdownx.widgets import AdminMarkdownxWidget, MarkdownxWidget 3 4 5 class JSONEditorWidget(forms.Textarea): 6 template_name = "jsoneditor/jsoneditor_widget.html" 7 8 def __init__(self, schema=None, attrs=None): 9 super().__init__(attrs) 10 self.schema = schema 11 12 def get_context(self, name, value, attrs): 13 context = super().get_context(name, value, attrs) 14 context.update({"schema": self.schema}) 15 return context 16 17 class Media: 18 css = { 19 "all": ( 20 "https://cdnjs.cloudflare.com/ajax/libs/jsoneditor/5.25.0/jsoneditor.min.css", 21 ) 22 } 23 js = ( 24 "https://cdnjs.cloudflare.com/ajax/libs/jsoneditor/5.25.0/jsoneditor.min.js", 25 ) 26 27 28 class MarkdownEditorWidget(MarkdownxWidget): 29 class Media(MarkdownxWidget.Media): 30 js = [ 31 *MarkdownxWidget.Media.js, 32 "vendor/js/markdown-toolbar-element/index.umd.js", 33 ] 34 35 36 class MarkdownEditorAdminWidget(AdminMarkdownxWidget): 37 class Media(AdminMarkdownxWidget.Media): 38 css = { 39 "all": [ 40 *AdminMarkdownxWidget.Media.css["all"], 41 "vendor/css/base.min.css", 42 "vendor/fa/css/all.css", 43 ] 44 } 45 js = [ 46 *AdminMarkdownxWidget.Media.js, 47 "vendor/js/markdown-toolbar-element/index.umd.js", 48 ] 49 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/app/grandchallenge/core/widgets.py b/app/grandchallenge/core/widgets.py --- a/app/grandchallenge/core/widgets.py +++ b/app/grandchallenge/core/widgets.py @@ -26,23 +26,29 @@ class MarkdownEditorWidget(MarkdownxWidget): - class Media(MarkdownxWidget.Media): - js = [ - *MarkdownxWidget.Media.js, - "vendor/js/markdown-toolbar-element/index.umd.js", - ] + @property + def media(self): + return forms.Media( + js=( + "js/markdownx.js", + "vendor/js/markdown-toolbar-element/index.umd.js", + ) + ) class MarkdownEditorAdminWidget(AdminMarkdownxWidget): - class Media(AdminMarkdownxWidget.Media): - css = { - "all": [ - *AdminMarkdownxWidget.Media.css["all"], - "vendor/css/base.min.css", - "vendor/fa/css/all.css", - ] - } - js = [ - *AdminMarkdownxWidget.Media.js, - "vendor/js/markdown-toolbar-element/index.umd.js", - ] + @property + def media(self): + return forms.Media( + css={ + "all": [ + *AdminMarkdownxWidget.Media.css["all"], + "vendor/css/base.min.css", + "vendor/fa/css/all.css", + ] + }, + js=[ + "js/markdownx.js", + "vendor/js/markdown-toolbar-element/index.umd.js", + ], + )
{"golden_diff": "diff --git a/app/grandchallenge/core/widgets.py b/app/grandchallenge/core/widgets.py\n--- a/app/grandchallenge/core/widgets.py\n+++ b/app/grandchallenge/core/widgets.py\n@@ -26,23 +26,29 @@\n \n \n class MarkdownEditorWidget(MarkdownxWidget):\n- class Media(MarkdownxWidget.Media):\n- js = [\n- *MarkdownxWidget.Media.js,\n- \"vendor/js/markdown-toolbar-element/index.umd.js\",\n- ]\n+ @property\n+ def media(self):\n+ return forms.Media(\n+ js=(\n+ \"js/markdownx.js\",\n+ \"vendor/js/markdown-toolbar-element/index.umd.js\",\n+ )\n+ )\n \n \n class MarkdownEditorAdminWidget(AdminMarkdownxWidget):\n- class Media(AdminMarkdownxWidget.Media):\n- css = {\n- \"all\": [\n- *AdminMarkdownxWidget.Media.css[\"all\"],\n- \"vendor/css/base.min.css\",\n- \"vendor/fa/css/all.css\",\n- ]\n- }\n- js = [\n- *AdminMarkdownxWidget.Media.js,\n- \"vendor/js/markdown-toolbar-element/index.umd.js\",\n- ]\n+ @property\n+ def media(self):\n+ return forms.Media(\n+ css={\n+ \"all\": [\n+ *AdminMarkdownxWidget.Media.css[\"all\"],\n+ \"vendor/css/base.min.css\",\n+ \"vendor/fa/css/all.css\",\n+ ]\n+ },\n+ js=[\n+ \"js/markdownx.js\",\n+ \"vendor/js/markdown-toolbar-element/index.umd.js\",\n+ ],\n+ )\n", "issue": "Markdown preview fails CSRF validation checks\nCaused by the name change of the CSRF cookie.\n", "before_files": [{"content": "from django import forms\nfrom markdownx.widgets import AdminMarkdownxWidget, MarkdownxWidget\n\n\nclass JSONEditorWidget(forms.Textarea):\n template_name = \"jsoneditor/jsoneditor_widget.html\"\n\n def __init__(self, schema=None, attrs=None):\n super().__init__(attrs)\n self.schema = schema\n\n def get_context(self, name, value, attrs):\n context = super().get_context(name, value, attrs)\n context.update({\"schema\": self.schema})\n return context\n\n class Media:\n css = {\n \"all\": (\n \"https://cdnjs.cloudflare.com/ajax/libs/jsoneditor/5.25.0/jsoneditor.min.css\",\n )\n }\n js = (\n \"https://cdnjs.cloudflare.com/ajax/libs/jsoneditor/5.25.0/jsoneditor.min.js\",\n )\n\n\nclass MarkdownEditorWidget(MarkdownxWidget):\n class Media(MarkdownxWidget.Media):\n js = [\n *MarkdownxWidget.Media.js,\n \"vendor/js/markdown-toolbar-element/index.umd.js\",\n ]\n\n\nclass MarkdownEditorAdminWidget(AdminMarkdownxWidget):\n class Media(AdminMarkdownxWidget.Media):\n css = {\n \"all\": [\n *AdminMarkdownxWidget.Media.css[\"all\"],\n \"vendor/css/base.min.css\",\n \"vendor/fa/css/all.css\",\n ]\n }\n js = [\n *AdminMarkdownxWidget.Media.js,\n \"vendor/js/markdown-toolbar-element/index.umd.js\",\n ]\n", "path": "app/grandchallenge/core/widgets.py"}], "after_files": [{"content": "from django import forms\nfrom markdownx.widgets import AdminMarkdownxWidget, MarkdownxWidget\n\n\nclass JSONEditorWidget(forms.Textarea):\n template_name = \"jsoneditor/jsoneditor_widget.html\"\n\n def __init__(self, schema=None, attrs=None):\n super().__init__(attrs)\n self.schema = schema\n\n def get_context(self, name, value, attrs):\n context = super().get_context(name, value, attrs)\n context.update({\"schema\": self.schema})\n return context\n\n class Media:\n css = {\n \"all\": (\n \"https://cdnjs.cloudflare.com/ajax/libs/jsoneditor/5.25.0/jsoneditor.min.css\",\n )\n }\n js = (\n \"https://cdnjs.cloudflare.com/ajax/libs/jsoneditor/5.25.0/jsoneditor.min.js\",\n )\n\n\nclass MarkdownEditorWidget(MarkdownxWidget):\n @property\n def media(self):\n return forms.Media(\n js=(\n \"js/markdownx.js\",\n \"vendor/js/markdown-toolbar-element/index.umd.js\",\n )\n )\n\n\nclass 
MarkdownEditorAdminWidget(AdminMarkdownxWidget):\n @property\n def media(self):\n return forms.Media(\n css={\n \"all\": [\n *AdminMarkdownxWidget.Media.css[\"all\"],\n \"vendor/css/base.min.css\",\n \"vendor/fa/css/all.css\",\n ]\n },\n js=[\n \"js/markdownx.js\",\n \"vendor/js/markdown-toolbar-element/index.umd.js\",\n ],\n )\n", "path": "app/grandchallenge/core/widgets.py"}]}
num_tokens: 685
num_tokens_diff: 355
problem_id: gh_patches_debug_19406
source: rasdani/github-patches
task_type: git_diff
in_source_id: aws-cloudformation__cfn-lint-441
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- E3002 error thrown when using parameter lists *cfn-lint version cfn-lint 0.8.3 *Description of issue. When using SSM parameters that are type list, the linter doesn't properly recognize them as a list and thows an error E3002 Specifically E3002 Property PreferredAvailabilityZones should be of type List or Parameter should be a list for resource cacheRedisV1 us-east-1.yaml:249:7 The parameter section for this is: Parameters: azList: Type: "AWS::SSM::Parameter::Value<List<String>>" Description: "The list of AZs from Parameter Store" Default: '/regionSettings/azList' The resource is defined as: cacheForumRedisV1: Type: AWS::ElastiCache::CacheCluster Properties: PreferredAvailabilityZones: !Ref azList --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/cfnlint/rules/resources/properties/Properties.py` Content: ``` 1 """ 2 Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 4 Permission is hereby granted, free of charge, to any person obtaining a copy of this 5 software and associated documentation files (the "Software"), to deal in the Software 6 without restriction, including without limitation the rights to use, copy, modify, 7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to 8 permit persons to whom the Software is furnished to do so. 9 10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, 11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT 13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 16 """ 17 import six 18 from cfnlint import CloudFormationLintRule 19 from cfnlint import RuleMatch 20 import cfnlint.helpers 21 22 23 class Properties(CloudFormationLintRule): 24 """Check Base Resource Configuration""" 25 id = 'E3002' 26 shortdesc = 'Resource properties are valid' 27 description = 'Making sure that resources properties ' + \ 28 'are properly configured' 29 source_url = 'https://github.com/awslabs/cfn-python-lint/blob/master/docs/cfn-resource-specification.md#properties' 30 tags = ['resources'] 31 32 def __init__(self): 33 super(Properties, self).__init__() 34 self.cfn = {} 35 self.resourcetypes = {} 36 self.propertytypes = {} 37 self.parameternames = {} 38 39 def primitivetypecheck(self, value, primtype, proppath): 40 """ 41 Check primitive types. 
42 Only check that a primitive type is actual a primitive type: 43 - If its JSON let it go 44 - If its Conditions check each sub path of the condition 45 - If its a object make sure its a valid function and function 46 - If its a list raise an error 47 48 """ 49 50 matches = [] 51 if isinstance(value, dict) and primtype == 'Json': 52 return matches 53 if isinstance(value, dict): 54 if len(value) == 1: 55 for sub_key, sub_value in value.items(): 56 if sub_key in cfnlint.helpers.CONDITION_FUNCTIONS: 57 # not erroring on bad Ifs but not need to account for it 58 # so the rule doesn't error out 59 if isinstance(sub_value, list): 60 if len(sub_value) == 3: 61 matches.extend(self.primitivetypecheck( 62 sub_value[1], primtype, proppath + ['Fn::If', 1])) 63 matches.extend(self.primitivetypecheck( 64 sub_value[2], primtype, proppath + ['Fn::If', 2])) 65 elif sub_key not in ['Fn::Base64', 'Fn::GetAtt', 'Fn::GetAZs', 'Fn::ImportValue', 66 'Fn::Join', 'Fn::Split', 'Fn::FindInMap', 'Fn::Select', 'Ref', 67 'Fn::If', 'Fn::Contains', 'Fn::Sub', 'Fn::Cidr']: 68 message = 'Property %s has an illegal function %s' % ('/'.join(map(str, proppath)), sub_key) 69 matches.append(RuleMatch(proppath, message)) 70 else: 71 message = 'Property is an object instead of %s at %s' % (primtype, '/'.join(map(str, proppath))) 72 matches.append(RuleMatch(proppath, message)) 73 elif isinstance(value, list): 74 message = 'Property should be of type %s not List at %s' % (primtype, '/'.join(map(str, proppath))) 75 matches.append(RuleMatch(proppath, message)) 76 77 return matches 78 79 def check_list_for_condition(self, text, prop, parenttype, resourcename, propspec, path): 80 """Checks lists that are a dict for conditions""" 81 matches = [] 82 if len(text[prop]) == 1: 83 for sub_key, sub_value in text[prop].items(): 84 if sub_key in cfnlint.helpers.CONDITION_FUNCTIONS: 85 if len(sub_value) == 3: 86 for if_i, if_v in enumerate(sub_value[1:]): 87 condition_path = path[:] + [sub_key, if_i + 1] 88 if isinstance(if_v, list): 89 for index, item in enumerate(if_v): 90 arrproppath = condition_path[:] 91 92 arrproppath.append(index) 93 matches.extend(self.propertycheck( 94 item, propspec['ItemType'], 95 parenttype, resourcename, arrproppath, False)) 96 elif isinstance(if_v, dict): 97 if len(if_v) == 1: 98 for d_k, d_v in if_v.items(): 99 if d_k != 'Ref' or d_v != 'AWS::NoValue': 100 message = 'Property {0} should be of type List for resource {1} at {2}' 101 matches.append( 102 RuleMatch( 103 condition_path, 104 message.format(prop, resourcename, ('/'.join(str(x) for x in condition_path))))) 105 else: 106 message = 'Property {0} should be of type List for resource {1} at {2}' 107 matches.append( 108 RuleMatch( 109 condition_path, 110 message.format(prop, resourcename, ('/'.join(str(x) for x in condition_path))))) 111 else: 112 message = 'Property {0} should be of type List for resource {1} at {2}' 113 matches.append( 114 RuleMatch( 115 condition_path, 116 message.format(prop, resourcename, ('/'.join(str(x) for x in condition_path))))) 117 118 else: 119 message = 'Invalid !If condition specified at %s' % ('/'.join(map(str, path))) 120 matches.append(RuleMatch(path, message)) 121 else: 122 message = 'Property is an object instead of List at %s' % ('/'.join(map(str, path))) 123 matches.append(RuleMatch(path, message)) 124 else: 125 message = 'Property is an object instead of List at %s' % ('/'.join(map(str, path))) 126 matches.append(RuleMatch(path, message)) 127 128 return matches 129 130 def check_exceptions(self, parenttype, proptype, 
text): 131 """ 132 Checks for exceptions to the spec 133 - Start with handling exceptions for templated code. 134 """ 135 templated_exceptions = { 136 'AWS::ApiGateway::RestApi': ['BodyS3Location'], 137 'AWS::Lambda::Function': ['Code'], 138 'AWS::ElasticBeanstalk::ApplicationVersion': ['SourceBundle'], 139 } 140 141 exceptions = templated_exceptions.get(parenttype, []) 142 if proptype in exceptions: 143 if isinstance(text, six.string_types): 144 return True 145 146 return False 147 148 def propertycheck(self, text, proptype, parenttype, resourcename, path, root): 149 """Check individual properties""" 150 151 parameternames = self.parameternames 152 matches = [] 153 if root: 154 specs = self.resourcetypes 155 resourcetype = parenttype 156 else: 157 specs = self.propertytypes 158 resourcetype = str.format('{0}.{1}', parenttype, proptype) 159 # Handle tags 160 if resourcetype not in specs: 161 if proptype in specs: 162 resourcetype = proptype 163 else: 164 resourcetype = str.format('{0}.{1}', parenttype, proptype) 165 else: 166 resourcetype = str.format('{0}.{1}', parenttype, proptype) 167 168 resourcespec = specs[resourcetype].get('Properties', {}) 169 supports_additional_properties = specs[resourcetype].get('AdditionalProperties', False) 170 171 if text == 'AWS::NoValue': 172 return matches 173 if not isinstance(text, dict): 174 if not self.check_exceptions(parenttype, proptype, text): 175 message = 'Expecting an object at %s' % ('/'.join(map(str, path))) 176 matches.append(RuleMatch(path, message)) 177 return matches 178 179 for prop in text: 180 proppath = path[:] 181 proppath.append(prop) 182 if prop not in resourcespec: 183 if prop in cfnlint.helpers.CONDITION_FUNCTIONS: 184 cond_values = self.cfn.get_condition_values(text[prop]) 185 for cond_value in cond_values: 186 matches.extend(self.propertycheck( 187 cond_value['Value'], proptype, parenttype, resourcename, 188 proppath + cond_value['Path'], root)) 189 elif not supports_additional_properties: 190 message = 'Invalid Property %s' % ('/'.join(map(str, proppath))) 191 matches.append(RuleMatch(proppath, message)) 192 else: 193 if 'Type' in resourcespec[prop]: 194 if resourcespec[prop]['Type'] == 'List': 195 if 'PrimitiveItemType' not in resourcespec[prop]: 196 if isinstance(text[prop], list): 197 for index, item in enumerate(text[prop]): 198 arrproppath = proppath[:] 199 arrproppath.append(index) 200 matches.extend(self.propertycheck( 201 item, resourcespec[prop]['ItemType'], 202 parenttype, resourcename, arrproppath, False)) 203 elif (isinstance(text[prop], dict)): 204 # A list can be be specific as a Conditional 205 matches.extend( 206 self.check_list_for_condition( 207 text, prop, parenttype, resourcename, resourcespec[prop], proppath) 208 ) 209 else: 210 message = 'Property {0} should be of type List for resource {1}' 211 matches.append( 212 RuleMatch( 213 proppath, 214 message.format(prop, resourcename))) 215 else: 216 if isinstance(text[prop], list): 217 primtype = resourcespec[prop]['PrimitiveItemType'] 218 for index, item in enumerate(text[prop]): 219 arrproppath = proppath[:] 220 arrproppath.append(index) 221 matches.extend(self.primitivetypecheck(item, primtype, arrproppath)) 222 elif isinstance(text[prop], dict): 223 if 'Ref' in text[prop]: 224 ref = text[prop]['Ref'] 225 if ref in parameternames: 226 param_type = self.cfn.template['Parameters'][ref]['Type'] 227 if param_type: 228 if not param_type.startswith('List<') and not param_type == 'CommaDelimitedList': 229 message = 'Property {0} should be of type List or Parameter 
should ' \ 230 'be a list for resource {1}' 231 matches.append( 232 RuleMatch( 233 proppath, 234 message.format(prop, resourcename))) 235 else: 236 message = 'Property {0} should be of type List for resource {1}' 237 matches.append( 238 RuleMatch( 239 proppath, 240 message.format(prop, resourcename))) 241 else: 242 message = 'Property {0} should be of type List for resource {1}' 243 matches.append( 244 RuleMatch( 245 proppath, 246 message.format(prop, resourcename))) 247 else: 248 if resourcespec[prop]['Type'] not in ['Map']: 249 matches.extend(self.propertycheck( 250 text[prop], resourcespec[prop]['Type'], parenttype, 251 resourcename, proppath, False)) 252 elif 'PrimitiveType' in resourcespec[prop]: 253 primtype = resourcespec[prop]['PrimitiveType'] 254 matches.extend(self.primitivetypecheck(text[prop], primtype, proppath)) 255 256 return matches 257 258 def match(self, cfn): 259 """Check CloudFormation Properties""" 260 matches = [] 261 self.cfn = cfn 262 263 resourcespecs = cfnlint.helpers.RESOURCE_SPECS[cfn.regions[0]] 264 self.resourcetypes = resourcespecs['ResourceTypes'] 265 self.propertytypes = resourcespecs['PropertyTypes'] 266 self.parameternames = self.cfn.get_parameter_names() 267 for resourcename, resourcevalue in cfn.get_resources().items(): 268 if 'Properties' in resourcevalue and 'Type' in resourcevalue: 269 resourcetype = resourcevalue.get('Type', None) 270 if resourcetype.startswith('Custom::'): 271 resourcetype = 'AWS::CloudFormation::CustomResource' 272 if resourcetype in self.resourcetypes: 273 path = ['Resources', resourcename, 'Properties'] 274 matches.extend(self.propertycheck( 275 resourcevalue.get('Properties', {}), '', 276 resourcetype, resourcename, path, True)) 277 278 return matches 279 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/cfnlint/rules/resources/properties/Properties.py b/src/cfnlint/rules/resources/properties/Properties.py --- a/src/cfnlint/rules/resources/properties/Properties.py +++ b/src/cfnlint/rules/resources/properties/Properties.py @@ -225,7 +225,7 @@ if ref in parameternames: param_type = self.cfn.template['Parameters'][ref]['Type'] if param_type: - if not param_type.startswith('List<') and not param_type == 'CommaDelimitedList': + if 'List<' not in param_type and '<List' not in param_type and not param_type == 'CommaDelimitedList': message = 'Property {0} should be of type List or Parameter should ' \ 'be a list for resource {1}' matches.append(
{"golden_diff": "diff --git a/src/cfnlint/rules/resources/properties/Properties.py b/src/cfnlint/rules/resources/properties/Properties.py\n--- a/src/cfnlint/rules/resources/properties/Properties.py\n+++ b/src/cfnlint/rules/resources/properties/Properties.py\n@@ -225,7 +225,7 @@\n if ref in parameternames:\n param_type = self.cfn.template['Parameters'][ref]['Type']\n if param_type:\n- if not param_type.startswith('List<') and not param_type == 'CommaDelimitedList':\n+ if 'List<' not in param_type and '<List' not in param_type and not param_type == 'CommaDelimitedList':\n message = 'Property {0} should be of type List or Parameter should ' \\\n 'be a list for resource {1}'\n matches.append(\n", "issue": "E3002 error thrown when using parameter lists\n*cfn-lint version cfn-lint 0.8.3\r\n\r\n*Description of issue. When using SSM parameters that are type list, the linter doesn't properly recognize them as a list and thows an error E3002 \r\n\r\nSpecifically E3002 Property PreferredAvailabilityZones should be of type List or Parameter should be a list for resource cacheRedisV1\r\nus-east-1.yaml:249:7\r\n\r\nThe parameter section for this is:\r\nParameters:\r\n azList:\r\n Type: \"AWS::SSM::Parameter::Value<List<String>>\"\r\n Description: \"The list of AZs from Parameter Store\"\r\n Default: '/regionSettings/azList'\r\n\r\nThe resource is defined as:\r\ncacheForumRedisV1:\r\n Type: AWS::ElastiCache::CacheCluster\r\n Properties:\r\n PreferredAvailabilityZones: !Ref azList\r\n\n", "before_files": [{"content": "\"\"\"\n Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport six\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\nimport cfnlint.helpers\n\n\nclass Properties(CloudFormationLintRule):\n \"\"\"Check Base Resource Configuration\"\"\"\n id = 'E3002'\n shortdesc = 'Resource properties are valid'\n description = 'Making sure that resources properties ' + \\\n 'are properly configured'\n source_url = 'https://github.com/awslabs/cfn-python-lint/blob/master/docs/cfn-resource-specification.md#properties'\n tags = ['resources']\n\n def __init__(self):\n super(Properties, self).__init__()\n self.cfn = {}\n self.resourcetypes = {}\n self.propertytypes = {}\n self.parameternames = {}\n\n def primitivetypecheck(self, value, primtype, proppath):\n \"\"\"\n Check primitive types.\n Only check that a primitive type is actual a primitive type:\n - If its JSON let it go\n - If its Conditions check each sub path of the condition\n - If its a object make sure its a valid function and function\n - If its a list raise an error\n\n \"\"\"\n\n matches = []\n if isinstance(value, dict) and primtype == 'Json':\n return matches\n if isinstance(value, dict):\n if len(value) == 1:\n for sub_key, sub_value in value.items():\n if sub_key in cfnlint.helpers.CONDITION_FUNCTIONS:\n # not erroring on bad Ifs but not need to account for it\n # so the rule doesn't error out\n if isinstance(sub_value, list):\n if len(sub_value) == 3:\n matches.extend(self.primitivetypecheck(\n sub_value[1], primtype, proppath + ['Fn::If', 1]))\n matches.extend(self.primitivetypecheck(\n sub_value[2], primtype, proppath + ['Fn::If', 2]))\n elif sub_key not in ['Fn::Base64', 'Fn::GetAtt', 'Fn::GetAZs', 'Fn::ImportValue',\n 'Fn::Join', 'Fn::Split', 'Fn::FindInMap', 'Fn::Select', 'Ref',\n 'Fn::If', 'Fn::Contains', 'Fn::Sub', 'Fn::Cidr']:\n message = 'Property %s has an illegal function %s' % ('/'.join(map(str, proppath)), sub_key)\n matches.append(RuleMatch(proppath, message))\n else:\n message = 'Property is an object instead of %s at %s' % (primtype, '/'.join(map(str, proppath)))\n matches.append(RuleMatch(proppath, message))\n elif isinstance(value, list):\n message = 'Property should be of type %s not List at %s' % (primtype, '/'.join(map(str, proppath)))\n matches.append(RuleMatch(proppath, message))\n\n return matches\n\n def check_list_for_condition(self, text, prop, parenttype, resourcename, propspec, path):\n \"\"\"Checks lists that are a dict for conditions\"\"\"\n matches = []\n if len(text[prop]) == 1:\n for sub_key, sub_value in text[prop].items():\n if sub_key in cfnlint.helpers.CONDITION_FUNCTIONS:\n if len(sub_value) == 3:\n for if_i, if_v in enumerate(sub_value[1:]):\n condition_path = path[:] + [sub_key, if_i + 1]\n if isinstance(if_v, list):\n for index, item in enumerate(if_v):\n arrproppath = condition_path[:]\n\n arrproppath.append(index)\n matches.extend(self.propertycheck(\n item, propspec['ItemType'],\n parenttype, resourcename, arrproppath, False))\n elif isinstance(if_v, dict):\n if len(if_v) == 1:\n for d_k, d_v in if_v.items():\n if d_k != 'Ref' or d_v != 'AWS::NoValue':\n message = 'Property {0} should be of type List for resource {1} at {2}'\n matches.append(\n RuleMatch(\n condition_path,\n message.format(prop, resourcename, ('/'.join(str(x) for x in condition_path)))))\n 
else:\n message = 'Property {0} should be of type List for resource {1} at {2}'\n matches.append(\n RuleMatch(\n condition_path,\n message.format(prop, resourcename, ('/'.join(str(x) for x in condition_path)))))\n else:\n message = 'Property {0} should be of type List for resource {1} at {2}'\n matches.append(\n RuleMatch(\n condition_path,\n message.format(prop, resourcename, ('/'.join(str(x) for x in condition_path)))))\n\n else:\n message = 'Invalid !If condition specified at %s' % ('/'.join(map(str, path)))\n matches.append(RuleMatch(path, message))\n else:\n message = 'Property is an object instead of List at %s' % ('/'.join(map(str, path)))\n matches.append(RuleMatch(path, message))\n else:\n message = 'Property is an object instead of List at %s' % ('/'.join(map(str, path)))\n matches.append(RuleMatch(path, message))\n\n return matches\n\n def check_exceptions(self, parenttype, proptype, text):\n \"\"\"\n Checks for exceptions to the spec\n - Start with handling exceptions for templated code.\n \"\"\"\n templated_exceptions = {\n 'AWS::ApiGateway::RestApi': ['BodyS3Location'],\n 'AWS::Lambda::Function': ['Code'],\n 'AWS::ElasticBeanstalk::ApplicationVersion': ['SourceBundle'],\n }\n\n exceptions = templated_exceptions.get(parenttype, [])\n if proptype in exceptions:\n if isinstance(text, six.string_types):\n return True\n\n return False\n\n def propertycheck(self, text, proptype, parenttype, resourcename, path, root):\n \"\"\"Check individual properties\"\"\"\n\n parameternames = self.parameternames\n matches = []\n if root:\n specs = self.resourcetypes\n resourcetype = parenttype\n else:\n specs = self.propertytypes\n resourcetype = str.format('{0}.{1}', parenttype, proptype)\n # Handle tags\n if resourcetype not in specs:\n if proptype in specs:\n resourcetype = proptype\n else:\n resourcetype = str.format('{0}.{1}', parenttype, proptype)\n else:\n resourcetype = str.format('{0}.{1}', parenttype, proptype)\n\n resourcespec = specs[resourcetype].get('Properties', {})\n supports_additional_properties = specs[resourcetype].get('AdditionalProperties', False)\n\n if text == 'AWS::NoValue':\n return matches\n if not isinstance(text, dict):\n if not self.check_exceptions(parenttype, proptype, text):\n message = 'Expecting an object at %s' % ('/'.join(map(str, path)))\n matches.append(RuleMatch(path, message))\n return matches\n\n for prop in text:\n proppath = path[:]\n proppath.append(prop)\n if prop not in resourcespec:\n if prop in cfnlint.helpers.CONDITION_FUNCTIONS:\n cond_values = self.cfn.get_condition_values(text[prop])\n for cond_value in cond_values:\n matches.extend(self.propertycheck(\n cond_value['Value'], proptype, parenttype, resourcename,\n proppath + cond_value['Path'], root))\n elif not supports_additional_properties:\n message = 'Invalid Property %s' % ('/'.join(map(str, proppath)))\n matches.append(RuleMatch(proppath, message))\n else:\n if 'Type' in resourcespec[prop]:\n if resourcespec[prop]['Type'] == 'List':\n if 'PrimitiveItemType' not in resourcespec[prop]:\n if isinstance(text[prop], list):\n for index, item in enumerate(text[prop]):\n arrproppath = proppath[:]\n arrproppath.append(index)\n matches.extend(self.propertycheck(\n item, resourcespec[prop]['ItemType'],\n parenttype, resourcename, arrproppath, False))\n elif (isinstance(text[prop], dict)):\n # A list can be be specific as a Conditional\n matches.extend(\n self.check_list_for_condition(\n text, prop, parenttype, resourcename, resourcespec[prop], proppath)\n )\n else:\n message = 'Property {0} should 
be of type List for resource {1}'\n matches.append(\n RuleMatch(\n proppath,\n message.format(prop, resourcename)))\n else:\n if isinstance(text[prop], list):\n primtype = resourcespec[prop]['PrimitiveItemType']\n for index, item in enumerate(text[prop]):\n arrproppath = proppath[:]\n arrproppath.append(index)\n matches.extend(self.primitivetypecheck(item, primtype, arrproppath))\n elif isinstance(text[prop], dict):\n if 'Ref' in text[prop]:\n ref = text[prop]['Ref']\n if ref in parameternames:\n param_type = self.cfn.template['Parameters'][ref]['Type']\n if param_type:\n if not param_type.startswith('List<') and not param_type == 'CommaDelimitedList':\n message = 'Property {0} should be of type List or Parameter should ' \\\n 'be a list for resource {1}'\n matches.append(\n RuleMatch(\n proppath,\n message.format(prop, resourcename)))\n else:\n message = 'Property {0} should be of type List for resource {1}'\n matches.append(\n RuleMatch(\n proppath,\n message.format(prop, resourcename)))\n else:\n message = 'Property {0} should be of type List for resource {1}'\n matches.append(\n RuleMatch(\n proppath,\n message.format(prop, resourcename)))\n else:\n if resourcespec[prop]['Type'] not in ['Map']:\n matches.extend(self.propertycheck(\n text[prop], resourcespec[prop]['Type'], parenttype,\n resourcename, proppath, False))\n elif 'PrimitiveType' in resourcespec[prop]:\n primtype = resourcespec[prop]['PrimitiveType']\n matches.extend(self.primitivetypecheck(text[prop], primtype, proppath))\n\n return matches\n\n def match(self, cfn):\n \"\"\"Check CloudFormation Properties\"\"\"\n matches = []\n self.cfn = cfn\n\n resourcespecs = cfnlint.helpers.RESOURCE_SPECS[cfn.regions[0]]\n self.resourcetypes = resourcespecs['ResourceTypes']\n self.propertytypes = resourcespecs['PropertyTypes']\n self.parameternames = self.cfn.get_parameter_names()\n for resourcename, resourcevalue in cfn.get_resources().items():\n if 'Properties' in resourcevalue and 'Type' in resourcevalue:\n resourcetype = resourcevalue.get('Type', None)\n if resourcetype.startswith('Custom::'):\n resourcetype = 'AWS::CloudFormation::CustomResource'\n if resourcetype in self.resourcetypes:\n path = ['Resources', resourcename, 'Properties']\n matches.extend(self.propertycheck(\n resourcevalue.get('Properties', {}), '',\n resourcetype, resourcename, path, True))\n\n return matches\n", "path": "src/cfnlint/rules/resources/properties/Properties.py"}], "after_files": [{"content": "\"\"\"\n Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport six\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\nimport cfnlint.helpers\n\n\nclass Properties(CloudFormationLintRule):\n \"\"\"Check Base Resource Configuration\"\"\"\n id = 'E3002'\n shortdesc = 'Resource properties are valid'\n description = 'Making sure that resources properties ' + \\\n 'are properly configured'\n source_url = 'https://github.com/awslabs/cfn-python-lint/blob/master/docs/cfn-resource-specification.md#properties'\n tags = ['resources']\n\n def __init__(self):\n super(Properties, self).__init__()\n self.cfn = {}\n self.resourcetypes = {}\n self.propertytypes = {}\n self.parameternames = {}\n\n def primitivetypecheck(self, value, primtype, proppath):\n \"\"\"\n Check primitive types.\n Only check that a primitive type is actual a primitive type:\n - If its JSON let it go\n - If its Conditions check each sub path of the condition\n - If its a object make sure its a valid function and function\n - If its a list raise an error\n\n \"\"\"\n\n matches = []\n if isinstance(value, dict) and primtype == 'Json':\n return matches\n if isinstance(value, dict):\n if len(value) == 1:\n for sub_key, sub_value in value.items():\n if sub_key in cfnlint.helpers.CONDITION_FUNCTIONS:\n # not erroring on bad Ifs but not need to account for it\n # so the rule doesn't error out\n if isinstance(sub_value, list):\n if len(sub_value) == 3:\n matches.extend(self.primitivetypecheck(\n sub_value[1], primtype, proppath + ['Fn::If', 1]))\n matches.extend(self.primitivetypecheck(\n sub_value[2], primtype, proppath + ['Fn::If', 2]))\n elif sub_key not in ['Fn::Base64', 'Fn::GetAtt', 'Fn::GetAZs', 'Fn::ImportValue',\n 'Fn::Join', 'Fn::Split', 'Fn::FindInMap', 'Fn::Select', 'Ref',\n 'Fn::If', 'Fn::Contains', 'Fn::Sub', 'Fn::Cidr']:\n message = 'Property %s has an illegal function %s' % ('/'.join(map(str, proppath)), sub_key)\n matches.append(RuleMatch(proppath, message))\n else:\n message = 'Property is an object instead of %s at %s' % (primtype, '/'.join(map(str, proppath)))\n matches.append(RuleMatch(proppath, message))\n elif isinstance(value, list):\n message = 'Property should be of type %s not List at %s' % (primtype, '/'.join(map(str, proppath)))\n matches.append(RuleMatch(proppath, message))\n\n return matches\n\n def check_list_for_condition(self, text, prop, parenttype, resourcename, propspec, path):\n \"\"\"Checks lists that are a dict for conditions\"\"\"\n matches = []\n if len(text[prop]) == 1:\n for sub_key, sub_value in text[prop].items():\n if sub_key in cfnlint.helpers.CONDITION_FUNCTIONS:\n if len(sub_value) == 3:\n for if_i, if_v in enumerate(sub_value[1:]):\n condition_path = path[:] + [sub_key, if_i + 1]\n if isinstance(if_v, list):\n for index, item in enumerate(if_v):\n arrproppath = condition_path[:]\n\n arrproppath.append(index)\n matches.extend(self.propertycheck(\n item, propspec['ItemType'],\n parenttype, resourcename, arrproppath, False))\n elif isinstance(if_v, dict):\n if len(if_v) == 1:\n for d_k, d_v in if_v.items():\n if d_k != 'Ref' or d_v != 'AWS::NoValue':\n message = 'Property {0} should be of type List for resource {1} at {2}'\n matches.append(\n RuleMatch(\n condition_path,\n message.format(prop, resourcename, ('/'.join(str(x) for x in condition_path)))))\n 
else:\n message = 'Property {0} should be of type List for resource {1} at {2}'\n matches.append(\n RuleMatch(\n condition_path,\n message.format(prop, resourcename, ('/'.join(str(x) for x in condition_path)))))\n else:\n message = 'Property {0} should be of type List for resource {1} at {2}'\n matches.append(\n RuleMatch(\n condition_path,\n message.format(prop, resourcename, ('/'.join(str(x) for x in condition_path)))))\n\n else:\n message = 'Invalid !If condition specified at %s' % ('/'.join(map(str, path)))\n matches.append(RuleMatch(path, message))\n else:\n message = 'Property is an object instead of List at %s' % ('/'.join(map(str, path)))\n matches.append(RuleMatch(path, message))\n else:\n message = 'Property is an object instead of List at %s' % ('/'.join(map(str, path)))\n matches.append(RuleMatch(path, message))\n\n return matches\n\n def check_exceptions(self, parenttype, proptype, text):\n \"\"\"\n Checks for exceptions to the spec\n - Start with handling exceptions for templated code.\n \"\"\"\n templated_exceptions = {\n 'AWS::ApiGateway::RestApi': ['BodyS3Location'],\n 'AWS::Lambda::Function': ['Code'],\n 'AWS::ElasticBeanstalk::ApplicationVersion': ['SourceBundle'],\n }\n\n exceptions = templated_exceptions.get(parenttype, [])\n if proptype in exceptions:\n if isinstance(text, six.string_types):\n return True\n\n return False\n\n def propertycheck(self, text, proptype, parenttype, resourcename, path, root):\n \"\"\"Check individual properties\"\"\"\n\n parameternames = self.parameternames\n matches = []\n if root:\n specs = self.resourcetypes\n resourcetype = parenttype\n else:\n specs = self.propertytypes\n resourcetype = str.format('{0}.{1}', parenttype, proptype)\n # Handle tags\n if resourcetype not in specs:\n if proptype in specs:\n resourcetype = proptype\n else:\n resourcetype = str.format('{0}.{1}', parenttype, proptype)\n else:\n resourcetype = str.format('{0}.{1}', parenttype, proptype)\n\n resourcespec = specs[resourcetype].get('Properties', {})\n supports_additional_properties = specs[resourcetype].get('AdditionalProperties', False)\n\n if text == 'AWS::NoValue':\n return matches\n if not isinstance(text, dict):\n if not self.check_exceptions(parenttype, proptype, text):\n message = 'Expecting an object at %s' % ('/'.join(map(str, path)))\n matches.append(RuleMatch(path, message))\n return matches\n\n for prop in text:\n proppath = path[:]\n proppath.append(prop)\n if prop not in resourcespec:\n if prop in cfnlint.helpers.CONDITION_FUNCTIONS:\n cond_values = self.cfn.get_condition_values(text[prop])\n for cond_value in cond_values:\n matches.extend(self.propertycheck(\n cond_value['Value'], proptype, parenttype, resourcename,\n proppath + cond_value['Path'], root))\n elif not supports_additional_properties:\n message = 'Invalid Property %s' % ('/'.join(map(str, proppath)))\n matches.append(RuleMatch(proppath, message))\n else:\n if 'Type' in resourcespec[prop]:\n if resourcespec[prop]['Type'] == 'List':\n if 'PrimitiveItemType' not in resourcespec[prop]:\n if isinstance(text[prop], list):\n for index, item in enumerate(text[prop]):\n arrproppath = proppath[:]\n arrproppath.append(index)\n matches.extend(self.propertycheck(\n item, resourcespec[prop]['ItemType'],\n parenttype, resourcename, arrproppath, False))\n elif (isinstance(text[prop], dict)):\n # A list can be be specific as a Conditional\n matches.extend(\n self.check_list_for_condition(\n text, prop, parenttype, resourcename, resourcespec[prop], proppath)\n )\n else:\n message = 'Property {0} should 
be of type List for resource {1}'\n matches.append(\n RuleMatch(\n proppath,\n message.format(prop, resourcename)))\n else:\n if isinstance(text[prop], list):\n primtype = resourcespec[prop]['PrimitiveItemType']\n for index, item in enumerate(text[prop]):\n arrproppath = proppath[:]\n arrproppath.append(index)\n matches.extend(self.primitivetypecheck(item, primtype, arrproppath))\n elif isinstance(text[prop], dict):\n if 'Ref' in text[prop]:\n ref = text[prop]['Ref']\n if ref in parameternames:\n param_type = self.cfn.template['Parameters'][ref]['Type']\n if param_type:\n if 'List<' not in param_type and '<List' not in param_type and not param_type == 'CommaDelimitedList':\n message = 'Property {0} should be of type List or Parameter should ' \\\n 'be a list for resource {1}'\n matches.append(\n RuleMatch(\n proppath,\n message.format(prop, resourcename)))\n else:\n message = 'Property {0} should be of type List for resource {1}'\n matches.append(\n RuleMatch(\n proppath,\n message.format(prop, resourcename)))\n else:\n message = 'Property {0} should be of type List for resource {1}'\n matches.append(\n RuleMatch(\n proppath,\n message.format(prop, resourcename)))\n else:\n if resourcespec[prop]['Type'] not in ['Map']:\n matches.extend(self.propertycheck(\n text[prop], resourcespec[prop]['Type'], parenttype,\n resourcename, proppath, False))\n elif 'PrimitiveType' in resourcespec[prop]:\n primtype = resourcespec[prop]['PrimitiveType']\n matches.extend(self.primitivetypecheck(text[prop], primtype, proppath))\n\n return matches\n\n def match(self, cfn):\n \"\"\"Check CloudFormation Properties\"\"\"\n matches = []\n self.cfn = cfn\n\n resourcespecs = cfnlint.helpers.RESOURCE_SPECS[cfn.regions[0]]\n self.resourcetypes = resourcespecs['ResourceTypes']\n self.propertytypes = resourcespecs['PropertyTypes']\n self.parameternames = self.cfn.get_parameter_names()\n for resourcename, resourcevalue in cfn.get_resources().items():\n if 'Properties' in resourcevalue and 'Type' in resourcevalue:\n resourcetype = resourcevalue.get('Type', None)\n if resourcetype.startswith('Custom::'):\n resourcetype = 'AWS::CloudFormation::CustomResource'\n if resourcetype in self.resourcetypes:\n path = ['Resources', resourcename, 'Properties']\n matches.extend(self.propertycheck(\n resourcevalue.get('Properties', {}), '',\n resourcetype, resourcename, path, True))\n\n return matches\n", "path": "src/cfnlint/rules/resources/properties/Properties.py"}]}
3,918
184
gh_patches_debug_17287
rasdani/github-patches
git_diff
streamlit__streamlit-999
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- PyDeck warning Mapbox API key not set # Summary Migrated a deck_gl chart to PyDeck API. When page is run get a UserWarning that the Mapbox API key is not set. Old deck_gl_chart() function does not display the warning. # Steps to reproduce 1: Get a personal mapbox token, Verify it is set using streamlit config show [mapbox] ``` # Configure Streamlit to use a custom Mapbox token for elements like st.deck_gl_chart and st.map. If you don't do this you'll be using Streamlit's own token, which has limitations and is not guaranteed to always work. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels) # Default: "pk.eyJ1IjoidGhpYWdvdCIsImEiOiJjamh3bm85NnkwMng4M3dydnNveWwzeWNzIn0.vCBDzNsEF2uFSFk2AM0WZQ" # The value below was set in C:\Users\...path...\.streamlit\config.toml token = "pk.eyJ1Ijoia25......................." ``` 2. Run a PyDeck chart, any demo should do 3. Inspect the output from streamlit run app.py in the shell ## Expected behavior: map displays, no message in shell ## Actual behavior: Map displays, shell displays a UserWarning ``` You can now view your Streamlit app in your browser. URL: http://localhost:8501 c:\apps\anaconda3\envs\ccadash\lib\site-packages\pydeck\bindings\deck.py:82: UserWarning: Mapbox API key is not set. This may impact available features of pydeck. UserWarning, ``` ## Is this a regression? That is, did this use to work the way you expected in the past? yes # Debug info - Streamlit version:0.53.0 - Python version: 3.7.3 - Using Conda? PipEnv? PyEnv? Pex? Conda - OS version: Windows 10 - Browser version: Chrome Version 79.0.3945.117 (Official Build) (64-bit) # Additional information If needed, add any other context about the problem here. For exmaple, did this bug come from https://discuss.streamlit.io or another site? Link the original source here! --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `lib/streamlit/bootstrap.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # Copyright 2018-2020 Streamlit Inc. 3 # 4 # Licensed under the Apache License, Version 2.0 (the "License"); 5 # you may not use this file except in compliance with the License. 6 # You may obtain a copy of the License at 7 # 8 # http://www.apache.org/licenses/LICENSE-2.0 9 # 10 # Unless required by applicable law or agreed to in writing, software 11 # distributed under the License is distributed on an "AS IS" BASIS, 12 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 # See the License for the specific language governing permissions and 14 # limitations under the License. 15 16 import os 17 import signal 18 import sys 19 20 import click 21 import tornado.ioloop 22 23 from streamlit import config 24 from streamlit import net_util 25 from streamlit import url_util 26 from streamlit import env_util 27 from streamlit import util 28 from streamlit.Report import Report 29 from streamlit.logger import get_logger 30 from streamlit.server.Server import Server 31 32 LOGGER = get_logger(__name__) 33 34 # Wait for 1 second before opening a browser. This gives old tabs a chance to 35 # reconnect. 36 # This must be >= 2 * WebSocketConnection.ts#RECONNECT_WAIT_TIME_MS. 
37 BROWSER_WAIT_TIMEOUT_SEC = 1 38 39 40 def _set_up_signal_handler(): 41 LOGGER.debug("Setting up signal handler") 42 43 def signal_handler(signal_number, stack_frame): 44 # The server will shut down its threads and stop the ioloop 45 Server.get_current().stop() 46 47 signal.signal(signal.SIGTERM, signal_handler) 48 signal.signal(signal.SIGINT, signal_handler) 49 if sys.platform == "win32": 50 signal.signal(signal.SIGBREAK, signal_handler) 51 else: 52 signal.signal(signal.SIGQUIT, signal_handler) 53 54 55 def _fix_sys_path(script_path): 56 """Add the script's folder to the sys path. 57 58 Python normally does this automatically, but since we exec the script 59 ourselves we need to do it instead. 60 """ 61 sys.path.insert(0, os.path.dirname(script_path)) 62 63 64 def _fix_matplotlib_crash(): 65 """Set Matplotlib backend to avoid a crash. 66 67 The default Matplotlib backend crashes Python on OSX when run on a thread 68 that's not the main thread, so here we set a safer backend as a fix. 69 Users can always disable this behavior by setting the config 70 runner.fixMatplotlib = false. 71 72 This fix is OS-independent. We didn't see a good reason to make this 73 Mac-only. Consistency within Streamlit seemed more important. 74 """ 75 if config.get_option("runner.fixMatplotlib"): 76 try: 77 # TODO: a better option may be to set 78 # os.environ["MPLBACKEND"] = "Agg". We'd need to do this towards 79 # the top of __init__.py, before importing anything that imports 80 # pandas (which imports matplotlib). Alternately, we could set 81 # this environment variable in a new entrypoint defined in 82 # setup.py. Both of these introduce additional trickiness: they 83 # need to run without consulting streamlit.config.get_option, 84 # because this would import streamlit, and therefore matplotlib. 85 import matplotlib 86 87 matplotlib.use("Agg") 88 except ImportError: 89 pass 90 91 92 def _fix_tornado_crash(): 93 """Set default asyncio policy to be compatible with Tornado 6. 94 95 Tornado 6 (at least) is not compatible with the default 96 asyncio implementation on Windows. So here we 97 pick the older SelectorEventLoopPolicy when the OS is Windows 98 if the known-incompatible default policy is in use. 99 100 This has to happen as early as possible to make it a low priority and 101 overrideable 102 103 See: https://github.com/tornadoweb/tornado/issues/2608 104 105 FIXME: if/when tornado supports the defaults in asyncio, 106 remove and bump tornado requirement for py38 107 """ 108 if env_util.IS_WINDOWS and sys.version_info >= (3, 8): 109 import asyncio 110 111 try: 112 from asyncio import ( 113 WindowsProactorEventLoopPolicy, 114 WindowsSelectorEventLoopPolicy, 115 ) 116 except ImportError: 117 pass 118 # Not affected 119 else: 120 if type(asyncio.get_event_loop_policy()) is WindowsProactorEventLoopPolicy: 121 # WindowsProactorEventLoopPolicy is not compatible with 122 # Tornado 6 fallback to the pre-3.8 default of Selector 123 asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy()) 124 125 126 def _fix_sys_argv(script_path, args): 127 """sys.argv needs to exclude streamlit arguments and parameters 128 and be set to what a user's script may expect. 129 """ 130 import sys 131 132 sys.argv = [script_path] + list(args) 133 134 135 def _on_server_start(server): 136 _print_url() 137 138 def maybe_open_browser(): 139 if config.get_option("server.headless"): 140 # Don't open browser when in headless mode. 
141 return 142 143 if server.browser_is_connected: 144 # Don't auto-open browser if there's already a browser connected. 145 # This can happen if there's an old tab repeatedly trying to 146 # connect, and it happens to success before we launch the browser. 147 return 148 149 if config.is_manually_set("browser.serverAddress"): 150 addr = config.get_option("browser.serverAddress") 151 else: 152 addr = "localhost" 153 154 util.open_browser(Report.get_url(addr)) 155 156 # Schedule the browser to open using the IO Loop on the main thread, but 157 # only if no other browser connects within 1s. 158 ioloop = tornado.ioloop.IOLoop.current() 159 ioloop.call_later(BROWSER_WAIT_TIMEOUT_SEC, maybe_open_browser) 160 161 162 def _print_url(): 163 title_message = "You can now view your Streamlit app in your browser." 164 named_urls = [] 165 166 if config.is_manually_set("browser.serverAddress"): 167 named_urls = [ 168 ("URL", Report.get_url(config.get_option("browser.serverAddress"))) 169 ] 170 171 elif config.get_option("server.headless"): 172 named_urls = [ 173 ("Network URL", Report.get_url(net_util.get_internal_ip())), 174 ("External URL", Report.get_url(net_util.get_external_ip())), 175 ] 176 177 else: 178 named_urls = [ 179 ("Local URL", Report.get_url("localhost")), 180 ("Network URL", Report.get_url(net_util.get_internal_ip())), 181 ] 182 183 click.secho("") 184 click.secho(" %s" % title_message, fg="blue", bold=True) 185 click.secho("") 186 187 for url_name, url in named_urls: 188 url_util.print_url(url_name, url) 189 190 click.secho("") 191 192 193 def run(script_path, command_line, args): 194 """Run a script in a separate thread and start a server for the app. 195 196 This starts a blocking ioloop. 197 198 Parameters 199 ---------- 200 script_path : str 201 command_line : str 202 args : [str] 203 204 """ 205 _fix_sys_path(script_path) 206 _fix_matplotlib_crash() 207 _fix_tornado_crash() 208 _fix_sys_argv(script_path, args) 209 210 # Install a signal handler that will shut down the ioloop 211 # and close all our threads 212 _set_up_signal_handler() 213 214 ioloop = tornado.ioloop.IOLoop.current() 215 216 # Create and start the server. 217 server = Server(ioloop, script_path, command_line) 218 server.start(_on_server_start) 219 220 # (Must com after start(), because this starts a new thread and start() may 221 # call sys.exit() which doesn't kill other threads. 222 server.add_preheated_report_session() 223 224 # Start the ioloop. This function will not return until the 225 # server is shut down. 226 ioloop.start() 227 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/lib/streamlit/bootstrap.py b/lib/streamlit/bootstrap.py --- a/lib/streamlit/bootstrap.py +++ b/lib/streamlit/bootstrap.py @@ -159,6 +159,12 @@ ioloop.call_later(BROWSER_WAIT_TIMEOUT_SEC, maybe_open_browser) +def _fix_pydeck_mapbox_api_warning(): + """Sets MAPBOX_API_KEY environment variable needed for PyDeck otherwise it will throw an exception""" + + os.environ["MAPBOX_API_KEY"] = config.get_option("mapbox.token") + + def _print_url(): title_message = "You can now view your Streamlit app in your browser." named_urls = [] @@ -206,6 +212,7 @@ _fix_matplotlib_crash() _fix_tornado_crash() _fix_sys_argv(script_path, args) + _fix_pydeck_mapbox_api_warning() # Install a signal handler that will shut down the ioloop # and close all our threads
{"golden_diff": "diff --git a/lib/streamlit/bootstrap.py b/lib/streamlit/bootstrap.py\n--- a/lib/streamlit/bootstrap.py\n+++ b/lib/streamlit/bootstrap.py\n@@ -159,6 +159,12 @@\n ioloop.call_later(BROWSER_WAIT_TIMEOUT_SEC, maybe_open_browser)\n \n \n+def _fix_pydeck_mapbox_api_warning():\n+ \"\"\"Sets MAPBOX_API_KEY environment variable needed for PyDeck otherwise it will throw an exception\"\"\"\n+\n+ os.environ[\"MAPBOX_API_KEY\"] = config.get_option(\"mapbox.token\")\n+\n+\n def _print_url():\n title_message = \"You can now view your Streamlit app in your browser.\"\n named_urls = []\n@@ -206,6 +212,7 @@\n _fix_matplotlib_crash()\n _fix_tornado_crash()\n _fix_sys_argv(script_path, args)\n+ _fix_pydeck_mapbox_api_warning()\n \n # Install a signal handler that will shut down the ioloop\n # and close all our threads\n", "issue": "PyDeck warning Mapbox API key not set\n# Summary\r\n\r\nMigrated a deck_gl chart to PyDeck API. When page is run get a UserWarning that the Mapbox API key is not set. Old deck_gl_chart() function does not display the warning. \r\n\r\n# Steps to reproduce\r\n\r\n1: Get a personal mapbox token, Verify it is set using streamlit config show\r\n\r\n[mapbox]\r\n\r\n```\r\n# Configure Streamlit to use a custom Mapbox token for elements like st.deck_gl_chart and st.map. If you don't do this you'll be using Streamlit's own token, which has limitations and is not guaranteed to always work. To get a token for yourself, create an account at https://mapbox.com. It's free! (for moderate usage levels)\r\n# Default: \"pk.eyJ1IjoidGhpYWdvdCIsImEiOiJjamh3bm85NnkwMng4M3dydnNveWwzeWNzIn0.vCBDzNsEF2uFSFk2AM0WZQ\"\r\n# The value below was set in C:\\Users\\...path...\\.streamlit\\config.toml\r\ntoken = \"pk.eyJ1Ijoia25.......................\"\r\n```\r\n\r\n2. Run a PyDeck chart, any demo should do\r\n3. Inspect the output from streamlit run app.py in the shell\r\n\r\n## Expected behavior:\r\n\r\nmap displays, no message in shell\r\n\r\n## Actual behavior:\r\n\r\nMap displays, shell displays a UserWarning\r\n\r\n```\r\nYou can now view your Streamlit app in your browser.\r\n\r\n URL: http://localhost:8501\r\n\r\nc:\\apps\\anaconda3\\envs\\ccadash\\lib\\site-packages\\pydeck\\bindings\\deck.py:82: UserWarning: Mapbox API key is not set. This may impact available features of pydeck.\r\n UserWarning,\r\n```\r\n\r\n## Is this a regression?\r\n\r\nThat is, did this use to work the way you expected in the past?\r\nyes\r\n\r\n# Debug info\r\n\r\n- Streamlit version:0.53.0\r\n- Python version: 3.7.3\r\n- Using Conda? PipEnv? PyEnv? Pex? Conda\r\n- OS version: Windows 10\r\n- Browser version: Chrome Version 79.0.3945.117 (Official Build) (64-bit)\r\n\r\n# Additional information\r\n\r\nIf needed, add any other context about the problem here. For exmaple, did this bug come from https://discuss.streamlit.io or another site? 
Link the original source here!\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2018-2020 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport signal\nimport sys\n\nimport click\nimport tornado.ioloop\n\nfrom streamlit import config\nfrom streamlit import net_util\nfrom streamlit import url_util\nfrom streamlit import env_util\nfrom streamlit import util\nfrom streamlit.Report import Report\nfrom streamlit.logger import get_logger\nfrom streamlit.server.Server import Server\n\nLOGGER = get_logger(__name__)\n\n# Wait for 1 second before opening a browser. This gives old tabs a chance to\n# reconnect.\n# This must be >= 2 * WebSocketConnection.ts#RECONNECT_WAIT_TIME_MS.\nBROWSER_WAIT_TIMEOUT_SEC = 1\n\n\ndef _set_up_signal_handler():\n LOGGER.debug(\"Setting up signal handler\")\n\n def signal_handler(signal_number, stack_frame):\n # The server will shut down its threads and stop the ioloop\n Server.get_current().stop()\n\n signal.signal(signal.SIGTERM, signal_handler)\n signal.signal(signal.SIGINT, signal_handler)\n if sys.platform == \"win32\":\n signal.signal(signal.SIGBREAK, signal_handler)\n else:\n signal.signal(signal.SIGQUIT, signal_handler)\n\n\ndef _fix_sys_path(script_path):\n \"\"\"Add the script's folder to the sys path.\n\n Python normally does this automatically, but since we exec the script\n ourselves we need to do it instead.\n \"\"\"\n sys.path.insert(0, os.path.dirname(script_path))\n\n\ndef _fix_matplotlib_crash():\n \"\"\"Set Matplotlib backend to avoid a crash.\n\n The default Matplotlib backend crashes Python on OSX when run on a thread\n that's not the main thread, so here we set a safer backend as a fix.\n Users can always disable this behavior by setting the config\n runner.fixMatplotlib = false.\n\n This fix is OS-independent. We didn't see a good reason to make this\n Mac-only. Consistency within Streamlit seemed more important.\n \"\"\"\n if config.get_option(\"runner.fixMatplotlib\"):\n try:\n # TODO: a better option may be to set\n # os.environ[\"MPLBACKEND\"] = \"Agg\". We'd need to do this towards\n # the top of __init__.py, before importing anything that imports\n # pandas (which imports matplotlib). Alternately, we could set\n # this environment variable in a new entrypoint defined in\n # setup.py. Both of these introduce additional trickiness: they\n # need to run without consulting streamlit.config.get_option,\n # because this would import streamlit, and therefore matplotlib.\n import matplotlib\n\n matplotlib.use(\"Agg\")\n except ImportError:\n pass\n\n\ndef _fix_tornado_crash():\n \"\"\"Set default asyncio policy to be compatible with Tornado 6.\n\n Tornado 6 (at least) is not compatible with the default\n asyncio implementation on Windows. 
So here we\n pick the older SelectorEventLoopPolicy when the OS is Windows\n if the known-incompatible default policy is in use.\n\n This has to happen as early as possible to make it a low priority and\n overrideable\n\n See: https://github.com/tornadoweb/tornado/issues/2608\n\n FIXME: if/when tornado supports the defaults in asyncio,\n remove and bump tornado requirement for py38\n \"\"\"\n if env_util.IS_WINDOWS and sys.version_info >= (3, 8):\n import asyncio\n\n try:\n from asyncio import (\n WindowsProactorEventLoopPolicy,\n WindowsSelectorEventLoopPolicy,\n )\n except ImportError:\n pass\n # Not affected\n else:\n if type(asyncio.get_event_loop_policy()) is WindowsProactorEventLoopPolicy:\n # WindowsProactorEventLoopPolicy is not compatible with\n # Tornado 6 fallback to the pre-3.8 default of Selector\n asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy())\n\n\ndef _fix_sys_argv(script_path, args):\n \"\"\"sys.argv needs to exclude streamlit arguments and parameters\n and be set to what a user's script may expect.\n \"\"\"\n import sys\n\n sys.argv = [script_path] + list(args)\n\n\ndef _on_server_start(server):\n _print_url()\n\n def maybe_open_browser():\n if config.get_option(\"server.headless\"):\n # Don't open browser when in headless mode.\n return\n\n if server.browser_is_connected:\n # Don't auto-open browser if there's already a browser connected.\n # This can happen if there's an old tab repeatedly trying to\n # connect, and it happens to success before we launch the browser.\n return\n\n if config.is_manually_set(\"browser.serverAddress\"):\n addr = config.get_option(\"browser.serverAddress\")\n else:\n addr = \"localhost\"\n\n util.open_browser(Report.get_url(addr))\n\n # Schedule the browser to open using the IO Loop on the main thread, but\n # only if no other browser connects within 1s.\n ioloop = tornado.ioloop.IOLoop.current()\n ioloop.call_later(BROWSER_WAIT_TIMEOUT_SEC, maybe_open_browser)\n\n\ndef _print_url():\n title_message = \"You can now view your Streamlit app in your browser.\"\n named_urls = []\n\n if config.is_manually_set(\"browser.serverAddress\"):\n named_urls = [\n (\"URL\", Report.get_url(config.get_option(\"browser.serverAddress\")))\n ]\n\n elif config.get_option(\"server.headless\"):\n named_urls = [\n (\"Network URL\", Report.get_url(net_util.get_internal_ip())),\n (\"External URL\", Report.get_url(net_util.get_external_ip())),\n ]\n\n else:\n named_urls = [\n (\"Local URL\", Report.get_url(\"localhost\")),\n (\"Network URL\", Report.get_url(net_util.get_internal_ip())),\n ]\n\n click.secho(\"\")\n click.secho(\" %s\" % title_message, fg=\"blue\", bold=True)\n click.secho(\"\")\n\n for url_name, url in named_urls:\n url_util.print_url(url_name, url)\n\n click.secho(\"\")\n\n\ndef run(script_path, command_line, args):\n \"\"\"Run a script in a separate thread and start a server for the app.\n\n This starts a blocking ioloop.\n\n Parameters\n ----------\n script_path : str\n command_line : str\n args : [str]\n\n \"\"\"\n _fix_sys_path(script_path)\n _fix_matplotlib_crash()\n _fix_tornado_crash()\n _fix_sys_argv(script_path, args)\n\n # Install a signal handler that will shut down the ioloop\n # and close all our threads\n _set_up_signal_handler()\n\n ioloop = tornado.ioloop.IOLoop.current()\n\n # Create and start the server.\n server = Server(ioloop, script_path, command_line)\n server.start(_on_server_start)\n\n # (Must com after start(), because this starts a new thread and start() may\n # call sys.exit() which doesn't kill other 
threads.\n server.add_preheated_report_session()\n\n # Start the ioloop. This function will not return until the\n # server is shut down.\n ioloop.start()\n", "path": "lib/streamlit/bootstrap.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright 2018-2020 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport signal\nimport sys\n\nimport click\nimport tornado.ioloop\n\nfrom streamlit import config\nfrom streamlit import net_util\nfrom streamlit import url_util\nfrom streamlit import env_util\nfrom streamlit import util\nfrom streamlit.Report import Report\nfrom streamlit.logger import get_logger\nfrom streamlit.server.Server import Server\n\nLOGGER = get_logger(__name__)\n\n# Wait for 1 second before opening a browser. This gives old tabs a chance to\n# reconnect.\n# This must be >= 2 * WebSocketConnection.ts#RECONNECT_WAIT_TIME_MS.\nBROWSER_WAIT_TIMEOUT_SEC = 1\n\n\ndef _set_up_signal_handler():\n LOGGER.debug(\"Setting up signal handler\")\n\n def signal_handler(signal_number, stack_frame):\n # The server will shut down its threads and stop the ioloop\n Server.get_current().stop()\n\n signal.signal(signal.SIGTERM, signal_handler)\n signal.signal(signal.SIGINT, signal_handler)\n if sys.platform == \"win32\":\n signal.signal(signal.SIGBREAK, signal_handler)\n else:\n signal.signal(signal.SIGQUIT, signal_handler)\n\n\ndef _fix_sys_path(script_path):\n \"\"\"Add the script's folder to the sys path.\n\n Python normally does this automatically, but since we exec the script\n ourselves we need to do it instead.\n \"\"\"\n sys.path.insert(0, os.path.dirname(script_path))\n\n\ndef _fix_matplotlib_crash():\n \"\"\"Set Matplotlib backend to avoid a crash.\n\n The default Matplotlib backend crashes Python on OSX when run on a thread\n that's not the main thread, so here we set a safer backend as a fix.\n Users can always disable this behavior by setting the config\n runner.fixMatplotlib = false.\n\n This fix is OS-independent. We didn't see a good reason to make this\n Mac-only. Consistency within Streamlit seemed more important.\n \"\"\"\n if config.get_option(\"runner.fixMatplotlib\"):\n try:\n # TODO: a better option may be to set\n # os.environ[\"MPLBACKEND\"] = \"Agg\". We'd need to do this towards\n # the top of __init__.py, before importing anything that imports\n # pandas (which imports matplotlib). Alternately, we could set\n # this environment variable in a new entrypoint defined in\n # setup.py. Both of these introduce additional trickiness: they\n # need to run without consulting streamlit.config.get_option,\n # because this would import streamlit, and therefore matplotlib.\n import matplotlib\n\n matplotlib.use(\"Agg\")\n except ImportError:\n pass\n\n\ndef _fix_tornado_crash():\n \"\"\"Set default asyncio policy to be compatible with Tornado 6.\n\n Tornado 6 (at least) is not compatible with the default\n asyncio implementation on Windows. 
So here we\n pick the older SelectorEventLoopPolicy when the OS is Windows\n if the known-incompatible default policy is in use.\n\n This has to happen as early as possible to make it a low priority and\n overrideable\n\n See: https://github.com/tornadoweb/tornado/issues/2608\n\n FIXME: if/when tornado supports the defaults in asyncio,\n remove and bump tornado requirement for py38\n \"\"\"\n if env_util.IS_WINDOWS and sys.version_info >= (3, 8):\n import asyncio\n\n try:\n from asyncio import (\n WindowsProactorEventLoopPolicy,\n WindowsSelectorEventLoopPolicy,\n )\n except ImportError:\n pass\n # Not affected\n else:\n if type(asyncio.get_event_loop_policy()) is WindowsProactorEventLoopPolicy:\n # WindowsProactorEventLoopPolicy is not compatible with\n # Tornado 6 fallback to the pre-3.8 default of Selector\n asyncio.set_event_loop_policy(WindowsSelectorEventLoopPolicy())\n\n\ndef _fix_sys_argv(script_path, args):\n \"\"\"sys.argv needs to exclude streamlit arguments and parameters\n and be set to what a user's script may expect.\n \"\"\"\n import sys\n\n sys.argv = [script_path] + list(args)\n\n\ndef _on_server_start(server):\n _print_url()\n\n def maybe_open_browser():\n if config.get_option(\"server.headless\"):\n # Don't open browser when in headless mode.\n return\n\n if server.browser_is_connected:\n # Don't auto-open browser if there's already a browser connected.\n # This can happen if there's an old tab repeatedly trying to\n # connect, and it happens to success before we launch the browser.\n return\n\n if config.is_manually_set(\"browser.serverAddress\"):\n addr = config.get_option(\"browser.serverAddress\")\n else:\n addr = \"localhost\"\n\n util.open_browser(Report.get_url(addr))\n\n # Schedule the browser to open using the IO Loop on the main thread, but\n # only if no other browser connects within 1s.\n ioloop = tornado.ioloop.IOLoop.current()\n ioloop.call_later(BROWSER_WAIT_TIMEOUT_SEC, maybe_open_browser)\n\n\ndef _fix_pydeck_mapbox_api_warning():\n \"\"\"Sets MAPBOX_API_KEY environment variable needed for PyDeck otherwise it will throw an exception\"\"\"\n\n os.environ[\"MAPBOX_API_KEY\"] = config.get_option(\"mapbox.token\")\n\n\ndef _print_url():\n title_message = \"You can now view your Streamlit app in your browser.\"\n named_urls = []\n\n if config.is_manually_set(\"browser.serverAddress\"):\n named_urls = [\n (\"URL\", Report.get_url(config.get_option(\"browser.serverAddress\")))\n ]\n\n elif config.get_option(\"server.headless\"):\n named_urls = [\n (\"Network URL\", Report.get_url(net_util.get_internal_ip())),\n (\"External URL\", Report.get_url(net_util.get_external_ip())),\n ]\n\n else:\n named_urls = [\n (\"Local URL\", Report.get_url(\"localhost\")),\n (\"Network URL\", Report.get_url(net_util.get_internal_ip())),\n ]\n\n click.secho(\"\")\n click.secho(\" %s\" % title_message, fg=\"blue\", bold=True)\n click.secho(\"\")\n\n for url_name, url in named_urls:\n url_util.print_url(url_name, url)\n\n click.secho(\"\")\n\n\ndef run(script_path, command_line, args):\n \"\"\"Run a script in a separate thread and start a server for the app.\n\n This starts a blocking ioloop.\n\n Parameters\n ----------\n script_path : str\n command_line : str\n args : [str]\n\n \"\"\"\n _fix_sys_path(script_path)\n _fix_matplotlib_crash()\n _fix_tornado_crash()\n _fix_sys_argv(script_path, args)\n _fix_pydeck_mapbox_api_warning()\n\n # Install a signal handler that will shut down the ioloop\n # and close all our threads\n _set_up_signal_handler()\n\n ioloop = 
tornado.ioloop.IOLoop.current()\n\n # Create and start the server.\n server = Server(ioloop, script_path, command_line)\n server.start(_on_server_start)\n\n # (Must com after start(), because this starts a new thread and start() may\n # call sys.exit() which doesn't kill other threads.\n server.add_preheated_report_session()\n\n # Start the ioloop. This function will not return until the\n # server is shut down.\n ioloop.start()\n", "path": "lib/streamlit/bootstrap.py"}]}
3,102
221
gh_patches_debug_34538
rasdani/github-patches
git_diff
python-discord__bot-480
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Aliasing for !resources command. Currently the !resources command links to both the [PyDis resource](https://pythondiscord.com/pages/resources/ ) page, and the [PyDis tools](https://pythondiscord.com/pages/tools/) page. I feel that the alias of !tools should be added to the command, or the links should be separated into two commands, to both make it easier for users to access the tools page, but also increase awareness of the page. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `bot/cogs/site.py` Content: ``` 1 import logging 2 3 from discord import Colour, Embed 4 from discord.ext.commands import Bot, Cog, Context, group 5 6 from bot.constants import Channels, STAFF_ROLES, URLs 7 from bot.decorators import redirect_output 8 from bot.pagination import LinePaginator 9 10 log = logging.getLogger(__name__) 11 12 PAGES_URL = f"{URLs.site_schema}{URLs.site}/pages" 13 14 15 class Site(Cog): 16 """Commands for linking to different parts of the site.""" 17 18 def __init__(self, bot: Bot): 19 self.bot = bot 20 21 @group(name="site", aliases=("s",), invoke_without_command=True) 22 async def site_group(self, ctx: Context) -> None: 23 """Commands for getting info about our website.""" 24 await ctx.invoke(self.bot.get_command("help"), "site") 25 26 @site_group.command(name="home", aliases=("about",)) 27 async def site_main(self, ctx: Context) -> None: 28 """Info about the website itself.""" 29 url = f"{URLs.site_schema}{URLs.site}/" 30 31 embed = Embed(title="Python Discord website") 32 embed.set_footer(text=url) 33 embed.colour = Colour.blurple() 34 embed.description = ( 35 f"[Our official website]({url}) is an open-source community project " 36 "created with Python and Flask. It contains information about the server " 37 "itself, lets you sign up for upcoming events, has its own wiki, contains " 38 "a list of valuable learning resources, and much more." 39 ) 40 41 await ctx.send(embed=embed) 42 43 @site_group.command(name="resources") 44 async def site_resources(self, ctx: Context) -> None: 45 """Info about the site's Resources page.""" 46 learning_url = f"{PAGES_URL}/resources" 47 tools_url = f"{PAGES_URL}/tools" 48 49 embed = Embed(title="Resources & Tools") 50 embed.set_footer(text=f"{learning_url} | {tools_url}") 51 embed.colour = Colour.blurple() 52 embed.description = ( 53 f"The [Resources page]({learning_url}) on our website contains a " 54 "list of hand-selected goodies that we regularly recommend " 55 f"to both beginners and experts. The [Tools page]({tools_url}) " 56 "contains a couple of the most popular tools for programming in " 57 "Python." 58 ) 59 60 await ctx.send(embed=embed) 61 62 @site_group.command(name="help") 63 async def site_help(self, ctx: Context) -> None: 64 """Info about the site's Getting Help page.""" 65 url = f"{PAGES_URL}/asking-good-questions" 66 67 embed = Embed(title="Asking Good Questions") 68 embed.set_footer(text=url) 69 embed.colour = Colour.blurple() 70 embed.description = ( 71 "Asking the right question about something that's new to you can sometimes be tricky. " 72 f"To help with this, we've created a [guide to asking good questions]({url}) on our website. " 73 "It contains everything you need to get the very best help from our community." 
74 ) 75 76 await ctx.send(embed=embed) 77 78 @site_group.command(name="faq") 79 async def site_faq(self, ctx: Context) -> None: 80 """Info about the site's FAQ page.""" 81 url = f"{PAGES_URL}/frequently-asked-questions" 82 83 embed = Embed(title="FAQ") 84 embed.set_footer(text=url) 85 embed.colour = Colour.blurple() 86 embed.description = ( 87 "As the largest Python community on Discord, we get hundreds of questions every day. " 88 "Many of these questions have been asked before. We've compiled a list of the most " 89 "frequently asked questions along with their answers, which can be found on " 90 f"our [FAQ page]({url})." 91 ) 92 93 await ctx.send(embed=embed) 94 95 @site_group.command(aliases=['r', 'rule'], name='rules') 96 @redirect_output(destination_channel=Channels.bot, bypass_roles=STAFF_ROLES) 97 async def site_rules(self, ctx: Context, *rules: int) -> None: 98 """Provides a link to all rules or, if specified, displays specific rule(s).""" 99 rules_embed = Embed(title='Rules', color=Colour.blurple()) 100 rules_embed.url = f"{PAGES_URL}/rules" 101 102 if not rules: 103 # Rules were not submitted. Return the default description. 104 rules_embed.description = ( 105 "The rules and guidelines that apply to this community can be found on" 106 f" our [rules page]({PAGES_URL}/rules). We expect" 107 " all members of the community to have read and understood these." 108 ) 109 110 await ctx.send(embed=rules_embed) 111 return 112 113 full_rules = await self.bot.api_client.get('rules', params={'link_format': 'md'}) 114 invalid_indices = tuple( 115 pick 116 for pick in rules 117 if pick < 0 or pick >= len(full_rules) 118 ) 119 120 if invalid_indices: 121 indices = ', '.join(map(str, invalid_indices)) 122 await ctx.send(f":x: Invalid rule indices {indices}") 123 return 124 125 final_rules = tuple(f"**{pick}.** {full_rules[pick]}" for pick in rules) 126 127 await LinePaginator.paginate(final_rules, ctx, rules_embed, max_lines=3) 128 129 130 def setup(bot: Bot) -> None: 131 """Site cog load.""" 132 bot.add_cog(Site(bot)) 133 log.info("Cog loaded: Site") 134 ``` Path: `bot/cogs/alias.py` Content: ``` 1 import inspect 2 import logging 3 from typing import Union 4 5 from discord import Colour, Embed, Member, User 6 from discord.ext.commands import Bot, Cog, Command, Context, clean_content, command, group 7 8 from bot.cogs.watchchannels.watchchannel import proxy_user 9 from bot.converters import TagNameConverter 10 from bot.pagination import LinePaginator 11 12 log = logging.getLogger(__name__) 13 14 15 class Alias (Cog): 16 """Aliases for commonly used commands.""" 17 18 def __init__(self, bot: Bot): 19 self.bot = bot 20 21 async def invoke(self, ctx: Context, cmd_name: str, *args, **kwargs) -> None: 22 """Invokes a command with args and kwargs.""" 23 log.debug(f"{cmd_name} was invoked through an alias") 24 cmd = self.bot.get_command(cmd_name) 25 if not cmd: 26 return log.warning(f'Did not find command "{cmd_name}" to invoke.') 27 elif not await cmd.can_run(ctx): 28 return log.warning( 29 f'{str(ctx.author)} tried to run the command "{cmd_name}"' 30 ) 31 32 await ctx.invoke(cmd, *args, **kwargs) 33 34 @command(name='aliases') 35 async def aliases_command(self, ctx: Context) -> None: 36 """Show configured aliases on the bot.""" 37 embed = Embed( 38 title='Configured aliases', 39 colour=Colour.blue() 40 ) 41 await LinePaginator.paginate( 42 ( 43 f"• `{ctx.prefix}{value.name}` " 44 f"=> `{ctx.prefix}{name[:-len('_alias')].replace('_', ' ')}`" 45 for name, value in inspect.getmembers(self) 46 if 
isinstance(value, Command) and name.endswith('_alias') 47 ), 48 ctx, embed, empty=False, max_lines=20 49 ) 50 51 @command(name="resources", aliases=("resource",), hidden=True) 52 async def site_resources_alias(self, ctx: Context) -> None: 53 """Alias for invoking <prefix>site resources.""" 54 await self.invoke(ctx, "site resources") 55 56 @command(name="watch", hidden=True) 57 async def bigbrother_watch_alias(self, ctx: Context, user: Union[Member, User, proxy_user], *, reason: str) -> None: 58 """Alias for invoking <prefix>bigbrother watch [user] [reason].""" 59 await self.invoke(ctx, "bigbrother watch", user, reason=reason) 60 61 @command(name="unwatch", hidden=True) 62 async def bigbrother_unwatch_alias(self, ctx: Context, user: Union[User, proxy_user], *, reason: str) -> None: 63 """Alias for invoking <prefix>bigbrother unwatch [user] [reason].""" 64 await self.invoke(ctx, "bigbrother unwatch", user, reason=reason) 65 66 @command(name="home", hidden=True) 67 async def site_home_alias(self, ctx: Context) -> None: 68 """Alias for invoking <prefix>site home.""" 69 await self.invoke(ctx, "site home") 70 71 @command(name="faq", hidden=True) 72 async def site_faq_alias(self, ctx: Context) -> None: 73 """Alias for invoking <prefix>site faq.""" 74 await self.invoke(ctx, "site faq") 75 76 @command(name="rules", hidden=True) 77 async def site_rules_alias(self, ctx: Context) -> None: 78 """Alias for invoking <prefix>site rules.""" 79 await self.invoke(ctx, "site rules") 80 81 @command(name="reload", hidden=True) 82 async def cogs_reload_alias(self, ctx: Context, *, cog_name: str) -> None: 83 """Alias for invoking <prefix>cogs reload [cog_name].""" 84 await self.invoke(ctx, "cogs reload", cog_name) 85 86 @command(name="defon", hidden=True) 87 async def defcon_enable_alias(self, ctx: Context) -> None: 88 """Alias for invoking <prefix>defcon enable.""" 89 await self.invoke(ctx, "defcon enable") 90 91 @command(name="defoff", hidden=True) 92 async def defcon_disable_alias(self, ctx: Context) -> None: 93 """Alias for invoking <prefix>defcon disable.""" 94 await self.invoke(ctx, "defcon disable") 95 96 @command(name="exception", hidden=True) 97 async def tags_get_traceback_alias(self, ctx: Context) -> None: 98 """Alias for invoking <prefix>tags get traceback.""" 99 await self.invoke(ctx, "tags get", tag_name="traceback") 100 101 @group(name="get", 102 aliases=("show", "g"), 103 hidden=True, 104 invoke_without_command=True) 105 async def get_group_alias(self, ctx: Context) -> None: 106 """Group for reverse aliases for commands like `tags get`, allowing for `get tags` or `get docs`.""" 107 pass 108 109 @get_group_alias.command(name="tags", aliases=("tag", "t"), hidden=True) 110 async def tags_get_alias( 111 self, ctx: Context, *, tag_name: TagNameConverter = None 112 ) -> None: 113 """ 114 Alias for invoking <prefix>tags get [tag_name]. 115 116 tag_name: str - tag to be viewed. 
117 """ 118 await self.invoke(ctx, "tags get", tag_name=tag_name) 119 120 @get_group_alias.command(name="docs", aliases=("doc", "d"), hidden=True) 121 async def docs_get_alias( 122 self, ctx: Context, symbol: clean_content = None 123 ) -> None: 124 """Alias for invoking <prefix>docs get [symbol].""" 125 await self.invoke(ctx, "docs get", symbol) 126 127 @command(name="nominate", hidden=True) 128 async def nomination_add_alias(self, ctx: Context, user: Union[Member, User, proxy_user], *, reason: str) -> None: 129 """Alias for invoking <prefix>talentpool add [user] [reason].""" 130 await self.invoke(ctx, "talentpool add", user, reason=reason) 131 132 @command(name="unnominate", hidden=True) 133 async def nomination_end_alias(self, ctx: Context, user: Union[User, proxy_user], *, reason: str) -> None: 134 """Alias for invoking <prefix>nomination end [user] [reason].""" 135 await self.invoke(ctx, "nomination end", user, reason=reason) 136 137 @command(name="nominees", hidden=True) 138 async def nominees_alias(self, ctx: Context) -> None: 139 """Alias for invoking <prefix>tp watched.""" 140 await self.invoke(ctx, "talentpool watched") 141 142 143 def setup(bot: Bot) -> None: 144 """Alias cog load.""" 145 bot.add_cog(Alias(bot)) 146 log.info("Cog loaded: Alias") 147 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/bot/cogs/alias.py b/bot/cogs/alias.py --- a/bot/cogs/alias.py +++ b/bot/cogs/alias.py @@ -53,6 +53,11 @@ """Alias for invoking <prefix>site resources.""" await self.invoke(ctx, "site resources") + @command(name="tools", hidden=True) + async def site_tools_alias(self, ctx: Context) -> None: + """Alias for invoking <prefix>site tools.""" + await self.invoke(ctx, "site tools") + @command(name="watch", hidden=True) async def bigbrother_watch_alias(self, ctx: Context, user: Union[Member, User, proxy_user], *, reason: str) -> None: """Alias for invoking <prefix>bigbrother watch [user] [reason].""" diff --git a/bot/cogs/site.py b/bot/cogs/site.py --- a/bot/cogs/site.py +++ b/bot/cogs/site.py @@ -44,17 +44,29 @@ async def site_resources(self, ctx: Context) -> None: """Info about the site's Resources page.""" learning_url = f"{PAGES_URL}/resources" - tools_url = f"{PAGES_URL}/tools" - embed = Embed(title="Resources & Tools") - embed.set_footer(text=f"{learning_url} | {tools_url}") + embed = Embed(title="Resources") + embed.set_footer(text=f"{learning_url}") embed.colour = Colour.blurple() embed.description = ( f"The [Resources page]({learning_url}) on our website contains a " - "list of hand-selected goodies that we regularly recommend " - f"to both beginners and experts. The [Tools page]({tools_url}) " - "contains a couple of the most popular tools for programming in " - "Python." + "list of hand-selected learning resources that we regularly recommend " + f"to both beginners and experts." + ) + + await ctx.send(embed=embed) + + @site_group.command(name="tools") + async def site_tools(self, ctx: Context) -> None: + """Info about the site's Tools page.""" + tools_url = f"{PAGES_URL}/tools" + + embed = Embed(title="Tools") + embed.set_footer(text=f"{tools_url}") + embed.colour = Colour.blurple() + embed.description = ( + f"The [Tools page]({tools_url}) on our website contains a " + f"couple of the most popular tools for programming in Python." ) await ctx.send(embed=embed)
{"golden_diff": "diff --git a/bot/cogs/alias.py b/bot/cogs/alias.py\n--- a/bot/cogs/alias.py\n+++ b/bot/cogs/alias.py\n@@ -53,6 +53,11 @@\n \"\"\"Alias for invoking <prefix>site resources.\"\"\"\n await self.invoke(ctx, \"site resources\")\n \n+ @command(name=\"tools\", hidden=True)\n+ async def site_tools_alias(self, ctx: Context) -> None:\n+ \"\"\"Alias for invoking <prefix>site tools.\"\"\"\n+ await self.invoke(ctx, \"site tools\")\n+\n @command(name=\"watch\", hidden=True)\n async def bigbrother_watch_alias(self, ctx: Context, user: Union[Member, User, proxy_user], *, reason: str) -> None:\n \"\"\"Alias for invoking <prefix>bigbrother watch [user] [reason].\"\"\"\ndiff --git a/bot/cogs/site.py b/bot/cogs/site.py\n--- a/bot/cogs/site.py\n+++ b/bot/cogs/site.py\n@@ -44,17 +44,29 @@\n async def site_resources(self, ctx: Context) -> None:\n \"\"\"Info about the site's Resources page.\"\"\"\n learning_url = f\"{PAGES_URL}/resources\"\n- tools_url = f\"{PAGES_URL}/tools\"\n \n- embed = Embed(title=\"Resources & Tools\")\n- embed.set_footer(text=f\"{learning_url} | {tools_url}\")\n+ embed = Embed(title=\"Resources\")\n+ embed.set_footer(text=f\"{learning_url}\")\n embed.colour = Colour.blurple()\n embed.description = (\n f\"The [Resources page]({learning_url}) on our website contains a \"\n- \"list of hand-selected goodies that we regularly recommend \"\n- f\"to both beginners and experts. The [Tools page]({tools_url}) \"\n- \"contains a couple of the most popular tools for programming in \"\n- \"Python.\"\n+ \"list of hand-selected learning resources that we regularly recommend \"\n+ f\"to both beginners and experts.\"\n+ )\n+\n+ await ctx.send(embed=embed)\n+\n+ @site_group.command(name=\"tools\")\n+ async def site_tools(self, ctx: Context) -> None:\n+ \"\"\"Info about the site's Tools page.\"\"\"\n+ tools_url = f\"{PAGES_URL}/tools\"\n+\n+ embed = Embed(title=\"Tools\")\n+ embed.set_footer(text=f\"{tools_url}\")\n+ embed.colour = Colour.blurple()\n+ embed.description = (\n+ f\"The [Tools page]({tools_url}) on our website contains a \"\n+ f\"couple of the most popular tools for programming in Python.\"\n )\n \n await ctx.send(embed=embed)\n", "issue": "Aliasing for !resources command.\nCurrently the !resources command links to both the [PyDis resource](https://pythondiscord.com/pages/resources/\r\n) page, and the [PyDis tools](https://pythondiscord.com/pages/tools/) page.\r\n\r\nI feel that the alias of !tools should be added to the command, or the links should be separated into two commands, to both make it easier for users to access the tools page, but also increase awareness of the page.\n", "before_files": [{"content": "import logging\n\nfrom discord import Colour, Embed\nfrom discord.ext.commands import Bot, Cog, Context, group\n\nfrom bot.constants import Channels, STAFF_ROLES, URLs\nfrom bot.decorators import redirect_output\nfrom bot.pagination import LinePaginator\n\nlog = logging.getLogger(__name__)\n\nPAGES_URL = f\"{URLs.site_schema}{URLs.site}/pages\"\n\n\nclass Site(Cog):\n \"\"\"Commands for linking to different parts of the site.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n\n @group(name=\"site\", aliases=(\"s\",), invoke_without_command=True)\n async def site_group(self, ctx: Context) -> None:\n \"\"\"Commands for getting info about our website.\"\"\"\n await ctx.invoke(self.bot.get_command(\"help\"), \"site\")\n\n @site_group.command(name=\"home\", aliases=(\"about\",))\n async def site_main(self, ctx: Context) -> None:\n \"\"\"Info about the website 
itself.\"\"\"\n url = f\"{URLs.site_schema}{URLs.site}/\"\n\n embed = Embed(title=\"Python Discord website\")\n embed.set_footer(text=url)\n embed.colour = Colour.blurple()\n embed.description = (\n f\"[Our official website]({url}) is an open-source community project \"\n \"created with Python and Flask. It contains information about the server \"\n \"itself, lets you sign up for upcoming events, has its own wiki, contains \"\n \"a list of valuable learning resources, and much more.\"\n )\n\n await ctx.send(embed=embed)\n\n @site_group.command(name=\"resources\")\n async def site_resources(self, ctx: Context) -> None:\n \"\"\"Info about the site's Resources page.\"\"\"\n learning_url = f\"{PAGES_URL}/resources\"\n tools_url = f\"{PAGES_URL}/tools\"\n\n embed = Embed(title=\"Resources & Tools\")\n embed.set_footer(text=f\"{learning_url} | {tools_url}\")\n embed.colour = Colour.blurple()\n embed.description = (\n f\"The [Resources page]({learning_url}) on our website contains a \"\n \"list of hand-selected goodies that we regularly recommend \"\n f\"to both beginners and experts. The [Tools page]({tools_url}) \"\n \"contains a couple of the most popular tools for programming in \"\n \"Python.\"\n )\n\n await ctx.send(embed=embed)\n\n @site_group.command(name=\"help\")\n async def site_help(self, ctx: Context) -> None:\n \"\"\"Info about the site's Getting Help page.\"\"\"\n url = f\"{PAGES_URL}/asking-good-questions\"\n\n embed = Embed(title=\"Asking Good Questions\")\n embed.set_footer(text=url)\n embed.colour = Colour.blurple()\n embed.description = (\n \"Asking the right question about something that's new to you can sometimes be tricky. \"\n f\"To help with this, we've created a [guide to asking good questions]({url}) on our website. \"\n \"It contains everything you need to get the very best help from our community.\"\n )\n\n await ctx.send(embed=embed)\n\n @site_group.command(name=\"faq\")\n async def site_faq(self, ctx: Context) -> None:\n \"\"\"Info about the site's FAQ page.\"\"\"\n url = f\"{PAGES_URL}/frequently-asked-questions\"\n\n embed = Embed(title=\"FAQ\")\n embed.set_footer(text=url)\n embed.colour = Colour.blurple()\n embed.description = (\n \"As the largest Python community on Discord, we get hundreds of questions every day. \"\n \"Many of these questions have been asked before. We've compiled a list of the most \"\n \"frequently asked questions along with their answers, which can be found on \"\n f\"our [FAQ page]({url}).\"\n )\n\n await ctx.send(embed=embed)\n\n @site_group.command(aliases=['r', 'rule'], name='rules')\n @redirect_output(destination_channel=Channels.bot, bypass_roles=STAFF_ROLES)\n async def site_rules(self, ctx: Context, *rules: int) -> None:\n \"\"\"Provides a link to all rules or, if specified, displays specific rule(s).\"\"\"\n rules_embed = Embed(title='Rules', color=Colour.blurple())\n rules_embed.url = f\"{PAGES_URL}/rules\"\n\n if not rules:\n # Rules were not submitted. Return the default description.\n rules_embed.description = (\n \"The rules and guidelines that apply to this community can be found on\"\n f\" our [rules page]({PAGES_URL}/rules). 
We expect\"\n \" all members of the community to have read and understood these.\"\n )\n\n await ctx.send(embed=rules_embed)\n return\n\n full_rules = await self.bot.api_client.get('rules', params={'link_format': 'md'})\n invalid_indices = tuple(\n pick\n for pick in rules\n if pick < 0 or pick >= len(full_rules)\n )\n\n if invalid_indices:\n indices = ', '.join(map(str, invalid_indices))\n await ctx.send(f\":x: Invalid rule indices {indices}\")\n return\n\n final_rules = tuple(f\"**{pick}.** {full_rules[pick]}\" for pick in rules)\n\n await LinePaginator.paginate(final_rules, ctx, rules_embed, max_lines=3)\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Site cog load.\"\"\"\n bot.add_cog(Site(bot))\n log.info(\"Cog loaded: Site\")\n", "path": "bot/cogs/site.py"}, {"content": "import inspect\nimport logging\nfrom typing import Union\n\nfrom discord import Colour, Embed, Member, User\nfrom discord.ext.commands import Bot, Cog, Command, Context, clean_content, command, group\n\nfrom bot.cogs.watchchannels.watchchannel import proxy_user\nfrom bot.converters import TagNameConverter\nfrom bot.pagination import LinePaginator\n\nlog = logging.getLogger(__name__)\n\n\nclass Alias (Cog):\n \"\"\"Aliases for commonly used commands.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n\n async def invoke(self, ctx: Context, cmd_name: str, *args, **kwargs) -> None:\n \"\"\"Invokes a command with args and kwargs.\"\"\"\n log.debug(f\"{cmd_name} was invoked through an alias\")\n cmd = self.bot.get_command(cmd_name)\n if not cmd:\n return log.warning(f'Did not find command \"{cmd_name}\" to invoke.')\n elif not await cmd.can_run(ctx):\n return log.warning(\n f'{str(ctx.author)} tried to run the command \"{cmd_name}\"'\n )\n\n await ctx.invoke(cmd, *args, **kwargs)\n\n @command(name='aliases')\n async def aliases_command(self, ctx: Context) -> None:\n \"\"\"Show configured aliases on the bot.\"\"\"\n embed = Embed(\n title='Configured aliases',\n colour=Colour.blue()\n )\n await LinePaginator.paginate(\n (\n f\"\u2022 `{ctx.prefix}{value.name}` \"\n f\"=> `{ctx.prefix}{name[:-len('_alias')].replace('_', ' ')}`\"\n for name, value in inspect.getmembers(self)\n if isinstance(value, Command) and name.endswith('_alias')\n ),\n ctx, embed, empty=False, max_lines=20\n )\n\n @command(name=\"resources\", aliases=(\"resource\",), hidden=True)\n async def site_resources_alias(self, ctx: Context) -> None:\n \"\"\"Alias for invoking <prefix>site resources.\"\"\"\n await self.invoke(ctx, \"site resources\")\n\n @command(name=\"watch\", hidden=True)\n async def bigbrother_watch_alias(self, ctx: Context, user: Union[Member, User, proxy_user], *, reason: str) -> None:\n \"\"\"Alias for invoking <prefix>bigbrother watch [user] [reason].\"\"\"\n await self.invoke(ctx, \"bigbrother watch\", user, reason=reason)\n\n @command(name=\"unwatch\", hidden=True)\n async def bigbrother_unwatch_alias(self, ctx: Context, user: Union[User, proxy_user], *, reason: str) -> None:\n \"\"\"Alias for invoking <prefix>bigbrother unwatch [user] [reason].\"\"\"\n await self.invoke(ctx, \"bigbrother unwatch\", user, reason=reason)\n\n @command(name=\"home\", hidden=True)\n async def site_home_alias(self, ctx: Context) -> None:\n \"\"\"Alias for invoking <prefix>site home.\"\"\"\n await self.invoke(ctx, \"site home\")\n\n @command(name=\"faq\", hidden=True)\n async def site_faq_alias(self, ctx: Context) -> None:\n \"\"\"Alias for invoking <prefix>site faq.\"\"\"\n await self.invoke(ctx, \"site faq\")\n\n @command(name=\"rules\", hidden=True)\n 
async def site_rules_alias(self, ctx: Context) -> None:\n \"\"\"Alias for invoking <prefix>site rules.\"\"\"\n await self.invoke(ctx, \"site rules\")\n\n @command(name=\"reload\", hidden=True)\n async def cogs_reload_alias(self, ctx: Context, *, cog_name: str) -> None:\n \"\"\"Alias for invoking <prefix>cogs reload [cog_name].\"\"\"\n await self.invoke(ctx, \"cogs reload\", cog_name)\n\n @command(name=\"defon\", hidden=True)\n async def defcon_enable_alias(self, ctx: Context) -> None:\n \"\"\"Alias for invoking <prefix>defcon enable.\"\"\"\n await self.invoke(ctx, \"defcon enable\")\n\n @command(name=\"defoff\", hidden=True)\n async def defcon_disable_alias(self, ctx: Context) -> None:\n \"\"\"Alias for invoking <prefix>defcon disable.\"\"\"\n await self.invoke(ctx, \"defcon disable\")\n\n @command(name=\"exception\", hidden=True)\n async def tags_get_traceback_alias(self, ctx: Context) -> None:\n \"\"\"Alias for invoking <prefix>tags get traceback.\"\"\"\n await self.invoke(ctx, \"tags get\", tag_name=\"traceback\")\n\n @group(name=\"get\",\n aliases=(\"show\", \"g\"),\n hidden=True,\n invoke_without_command=True)\n async def get_group_alias(self, ctx: Context) -> None:\n \"\"\"Group for reverse aliases for commands like `tags get`, allowing for `get tags` or `get docs`.\"\"\"\n pass\n\n @get_group_alias.command(name=\"tags\", aliases=(\"tag\", \"t\"), hidden=True)\n async def tags_get_alias(\n self, ctx: Context, *, tag_name: TagNameConverter = None\n ) -> None:\n \"\"\"\n Alias for invoking <prefix>tags get [tag_name].\n\n tag_name: str - tag to be viewed.\n \"\"\"\n await self.invoke(ctx, \"tags get\", tag_name=tag_name)\n\n @get_group_alias.command(name=\"docs\", aliases=(\"doc\", \"d\"), hidden=True)\n async def docs_get_alias(\n self, ctx: Context, symbol: clean_content = None\n ) -> None:\n \"\"\"Alias for invoking <prefix>docs get [symbol].\"\"\"\n await self.invoke(ctx, \"docs get\", symbol)\n\n @command(name=\"nominate\", hidden=True)\n async def nomination_add_alias(self, ctx: Context, user: Union[Member, User, proxy_user], *, reason: str) -> None:\n \"\"\"Alias for invoking <prefix>talentpool add [user] [reason].\"\"\"\n await self.invoke(ctx, \"talentpool add\", user, reason=reason)\n\n @command(name=\"unnominate\", hidden=True)\n async def nomination_end_alias(self, ctx: Context, user: Union[User, proxy_user], *, reason: str) -> None:\n \"\"\"Alias for invoking <prefix>nomination end [user] [reason].\"\"\"\n await self.invoke(ctx, \"nomination end\", user, reason=reason)\n\n @command(name=\"nominees\", hidden=True)\n async def nominees_alias(self, ctx: Context) -> None:\n \"\"\"Alias for invoking <prefix>tp watched.\"\"\"\n await self.invoke(ctx, \"talentpool watched\")\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Alias cog load.\"\"\"\n bot.add_cog(Alias(bot))\n log.info(\"Cog loaded: Alias\")\n", "path": "bot/cogs/alias.py"}], "after_files": [{"content": "import logging\n\nfrom discord import Colour, Embed\nfrom discord.ext.commands import Bot, Cog, Context, group\n\nfrom bot.constants import Channels, STAFF_ROLES, URLs\nfrom bot.decorators import redirect_output\nfrom bot.pagination import LinePaginator\n\nlog = logging.getLogger(__name__)\n\nPAGES_URL = f\"{URLs.site_schema}{URLs.site}/pages\"\n\n\nclass Site(Cog):\n \"\"\"Commands for linking to different parts of the site.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n\n @group(name=\"site\", aliases=(\"s\",), invoke_without_command=True)\n async def site_group(self, ctx: Context) -> None:\n \"\"\"Commands for 
getting info about our website.\"\"\"\n await ctx.invoke(self.bot.get_command(\"help\"), \"site\")\n\n @site_group.command(name=\"home\", aliases=(\"about\",))\n async def site_main(self, ctx: Context) -> None:\n \"\"\"Info about the website itself.\"\"\"\n url = f\"{URLs.site_schema}{URLs.site}/\"\n\n embed = Embed(title=\"Python Discord website\")\n embed.set_footer(text=url)\n embed.colour = Colour.blurple()\n embed.description = (\n f\"[Our official website]({url}) is an open-source community project \"\n \"created with Python and Flask. It contains information about the server \"\n \"itself, lets you sign up for upcoming events, has its own wiki, contains \"\n \"a list of valuable learning resources, and much more.\"\n )\n\n await ctx.send(embed=embed)\n\n @site_group.command(name=\"resources\")\n async def site_resources(self, ctx: Context) -> None:\n \"\"\"Info about the site's Resources page.\"\"\"\n learning_url = f\"{PAGES_URL}/resources\"\n\n embed = Embed(title=\"Resources\")\n embed.set_footer(text=f\"{learning_url}\")\n embed.colour = Colour.blurple()\n embed.description = (\n f\"The [Resources page]({learning_url}) on our website contains a \"\n \"list of hand-selected learning resources that we regularly recommend \"\n f\"to both beginners and experts.\"\n )\n\n await ctx.send(embed=embed)\n\n @site_group.command(name=\"tools\")\n async def site_tools(self, ctx: Context) -> None:\n \"\"\"Info about the site's Tools page.\"\"\"\n tools_url = f\"{PAGES_URL}/tools\"\n\n embed = Embed(title=\"Tools\")\n embed.set_footer(text=f\"{tools_url}\")\n embed.colour = Colour.blurple()\n embed.description = (\n f\"The [Tools page]({tools_url}) on our website contains a \"\n f\"couple of the most popular tools for programming in Python.\"\n )\n\n await ctx.send(embed=embed)\n\n @site_group.command(name=\"help\")\n async def site_help(self, ctx: Context) -> None:\n \"\"\"Info about the site's Getting Help page.\"\"\"\n url = f\"{PAGES_URL}/asking-good-questions\"\n\n embed = Embed(title=\"Asking Good Questions\")\n embed.set_footer(text=url)\n embed.colour = Colour.blurple()\n embed.description = (\n \"Asking the right question about something that's new to you can sometimes be tricky. \"\n f\"To help with this, we've created a [guide to asking good questions]({url}) on our website. \"\n \"It contains everything you need to get the very best help from our community.\"\n )\n\n await ctx.send(embed=embed)\n\n @site_group.command(name=\"faq\")\n async def site_faq(self, ctx: Context) -> None:\n \"\"\"Info about the site's FAQ page.\"\"\"\n url = f\"{PAGES_URL}/frequently-asked-questions\"\n\n embed = Embed(title=\"FAQ\")\n embed.set_footer(text=url)\n embed.colour = Colour.blurple()\n embed.description = (\n \"As the largest Python community on Discord, we get hundreds of questions every day. \"\n \"Many of these questions have been asked before. We've compiled a list of the most \"\n \"frequently asked questions along with their answers, which can be found on \"\n f\"our [FAQ page]({url}).\"\n )\n\n await ctx.send(embed=embed)\n\n @site_group.command(aliases=['r', 'rule'], name='rules')\n @redirect_output(destination_channel=Channels.bot, bypass_roles=STAFF_ROLES)\n async def site_rules(self, ctx: Context, *rules: int) -> None:\n \"\"\"Provides a link to all rules or, if specified, displays specific rule(s).\"\"\"\n rules_embed = Embed(title='Rules', color=Colour.blurple())\n rules_embed.url = f\"{PAGES_URL}/rules\"\n\n if not rules:\n # Rules were not submitted. 
Return the default description.\n rules_embed.description = (\n \"The rules and guidelines that apply to this community can be found on\"\n f\" our [rules page]({PAGES_URL}/rules). We expect\"\n \" all members of the community to have read and understood these.\"\n )\n\n await ctx.send(embed=rules_embed)\n return\n\n full_rules = await self.bot.api_client.get('rules', params={'link_format': 'md'})\n invalid_indices = tuple(\n pick\n for pick in rules\n if pick < 0 or pick >= len(full_rules)\n )\n\n if invalid_indices:\n indices = ', '.join(map(str, invalid_indices))\n await ctx.send(f\":x: Invalid rule indices {indices}\")\n return\n\n final_rules = tuple(f\"**{pick}.** {full_rules[pick]}\" for pick in rules)\n\n await LinePaginator.paginate(final_rules, ctx, rules_embed, max_lines=3)\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Site cog load.\"\"\"\n bot.add_cog(Site(bot))\n log.info(\"Cog loaded: Site\")\n", "path": "bot/cogs/site.py"}, {"content": "import inspect\nimport logging\nfrom typing import Union\n\nfrom discord import Colour, Embed, Member, User\nfrom discord.ext.commands import Bot, Cog, Command, Context, clean_content, command, group\n\nfrom bot.cogs.watchchannels.watchchannel import proxy_user\nfrom bot.converters import TagNameConverter\nfrom bot.pagination import LinePaginator\n\nlog = logging.getLogger(__name__)\n\n\nclass Alias (Cog):\n \"\"\"Aliases for commonly used commands.\"\"\"\n\n def __init__(self, bot: Bot):\n self.bot = bot\n\n async def invoke(self, ctx: Context, cmd_name: str, *args, **kwargs) -> None:\n \"\"\"Invokes a command with args and kwargs.\"\"\"\n log.debug(f\"{cmd_name} was invoked through an alias\")\n cmd = self.bot.get_command(cmd_name)\n if not cmd:\n return log.warning(f'Did not find command \"{cmd_name}\" to invoke.')\n elif not await cmd.can_run(ctx):\n return log.warning(\n f'{str(ctx.author)} tried to run the command \"{cmd_name}\"'\n )\n\n await ctx.invoke(cmd, *args, **kwargs)\n\n @command(name='aliases')\n async def aliases_command(self, ctx: Context) -> None:\n \"\"\"Show configured aliases on the bot.\"\"\"\n embed = Embed(\n title='Configured aliases',\n colour=Colour.blue()\n )\n await LinePaginator.paginate(\n (\n f\"\u2022 `{ctx.prefix}{value.name}` \"\n f\"=> `{ctx.prefix}{name[:-len('_alias')].replace('_', ' ')}`\"\n for name, value in inspect.getmembers(self)\n if isinstance(value, Command) and name.endswith('_alias')\n ),\n ctx, embed, empty=False, max_lines=20\n )\n\n @command(name=\"resources\", aliases=(\"resource\",), hidden=True)\n async def site_resources_alias(self, ctx: Context) -> None:\n \"\"\"Alias for invoking <prefix>site resources.\"\"\"\n await self.invoke(ctx, \"site resources\")\n\n @command(name=\"tools\", hidden=True)\n async def site_tools_alias(self, ctx: Context) -> None:\n \"\"\"Alias for invoking <prefix>site tools.\"\"\"\n await self.invoke(ctx, \"site tools\")\n\n @command(name=\"watch\", hidden=True)\n async def bigbrother_watch_alias(self, ctx: Context, user: Union[Member, User, proxy_user], *, reason: str) -> None:\n \"\"\"Alias for invoking <prefix>bigbrother watch [user] [reason].\"\"\"\n await self.invoke(ctx, \"bigbrother watch\", user, reason=reason)\n\n @command(name=\"unwatch\", hidden=True)\n async def bigbrother_unwatch_alias(self, ctx: Context, user: Union[User, proxy_user], *, reason: str) -> None:\n \"\"\"Alias for invoking <prefix>bigbrother unwatch [user] [reason].\"\"\"\n await self.invoke(ctx, \"bigbrother unwatch\", user, reason=reason)\n\n @command(name=\"home\", hidden=True)\n async 
def site_home_alias(self, ctx: Context) -> None:\n \"\"\"Alias for invoking <prefix>site home.\"\"\"\n await self.invoke(ctx, \"site home\")\n\n @command(name=\"faq\", hidden=True)\n async def site_faq_alias(self, ctx: Context) -> None:\n \"\"\"Alias for invoking <prefix>site faq.\"\"\"\n await self.invoke(ctx, \"site faq\")\n\n @command(name=\"rules\", hidden=True)\n async def site_rules_alias(self, ctx: Context) -> None:\n \"\"\"Alias for invoking <prefix>site rules.\"\"\"\n await self.invoke(ctx, \"site rules\")\n\n @command(name=\"reload\", hidden=True)\n async def cogs_reload_alias(self, ctx: Context, *, cog_name: str) -> None:\n \"\"\"Alias for invoking <prefix>cogs reload [cog_name].\"\"\"\n await self.invoke(ctx, \"cogs reload\", cog_name)\n\n @command(name=\"defon\", hidden=True)\n async def defcon_enable_alias(self, ctx: Context) -> None:\n \"\"\"Alias for invoking <prefix>defcon enable.\"\"\"\n await self.invoke(ctx, \"defcon enable\")\n\n @command(name=\"defoff\", hidden=True)\n async def defcon_disable_alias(self, ctx: Context) -> None:\n \"\"\"Alias for invoking <prefix>defcon disable.\"\"\"\n await self.invoke(ctx, \"defcon disable\")\n\n @command(name=\"exception\", hidden=True)\n async def tags_get_traceback_alias(self, ctx: Context) -> None:\n \"\"\"Alias for invoking <prefix>tags get traceback.\"\"\"\n await self.invoke(ctx, \"tags get\", tag_name=\"traceback\")\n\n @group(name=\"get\",\n aliases=(\"show\", \"g\"),\n hidden=True,\n invoke_without_command=True)\n async def get_group_alias(self, ctx: Context) -> None:\n \"\"\"Group for reverse aliases for commands like `tags get`, allowing for `get tags` or `get docs`.\"\"\"\n pass\n\n @get_group_alias.command(name=\"tags\", aliases=(\"tag\", \"t\"), hidden=True)\n async def tags_get_alias(\n self, ctx: Context, *, tag_name: TagNameConverter = None\n ) -> None:\n \"\"\"\n Alias for invoking <prefix>tags get [tag_name].\n\n tag_name: str - tag to be viewed.\n \"\"\"\n await self.invoke(ctx, \"tags get\", tag_name=tag_name)\n\n @get_group_alias.command(name=\"docs\", aliases=(\"doc\", \"d\"), hidden=True)\n async def docs_get_alias(\n self, ctx: Context, symbol: clean_content = None\n ) -> None:\n \"\"\"Alias for invoking <prefix>docs get [symbol].\"\"\"\n await self.invoke(ctx, \"docs get\", symbol)\n\n @command(name=\"nominate\", hidden=True)\n async def nomination_add_alias(self, ctx: Context, user: Union[Member, User, proxy_user], *, reason: str) -> None:\n \"\"\"Alias for invoking <prefix>talentpool add [user] [reason].\"\"\"\n await self.invoke(ctx, \"talentpool add\", user, reason=reason)\n\n @command(name=\"unnominate\", hidden=True)\n async def nomination_end_alias(self, ctx: Context, user: Union[User, proxy_user], *, reason: str) -> None:\n \"\"\"Alias for invoking <prefix>nomination end [user] [reason].\"\"\"\n await self.invoke(ctx, \"nomination end\", user, reason=reason)\n\n @command(name=\"nominees\", hidden=True)\n async def nominees_alias(self, ctx: Context) -> None:\n \"\"\"Alias for invoking <prefix>tp watched.\"\"\"\n await self.invoke(ctx, \"talentpool watched\")\n\n\ndef setup(bot: Bot) -> None:\n \"\"\"Alias cog load.\"\"\"\n bot.add_cog(Alias(bot))\n log.info(\"Cog loaded: Alias\")\n", "path": "bot/cogs/alias.py"}]}
num_tokens: 3,619
num_tokens_diff: 588
problem_id: gh_patches_debug_31054
source: rasdani/github-patches
task_type: git_diff
in_source_id: robocorp__rpaframework-662
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- `RPA.core.webdriver` used by Selenium can't download latest Mac arm64 webdriver 💻 System affected: M1/M2 Macs with no working `chromedriver` available in PATH under Chrome browser version **106.0.5249.61** and onwards. [Fix](https://github.com/SergeyPirogov/webdriver_manager/issues/446#issuecomment-1274558712) is already merged upstream but has not yet been released in PyPI. ### Reproduce ```python inv code.test-robot -r browser -t "Open Browser With Dict Options" ``` ### Solution The desired fix would be to bump the `webdriver-manager` package `>3.8.3` once such version gets released. ### Workaround Meanwhile, we can implement the [workaround](https://github.com/SergeyPirogov/webdriver_manager/pull/445/files#diff-38d3c158b7a31027012330b5537df014f3ec50ffa657a2787d449bd3e3c9367bL32-R33) ourselves too as described above (`mac64_m1` -> `mac_arm64`) so we can hit the right web driver download URL. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `packages/core/src/RPA/core/webdriver.py` Content: ``` 1 import contextlib 2 import logging 3 import os 4 import platform 5 import stat 6 from pathlib import Path 7 from typing import Optional 8 9 import requests 10 from requests import Response 11 from selenium import webdriver 12 from selenium.webdriver.common.service import Service 13 from selenium.webdriver.remote.webdriver import WebDriver 14 from webdriver_manager.chrome import ChromeDriverManager 15 from webdriver_manager.core.download_manager import WDMDownloadManager 16 from webdriver_manager.core.http import WDMHttpClient 17 from webdriver_manager.core.manager import DriverManager 18 from webdriver_manager.core.utils import os_name as get_os_name 19 from webdriver_manager.firefox import GeckoDriverManager 20 from webdriver_manager.microsoft import EdgeChromiumDriverManager, IEDriverManager 21 from webdriver_manager.opera import OperaDriverManager 22 23 from RPA.core.robocorp import robocorp_home 24 25 26 LOGGER = logging.getLogger(__name__) 27 28 DRIVER_ROOT = robocorp_home() / "webdrivers" 29 DRIVER_PREFERENCE = { 30 "Windows": ["Chrome", "Firefox", "ChromiumEdge"], 31 "Linux": ["Chrome", "Firefox", "ChromiumEdge"], 32 "Darwin": ["Chrome", "Firefox", "ChromiumEdge", "Safari"], 33 "default": ["Chrome", "Firefox"], 34 } 35 AVAILABLE_DRIVERS = { 36 # Driver names taken from `webdrivermanager` and adapted to `webdriver_manager`. 37 "chrome": ChromeDriverManager, 38 "firefox": GeckoDriverManager, 39 "gecko": GeckoDriverManager, 40 "mozilla": GeckoDriverManager, 41 # NOTE: Selenium 4 dropped support for Opera. 42 # (https://github.com/SeleniumHQ/selenium/issues/10835) 43 "opera": OperaDriverManager, 44 # NOTE: In Selenium 4 `Edge` is the same with `ChromiumEdge`. 45 "edge": EdgeChromiumDriverManager, 46 "chromiumedge": EdgeChromiumDriverManager, 47 # NOTE: IE is discontinued and not supported/encouraged anymore. 
48 "ie": IEDriverManager, 49 } 50 51 52 class Downloader(WDMHttpClient): 53 54 """Custom downloader which disables download progress reporting.""" 55 56 def get(self, url, **kwargs) -> Response: 57 resp = requests.get(url=url, verify=self._ssl_verify, stream=True, **kwargs) 58 self.validate_response(resp) 59 return resp 60 61 62 @contextlib.contextmanager 63 def suppress_logging(): 64 """Suppress webdriver-manager logging.""" 65 wdm_log = "WDM_LOG" 66 original_value = os.getenv(wdm_log, "") 67 try: 68 os.environ[wdm_log] = str(logging.NOTSET) 69 yield 70 finally: 71 os.environ[wdm_log] = original_value 72 73 74 def start(browser: str, service: Optional[Service] = None, **options) -> WebDriver: 75 """Start a webdriver with the given options.""" 76 browser = browser.strip() 77 webdriver_factory = getattr(webdriver, browser, None) 78 if not webdriver_factory: 79 raise ValueError(f"Unsupported browser: {browser}") 80 81 # NOTE: It is recommended to pass a `service` rather than deprecated `options`. 82 driver = webdriver_factory(service=service, **options) 83 return driver 84 85 86 def _to_manager(browser: str, root: Path = DRIVER_ROOT) -> DriverManager: 87 browser = browser.strip() 88 manager_factory = AVAILABLE_DRIVERS.get(browser.lower()) 89 if not manager_factory: 90 raise ValueError( 91 f"Unsupported browser {browser!r}! (choose from: {list(AVAILABLE_DRIVERS)})" 92 ) 93 94 download_manager = WDMDownloadManager(Downloader()) 95 manager = manager_factory(path=str(root), download_manager=download_manager) 96 return manager 97 98 99 def _set_executable(path: str) -> None: 100 st = os.stat(path) 101 os.chmod( 102 path, 103 st.st_mode | stat.S_IXOTH | stat.S_IXGRP | stat.S_IEXEC, 104 ) 105 106 107 def download(browser: str, root: Path = DRIVER_ROOT) -> Optional[str]: 108 """Download a webdriver binary for the given browser and return the path to it.""" 109 manager = _to_manager(browser, root) 110 driver = manager.driver 111 resolved_os = getattr(driver, "os_type", driver.get_os_type()) 112 os_name = get_os_name() 113 if os_name not in resolved_os: 114 LOGGER.warning( 115 "Attempting to download incompatible driver for OS %r on OS %r! Skip", 116 resolved_os, 117 os_name, 118 ) 119 return None # incompatible driver download attempt 120 121 with suppress_logging(): 122 path: str = manager.install() 123 if platform.system() != "Windows": 124 _set_executable(path) 125 LOGGER.debug("Downloaded webdriver to: %s", path) 126 return path 127 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/packages/core/src/RPA/core/webdriver.py b/packages/core/src/RPA/core/webdriver.py --- a/packages/core/src/RPA/core/webdriver.py +++ b/packages/core/src/RPA/core/webdriver.py @@ -7,6 +7,7 @@ from typing import Optional import requests +from packaging import version from requests import Response from selenium import webdriver from selenium.webdriver.common.service import Service @@ -53,7 +54,23 @@ """Custom downloader which disables download progress reporting.""" + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.driver = None + + def _fix_mac_arm_url(self, url) -> str: + if "m1" not in self.driver.get_os_type(): + return url + + # FIXME(cmin764): Remove this when the issue below gets closed + # https://github.com/SergeyPirogov/webdriver_manager/issues/446 + browser_version = self.driver.get_version() + if version.parse(browser_version) >= version.parse("106.0.5249.61"): + url = url.replace("mac64_m1", "mac_arm64") + return url + def get(self, url, **kwargs) -> Response: + url = self._fix_mac_arm_url(url) resp = requests.get(url=url, verify=self._ssl_verify, stream=True, **kwargs) self.validate_response(resp) return resp @@ -91,8 +108,10 @@ f"Unsupported browser {browser!r}! (choose from: {list(AVAILABLE_DRIVERS)})" ) - download_manager = WDMDownloadManager(Downloader()) + downloader = Downloader() + download_manager = WDMDownloadManager(downloader) manager = manager_factory(path=str(root), download_manager=download_manager) + downloader.driver = manager.driver return manager
{"golden_diff": "diff --git a/packages/core/src/RPA/core/webdriver.py b/packages/core/src/RPA/core/webdriver.py\n--- a/packages/core/src/RPA/core/webdriver.py\n+++ b/packages/core/src/RPA/core/webdriver.py\n@@ -7,6 +7,7 @@\n from typing import Optional\n \n import requests\n+from packaging import version\n from requests import Response\n from selenium import webdriver\n from selenium.webdriver.common.service import Service\n@@ -53,7 +54,23 @@\n \n \"\"\"Custom downloader which disables download progress reporting.\"\"\"\n \n+ def __init__(self, *args, **kwargs):\n+ super().__init__(*args, **kwargs)\n+ self.driver = None\n+\n+ def _fix_mac_arm_url(self, url) -> str:\n+ if \"m1\" not in self.driver.get_os_type():\n+ return url\n+\n+ # FIXME(cmin764): Remove this when the issue below gets closed\n+ # https://github.com/SergeyPirogov/webdriver_manager/issues/446\n+ browser_version = self.driver.get_version()\n+ if version.parse(browser_version) >= version.parse(\"106.0.5249.61\"):\n+ url = url.replace(\"mac64_m1\", \"mac_arm64\")\n+ return url\n+\n def get(self, url, **kwargs) -> Response:\n+ url = self._fix_mac_arm_url(url)\n resp = requests.get(url=url, verify=self._ssl_verify, stream=True, **kwargs)\n self.validate_response(resp)\n return resp\n@@ -91,8 +108,10 @@\n f\"Unsupported browser {browser!r}! (choose from: {list(AVAILABLE_DRIVERS)})\"\n )\n \n- download_manager = WDMDownloadManager(Downloader())\n+ downloader = Downloader()\n+ download_manager = WDMDownloadManager(downloader)\n manager = manager_factory(path=str(root), download_manager=download_manager)\n+ downloader.driver = manager.driver\n return manager\n", "issue": "`RPA.core.webdriver` used by Selenium can't download latest Mac arm64 webdriver\n\ud83d\udcbb System affected: M1/M2 Macs with no working `chromedriver` available in PATH under Chrome browser version **106.0.5249.61** and onwards.\r\n\r\n[Fix](https://github.com/SergeyPirogov/webdriver_manager/issues/446#issuecomment-1274558712) is already merged upstream but has not yet been released in PyPI.\r\n\r\n### Reproduce\r\n\r\n```python\r\ninv code.test-robot -r browser -t \"Open Browser With Dict Options\"\r\n```\r\n\r\n### Solution\r\nThe desired fix would be to bump the `webdriver-manager` package `>3.8.3` once such version gets released.\r\n\r\n### Workaround\r\nMeanwhile, we can implement the [workaround](https://github.com/SergeyPirogov/webdriver_manager/pull/445/files#diff-38d3c158b7a31027012330b5537df014f3ec50ffa657a2787d449bd3e3c9367bL32-R33) ourselves too as described above (`mac64_m1` -> `mac_arm64`) so we can hit the right web driver download URL.\n", "before_files": [{"content": "import contextlib\nimport logging\nimport os\nimport platform\nimport stat\nfrom pathlib import Path\nfrom typing import Optional\n\nimport requests\nfrom requests import Response\nfrom selenium import webdriver\nfrom selenium.webdriver.common.service import Service\nfrom selenium.webdriver.remote.webdriver import WebDriver\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom webdriver_manager.core.download_manager import WDMDownloadManager\nfrom webdriver_manager.core.http import WDMHttpClient\nfrom webdriver_manager.core.manager import DriverManager\nfrom webdriver_manager.core.utils import os_name as get_os_name\nfrom webdriver_manager.firefox import GeckoDriverManager\nfrom webdriver_manager.microsoft import EdgeChromiumDriverManager, IEDriverManager\nfrom webdriver_manager.opera import OperaDriverManager\n\nfrom RPA.core.robocorp import robocorp_home\n\n\nLOGGER = 
logging.getLogger(__name__)\n\nDRIVER_ROOT = robocorp_home() / \"webdrivers\"\nDRIVER_PREFERENCE = {\n \"Windows\": [\"Chrome\", \"Firefox\", \"ChromiumEdge\"],\n \"Linux\": [\"Chrome\", \"Firefox\", \"ChromiumEdge\"],\n \"Darwin\": [\"Chrome\", \"Firefox\", \"ChromiumEdge\", \"Safari\"],\n \"default\": [\"Chrome\", \"Firefox\"],\n}\nAVAILABLE_DRIVERS = {\n # Driver names taken from `webdrivermanager` and adapted to `webdriver_manager`.\n \"chrome\": ChromeDriverManager,\n \"firefox\": GeckoDriverManager,\n \"gecko\": GeckoDriverManager,\n \"mozilla\": GeckoDriverManager,\n # NOTE: Selenium 4 dropped support for Opera.\n # (https://github.com/SeleniumHQ/selenium/issues/10835)\n \"opera\": OperaDriverManager,\n # NOTE: In Selenium 4 `Edge` is the same with `ChromiumEdge`.\n \"edge\": EdgeChromiumDriverManager,\n \"chromiumedge\": EdgeChromiumDriverManager,\n # NOTE: IE is discontinued and not supported/encouraged anymore.\n \"ie\": IEDriverManager,\n}\n\n\nclass Downloader(WDMHttpClient):\n\n \"\"\"Custom downloader which disables download progress reporting.\"\"\"\n\n def get(self, url, **kwargs) -> Response:\n resp = requests.get(url=url, verify=self._ssl_verify, stream=True, **kwargs)\n self.validate_response(resp)\n return resp\n\n\[email protected]\ndef suppress_logging():\n \"\"\"Suppress webdriver-manager logging.\"\"\"\n wdm_log = \"WDM_LOG\"\n original_value = os.getenv(wdm_log, \"\")\n try:\n os.environ[wdm_log] = str(logging.NOTSET)\n yield\n finally:\n os.environ[wdm_log] = original_value\n\n\ndef start(browser: str, service: Optional[Service] = None, **options) -> WebDriver:\n \"\"\"Start a webdriver with the given options.\"\"\"\n browser = browser.strip()\n webdriver_factory = getattr(webdriver, browser, None)\n if not webdriver_factory:\n raise ValueError(f\"Unsupported browser: {browser}\")\n\n # NOTE: It is recommended to pass a `service` rather than deprecated `options`.\n driver = webdriver_factory(service=service, **options)\n return driver\n\n\ndef _to_manager(browser: str, root: Path = DRIVER_ROOT) -> DriverManager:\n browser = browser.strip()\n manager_factory = AVAILABLE_DRIVERS.get(browser.lower())\n if not manager_factory:\n raise ValueError(\n f\"Unsupported browser {browser!r}! (choose from: {list(AVAILABLE_DRIVERS)})\"\n )\n\n download_manager = WDMDownloadManager(Downloader())\n manager = manager_factory(path=str(root), download_manager=download_manager)\n return manager\n\n\ndef _set_executable(path: str) -> None:\n st = os.stat(path)\n os.chmod(\n path,\n st.st_mode | stat.S_IXOTH | stat.S_IXGRP | stat.S_IEXEC,\n )\n\n\ndef download(browser: str, root: Path = DRIVER_ROOT) -> Optional[str]:\n \"\"\"Download a webdriver binary for the given browser and return the path to it.\"\"\"\n manager = _to_manager(browser, root)\n driver = manager.driver\n resolved_os = getattr(driver, \"os_type\", driver.get_os_type())\n os_name = get_os_name()\n if os_name not in resolved_os:\n LOGGER.warning(\n \"Attempting to download incompatible driver for OS %r on OS %r! 
Skip\",\n resolved_os,\n os_name,\n )\n return None # incompatible driver download attempt\n\n with suppress_logging():\n path: str = manager.install()\n if platform.system() != \"Windows\":\n _set_executable(path)\n LOGGER.debug(\"Downloaded webdriver to: %s\", path)\n return path\n", "path": "packages/core/src/RPA/core/webdriver.py"}], "after_files": [{"content": "import contextlib\nimport logging\nimport os\nimport platform\nimport stat\nfrom pathlib import Path\nfrom typing import Optional\n\nimport requests\nfrom packaging import version\nfrom requests import Response\nfrom selenium import webdriver\nfrom selenium.webdriver.common.service import Service\nfrom selenium.webdriver.remote.webdriver import WebDriver\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom webdriver_manager.core.download_manager import WDMDownloadManager\nfrom webdriver_manager.core.http import WDMHttpClient\nfrom webdriver_manager.core.manager import DriverManager\nfrom webdriver_manager.core.utils import os_name as get_os_name\nfrom webdriver_manager.firefox import GeckoDriverManager\nfrom webdriver_manager.microsoft import EdgeChromiumDriverManager, IEDriverManager\nfrom webdriver_manager.opera import OperaDriverManager\n\nfrom RPA.core.robocorp import robocorp_home\n\n\nLOGGER = logging.getLogger(__name__)\n\nDRIVER_ROOT = robocorp_home() / \"webdrivers\"\nDRIVER_PREFERENCE = {\n \"Windows\": [\"Chrome\", \"Firefox\", \"ChromiumEdge\"],\n \"Linux\": [\"Chrome\", \"Firefox\", \"ChromiumEdge\"],\n \"Darwin\": [\"Chrome\", \"Firefox\", \"ChromiumEdge\", \"Safari\"],\n \"default\": [\"Chrome\", \"Firefox\"],\n}\nAVAILABLE_DRIVERS = {\n # Driver names taken from `webdrivermanager` and adapted to `webdriver_manager`.\n \"chrome\": ChromeDriverManager,\n \"firefox\": GeckoDriverManager,\n \"gecko\": GeckoDriverManager,\n \"mozilla\": GeckoDriverManager,\n # NOTE: Selenium 4 dropped support for Opera.\n # (https://github.com/SeleniumHQ/selenium/issues/10835)\n \"opera\": OperaDriverManager,\n # NOTE: In Selenium 4 `Edge` is the same with `ChromiumEdge`.\n \"edge\": EdgeChromiumDriverManager,\n \"chromiumedge\": EdgeChromiumDriverManager,\n # NOTE: IE is discontinued and not supported/encouraged anymore.\n \"ie\": IEDriverManager,\n}\n\n\nclass Downloader(WDMHttpClient):\n\n \"\"\"Custom downloader which disables download progress reporting.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.driver = None\n\n def _fix_mac_arm_url(self, url) -> str:\n if \"m1\" not in self.driver.get_os_type():\n return url\n\n # FIXME(cmin764): Remove this when the issue below gets closed\n # https://github.com/SergeyPirogov/webdriver_manager/issues/446\n browser_version = self.driver.get_version()\n if version.parse(browser_version) >= version.parse(\"106.0.5249.61\"):\n url = url.replace(\"mac64_m1\", \"mac_arm64\")\n return url\n\n def get(self, url, **kwargs) -> Response:\n url = self._fix_mac_arm_url(url)\n resp = requests.get(url=url, verify=self._ssl_verify, stream=True, **kwargs)\n self.validate_response(resp)\n return resp\n\n\[email protected]\ndef suppress_logging():\n \"\"\"Suppress webdriver-manager logging.\"\"\"\n wdm_log = \"WDM_LOG\"\n original_value = os.getenv(wdm_log, \"\")\n try:\n os.environ[wdm_log] = str(logging.NOTSET)\n yield\n finally:\n os.environ[wdm_log] = original_value\n\n\ndef start(browser: str, service: Optional[Service] = None, **options) -> WebDriver:\n \"\"\"Start a webdriver with the given options.\"\"\"\n browser = browser.strip()\n 
webdriver_factory = getattr(webdriver, browser, None)\n if not webdriver_factory:\n raise ValueError(f\"Unsupported browser: {browser}\")\n\n # NOTE: It is recommended to pass a `service` rather than deprecated `options`.\n driver = webdriver_factory(service=service, **options)\n return driver\n\n\ndef _to_manager(browser: str, root: Path = DRIVER_ROOT) -> DriverManager:\n browser = browser.strip()\n manager_factory = AVAILABLE_DRIVERS.get(browser.lower())\n if not manager_factory:\n raise ValueError(\n f\"Unsupported browser {browser!r}! (choose from: {list(AVAILABLE_DRIVERS)})\"\n )\n\n downloader = Downloader()\n download_manager = WDMDownloadManager(downloader)\n manager = manager_factory(path=str(root), download_manager=download_manager)\n downloader.driver = manager.driver\n return manager\n\n\ndef _set_executable(path: str) -> None:\n st = os.stat(path)\n os.chmod(\n path,\n st.st_mode | stat.S_IXOTH | stat.S_IXGRP | stat.S_IEXEC,\n )\n\n\ndef download(browser: str, root: Path = DRIVER_ROOT) -> Optional[str]:\n \"\"\"Download a webdriver binary for the given browser and return the path to it.\"\"\"\n manager = _to_manager(browser, root)\n driver = manager.driver\n resolved_os = getattr(driver, \"os_type\", driver.get_os_type())\n os_name = get_os_name()\n if os_name not in resolved_os:\n LOGGER.warning(\n \"Attempting to download incompatible driver for OS %r on OS %r! Skip\",\n resolved_os,\n os_name,\n )\n return None # incompatible driver download attempt\n\n with suppress_logging():\n path: str = manager.install()\n if platform.system() != \"Windows\":\n _set_executable(path)\n LOGGER.debug(\"Downloaded webdriver to: %s\", path)\n return path\n", "path": "packages/core/src/RPA/core/webdriver.py"}]}
num_tokens: 1,824
num_tokens_diff: 441
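For context on the record above, here is a minimal, self-contained sketch of the `mac64_m1` -> `mac_arm64` URL rewrite that the golden diff applies inside the webdriver-manager download hook. The function name, the standalone structure, and the sample download URL are illustrative assumptions; only the platform-token swap and the 106.0.5249.61 version threshold come from the record itself.

```python
# Illustrative sketch only: fix_mac_arm_url and the sample URL are invented for this
# example; the "mac64_m1" -> "mac_arm64" swap and the 106.0.5249.61 threshold are
# taken from the record above.
from packaging import version

ARM64_RENAME_THRESHOLD = version.parse("106.0.5249.61")


def fix_mac_arm_url(url: str, os_type: str, browser_version: str) -> str:
    """Rewrite legacy Apple Silicon driver URLs to the newer mac_arm64 naming."""
    if "m1" not in os_type:
        # Intel Macs, Windows and Linux keep their original download URL.
        return url
    if version.parse(browser_version) >= ARM64_RENAME_THRESHOLD:
        # Chrome 106.0.5249.61+ publishes the driver under the mac_arm64 suffix.
        return url.replace("mac64_m1", "mac_arm64")
    return url


if __name__ == "__main__":
    # Hypothetical M1 download request that would otherwise point at a missing file.
    print(fix_mac_arm_url(
        "https://chromedriver.storage.googleapis.com/106.0.5249.61/chromedriver_mac64_m1.zip",
        os_type="mac64_m1",
        browser_version="106.0.5249.61",
    ))
```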
problem_id: gh_patches_debug_7037
source: rasdani/github-patches
task_type: git_diff
in_source_id: DataDog__dd-trace-py-1080
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Exception raised with sizeof of some spans With `master`: ``` Exception in thread AgentWriter: Traceback (most recent call last): File "/opt/dogweb/pyenv/versions/2.7.12/lib/python2.7/threading.py", line 801, in __bootstrap_inner self.run() File "/opt/dogweb/pyenv/versions/2.7.12/lib/python2.7/threading.py", line 754, in run self.__target(*self.__args, **self.__kwargs) File "/opt/dogweb/lib/python2.7/site-packages/ddtrace/_worker.py", line 67, in _target self.run_periodic() File "/opt/dogweb/lib/python2.7/site-packages/ddtrace/internal/writer.py", line 65, in flush_queue traces_queue_size = sum(map(sizeof.sizeof, traces)) File "/opt/dogweb/lib/python2.7/site-packages/ddtrace/utils/sizeof.py", line 31, in sizeof return _sizeof(o) File "/opt/dogweb/lib/python2.7/site-packages/ddtrace/utils/sizeof.py", line 29, in _sizeof return sys.getsizeof(o) + sum(map(_sizeof, iter_object(o))) File "/opt/dogweb/lib/python2.7/site-packages/ddtrace/utils/sizeof.py", line 29, in _sizeof return sys.getsizeof(o) + sum(map(_sizeof, iter_object(o))) File "/opt/dogweb/lib/python2.7/site-packages/ddtrace/utils/sizeof.py", line 29, in _sizeof return sys.getsizeof(o) + sum(map(_sizeof, iter_object(o))) File "/opt/dogweb/lib/python2.7/site-packages/ddtrace/utils/sizeof.py", line 29, in _sizeof return sys.getsizeof(o) + sum(map(_sizeof, iter_object(o))) File "/opt/dogweb/lib/python2.7/site-packages/ddtrace/utils/sizeof.py", line 29, in _sizeof return sys.getsizeof(o) + sum(map(_sizeof, iter_object(o))) File "/opt/dogweb/lib/python2.7/site-packages/ddtrace/utils/sizeof.py", line 8, in <genexpr> return (getattr(o, slot) for slot in o.__slots__) AttributeError: rate_limit ``` Might be an issue where the attribute is declared in `__slots__` but not set. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ddtrace/utils/sizeof.py` Content: ``` 1 import collections 2 import sys 3 from itertools import chain 4 5 6 def iter_object(o): 7 if hasattr(o, '__slots__'): 8 return (getattr(o, slot) for slot in o.__slots__) 9 elif hasattr(o, '__dict__'): 10 return list(o.__dict__.items()) 11 elif isinstance(o, dict): 12 # Make a copy to avoid corruption 13 return chain.from_iterable(list(o.items())) 14 elif isinstance(o, (list, set, frozenset, tuple, collections.deque)): 15 # Make a copy to avoid corruption 16 return iter(list(o)) 17 return [] 18 19 20 def sizeof(o): 21 """Returns the approximate memory footprint an object and all of its contents.""" 22 seen = set() 23 24 def _sizeof(o): 25 # do not double count the same object 26 if id(o) in seen: 27 return 0 28 seen.add(id(o)) 29 return sys.getsizeof(o) + sum(map(_sizeof, iter_object(o))) 30 31 return _sizeof(o) 32 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ddtrace/utils/sizeof.py b/ddtrace/utils/sizeof.py --- a/ddtrace/utils/sizeof.py +++ b/ddtrace/utils/sizeof.py @@ -2,10 +2,16 @@ import sys from itertools import chain +_UNSET = object() + def iter_object(o): if hasattr(o, '__slots__'): - return (getattr(o, slot) for slot in o.__slots__) + return ( + s + for s in (getattr(o, slot, _UNSET) for slot in o.__slots__) + if s != _UNSET + ) elif hasattr(o, '__dict__'): return list(o.__dict__.items()) elif isinstance(o, dict):
{"golden_diff": "diff --git a/ddtrace/utils/sizeof.py b/ddtrace/utils/sizeof.py\n--- a/ddtrace/utils/sizeof.py\n+++ b/ddtrace/utils/sizeof.py\n@@ -2,10 +2,16 @@\n import sys\n from itertools import chain\n \n+_UNSET = object()\n+\n \n def iter_object(o):\n if hasattr(o, '__slots__'):\n- return (getattr(o, slot) for slot in o.__slots__)\n+ return (\n+ s\n+ for s in (getattr(o, slot, _UNSET) for slot in o.__slots__)\n+ if s != _UNSET\n+ )\n elif hasattr(o, '__dict__'):\n return list(o.__dict__.items())\n elif isinstance(o, dict):\n", "issue": "Exception raised with sizeof of some spans\nWith `master`:\r\n\r\n```\r\nException in thread AgentWriter:\r\nTraceback (most recent call last):\r\n File \"/opt/dogweb/pyenv/versions/2.7.12/lib/python2.7/threading.py\", line 801, in __bootstrap_inner\r\n self.run()\r\n File \"/opt/dogweb/pyenv/versions/2.7.12/lib/python2.7/threading.py\", line 754, in run\r\n self.__target(*self.__args, **self.__kwargs)\r\n File \"/opt/dogweb/lib/python2.7/site-packages/ddtrace/_worker.py\", line 67, in _target\r\n self.run_periodic()\r\n File \"/opt/dogweb/lib/python2.7/site-packages/ddtrace/internal/writer.py\", line 65, in flush_queue\r\n traces_queue_size = sum(map(sizeof.sizeof, traces))\r\n File \"/opt/dogweb/lib/python2.7/site-packages/ddtrace/utils/sizeof.py\", line 31, in sizeof\r\n return _sizeof(o)\r\n File \"/opt/dogweb/lib/python2.7/site-packages/ddtrace/utils/sizeof.py\", line 29, in _sizeof\r\n return sys.getsizeof(o) + sum(map(_sizeof, iter_object(o)))\r\n File \"/opt/dogweb/lib/python2.7/site-packages/ddtrace/utils/sizeof.py\", line 29, in _sizeof\r\n return sys.getsizeof(o) + sum(map(_sizeof, iter_object(o)))\r\n File \"/opt/dogweb/lib/python2.7/site-packages/ddtrace/utils/sizeof.py\", line 29, in _sizeof\r\n return sys.getsizeof(o) + sum(map(_sizeof, iter_object(o)))\r\n File \"/opt/dogweb/lib/python2.7/site-packages/ddtrace/utils/sizeof.py\", line 29, in _sizeof\r\n return sys.getsizeof(o) + sum(map(_sizeof, iter_object(o)))\r\n File \"/opt/dogweb/lib/python2.7/site-packages/ddtrace/utils/sizeof.py\", line 29, in _sizeof\r\n return sys.getsizeof(o) + sum(map(_sizeof, iter_object(o)))\r\n File \"/opt/dogweb/lib/python2.7/site-packages/ddtrace/utils/sizeof.py\", line 8, in <genexpr>\r\n return (getattr(o, slot) for slot in o.__slots__)\r\nAttributeError: rate_limit\r\n```\r\n\r\nMight be an issue where the attribute is declared in `__slots__` but not set.\n", "before_files": [{"content": "import collections\nimport sys\nfrom itertools import chain\n\n\ndef iter_object(o):\n if hasattr(o, '__slots__'):\n return (getattr(o, slot) for slot in o.__slots__)\n elif hasattr(o, '__dict__'):\n return list(o.__dict__.items())\n elif isinstance(o, dict):\n # Make a copy to avoid corruption\n return chain.from_iterable(list(o.items()))\n elif isinstance(o, (list, set, frozenset, tuple, collections.deque)):\n # Make a copy to avoid corruption\n return iter(list(o))\n return []\n\n\ndef sizeof(o):\n \"\"\"Returns the approximate memory footprint an object and all of its contents.\"\"\"\n seen = set()\n\n def _sizeof(o):\n # do not double count the same object\n if id(o) in seen:\n return 0\n seen.add(id(o))\n return sys.getsizeof(o) + sum(map(_sizeof, iter_object(o)))\n\n return _sizeof(o)\n", "path": "ddtrace/utils/sizeof.py"}], "after_files": [{"content": "import collections\nimport sys\nfrom itertools import chain\n\n_UNSET = object()\n\n\ndef iter_object(o):\n if hasattr(o, '__slots__'):\n return (\n s\n for s in (getattr(o, slot, _UNSET) for slot in 
o.__slots__)\n if s != _UNSET\n )\n elif hasattr(o, '__dict__'):\n return list(o.__dict__.items())\n elif isinstance(o, dict):\n # Make a copy to avoid corruption\n return chain.from_iterable(list(o.items()))\n elif isinstance(o, (list, set, frozenset, tuple, collections.deque)):\n # Make a copy to avoid corruption\n return iter(list(o))\n return []\n\n\ndef sizeof(o):\n \"\"\"Returns the approximate memory footprint an object and all of its contents.\"\"\"\n seen = set()\n\n def _sizeof(o):\n # do not double count the same object\n if id(o) in seen:\n return 0\n seen.add(id(o))\n return sys.getsizeof(o) + sum(map(_sizeof, iter_object(o)))\n\n return _sizeof(o)\n", "path": "ddtrace/utils/sizeof.py"}]}
num_tokens: 1,068
num_tokens_diff: 161
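To make the root cause of the record above concrete, here is a small runnable sketch of iterating `__slots__` with a sentinel so that declared-but-never-assigned slots (like `rate_limit` in the traceback) are skipped instead of raising `AttributeError`. The `Limiter` class and the function name are invented for the example; the actual patch uses the same `getattr(obj, slot, _UNSET)` idea inside `iter_object`.

```python
# Sketch only: Limiter and iter_slot_values are invented names; the sentinel-based
# getattr pattern mirrors the fix in the record above.
import sys

_UNSET = object()  # sentinel that cannot collide with any real attribute value


def iter_slot_values(obj):
    """Yield only the __slots__ values that were actually assigned on obj."""
    for slot in getattr(obj, "__slots__", ()):
        value = getattr(obj, slot, _UNSET)  # no AttributeError for unset slots
        if value is not _UNSET:
            yield value


class Limiter:
    __slots__ = ("rate", "rate_limit")

    def __init__(self, rate):
        self.rate = rate  # rate_limit is deliberately left unassigned


limiter = Limiter(10)
print(list(iter_slot_values(limiter)))                        # [10] -- unset slot skipped
print(sum(sys.getsizeof(v) for v in iter_slot_values(limiter)))
```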
problem_id: gh_patches_debug_29780
source: rasdani/github-patches
task_type: git_diff
in_source_id: apluslms__a-plus-1045
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Problems with Enrollment questionnaire and SISU enrollments Quite recently a new "PENDING" enrollment state was added to support enrollment questionnaires with courses where students are enrolled from SISU. On summer Y1 course this feature was first time in use in production, but does not appear to work properly. Students are set to PENDING state properly when fetched from SISU, but are not redirected to enrollment questionnaire properly as they should. Instead they get "You must enrol on this course through Student Information System." note which should not be shown in this case. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `course/viewbase.py` Content: ``` 1 from django.contrib import messages 2 from django.core.exceptions import PermissionDenied 3 from django.http import Http404 4 from django.shortcuts import get_object_or_404, redirect, render 5 from django.utils import translation 6 from django.utils.translation import gettext_lazy as _ 7 from django.utils.translation import get_language, get_language_info 8 9 from authorization.permissions import ACCESS 10 from exercise.cache.content import CachedContent 11 from lib.helpers import remove_query_param_from_url, update_url_params 12 from lib.viewbase import BaseTemplateView 13 from userprofile.viewbase import UserProfileMixin 14 from .cache.students import CachedStudent 15 from .exceptions import TranslationNotFound 16 from .permissions import ( 17 CourseVisiblePermission, 18 CourseModulePermission, 19 ) 20 from .models import Course, CourseInstance, CourseModule, UserTagging 21 22 23 class CourseMixin(UserProfileMixin): 24 course_kw = "course_slug" 25 26 def get_resource_objects(self): 27 super().get_resource_objects() 28 self.course = get_object_or_404( 29 Course, 30 url=self._get_kwarg(self.course_kw) 31 ) 32 self.note("course") 33 34 35 class CourseBaseView(CourseMixin, BaseTemplateView): 36 pass 37 38 39 class CourseInstanceBaseMixin(object): 40 course_kw = CourseMixin.course_kw 41 instance_kw = "instance_slug" 42 course_permission_classes = ( 43 CourseVisiblePermission, 44 ) 45 46 def get_permissions(self): 47 perms = super().get_permissions() 48 perms.extend((Perm() for Perm in self.course_permission_classes)) 49 return perms 50 51 # get_course_instance_object 52 53 def get_resource_objects(self): 54 super().get_resource_objects() 55 user = self.request.user 56 instance = self.get_course_instance_object() 57 if instance is not None: 58 self.instance = instance 59 self.course = self.instance.course 60 self.content = CachedContent(self.instance) 61 self.user_course_data = None 62 is_real_user = user.is_authenticated and not user.is_anonymous 63 if is_real_user: 64 self.user_course_data = self.instance.get_enrollment_for(user) 65 self.is_student = self.instance.is_student(user) 66 self.is_assistant = self.instance.is_assistant(user) 67 self.is_teacher = self.instance.is_teacher(user) 68 self.is_course_staff = self.is_teacher or self.is_assistant 69 self.get_taggings = lambda: CachedStudent(instance, user.id).data['tag_slugs'] 70 self.url_without_language = remove_query_param_from_url(self.request.get_full_path(), 'hl') 71 self.query_language = None 72 self.user_language = None 73 74 self.note( 75 "course", "instance", "content", "user_course_data", "is_student", "is_assistant", 76 "is_teacher", "is_course_staff", 
"get_taggings", "url_without_language", 77 "query_language", "user_language" 78 ) 79 80 # Try to find a language that is defined for this course instance 81 # and apply it 82 if self.instance.language: 83 instance_languages = self.instance.language.strip('|').split('|') 84 instance_def_language = instance_languages[0] 85 instance_languages = set(instance_languages) 86 87 languages = [] 88 if self.user_course_data and self.user_course_data.language: 89 languages.append(self.user_course_data.language) 90 if is_real_user and user.userprofile.language: 91 languages.append(user.userprofile.language) 92 languages.append(get_language()) 93 94 query_language = self.request.GET.get('hl') 95 if query_language: 96 if query_language[:2] in instance_languages: 97 language = query_language 98 if languages: 99 self.user_language = languages[0] 100 if self.user_language[:2] != query_language[:2]: 101 self.query_language = query_language 102 else: 103 raise TranslationNotFound 104 else: 105 for lang in languages: 106 if lang[:2] in instance_languages: 107 language = lang 108 break 109 else: 110 language = instance_def_language 111 112 language = language[:2] 113 # Override request.LANGUAGE_CODE. It is set in lib/middleware.py 114 # (class LocaleMiddleware) based on the userprofile.language. 115 # The middleware can not easily access the course context and 116 # the language from the enrollment. That is fixed here. 117 self.request.LANGUAGE_CODE = language 118 translation.activate(language) 119 120 def get_access_mode(self): 121 access_mode = super().get_access_mode() 122 123 if hasattr(self, 'instance'): 124 # Loosen the access mode if instance is public 125 show_for = self.instance.view_content_to 126 is_public = show_for == CourseInstance.VIEW_ACCESS.PUBLIC 127 access_mode_student = access_mode in (ACCESS.STUDENT, ACCESS.ENROLL) 128 if is_public and access_mode_student: 129 access_mode = ACCESS.ANONYMOUS 130 131 return access_mode 132 133 def handle_exception(self, exc): 134 if isinstance(exc, TranslationNotFound): 135 instance_languages = self.instance.language.strip("|").split("|") 136 url = remove_query_param_from_url(self.request.get_full_path(), 'hl') 137 for i, lang in enumerate(instance_languages): 138 instance_languages[i] = {"name": get_language_info(lang)['name'], "url": update_url_params(url, {'hl' : lang})} 139 return render(self.request, '404.html', {'error_msg': str(exc), 'languages': instance_languages}, status=404) 140 return super().handle_exception(exc) 141 142 class CourseInstanceMixin(CourseInstanceBaseMixin, UserProfileMixin): 143 def get_course_instance_object(self) -> CourseInstance: 144 return get_object_or_404( 145 CourseInstance.objects.prefetch_related('tabs'), 146 url=self.kwargs[self.instance_kw], 147 course__url=self.kwargs[self.course_kw], 148 ) 149 150 def handle_no_permission(self): 151 if (self.request.user.is_authenticated 152 and not self.is_student 153 and not self.is_course_staff 154 and self.get_access_mode() in [ACCESS.STUDENT, ACCESS.ENROLLED] 155 and self.instance.view_content_to == CourseInstance.VIEW_ACCESS.ENROLLED): 156 # Redirect the user to the enrollment page instead of showing 157 # a 403 Forbidden error, if: 158 # - the user is signed in but not enrolled or staff 159 # - the page is not a teacher page (e.g. 
edit course) 160 # - the course is visible only to enrolled students 161 return redirect(self.instance.get_url('enroll')) 162 return super().handle_no_permission() 163 164 165 class CourseInstanceBaseView(CourseInstanceMixin, BaseTemplateView): 166 pass 167 168 169 class EnrollableViewMixin(CourseInstanceMixin): 170 access_mode = ACCESS.ENROLL 171 172 def get_common_objects(self): 173 self.enrolled = self.is_student 174 self.enrollable = ( 175 self.profile 176 and self.instance.is_enrollable(self.profile.user) 177 ) 178 self.note('enrolled', 'enrollable') 179 180 181 class CourseModuleBaseMixin(object): 182 module_kw = "module_slug" 183 module_permissions_classes = ( 184 CourseModulePermission, 185 ) 186 187 def get_permissions(self): 188 perms = super().get_permissions() 189 perms.extend((Perm() for Perm in self.module_permissions_classes)) 190 return perms 191 192 # get_course_module_object 193 194 def get_resource_objects(self): 195 super().get_resource_objects() 196 self.module = self.get_course_module_object() 197 self.note("module") 198 199 200 class CourseModuleMixin(CourseModuleBaseMixin, CourseInstanceMixin): 201 def get_course_module_object(self): 202 return get_object_or_404( 203 CourseModule, 204 url=self.kwargs[self.module_kw], 205 course_instance=self.instance 206 ) 207 208 209 class CourseModuleBaseView(CourseModuleMixin, BaseTemplateView): 210 pass 211 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/course/viewbase.py b/course/viewbase.py --- a/course/viewbase.py +++ b/course/viewbase.py @@ -11,13 +11,14 @@ from lib.helpers import remove_query_param_from_url, update_url_params from lib.viewbase import BaseTemplateView from userprofile.viewbase import UserProfileMixin +from exercise.models import LearningObject from .cache.students import CachedStudent from .exceptions import TranslationNotFound from .permissions import ( CourseVisiblePermission, CourseModulePermission, ) -from .models import Course, CourseInstance, CourseModule, UserTagging +from .models import Course, CourseInstance, CourseModule, UserTagging, Enrollment class CourseMixin(UserProfileMixin): @@ -158,6 +159,15 @@ # - the user is signed in but not enrolled or staff # - the page is not a teacher page (e.g. edit course) # - the course is visible only to enrolled students + # + # If SIS enrollment is applied and course requires enrollment questionnaire, + # redirect to the questionnaire instead. + enrollment = self.user_course_data + if enrollment and enrollment.status == Enrollment.ENROLLMENT_STATUS.PENDING: + exercise = LearningObject.objects.find_enrollment_exercise( + self.instance, self.profile.is_external) + if exercise: + return self.redirect(exercise.get_absolute_url()) return redirect(self.instance.get_url('enroll')) return super().handle_no_permission()
{"golden_diff": "diff --git a/course/viewbase.py b/course/viewbase.py\n--- a/course/viewbase.py\n+++ b/course/viewbase.py\n@@ -11,13 +11,14 @@\n from lib.helpers import remove_query_param_from_url, update_url_params\n from lib.viewbase import BaseTemplateView\n from userprofile.viewbase import UserProfileMixin\n+from exercise.models import LearningObject\n from .cache.students import CachedStudent\n from .exceptions import TranslationNotFound\n from .permissions import (\n CourseVisiblePermission,\n CourseModulePermission,\n )\n-from .models import Course, CourseInstance, CourseModule, UserTagging\n+from .models import Course, CourseInstance, CourseModule, UserTagging, Enrollment\n \n \n class CourseMixin(UserProfileMixin):\n@@ -158,6 +159,15 @@\n # - the user is signed in but not enrolled or staff\n # - the page is not a teacher page (e.g. edit course)\n # - the course is visible only to enrolled students\n+ #\n+ # If SIS enrollment is applied and course requires enrollment questionnaire,\n+ # redirect to the questionnaire instead.\n+ enrollment = self.user_course_data\n+ if enrollment and enrollment.status == Enrollment.ENROLLMENT_STATUS.PENDING:\n+ exercise = LearningObject.objects.find_enrollment_exercise(\n+ self.instance, self.profile.is_external)\n+ if exercise:\n+ return self.redirect(exercise.get_absolute_url())\n return redirect(self.instance.get_url('enroll'))\n return super().handle_no_permission()\n", "issue": "Problems with Enrollment questionnaire and SISU enrollments\nQuite recently a new \"PENDING\" enrollment state was added to support enrollment questionnaires with courses where students are enrolled from SISU. On summer Y1 course this feature was first time in use in production, but does not appear to work properly. Students are set to PENDING state properly when fetched from SISU, but are not redirected to enrollment questionnaire properly as they should. 
Instead they get \"You must enrol on this course through Student Information System.\" note which should not be shown in this case.\n", "before_files": [{"content": "from django.contrib import messages\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import Http404\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.utils import translation\nfrom django.utils.translation import gettext_lazy as _\nfrom django.utils.translation import get_language, get_language_info\n\nfrom authorization.permissions import ACCESS\nfrom exercise.cache.content import CachedContent\nfrom lib.helpers import remove_query_param_from_url, update_url_params\nfrom lib.viewbase import BaseTemplateView\nfrom userprofile.viewbase import UserProfileMixin\nfrom .cache.students import CachedStudent\nfrom .exceptions import TranslationNotFound\nfrom .permissions import (\n CourseVisiblePermission,\n CourseModulePermission,\n)\nfrom .models import Course, CourseInstance, CourseModule, UserTagging\n\n\nclass CourseMixin(UserProfileMixin):\n course_kw = \"course_slug\"\n\n def get_resource_objects(self):\n super().get_resource_objects()\n self.course = get_object_or_404(\n Course,\n url=self._get_kwarg(self.course_kw)\n )\n self.note(\"course\")\n\n\nclass CourseBaseView(CourseMixin, BaseTemplateView):\n pass\n\n\nclass CourseInstanceBaseMixin(object):\n course_kw = CourseMixin.course_kw\n instance_kw = \"instance_slug\"\n course_permission_classes = (\n CourseVisiblePermission,\n )\n\n def get_permissions(self):\n perms = super().get_permissions()\n perms.extend((Perm() for Perm in self.course_permission_classes))\n return perms\n\n # get_course_instance_object\n\n def get_resource_objects(self):\n super().get_resource_objects()\n user = self.request.user\n instance = self.get_course_instance_object()\n if instance is not None:\n self.instance = instance\n self.course = self.instance.course\n self.content = CachedContent(self.instance)\n self.user_course_data = None\n is_real_user = user.is_authenticated and not user.is_anonymous\n if is_real_user:\n self.user_course_data = self.instance.get_enrollment_for(user)\n self.is_student = self.instance.is_student(user)\n self.is_assistant = self.instance.is_assistant(user)\n self.is_teacher = self.instance.is_teacher(user)\n self.is_course_staff = self.is_teacher or self.is_assistant\n self.get_taggings = lambda: CachedStudent(instance, user.id).data['tag_slugs']\n self.url_without_language = remove_query_param_from_url(self.request.get_full_path(), 'hl')\n self.query_language = None\n self.user_language = None\n\n self.note(\n \"course\", \"instance\", \"content\", \"user_course_data\", \"is_student\", \"is_assistant\",\n \"is_teacher\", \"is_course_staff\", \"get_taggings\", \"url_without_language\",\n \"query_language\", \"user_language\"\n )\n\n # Try to find a language that is defined for this course instance\n # and apply it\n if self.instance.language:\n instance_languages = self.instance.language.strip('|').split('|')\n instance_def_language = instance_languages[0]\n instance_languages = set(instance_languages)\n\n languages = []\n if self.user_course_data and self.user_course_data.language:\n languages.append(self.user_course_data.language)\n if is_real_user and user.userprofile.language:\n languages.append(user.userprofile.language)\n languages.append(get_language())\n\n query_language = self.request.GET.get('hl')\n if query_language:\n if query_language[:2] in instance_languages:\n language = query_language\n if 
languages:\n self.user_language = languages[0]\n if self.user_language[:2] != query_language[:2]:\n self.query_language = query_language\n else:\n raise TranslationNotFound\n else:\n for lang in languages:\n if lang[:2] in instance_languages:\n language = lang\n break\n else:\n language = instance_def_language\n\n language = language[:2]\n # Override request.LANGUAGE_CODE. It is set in lib/middleware.py\n # (class LocaleMiddleware) based on the userprofile.language.\n # The middleware can not easily access the course context and\n # the language from the enrollment. That is fixed here.\n self.request.LANGUAGE_CODE = language\n translation.activate(language)\n\n def get_access_mode(self):\n access_mode = super().get_access_mode()\n\n if hasattr(self, 'instance'):\n # Loosen the access mode if instance is public\n show_for = self.instance.view_content_to\n is_public = show_for == CourseInstance.VIEW_ACCESS.PUBLIC\n access_mode_student = access_mode in (ACCESS.STUDENT, ACCESS.ENROLL)\n if is_public and access_mode_student:\n access_mode = ACCESS.ANONYMOUS\n\n return access_mode\n\n def handle_exception(self, exc):\n if isinstance(exc, TranslationNotFound):\n instance_languages = self.instance.language.strip(\"|\").split(\"|\")\n url = remove_query_param_from_url(self.request.get_full_path(), 'hl')\n for i, lang in enumerate(instance_languages):\n instance_languages[i] = {\"name\": get_language_info(lang)['name'], \"url\": update_url_params(url, {'hl' : lang})}\n return render(self.request, '404.html', {'error_msg': str(exc), 'languages': instance_languages}, status=404)\n return super().handle_exception(exc)\n\nclass CourseInstanceMixin(CourseInstanceBaseMixin, UserProfileMixin):\n def get_course_instance_object(self) -> CourseInstance:\n return get_object_or_404(\n CourseInstance.objects.prefetch_related('tabs'),\n url=self.kwargs[self.instance_kw],\n course__url=self.kwargs[self.course_kw],\n )\n\n def handle_no_permission(self):\n if (self.request.user.is_authenticated\n and not self.is_student\n and not self.is_course_staff\n and self.get_access_mode() in [ACCESS.STUDENT, ACCESS.ENROLLED]\n and self.instance.view_content_to == CourseInstance.VIEW_ACCESS.ENROLLED):\n # Redirect the user to the enrollment page instead of showing\n # a 403 Forbidden error, if:\n # - the user is signed in but not enrolled or staff\n # - the page is not a teacher page (e.g. 
edit course)\n # - the course is visible only to enrolled students\n return redirect(self.instance.get_url('enroll'))\n return super().handle_no_permission()\n\n\nclass CourseInstanceBaseView(CourseInstanceMixin, BaseTemplateView):\n pass\n\n\nclass EnrollableViewMixin(CourseInstanceMixin):\n access_mode = ACCESS.ENROLL\n\n def get_common_objects(self):\n self.enrolled = self.is_student\n self.enrollable = (\n self.profile\n and self.instance.is_enrollable(self.profile.user)\n )\n self.note('enrolled', 'enrollable')\n\n\nclass CourseModuleBaseMixin(object):\n module_kw = \"module_slug\"\n module_permissions_classes = (\n CourseModulePermission,\n )\n\n def get_permissions(self):\n perms = super().get_permissions()\n perms.extend((Perm() for Perm in self.module_permissions_classes))\n return perms\n\n # get_course_module_object\n\n def get_resource_objects(self):\n super().get_resource_objects()\n self.module = self.get_course_module_object()\n self.note(\"module\")\n\n\nclass CourseModuleMixin(CourseModuleBaseMixin, CourseInstanceMixin):\n def get_course_module_object(self):\n return get_object_or_404(\n CourseModule,\n url=self.kwargs[self.module_kw],\n course_instance=self.instance\n )\n\n\nclass CourseModuleBaseView(CourseModuleMixin, BaseTemplateView):\n pass\n", "path": "course/viewbase.py"}], "after_files": [{"content": "from django.contrib import messages\nfrom django.core.exceptions import PermissionDenied\nfrom django.http import Http404\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.utils import translation\nfrom django.utils.translation import gettext_lazy as _\nfrom django.utils.translation import get_language, get_language_info\n\nfrom authorization.permissions import ACCESS\nfrom exercise.cache.content import CachedContent\nfrom lib.helpers import remove_query_param_from_url, update_url_params\nfrom lib.viewbase import BaseTemplateView\nfrom userprofile.viewbase import UserProfileMixin\nfrom exercise.models import LearningObject\nfrom .cache.students import CachedStudent\nfrom .exceptions import TranslationNotFound\nfrom .permissions import (\n CourseVisiblePermission,\n CourseModulePermission,\n)\nfrom .models import Course, CourseInstance, CourseModule, UserTagging, Enrollment\n\n\nclass CourseMixin(UserProfileMixin):\n course_kw = \"course_slug\"\n\n def get_resource_objects(self):\n super().get_resource_objects()\n self.course = get_object_or_404(\n Course,\n url=self._get_kwarg(self.course_kw)\n )\n self.note(\"course\")\n\n\nclass CourseBaseView(CourseMixin, BaseTemplateView):\n pass\n\n\nclass CourseInstanceBaseMixin(object):\n course_kw = CourseMixin.course_kw\n instance_kw = \"instance_slug\"\n course_permission_classes = (\n CourseVisiblePermission,\n )\n\n def get_permissions(self):\n perms = super().get_permissions()\n perms.extend((Perm() for Perm in self.course_permission_classes))\n return perms\n\n # get_course_instance_object\n\n def get_resource_objects(self):\n super().get_resource_objects()\n user = self.request.user\n instance = self.get_course_instance_object()\n if instance is not None:\n self.instance = instance\n self.course = self.instance.course\n self.content = CachedContent(self.instance)\n self.user_course_data = None\n is_real_user = user.is_authenticated and not user.is_anonymous\n if is_real_user:\n self.user_course_data = self.instance.get_enrollment_for(user)\n self.is_student = self.instance.is_student(user)\n self.is_assistant = self.instance.is_assistant(user)\n self.is_teacher = 
self.instance.is_teacher(user)\n self.is_course_staff = self.is_teacher or self.is_assistant\n self.get_taggings = lambda: CachedStudent(instance, user.id).data['tag_slugs']\n self.url_without_language = remove_query_param_from_url(self.request.get_full_path(), 'hl')\n self.query_language = None\n self.user_language = None\n\n self.note(\n \"course\", \"instance\", \"content\", \"user_course_data\", \"is_student\", \"is_assistant\",\n \"is_teacher\", \"is_course_staff\", \"get_taggings\", \"url_without_language\",\n \"query_language\", \"user_language\"\n )\n\n # Try to find a language that is defined for this course instance\n # and apply it\n if self.instance.language:\n instance_languages = self.instance.language.strip('|').split('|')\n instance_def_language = instance_languages[0]\n instance_languages = set(instance_languages)\n\n languages = []\n if self.user_course_data and self.user_course_data.language:\n languages.append(self.user_course_data.language)\n if is_real_user and user.userprofile.language:\n languages.append(user.userprofile.language)\n languages.append(get_language())\n\n query_language = self.request.GET.get('hl')\n if query_language:\n if query_language[:2] in instance_languages:\n language = query_language\n if languages:\n self.user_language = languages[0]\n if self.user_language[:2] != query_language[:2]:\n self.query_language = query_language\n else:\n raise TranslationNotFound\n else:\n for lang in languages:\n if lang[:2] in instance_languages:\n language = lang\n break\n else:\n language = instance_def_language\n\n language = language[:2]\n # Override request.LANGUAGE_CODE. It is set in lib/middleware.py\n # (class LocaleMiddleware) based on the userprofile.language.\n # The middleware can not easily access the course context and\n # the language from the enrollment. 
That is fixed here.\n self.request.LANGUAGE_CODE = language\n translation.activate(language)\n\n def get_access_mode(self):\n access_mode = super().get_access_mode()\n\n if hasattr(self, 'instance'):\n # Loosen the access mode if instance is public\n show_for = self.instance.view_content_to\n is_public = show_for == CourseInstance.VIEW_ACCESS.PUBLIC\n access_mode_student = access_mode in (ACCESS.STUDENT, ACCESS.ENROLL)\n if is_public and access_mode_student:\n access_mode = ACCESS.ANONYMOUS\n\n return access_mode\n\n def handle_exception(self, exc):\n if isinstance(exc, TranslationNotFound):\n instance_languages = self.instance.language.strip(\"|\").split(\"|\")\n url = remove_query_param_from_url(self.request.get_full_path(), 'hl')\n for i, lang in enumerate(instance_languages):\n instance_languages[i] = {\"name\": get_language_info(lang)['name'], \"url\": update_url_params(url, {'hl' : lang})}\n return render(self.request, '404.html', {'error_msg': str(exc), 'languages': instance_languages}, status=404)\n return super().handle_exception(exc)\n\nclass CourseInstanceMixin(CourseInstanceBaseMixin, UserProfileMixin):\n def get_course_instance_object(self) -> CourseInstance:\n return get_object_or_404(\n CourseInstance.objects.prefetch_related('tabs'),\n url=self.kwargs[self.instance_kw],\n course__url=self.kwargs[self.course_kw],\n )\n\n def handle_no_permission(self):\n if (self.request.user.is_authenticated\n and not self.is_student\n and not self.is_course_staff\n and self.get_access_mode() in [ACCESS.STUDENT, ACCESS.ENROLLED]\n and self.instance.view_content_to == CourseInstance.VIEW_ACCESS.ENROLLED):\n # Redirect the user to the enrollment page instead of showing\n # a 403 Forbidden error, if:\n # - the user is signed in but not enrolled or staff\n # - the page is not a teacher page (e.g. 
edit course)\n # - the course is visible only to enrolled students\n #\n # If SIS enrollment is applied and course requires enrollment questionnaire,\n # redirect to the questionnaire instead.\n enrollment = self.user_course_data\n if enrollment and enrollment.status == Enrollment.ENROLLMENT_STATUS.PENDING:\n exercise = LearningObject.objects.find_enrollment_exercise(\n self.instance, self.profile.is_external)\n if exercise:\n return self.redirect(exercise.get_absolute_url())\n return redirect(self.instance.get_url('enroll'))\n return super().handle_no_permission()\n\n\nclass CourseInstanceBaseView(CourseInstanceMixin, BaseTemplateView):\n pass\n\n\nclass EnrollableViewMixin(CourseInstanceMixin):\n access_mode = ACCESS.ENROLL\n\n def get_common_objects(self):\n self.enrolled = self.is_student\n self.enrollable = (\n self.profile\n and self.instance.is_enrollable(self.profile.user)\n )\n self.note('enrolled', 'enrollable')\n\n\nclass CourseModuleBaseMixin(object):\n module_kw = \"module_slug\"\n module_permissions_classes = (\n CourseModulePermission,\n )\n\n def get_permissions(self):\n perms = super().get_permissions()\n perms.extend((Perm() for Perm in self.module_permissions_classes))\n return perms\n\n # get_course_module_object\n\n def get_resource_objects(self):\n super().get_resource_objects()\n self.module = self.get_course_module_object()\n self.note(\"module\")\n\n\nclass CourseModuleMixin(CourseModuleBaseMixin, CourseInstanceMixin):\n def get_course_module_object(self):\n return get_object_or_404(\n CourseModule,\n url=self.kwargs[self.module_kw],\n course_instance=self.instance\n )\n\n\nclass CourseModuleBaseView(CourseModuleMixin, BaseTemplateView):\n pass\n", "path": "course/viewbase.py"}]}
2,512
324
gh_patches_debug_19375
rasdani/github-patches
git_diff
PaddlePaddle__PaddleDetection-2350
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- 训练-裁剪-训练-导出报错 PaddleDetection是2.0版本 项目是在平台上运行的 使用PaddleDetection训练好yolov3_mobilenet_v3模型后进行模型裁剪 裁剪完评估完成后导出报错 这是我执行的脚本 ` !python slim/prune/export_model.py \ -c configs/yolov3_mobilenet_v3.yml \ --pruned_params "yolo_block.0.0.0.conv.weights,yolo_block.0.0.1.conv.weights,yolo_block.0.1.0.conv.weights,yolo_block.0.1.1.conv.weights,yolo_block.0.2.conv.weights,yolo_block.0.tip.conv.weights,yolo_block.1.0.0.conv.weights,yolo_block.1.0.1.conv.weights,yolo_block.1.1.0.conv.weights,yolo_block.1.1.1.conv.weights,yolo_block.1.2.conv.weights,yolo_block.1.tip.conv.weights,yolo_block.2.0.0.conv.weights,yolo_block.2.0.1.conv.weights,yolo_block.2.1.0.conv.weights,yolo_block.2.1.1.conv.weights,yolo_block.2.2.conv.weights,yolo_block.2.tip.conv.weights" \ --pruned_ratios="0.7150126596733395,0.8177442961035291,0.8274278897456334,0.8373393786362668,0.7956892620674756,0.8445719578292334,0.9,0.9,0.9,0.9,0.9,0.9,0.9,0.9,0.9,0.9,0.9,0.9" \ -o weights=output/yolov3_mobilenet_v3/model_final ` 以下是报错信息, ``` [03-04 10:10:58 MainThread @logger.py:242] Argv: slim/prune/export_model.py -c configs/yolov3_mobilenet_v3.yml --pruned_params yolo_block.0.0.0.conv.weights,yolo_block.0.0.1.conv.weights,yolo_block.0.1.0.conv.weights,yolo_block.0.1.1.conv.weights,yolo_block.0.2.conv.weights,yolo_block.0.tip.conv.weights,yolo_block.1.0.0.conv.weights,yolo_block.1.0.1.conv.weights,yolo_block.1.1.0.conv.weights,yolo_block.1.1.1.conv.weights,yolo_block.1.2.conv.weights,yolo_block.1.tip.conv.weights,yolo_block.2.0.0.conv.weights,yolo_block.2.0.1.conv.weights,yolo_block.2.1.0.conv.weights,yolo_block.2.1.1.conv.weights,yolo_block.2.2.conv.weights,yolo_block.2.tip.conv.weights --pruned_ratios=0.7150126596733395,0.8177442961035291,0.8274278897456334,0.8373393786362668,0.7956892620674756,0.8445719578292334,0.9,0.9,0.9,0.9,0.9,0.9,0.9,0.9,0.9,0.9,0.9,0.9 -o weights=output/yolov3_mobilenet_v3/model_final [03-04 10:10:58 MainThread @utils.py:79] WRN paddlepaddle version: 2.0.0. The dynamic graph version of PARL is under development, not fully tested and supported /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/parl/remote/communication.py:38: DeprecationWarning: 'pyarrow.default_serialization_context' is deprecated as of 2.0.0 and will be removed in a future version. Use pickle or the pyarrow IPC functionality instead. 
context = pyarrow.default_serialization_context() /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/pandas/core/tools/datetimes.py:3: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working from collections import MutableMapping /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/matplotlib/rcsetup.py:20: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working from collections import Iterable, Mapping /opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/matplotlib/colors.py:53: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working from collections import Sized 2021-03-04 10:11:00,126-INFO: pruned params: ['yolo_block.0.0.0.conv.weights', 'yolo_block.0.0.1.conv.weights', 'yolo_block.0.1.0.conv.weights', 'yolo_block.0.1.1.conv.weights', 'yolo_block.0.2.conv.weights', 'yolo_block.0.tip.conv.weights', 'yolo_block.1.0.0.conv.weights', 'yolo_block.1.0.1.conv.weights', 'yolo_block.1.1.0.conv.weights', 'yolo_block.1.1.1.conv.weights', 'yolo_block.1.2.conv.weights', 'yolo_block.1.tip.conv.weights', 'yolo_block.2.0.0.conv.weights', 'yolo_block.2.0.1.conv.weights', 'yolo_block.2.1.0.conv.weights', 'yolo_block.2.1.1.conv.weights', 'yolo_block.2.2.conv.weights', 'yolo_block.2.tip.conv.weights'] 2021-03-04 10:11:00,126-INFO: pruned ratios: [0.7150126596733395, 0.8177442961035291, 0.8274278897456334, 0.8373393786362668, 0.7956892620674756, 0.8445719578292334, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9] 2021-03-04 10:11:00,169-INFO: pruning: yolo_block.0.0.0.conv.weights Traceback (most recent call last): File "slim/prune/export_model.py", line 123, in <module> main() File "slim/prune/export_model.py", line 88, in main only_graph=True) File "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddleslim/prune/pruner.py", line 112, in prune g = self._transform(self.idx_selector(scores, ratio)) File "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddleslim/prune/idx_selector.py", line 57, in default_idx_selector 0] # sort channels by the first convolution's score IndexError: list index out of range ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `slim/prune/export_model.py` Content: ``` 1 # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 
14 15 from __future__ import absolute_import 16 from __future__ import division 17 from __future__ import print_function 18 19 import os, sys 20 # add python path of PadleDetection to sys.path 21 parent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 3))) 22 if parent_path not in sys.path: 23 sys.path.append(parent_path) 24 25 import paddle 26 from paddle import fluid 27 28 from ppdet.core.workspace import load_config, merge_config, create 29 from ppdet.utils.cli import ArgsParser 30 import ppdet.utils.checkpoint as checkpoint 31 from ppdet.utils.export_utils import save_infer_model, dump_infer_config 32 from ppdet.utils.check import check_config, check_version, enable_static_mode 33 from paddleslim.prune import Pruner 34 from paddleslim.analysis import flops 35 36 import logging 37 FORMAT = '%(asctime)s-%(levelname)s: %(message)s' 38 logging.basicConfig(level=logging.INFO, format=FORMAT) 39 logger = logging.getLogger(__name__) 40 41 42 def main(): 43 cfg = load_config(FLAGS.config) 44 merge_config(FLAGS.opt) 45 check_config(cfg) 46 check_version() 47 48 main_arch = cfg.architecture 49 50 # Use CPU for exporting inference model instead of GPU 51 place = fluid.CPUPlace() 52 exe = fluid.Executor(place) 53 54 model = create(main_arch) 55 56 startup_prog = fluid.Program() 57 infer_prog = fluid.Program() 58 with fluid.program_guard(infer_prog, startup_prog): 59 with fluid.unique_name.guard(): 60 inputs_def = cfg['TestReader']['inputs_def'] 61 inputs_def['use_dataloader'] = False 62 feed_vars, _ = model.build_inputs(**inputs_def) 63 test_fetches = model.test(feed_vars) 64 infer_prog = infer_prog.clone(True) 65 66 pruned_params = FLAGS.pruned_params 67 assert ( 68 FLAGS.pruned_params is not None 69 ), "FLAGS.pruned_params is empty!!! Please set it by '--pruned_params' option." 70 pruned_params = FLAGS.pruned_params.strip().split(",") 71 logger.info("pruned params: {}".format(pruned_params)) 72 pruned_ratios = [float(n) for n in FLAGS.pruned_ratios.strip().split(",")] 73 logger.info("pruned ratios: {}".format(pruned_ratios)) 74 assert (len(pruned_params) == len(pruned_ratios) 75 ), "The length of pruned params and pruned ratios should be equal." 76 assert (pruned_ratios > [0] * len(pruned_ratios) and 77 pruned_ratios < [1] * len(pruned_ratios) 78 ), "The elements of pruned ratios should be in range (0, 1)." 79 80 base_flops = flops(infer_prog) 81 pruner = Pruner() 82 infer_prog, _, _ = pruner.prune( 83 infer_prog, 84 fluid.global_scope(), 85 params=pruned_params, 86 ratios=pruned_ratios, 87 place=place, 88 only_graph=True) 89 pruned_flops = flops(infer_prog) 90 logger.info("pruned FLOPS: {}".format( 91 float(base_flops - pruned_flops) / base_flops)) 92 93 exe.run(startup_prog) 94 checkpoint.load_checkpoint(exe, infer_prog, cfg.weights) 95 96 dump_infer_config(FLAGS, cfg) 97 save_infer_model(FLAGS, exe, feed_vars, test_fetches, infer_prog) 98 99 100 if __name__ == '__main__': 101 enable_static_mode() 102 parser = ArgsParser() 103 parser.add_argument( 104 "--output_dir", 105 type=str, 106 default="output", 107 help="Directory for storing the output model files.") 108 109 parser.add_argument( 110 "-p", 111 "--pruned_params", 112 default=None, 113 type=str, 114 help="The parameters to be pruned when calculating sensitivities.") 115 parser.add_argument( 116 "--pruned_ratios", 117 default=None, 118 type=str, 119 help="The ratios pruned iteratively for each parameter when calculating sensitivities." 
120 ) 121 122 FLAGS = parser.parse_args() 123 main() 124 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/slim/prune/export_model.py b/slim/prune/export_model.py --- a/slim/prune/export_model.py +++ b/slim/prune/export_model.py @@ -63,6 +63,9 @@ test_fetches = model.test(feed_vars) infer_prog = infer_prog.clone(True) + exe.run(startup_prog) + checkpoint.load_checkpoint(exe, infer_prog, cfg.weights) + pruned_params = FLAGS.pruned_params assert ( FLAGS.pruned_params is not None @@ -90,13 +93,9 @@ logger.info("pruned FLOPS: {}".format( float(base_flops - pruned_flops) / base_flops)) - exe.run(startup_prog) - checkpoint.load_checkpoint(exe, infer_prog, cfg.weights) - dump_infer_config(FLAGS, cfg) save_infer_model(FLAGS, exe, feed_vars, test_fetches, infer_prog) - if __name__ == '__main__': enable_static_mode() parser = ArgsParser()
{"golden_diff": "diff --git a/slim/prune/export_model.py b/slim/prune/export_model.py\n--- a/slim/prune/export_model.py\n+++ b/slim/prune/export_model.py\n@@ -63,6 +63,9 @@\n test_fetches = model.test(feed_vars)\n infer_prog = infer_prog.clone(True)\n \n+ exe.run(startup_prog)\n+ checkpoint.load_checkpoint(exe, infer_prog, cfg.weights)\n+\n pruned_params = FLAGS.pruned_params\n assert (\n FLAGS.pruned_params is not None\n@@ -90,13 +93,9 @@\n logger.info(\"pruned FLOPS: {}\".format(\n float(base_flops - pruned_flops) / base_flops))\n \n- exe.run(startup_prog)\n- checkpoint.load_checkpoint(exe, infer_prog, cfg.weights)\n-\n dump_infer_config(FLAGS, cfg)\n save_infer_model(FLAGS, exe, feed_vars, test_fetches, infer_prog)\n \n-\n if __name__ == '__main__':\n enable_static_mode()\n parser = ArgsParser()\n", "issue": "\u8bad\u7ec3-\u88c1\u526a-\u8bad\u7ec3-\u5bfc\u51fa\u62a5\u9519\nPaddleDetection\u662f2.0\u7248\u672c\r\n\u9879\u76ee\u662f\u5728\u5e73\u53f0\u4e0a\u8fd0\u884c\u7684\r\n\u4f7f\u7528PaddleDetection\u8bad\u7ec3\u597dyolov3_mobilenet_v3\u6a21\u578b\u540e\u8fdb\u884c\u6a21\u578b\u88c1\u526a\r\n\u88c1\u526a\u5b8c\u8bc4\u4f30\u5b8c\u6210\u540e\u5bfc\u51fa\u62a5\u9519\r\n\u8fd9\u662f\u6211\u6267\u884c\u7684\u811a\u672c\r\n`\r\n!python slim/prune/export_model.py \\\r\n-c configs/yolov3_mobilenet_v3.yml \\\r\n--pruned_params \"yolo_block.0.0.0.conv.weights,yolo_block.0.0.1.conv.weights,yolo_block.0.1.0.conv.weights,yolo_block.0.1.1.conv.weights,yolo_block.0.2.conv.weights,yolo_block.0.tip.conv.weights,yolo_block.1.0.0.conv.weights,yolo_block.1.0.1.conv.weights,yolo_block.1.1.0.conv.weights,yolo_block.1.1.1.conv.weights,yolo_block.1.2.conv.weights,yolo_block.1.tip.conv.weights,yolo_block.2.0.0.conv.weights,yolo_block.2.0.1.conv.weights,yolo_block.2.1.0.conv.weights,yolo_block.2.1.1.conv.weights,yolo_block.2.2.conv.weights,yolo_block.2.tip.conv.weights\" \\\r\n--pruned_ratios=\"0.7150126596733395,0.8177442961035291,0.8274278897456334,0.8373393786362668,0.7956892620674756,0.8445719578292334,0.9,0.9,0.9,0.9,0.9,0.9,0.9,0.9,0.9,0.9,0.9,0.9\" \\\r\n-o weights=output/yolov3_mobilenet_v3/model_final\r\n`\r\n\r\n\u4ee5\u4e0b\u662f\u62a5\u9519\u4fe1\u606f\uff0c\r\n\r\n```\r\n[03-04 10:10:58 MainThread @logger.py:242] Argv: slim/prune/export_model.py -c configs/yolov3_mobilenet_v3.yml --pruned_params yolo_block.0.0.0.conv.weights,yolo_block.0.0.1.conv.weights,yolo_block.0.1.0.conv.weights,yolo_block.0.1.1.conv.weights,yolo_block.0.2.conv.weights,yolo_block.0.tip.conv.weights,yolo_block.1.0.0.conv.weights,yolo_block.1.0.1.conv.weights,yolo_block.1.1.0.conv.weights,yolo_block.1.1.1.conv.weights,yolo_block.1.2.conv.weights,yolo_block.1.tip.conv.weights,yolo_block.2.0.0.conv.weights,yolo_block.2.0.1.conv.weights,yolo_block.2.1.0.conv.weights,yolo_block.2.1.1.conv.weights,yolo_block.2.2.conv.weights,yolo_block.2.tip.conv.weights --pruned_ratios=0.7150126596733395,0.8177442961035291,0.8274278897456334,0.8373393786362668,0.7956892620674756,0.8445719578292334,0.9,0.9,0.9,0.9,0.9,0.9,0.9,0.9,0.9,0.9,0.9,0.9 -o weights=output/yolov3_mobilenet_v3/model_final\r\n[03-04 10:10:58 MainThread @utils.py:79] WRN paddlepaddle version: 2.0.0. The dynamic graph version of PARL is under development, not fully tested and supported\r\n/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/parl/remote/communication.py:38: DeprecationWarning: 'pyarrow.default_serialization_context' is deprecated as of 2.0.0 and will be removed in a future version. 
Use pickle or the pyarrow IPC functionality instead.\r\n context = pyarrow.default_serialization_context()\r\n/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/pandas/core/tools/datetimes.py:3: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working\r\n from collections import MutableMapping\r\n/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/matplotlib/rcsetup.py:20: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working\r\n from collections import Iterable, Mapping\r\n/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/matplotlib/colors.py:53: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working\r\n from collections import Sized\r\n2021-03-04 10:11:00,126-INFO: pruned params: ['yolo_block.0.0.0.conv.weights', 'yolo_block.0.0.1.conv.weights', 'yolo_block.0.1.0.conv.weights', 'yolo_block.0.1.1.conv.weights', 'yolo_block.0.2.conv.weights', 'yolo_block.0.tip.conv.weights', 'yolo_block.1.0.0.conv.weights', 'yolo_block.1.0.1.conv.weights', 'yolo_block.1.1.0.conv.weights', 'yolo_block.1.1.1.conv.weights', 'yolo_block.1.2.conv.weights', 'yolo_block.1.tip.conv.weights', 'yolo_block.2.0.0.conv.weights', 'yolo_block.2.0.1.conv.weights', 'yolo_block.2.1.0.conv.weights', 'yolo_block.2.1.1.conv.weights', 'yolo_block.2.2.conv.weights', 'yolo_block.2.tip.conv.weights']\r\n2021-03-04 10:11:00,126-INFO: pruned ratios: [0.7150126596733395, 0.8177442961035291, 0.8274278897456334, 0.8373393786362668, 0.7956892620674756, 0.8445719578292334, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9]\r\n2021-03-04 10:11:00,169-INFO: pruning: yolo_block.0.0.0.conv.weights\r\nTraceback (most recent call last):\r\n File \"slim/prune/export_model.py\", line 123, in <module>\r\n main()\r\n File \"slim/prune/export_model.py\", line 88, in main\r\n only_graph=True)\r\n File \"/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddleslim/prune/pruner.py\", line 112, in prune\r\n g = self._transform(self.idx_selector(scores, ratio))\r\n File \"/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddleslim/prune/idx_selector.py\", line 57, in default_idx_selector\r\n 0] # sort channels by the first convolution's score\r\nIndexError: list index out of range\r\n```\n", "before_files": [{"content": "# Copyright (c) 2020 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os, sys\n# add python path of PadleDetection to sys.path\nparent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 3)))\nif parent_path not in sys.path:\n sys.path.append(parent_path)\n\nimport paddle\nfrom paddle import fluid\n\nfrom ppdet.core.workspace import load_config, merge_config, create\nfrom ppdet.utils.cli import ArgsParser\nimport ppdet.utils.checkpoint as checkpoint\nfrom ppdet.utils.export_utils import save_infer_model, dump_infer_config\nfrom ppdet.utils.check import check_config, check_version, enable_static_mode\nfrom paddleslim.prune import Pruner\nfrom paddleslim.analysis import flops\n\nimport logging\nFORMAT = '%(asctime)s-%(levelname)s: %(message)s'\nlogging.basicConfig(level=logging.INFO, format=FORMAT)\nlogger = logging.getLogger(__name__)\n\n\ndef main():\n cfg = load_config(FLAGS.config)\n merge_config(FLAGS.opt)\n check_config(cfg)\n check_version()\n\n main_arch = cfg.architecture\n\n # Use CPU for exporting inference model instead of GPU\n place = fluid.CPUPlace()\n exe = fluid.Executor(place)\n\n model = create(main_arch)\n\n startup_prog = fluid.Program()\n infer_prog = fluid.Program()\n with fluid.program_guard(infer_prog, startup_prog):\n with fluid.unique_name.guard():\n inputs_def = cfg['TestReader']['inputs_def']\n inputs_def['use_dataloader'] = False\n feed_vars, _ = model.build_inputs(**inputs_def)\n test_fetches = model.test(feed_vars)\n infer_prog = infer_prog.clone(True)\n\n pruned_params = FLAGS.pruned_params\n assert (\n FLAGS.pruned_params is not None\n ), \"FLAGS.pruned_params is empty!!! 
Please set it by '--pruned_params' option.\"\n pruned_params = FLAGS.pruned_params.strip().split(\",\")\n logger.info(\"pruned params: {}\".format(pruned_params))\n pruned_ratios = [float(n) for n in FLAGS.pruned_ratios.strip().split(\",\")]\n logger.info(\"pruned ratios: {}\".format(pruned_ratios))\n assert (len(pruned_params) == len(pruned_ratios)\n ), \"The length of pruned params and pruned ratios should be equal.\"\n assert (pruned_ratios > [0] * len(pruned_ratios) and\n pruned_ratios < [1] * len(pruned_ratios)\n ), \"The elements of pruned ratios should be in range (0, 1).\"\n\n base_flops = flops(infer_prog)\n pruner = Pruner()\n infer_prog, _, _ = pruner.prune(\n infer_prog,\n fluid.global_scope(),\n params=pruned_params,\n ratios=pruned_ratios,\n place=place,\n only_graph=True)\n pruned_flops = flops(infer_prog)\n logger.info(\"pruned FLOPS: {}\".format(\n float(base_flops - pruned_flops) / base_flops))\n\n exe.run(startup_prog)\n checkpoint.load_checkpoint(exe, infer_prog, cfg.weights)\n\n dump_infer_config(FLAGS, cfg)\n save_infer_model(FLAGS, exe, feed_vars, test_fetches, infer_prog)\n\n\nif __name__ == '__main__':\n enable_static_mode()\n parser = ArgsParser()\n parser.add_argument(\n \"--output_dir\",\n type=str,\n default=\"output\",\n help=\"Directory for storing the output model files.\")\n\n parser.add_argument(\n \"-p\",\n \"--pruned_params\",\n default=None,\n type=str,\n help=\"The parameters to be pruned when calculating sensitivities.\")\n parser.add_argument(\n \"--pruned_ratios\",\n default=None,\n type=str,\n help=\"The ratios pruned iteratively for each parameter when calculating sensitivities.\"\n )\n\n FLAGS = parser.parse_args()\n main()\n", "path": "slim/prune/export_model.py"}], "after_files": [{"content": "# Copyright (c) 2020 PaddlePaddle Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os, sys\n# add python path of PadleDetection to sys.path\nparent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 3)))\nif parent_path not in sys.path:\n sys.path.append(parent_path)\n\nimport paddle\nfrom paddle import fluid\n\nfrom ppdet.core.workspace import load_config, merge_config, create\nfrom ppdet.utils.cli import ArgsParser\nimport ppdet.utils.checkpoint as checkpoint\nfrom ppdet.utils.export_utils import save_infer_model, dump_infer_config\nfrom ppdet.utils.check import check_config, check_version, enable_static_mode\nfrom paddleslim.prune import Pruner\nfrom paddleslim.analysis import flops\n\nimport logging\nFORMAT = '%(asctime)s-%(levelname)s: %(message)s'\nlogging.basicConfig(level=logging.INFO, format=FORMAT)\nlogger = logging.getLogger(__name__)\n\n\ndef main():\n cfg = load_config(FLAGS.config)\n merge_config(FLAGS.opt)\n check_config(cfg)\n check_version()\n\n main_arch = cfg.architecture\n\n # Use CPU for exporting inference model instead of GPU\n place = fluid.CPUPlace()\n exe = fluid.Executor(place)\n\n model = create(main_arch)\n\n startup_prog = fluid.Program()\n infer_prog = fluid.Program()\n with fluid.program_guard(infer_prog, startup_prog):\n with fluid.unique_name.guard():\n inputs_def = cfg['TestReader']['inputs_def']\n inputs_def['use_dataloader'] = False\n feed_vars, _ = model.build_inputs(**inputs_def)\n test_fetches = model.test(feed_vars)\n infer_prog = infer_prog.clone(True)\n\n exe.run(startup_prog)\n checkpoint.load_checkpoint(exe, infer_prog, cfg.weights)\n\n pruned_params = FLAGS.pruned_params\n assert (\n FLAGS.pruned_params is not None\n ), \"FLAGS.pruned_params is empty!!! 
Please set it by '--pruned_params' option.\"\n pruned_params = FLAGS.pruned_params.strip().split(\",\")\n logger.info(\"pruned params: {}\".format(pruned_params))\n pruned_ratios = [float(n) for n in FLAGS.pruned_ratios.strip().split(\",\")]\n logger.info(\"pruned ratios: {}\".format(pruned_ratios))\n assert (len(pruned_params) == len(pruned_ratios)\n ), \"The length of pruned params and pruned ratios should be equal.\"\n assert (pruned_ratios > [0] * len(pruned_ratios) and\n pruned_ratios < [1] * len(pruned_ratios)\n ), \"The elements of pruned ratios should be in range (0, 1).\"\n\n base_flops = flops(infer_prog)\n pruner = Pruner()\n infer_prog, _, _ = pruner.prune(\n infer_prog,\n fluid.global_scope(),\n params=pruned_params,\n ratios=pruned_ratios,\n place=place,\n only_graph=True)\n pruned_flops = flops(infer_prog)\n logger.info(\"pruned FLOPS: {}\".format(\n float(base_flops - pruned_flops) / base_flops))\n\n dump_infer_config(FLAGS, cfg)\n save_infer_model(FLAGS, exe, feed_vars, test_fetches, infer_prog)\n\nif __name__ == '__main__':\n enable_static_mode()\n parser = ArgsParser()\n parser.add_argument(\n \"--output_dir\",\n type=str,\n default=\"output\",\n help=\"Directory for storing the output model files.\")\n\n parser.add_argument(\n \"-p\",\n \"--pruned_params\",\n default=None,\n type=str,\n help=\"The parameters to be pruned when calculating sensitivities.\")\n parser.add_argument(\n \"--pruned_ratios\",\n default=None,\n type=str,\n help=\"The ratios pruned iteratively for each parameter when calculating sensitivities.\"\n )\n\n FLAGS = parser.parse_args()\n main()\n", "path": "slim/prune/export_model.py"}]}
3,464
234
gh_patches_debug_5790
rasdani/github-patches
git_diff
googleapis__google-auth-library-python-175
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- GCE metadata ping taking ~75 seconds, not timing out after 3 seconds google-api-python-client: 1.6.2 python version: 3.5.2 ``` from google.auth.compute_engine import _metadata import google.auth.transport._http_client request = google.auth.transport._http_client.Request() _metadata.ping(request=request) ``` When running the following script, the default timeout of 3 seconds is not used. I'm instead seeing the script complete in about 75 seconds. If I print out the value of line 4, I get `False`. I'm not sure if that indicates that the call was successful, or if a timeout had occurred. I see this take ~75 seconds consistently If I set the environment variable `GCE_METADATA_TIMEOUT` to a value such as 10, the script still takes ~75 seconds. I'm running into this issue when using the pandas-gbq library. I'm writing a script to authorize pandas to access the gbq project. That library checks for the default google credentials, which do not exist in my case. [Here's my related issue with that library](https://github.com/pydata/pandas-gbq/issues/73). Is this an issue with the google-auth library? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `google/auth/transport/_http_client.py` Content: ``` 1 # Copyright 2016 Google Inc. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 """Transport adapter for http.client, for internal use only.""" 16 17 import logging 18 import socket 19 20 from six.moves import http_client 21 from six.moves import urllib 22 23 from google.auth import exceptions 24 from google.auth import transport 25 26 _LOGGER = logging.getLogger(__name__) 27 28 29 class Response(transport.Response): 30 """http.client transport response adapter. 31 32 Args: 33 response (http.client.HTTPResponse): The raw http client response. 34 """ 35 def __init__(self, response): 36 self._status = response.status 37 self._headers = { 38 key.lower(): value for key, value in response.getheaders()} 39 self._data = response.read() 40 41 @property 42 def status(self): 43 return self._status 44 45 @property 46 def headers(self): 47 return self._headers 48 49 @property 50 def data(self): 51 return self._data 52 53 54 class Request(transport.Request): 55 """http.client transport request adapter.""" 56 57 def __call__(self, url, method='GET', body=None, headers=None, 58 timeout=None, **kwargs): 59 """Make an HTTP request using http.client. 60 61 Args: 62 url (str): The URI to be requested. 63 method (str): The HTTP method to use for the request. Defaults 64 to 'GET'. 65 body (bytes): The payload / body in HTTP request. 66 headers (Mapping): Request headers. 67 timeout (Optional(int)): The number of seconds to wait for a 68 response from the server. If not specified or if None, the 69 socket global default timeout will be used. 
70 kwargs: Additional arguments passed throught to the underlying 71 :meth:`~http.client.HTTPConnection.request` method. 72 73 Returns: 74 Response: The HTTP response. 75 76 Raises: 77 google.auth.exceptions.TransportError: If any exception occurred. 78 """ 79 # socket._GLOBAL_DEFAULT_TIMEOUT is the default in http.client. 80 if timeout is None: 81 timeout = socket._GLOBAL_DEFAULT_TIMEOUT 82 83 # http.client doesn't allow None as the headers argument. 84 if headers is None: 85 headers = {} 86 87 # http.client needs the host and path parts specified separately. 88 parts = urllib.parse.urlsplit(url) 89 path = urllib.parse.urlunsplit( 90 ('', '', parts.path, parts.query, parts.fragment)) 91 92 if parts.scheme != 'http': 93 raise exceptions.TransportError( 94 'http.client transport only supports the http scheme, {}' 95 'was specified'.format(parts.scheme)) 96 97 connection = http_client.HTTPConnection(parts.netloc) 98 99 try: 100 _LOGGER.debug('Making request: %s %s', method, url) 101 102 connection.request( 103 method, path, body=body, headers=headers, **kwargs) 104 response = connection.getresponse() 105 return Response(response) 106 107 except (http_client.HTTPException, socket.error) as exc: 108 raise exceptions.TransportError(exc) 109 110 finally: 111 connection.close() 112 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/google/auth/transport/_http_client.py b/google/auth/transport/_http_client.py --- a/google/auth/transport/_http_client.py +++ b/google/auth/transport/_http_client.py @@ -94,7 +94,7 @@ 'http.client transport only supports the http scheme, {}' 'was specified'.format(parts.scheme)) - connection = http_client.HTTPConnection(parts.netloc) + connection = http_client.HTTPConnection(parts.netloc, timeout=timeout) try: _LOGGER.debug('Making request: %s %s', method, url)
{"golden_diff": "diff --git a/google/auth/transport/_http_client.py b/google/auth/transport/_http_client.py\n--- a/google/auth/transport/_http_client.py\n+++ b/google/auth/transport/_http_client.py\n@@ -94,7 +94,7 @@\n 'http.client transport only supports the http scheme, {}'\n 'was specified'.format(parts.scheme))\n \n- connection = http_client.HTTPConnection(parts.netloc)\n+ connection = http_client.HTTPConnection(parts.netloc, timeout=timeout)\n \n try:\n _LOGGER.debug('Making request: %s %s', method, url)\n", "issue": "GCE metadata ping taking ~75 seconds, not timing out after 3 seconds\ngoogle-api-python-client: 1.6.2\r\npython version: 3.5.2\r\n\r\n```\r\nfrom google.auth.compute_engine import _metadata\r\nimport google.auth.transport._http_client\r\nrequest = google.auth.transport._http_client.Request()\r\n_metadata.ping(request=request)\r\n```\r\n\r\nWhen running the following script, the default timeout of 3 seconds is not used. I'm instead seeing the script complete in about 75 seconds. If I print out the value of line 4, I get `False`. I'm not sure if that indicates that the call was successful, or if a timeout had occurred. I see this take ~75 seconds consistently If I set the environment variable `GCE_METADATA_TIMEOUT` to a value such as 10, the script still takes ~75 seconds.\r\n\r\nI'm running into this issue when using the pandas-gbq library. I'm writing a script to authorize pandas to access the gbq project. That library checks for the default google credentials, which do not exist in my case. [Here's my related issue with that library](https://github.com/pydata/pandas-gbq/issues/73).\r\n\r\nIs this an issue with the google-auth library?\n", "before_files": [{"content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Transport adapter for http.client, for internal use only.\"\"\"\n\nimport logging\nimport socket\n\nfrom six.moves import http_client\nfrom six.moves import urllib\n\nfrom google.auth import exceptions\nfrom google.auth import transport\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass Response(transport.Response):\n \"\"\"http.client transport response adapter.\n\n Args:\n response (http.client.HTTPResponse): The raw http client response.\n \"\"\"\n def __init__(self, response):\n self._status = response.status\n self._headers = {\n key.lower(): value for key, value in response.getheaders()}\n self._data = response.read()\n\n @property\n def status(self):\n return self._status\n\n @property\n def headers(self):\n return self._headers\n\n @property\n def data(self):\n return self._data\n\n\nclass Request(transport.Request):\n \"\"\"http.client transport request adapter.\"\"\"\n\n def __call__(self, url, method='GET', body=None, headers=None,\n timeout=None, **kwargs):\n \"\"\"Make an HTTP request using http.client.\n\n Args:\n url (str): The URI to be requested.\n method (str): The HTTP method to use for the request. 
Defaults\n to 'GET'.\n body (bytes): The payload / body in HTTP request.\n headers (Mapping): Request headers.\n timeout (Optional(int)): The number of seconds to wait for a\n response from the server. If not specified or if None, the\n socket global default timeout will be used.\n kwargs: Additional arguments passed throught to the underlying\n :meth:`~http.client.HTTPConnection.request` method.\n\n Returns:\n Response: The HTTP response.\n\n Raises:\n google.auth.exceptions.TransportError: If any exception occurred.\n \"\"\"\n # socket._GLOBAL_DEFAULT_TIMEOUT is the default in http.client.\n if timeout is None:\n timeout = socket._GLOBAL_DEFAULT_TIMEOUT\n\n # http.client doesn't allow None as the headers argument.\n if headers is None:\n headers = {}\n\n # http.client needs the host and path parts specified separately.\n parts = urllib.parse.urlsplit(url)\n path = urllib.parse.urlunsplit(\n ('', '', parts.path, parts.query, parts.fragment))\n\n if parts.scheme != 'http':\n raise exceptions.TransportError(\n 'http.client transport only supports the http scheme, {}'\n 'was specified'.format(parts.scheme))\n\n connection = http_client.HTTPConnection(parts.netloc)\n\n try:\n _LOGGER.debug('Making request: %s %s', method, url)\n\n connection.request(\n method, path, body=body, headers=headers, **kwargs)\n response = connection.getresponse()\n return Response(response)\n\n except (http_client.HTTPException, socket.error) as exc:\n raise exceptions.TransportError(exc)\n\n finally:\n connection.close()\n", "path": "google/auth/transport/_http_client.py"}], "after_files": [{"content": "# Copyright 2016 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Transport adapter for http.client, for internal use only.\"\"\"\n\nimport logging\nimport socket\n\nfrom six.moves import http_client\nfrom six.moves import urllib\n\nfrom google.auth import exceptions\nfrom google.auth import transport\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass Response(transport.Response):\n \"\"\"http.client transport response adapter.\n\n Args:\n response (http.client.HTTPResponse): The raw http client response.\n \"\"\"\n def __init__(self, response):\n self._status = response.status\n self._headers = {\n key.lower(): value for key, value in response.getheaders()}\n self._data = response.read()\n\n @property\n def status(self):\n return self._status\n\n @property\n def headers(self):\n return self._headers\n\n @property\n def data(self):\n return self._data\n\n\nclass Request(transport.Request):\n \"\"\"http.client transport request adapter.\"\"\"\n\n def __call__(self, url, method='GET', body=None, headers=None,\n timeout=None, **kwargs):\n \"\"\"Make an HTTP request using http.client.\n\n Args:\n url (str): The URI to be requested.\n method (str): The HTTP method to use for the request. Defaults\n to 'GET'.\n body (bytes): The payload / body in HTTP request.\n headers (Mapping): Request headers.\n timeout (Optional(int)): The number of seconds to wait for a\n response from the server. 
If not specified or if None, the\n socket global default timeout will be used.\n kwargs: Additional arguments passed throught to the underlying\n :meth:`~http.client.HTTPConnection.request` method.\n\n Returns:\n Response: The HTTP response.\n\n Raises:\n google.auth.exceptions.TransportError: If any exception occurred.\n \"\"\"\n # socket._GLOBAL_DEFAULT_TIMEOUT is the default in http.client.\n if timeout is None:\n timeout = socket._GLOBAL_DEFAULT_TIMEOUT\n\n # http.client doesn't allow None as the headers argument.\n if headers is None:\n headers = {}\n\n # http.client needs the host and path parts specified separately.\n parts = urllib.parse.urlsplit(url)\n path = urllib.parse.urlunsplit(\n ('', '', parts.path, parts.query, parts.fragment))\n\n if parts.scheme != 'http':\n raise exceptions.TransportError(\n 'http.client transport only supports the http scheme, {}'\n 'was specified'.format(parts.scheme))\n\n connection = http_client.HTTPConnection(parts.netloc, timeout=timeout)\n\n try:\n _LOGGER.debug('Making request: %s %s', method, url)\n\n connection.request(\n method, path, body=body, headers=headers, **kwargs)\n response = connection.getresponse()\n return Response(response)\n\n except (http_client.HTTPException, socket.error) as exc:\n raise exceptions.TransportError(exc)\n\n finally:\n connection.close()\n", "path": "google/auth/transport/_http_client.py"}]}
1,503
128
gh_patches_debug_4041
rasdani/github-patches
git_diff
electricitymaps__electricitymaps-contrib-1208
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Moldova repeated ValueError Seen on the Kibana dashboard. [Logger](https://kibana.electricitymap.org/app/kibana#/discover/1710fdd0-2460-11e8-a779-9d01de8d7a71?_g=(refreshInterval:('$$hashKey':'object:6765',display:'10%20seconds',pause:!f,section:1,value:10000),time:(from:'2018-03-10T00:00:00.000Z',mode:absolute,to:'2018-03-12T10:40:25.571Z'))&_a=(columns:!(level,extra.path,message),filters:!(('$state':(store:appState),exists:(field:level),meta:(alias:!n,disabled:!f,index:'93e631f0-245f-11e8-a779-9d01de8d7a71',key:level,negate:!f,type:exists,value:exists)),('$state':(store:appState),meta:(alias:!n,disabled:!f,index:'93e631f0-245f-11e8-a779-9d01de8d7a71',key:level,negate:!t,params:(query:INFO,type:phrase),type:phrase,value:INFO),query:(match:(level:(query:INFO,type:phrase)))),('$state':(store:appState),meta:(alias:!n,disabled:!f,index:'93e631f0-245f-11e8-a779-9d01de8d7a71',key:extra.key,negate:!f,params:(query:MD,type:phrase),type:phrase,value:MD),query:(match:(extra.key:(query:MD,type:phrase))))),index:'93e631f0-245f-11e8-a779-9d01de8d7a71',interval:auto,query:(language:lucene,query:''),sort:!('@timestamp',desc))) ``` Traceback (most recent call last): File "feeder_electricity.py", line 176, in fetch_exchange objs = parser(country_code1, country_code2, session, logger=public_logger) File "/home/electricitymap/parsers/MD.py", line 113, in fetch_exchange exchange_status = get_data(session=session) File "/home/electricitymap/parsers/MD.py", line 31, in get_data data = [float(i) for i in raw_data.split(',')] File "/home/electricitymap/parsers/MD.py", line 31, in <listcomp> data = [float(i) for i in raw_data.split(',')] ValueError: could not convert string to float: ``` ``` Traceback (most recent call last): File "feeder_electricity.py", line 148, in fetch_production objs = parser(country_code, session, logger=public_logger) File "/home/electricitymap/parsers/MD.py", line 69, in fetch_production grid_status = get_data(session=session) File "/home/electricitymap/parsers/MD.py", line 31, in get_data data = [float(i) for i in raw_data.split(',')] File "/home/electricitymap/parsers/MD.py", line 31, in <listcomp> data = [float(i) for i in raw_data.split(',')] ValueError: could not convert string to float: ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `parsers/MD.py` Content: ``` 1 #!/usr/bin/env python3 2 # coding=utf-8 3 4 """Parser for Moldova.""" 5 6 import arrow 7 from operator import itemgetter 8 import requests 9 10 TYPE_MAPPING = { 11 u'tmva476': 'hydro', # NHE Costeşti (run-of-river) #2 index 12 u'tmva112': 'hydro', # NHE Dubăsari (run-of-river) #4 index 13 u'tmva367': 'gas', # CET Nord (CHPP) #3 index 14 u'tmva42': 'gas', # CET-1 Chişinău (CHPP) #6 index 15 u'tmva378': 'gas', # CET-2 Chişinău (CHPP) #5 index 16 u'tmva1024': 'unknown', # CERS Moldovenească (fuel mix coal, gas, oil) #7 index 17 } 18 19 display_url = 'http://www.moldelectrica.md/ro/activity/system_state' 20 data_url = 'http://www.moldelectrica.md/utils/load4' 21 22 23 def get_data(session=None): 24 """ Returns generation data as a list of floats.""" 25 26 s = session or requests.Session() 27 28 data_response = s.get(data_url) 29 raw_data = data_response.text 30 31 data = [float(i) for i in raw_data.split(',')] 32 33 return data 34 35 36 def fetch_production(zone_key='MD', session=None, target_datetime=None, logger=None): 37 """Requests the last known production mix (in MW) of a given country 38 39 Arguments: 40 zone_key (optional) -- used in case a parser is able to fetch multiple countries 41 session (optional) -- request session passed in order to re-use an existing session 42 43 Return: 44 A dictionary in the form: 45 { 46 'zoneKey': 'FR', 47 'datetime': '2017-01-01T00:00:00Z', 48 'production': { 49 'biomass': 0.0, 50 'coal': 0.0, 51 'gas': 0.0, 52 'hydro': 0.0, 53 'nuclear': null, 54 'oil': 0.0, 55 'solar': 0.0, 56 'wind': 0.0, 57 'geothermal': 0.0, 58 'unknown': 0.0 59 }, 60 'storage': { 61 'hydro': -10.0, 62 }, 63 'source': 'mysource.com' 64 } 65 """ 66 if target_datetime: 67 raise NotImplementedError('This parser is not yet able to parse past dates') 68 69 grid_status = get_data(session=session) 70 production = {'solar': None, 'wind': None, 'biomass': None, 'nuclear': 0.0} 71 72 production['gas'] = sum(itemgetter(3, 5, 6)(grid_status)) 73 production['hydro'] = sum(itemgetter(2, 4)(grid_status)) 74 production['unknown'] = grid_status[7] 75 76 consumption = grid_status[-5] 77 78 dt = arrow.now('Europe/Chisinau').datetime 79 80 datapoint = { 81 'zoneKey': zone_key, 82 'datetime': dt, 83 'consumption': consumption, 84 'production': production, 85 'storage': {}, 86 'source': 'moldelectrica.md' 87 } 88 89 return datapoint 90 91 92 def fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None, logger=None): 93 """Requests the last known power exchange (in MW) between two countries 94 Arguments: 95 zone_key1 -- the first country code 96 zone_key2 -- the second country code; order of the two codes in params doesn't matter 97 session (optional) -- request session passed in order to re-use an existing session 98 Return: 99 A dictionary in the form: 100 { 101 'sortedZoneKeys': 'DK->NO', 102 'datetime': '2017-01-01T00:00:00Z', 103 'netFlow': 0.0, 104 'source': 'mysource.com' 105 } 106 where net flow is from DK into NO 107 """ 108 if target_datetime: 109 raise NotImplementedError('This parser is not yet able to parse past dates') 110 111 sortedZoneKeys = '->'.join(sorted([zone_key1, zone_key2])) 112 113 exchange_status = get_data(session=session) 114 115 if sortedZoneKeys == 'MD->UA': 116 netflow = -1 * exchange_status[-3] 117 elif sortedZoneKeys == 'MD->RO': 118 netflow = -1 * exchange_status[-2] 119 else: 120 raise NotImplementedError('This exchange pair is not implemented') 121 122 dt = arrow.now('Europe/Chisinau').datetime 123 
124 exchange = { 125 'sortedZoneKeys': sortedZoneKeys, 126 'datetime': dt, 127 'netFlow': netflow, 128 'source': 'moldelectrica.md' 129 } 130 131 return exchange 132 133 134 if __name__ == '__main__': 135 """Main method, never used by the Electricity Map backend, but handy for testing.""" 136 137 print('fetch_production() ->') 138 print(fetch_production()) 139 print('fetch_exchange(MD, UA) ->') 140 print(fetch_exchange('MD', 'UA')) 141 print('fetch_exchange(MD, RO) ->') 142 print(fetch_exchange('MD', 'RO')) 143 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/parsers/MD.py b/parsers/MD.py --- a/parsers/MD.py +++ b/parsers/MD.py @@ -25,9 +25,10 @@ s = session or requests.Session() + #In order for the data url to return data, cookies from the display url must be obtained then reused. + response = s.get(display_url) data_response = s.get(data_url) raw_data = data_response.text - data = [float(i) for i in raw_data.split(',')] return data
{"golden_diff": "diff --git a/parsers/MD.py b/parsers/MD.py\n--- a/parsers/MD.py\n+++ b/parsers/MD.py\n@@ -25,9 +25,10 @@\n \n s = session or requests.Session()\n \n+ #In order for the data url to return data, cookies from the display url must be obtained then reused.\n+ response = s.get(display_url)\n data_response = s.get(data_url)\n raw_data = data_response.text\n-\n data = [float(i) for i in raw_data.split(',')]\n \n return data\n", "issue": "Moldova repeated ValueError\nSeen on the Kibana dashboard.\r\n\r\n[Logger](https://kibana.electricitymap.org/app/kibana#/discover/1710fdd0-2460-11e8-a779-9d01de8d7a71?_g=(refreshInterval:('$$hashKey':'object:6765',display:'10%20seconds',pause:!f,section:1,value:10000),time:(from:'2018-03-10T00:00:00.000Z',mode:absolute,to:'2018-03-12T10:40:25.571Z'))&_a=(columns:!(level,extra.path,message),filters:!(('$state':(store:appState),exists:(field:level),meta:(alias:!n,disabled:!f,index:'93e631f0-245f-11e8-a779-9d01de8d7a71',key:level,negate:!f,type:exists,value:exists)),('$state':(store:appState),meta:(alias:!n,disabled:!f,index:'93e631f0-245f-11e8-a779-9d01de8d7a71',key:level,negate:!t,params:(query:INFO,type:phrase),type:phrase,value:INFO),query:(match:(level:(query:INFO,type:phrase)))),('$state':(store:appState),meta:(alias:!n,disabled:!f,index:'93e631f0-245f-11e8-a779-9d01de8d7a71',key:extra.key,negate:!f,params:(query:MD,type:phrase),type:phrase,value:MD),query:(match:(extra.key:(query:MD,type:phrase))))),index:'93e631f0-245f-11e8-a779-9d01de8d7a71',interval:auto,query:(language:lucene,query:''),sort:!('@timestamp',desc)))\r\n```\r\nTraceback (most recent call last):\r\n File \"feeder_electricity.py\", line 176, in fetch_exchange\r\n objs = parser(country_code1, country_code2, session, logger=public_logger)\r\n File \"/home/electricitymap/parsers/MD.py\", line 113, in fetch_exchange\r\n exchange_status = get_data(session=session)\r\n File \"/home/electricitymap/parsers/MD.py\", line 31, in get_data\r\n data = [float(i) for i in raw_data.split(',')]\r\n File \"/home/electricitymap/parsers/MD.py\", line 31, in <listcomp>\r\n data = [float(i) for i in raw_data.split(',')]\r\nValueError: could not convert string to float:\r\n```\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"feeder_electricity.py\", line 148, in fetch_production\r\n objs = parser(country_code, session, logger=public_logger)\r\n File \"/home/electricitymap/parsers/MD.py\", line 69, in fetch_production\r\n grid_status = get_data(session=session)\r\n File \"/home/electricitymap/parsers/MD.py\", line 31, in get_data\r\n data = [float(i) for i in raw_data.split(',')]\r\n File \"/home/electricitymap/parsers/MD.py\", line 31, in <listcomp>\r\n data = [float(i) for i in raw_data.split(',')]\r\nValueError: could not convert string to float:\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python3\n# coding=utf-8\n\n\"\"\"Parser for Moldova.\"\"\"\n\nimport arrow\nfrom operator import itemgetter\nimport requests\n\nTYPE_MAPPING = {\n u'tmva476': 'hydro', # NHE Coste\u015fti (run-of-river) #2 index\n u'tmva112': 'hydro', # NHE Dub\u0103sari (run-of-river) #4 index\n u'tmva367': 'gas', # CET Nord (CHPP) #3 index\n u'tmva42': 'gas', # CET-1 Chi\u015fin\u0103u (CHPP) #6 index\n u'tmva378': 'gas', # CET-2 Chi\u015fin\u0103u (CHPP) #5 index\n u'tmva1024': 'unknown', # CERS Moldoveneasc\u0103 (fuel mix coal, gas, oil) #7 index\n}\n\ndisplay_url = 'http://www.moldelectrica.md/ro/activity/system_state'\ndata_url = 'http://www.moldelectrica.md/utils/load4'\n\n\ndef 
get_data(session=None):\n \"\"\" Returns generation data as a list of floats.\"\"\"\n\n s = session or requests.Session()\n\n data_response = s.get(data_url)\n raw_data = data_response.text\n\n data = [float(i) for i in raw_data.split(',')]\n\n return data\n\n\ndef fetch_production(zone_key='MD', session=None, target_datetime=None, logger=None):\n \"\"\"Requests the last known production mix (in MW) of a given country\n\n Arguments:\n zone_key (optional) -- used in case a parser is able to fetch multiple countries\n session (optional) -- request session passed in order to re-use an existing session\n\n Return:\n A dictionary in the form:\n {\n 'zoneKey': 'FR',\n 'datetime': '2017-01-01T00:00:00Z',\n 'production': {\n 'biomass': 0.0,\n 'coal': 0.0,\n 'gas': 0.0,\n 'hydro': 0.0,\n 'nuclear': null,\n 'oil': 0.0,\n 'solar': 0.0,\n 'wind': 0.0,\n 'geothermal': 0.0,\n 'unknown': 0.0\n },\n 'storage': {\n 'hydro': -10.0,\n },\n 'source': 'mysource.com'\n }\n \"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n grid_status = get_data(session=session)\n production = {'solar': None, 'wind': None, 'biomass': None, 'nuclear': 0.0}\n\n production['gas'] = sum(itemgetter(3, 5, 6)(grid_status))\n production['hydro'] = sum(itemgetter(2, 4)(grid_status))\n production['unknown'] = grid_status[7]\n\n consumption = grid_status[-5]\n\n dt = arrow.now('Europe/Chisinau').datetime\n\n datapoint = {\n 'zoneKey': zone_key,\n 'datetime': dt,\n 'consumption': consumption,\n 'production': production,\n 'storage': {},\n 'source': 'moldelectrica.md'\n }\n\n return datapoint\n\n\ndef fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None, logger=None):\n \"\"\"Requests the last known power exchange (in MW) between two countries\n Arguments:\n zone_key1 -- the first country code\n zone_key2 -- the second country code; order of the two codes in params doesn't matter\n session (optional) -- request session passed in order to re-use an existing session\n Return:\n A dictionary in the form:\n {\n 'sortedZoneKeys': 'DK->NO',\n 'datetime': '2017-01-01T00:00:00Z',\n 'netFlow': 0.0,\n 'source': 'mysource.com'\n }\n where net flow is from DK into NO\n \"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n sortedZoneKeys = '->'.join(sorted([zone_key1, zone_key2]))\n\n exchange_status = get_data(session=session)\n\n if sortedZoneKeys == 'MD->UA':\n netflow = -1 * exchange_status[-3]\n elif sortedZoneKeys == 'MD->RO':\n netflow = -1 * exchange_status[-2]\n else:\n raise NotImplementedError('This exchange pair is not implemented')\n\n dt = arrow.now('Europe/Chisinau').datetime\n\n exchange = {\n 'sortedZoneKeys': sortedZoneKeys,\n 'datetime': dt,\n 'netFlow': netflow,\n 'source': 'moldelectrica.md'\n }\n\n return exchange\n\n\nif __name__ == '__main__':\n \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n\n print('fetch_production() ->')\n print(fetch_production())\n print('fetch_exchange(MD, UA) ->')\n print(fetch_exchange('MD', 'UA'))\n print('fetch_exchange(MD, RO) ->')\n print(fetch_exchange('MD', 'RO'))\n", "path": "parsers/MD.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n# coding=utf-8\n\n\"\"\"Parser for Moldova.\"\"\"\n\nimport arrow\nfrom operator import itemgetter\nimport requests\n\nTYPE_MAPPING = {\n u'tmva476': 'hydro', # NHE Coste\u015fti (run-of-river) #2 index\n u'tmva112': 'hydro', # NHE Dub\u0103sari (run-of-river) #4 index\n 
u'tmva367': 'gas', # CET Nord (CHPP) #3 index\n u'tmva42': 'gas', # CET-1 Chi\u015fin\u0103u (CHPP) #6 index\n u'tmva378': 'gas', # CET-2 Chi\u015fin\u0103u (CHPP) #5 index\n u'tmva1024': 'unknown', # CERS Moldoveneasc\u0103 (fuel mix coal, gas, oil) #7 index\n}\n\ndisplay_url = 'http://www.moldelectrica.md/ro/activity/system_state'\ndata_url = 'http://www.moldelectrica.md/utils/load4'\n\n\ndef get_data(session=None):\n \"\"\" Returns generation data as a list of floats.\"\"\"\n\n s = session or requests.Session()\n\n #In order for the data url to return data, cookies from the display url must be obtained then reused.\n response = s.get(display_url)\n data_response = s.get(data_url)\n raw_data = data_response.text\n data = [float(i) for i in raw_data.split(',')]\n\n return data\n\n\ndef fetch_production(zone_key='MD', session=None, target_datetime=None, logger=None):\n \"\"\"Requests the last known production mix (in MW) of a given country\n\n Arguments:\n zone_key (optional) -- used in case a parser is able to fetch multiple countries\n session (optional) -- request session passed in order to re-use an existing session\n\n Return:\n A dictionary in the form:\n {\n 'zoneKey': 'FR',\n 'datetime': '2017-01-01T00:00:00Z',\n 'production': {\n 'biomass': 0.0,\n 'coal': 0.0,\n 'gas': 0.0,\n 'hydro': 0.0,\n 'nuclear': null,\n 'oil': 0.0,\n 'solar': 0.0,\n 'wind': 0.0,\n 'geothermal': 0.0,\n 'unknown': 0.0\n },\n 'storage': {\n 'hydro': -10.0,\n },\n 'source': 'mysource.com'\n }\n \"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n grid_status = get_data(session=session)\n production = {'solar': None, 'wind': None, 'biomass': None, 'nuclear': 0.0}\n\n production['gas'] = sum(itemgetter(3, 5, 6)(grid_status))\n production['hydro'] = sum(itemgetter(2, 4)(grid_status))\n production['unknown'] = grid_status[7]\n\n consumption = grid_status[-5]\n\n dt = arrow.now('Europe/Chisinau').datetime\n\n datapoint = {\n 'zoneKey': zone_key,\n 'datetime': dt,\n 'consumption': consumption,\n 'production': production,\n 'storage': {},\n 'source': 'moldelectrica.md'\n }\n\n return datapoint\n\n\ndef fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None, logger=None):\n \"\"\"Requests the last known power exchange (in MW) between two countries\n Arguments:\n zone_key1 -- the first country code\n zone_key2 -- the second country code; order of the two codes in params doesn't matter\n session (optional) -- request session passed in order to re-use an existing session\n Return:\n A dictionary in the form:\n {\n 'sortedZoneKeys': 'DK->NO',\n 'datetime': '2017-01-01T00:00:00Z',\n 'netFlow': 0.0,\n 'source': 'mysource.com'\n }\n where net flow is from DK into NO\n \"\"\"\n if target_datetime:\n raise NotImplementedError('This parser is not yet able to parse past dates')\n\n sortedZoneKeys = '->'.join(sorted([zone_key1, zone_key2]))\n\n exchange_status = get_data(session=session)\n\n if sortedZoneKeys == 'MD->UA':\n netflow = -1 * exchange_status[-3]\n elif sortedZoneKeys == 'MD->RO':\n netflow = -1 * exchange_status[-2]\n else:\n raise NotImplementedError('This exchange pair is not implemented')\n\n dt = arrow.now('Europe/Chisinau').datetime\n\n exchange = {\n 'sortedZoneKeys': sortedZoneKeys,\n 'datetime': dt,\n 'netFlow': netflow,\n 'source': 'moldelectrica.md'\n }\n\n return exchange\n\n\nif __name__ == '__main__':\n \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n\n print('fetch_production() ->')\n 
print(fetch_production())\n print('fetch_exchange(MD, UA) ->')\n print(fetch_exchange('MD', 'UA'))\n print('fetch_exchange(MD, RO) ->')\n print(fetch_exchange('MD', 'RO'))\n", "path": "parsers/MD.py"}]}
2,623
126
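The fix captured in the Moldova-parser record above boils down to priming the `requests` session with cookies from the display page before hitting the data endpoint. A minimal, self-contained sketch of that pattern is shown below; the endpoints are taken from the record itself, while the function name `get_generation_data` is illustrative rather than the parser's own.

```python
import requests

# Endpoints taken from the record above; the data URL only returns values
# once the session carries cookies issued by the display URL (per the issue).
DISPLAY_URL = "http://www.moldelectrica.md/ro/activity/system_state"
DATA_URL = "http://www.moldelectrica.md/utils/load4"


def get_generation_data(session=None):
    """Return generation data as a list of floats.

    The display page is requested first purely to obtain cookies; its body
    is discarded. The same session is then reused for the data request so
    that float() no longer sees an empty response.
    """
    s = session or requests.Session()

    # Prime the session with cookies from the display page.
    s.get(DISPLAY_URL)

    raw = s.get(DATA_URL).text
    return [float(value) for value in raw.split(",")]
```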
gh_patches_debug_29610
rasdani/github-patches
git_diff
rasterio__rasterio-2779
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- DEP: python, numpy, GDAL versions for rasterio 1.4 Related #2064 - Python 3.9+ (https://numpy.org/neps/nep-0029-deprecation_policy.html) - Numpy 1.21+ - GDAL 3.3+ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 #!/usr/bin/env python 2 3 # Two environmental variables influence this script. 4 # 5 # GDAL_CONFIG: the path to a gdal-config program that points to GDAL headers, 6 # libraries, and data files. 7 # 8 # PACKAGE_DATA: if defined, GDAL and PROJ4 data files will be copied into the 9 # source or binary distribution. This is essential when creating self-contained 10 # binary wheels. 11 12 import copy 13 import itertools 14 import logging 15 import os 16 import platform 17 import pprint 18 import shutil 19 from subprocess import check_output 20 import sys 21 22 from pkg_resources import parse_version 23 from setuptools import setup 24 from setuptools.extension import Extension 25 26 logging.basicConfig(stream=sys.stderr, level=logging.INFO) 27 log = logging.getLogger() 28 29 30 def copy_data_tree(datadir, destdir): 31 try: 32 shutil.rmtree(destdir) 33 except OSError: 34 pass 35 shutil.copytree(datadir, destdir) 36 37 38 # python -W all setup.py ... 39 if "all" in sys.warnoptions: 40 log.level = logging.DEBUG 41 42 # Parse the version from the rasterio module. 43 with open("rasterio/__init__.py") as f: 44 for line in f: 45 if line.find("__version__") >= 0: 46 version = line.split("=")[1].strip() 47 version = version.strip('"') 48 version = version.strip("'") 49 continue 50 51 with open("VERSION.txt", "w") as f: 52 f.write(version) 53 54 # Use Cython if available. 55 try: 56 from Cython.Build import cythonize 57 except ImportError: 58 raise SystemExit( 59 "ERROR: Cython.Build.cythonize not found. " 60 "Cython is required to build rasterio.") 61 62 # By default we'll try to get options via gdal-config. On systems without, 63 # options will need to be set in setup.cfg or on the setup command line. 64 include_dirs = [] 65 library_dirs = [] 66 libraries = [] 67 extra_link_args = [] 68 gdal2plus = False 69 gdal_output = [None] * 4 70 gdalversion = None 71 gdal_major_version = 0 72 gdal_minor_version = 0 73 gdal_patch_version = 0 74 75 try: 76 import numpy as np 77 78 include_dirs.append(np.get_include()) 79 except ImportError: 80 raise SystemExit("ERROR: Numpy and its headers are required to run setup().") 81 82 if "clean" not in sys.argv: 83 try: 84 gdal_config = os.environ.get('GDAL_CONFIG', 'gdal-config') 85 for i, flag in enumerate(("--cflags", "--libs", "--datadir", "--version")): 86 gdal_output[i] = check_output([gdal_config, flag]).decode("utf-8").strip() 87 88 for item in gdal_output[0].split(): 89 if item.startswith("-I"): 90 include_dirs.extend(item[2:].split(":")) 91 for item in gdal_output[1].split(): 92 if item.startswith("-L"): 93 library_dirs.extend(item[2:].split(":")) 94 elif item.startswith("-l"): 95 libraries.append(item[2:]) 96 else: 97 # e.g. -framework GDAL 98 extra_link_args.append(item) 99 # datadir, gdal_output[2] handled below 100 101 gdalversion = gdal_output[3] 102 if gdalversion: 103 log.info("GDAL API version obtained from gdal-config: %s", 104 gdalversion) 105 106 except Exception as e: 107 if os.name == "nt": 108 log.info("Building on Windows requires extra options to setup.py " 109 "to locate needed GDAL files. 
More information is available " 110 "in the README.") 111 else: 112 log.warning("Failed to get options via gdal-config: %s", str(e)) 113 114 # Get GDAL API version from environment variable. 115 if 'GDAL_VERSION' in os.environ: 116 gdalversion = os.environ['GDAL_VERSION'] 117 log.info("GDAL API version obtained from environment: %s", gdalversion) 118 119 # Get GDAL API version from the command line if specified there. 120 if '--gdalversion' in sys.argv: 121 index = sys.argv.index('--gdalversion') 122 sys.argv.pop(index) 123 gdalversion = sys.argv.pop(index) 124 log.info("GDAL API version obtained from command line option: %s", 125 gdalversion) 126 127 if not gdalversion: 128 raise SystemExit("ERROR: A GDAL API version must be specified. Provide a path " 129 "to gdal-config using a GDAL_CONFIG environment variable " 130 "or use a GDAL_VERSION environment variable.") 131 132 gdal_major_version, gdal_minor_version, gdal_patch_version = parse_version( 133 gdalversion 134 ).base_version.split(".", maxsplit=3) 135 gdal_major_version = int(gdal_major_version) 136 gdal_minor_version = int(gdal_minor_version) 137 gdal_patch_version = int(gdal_patch_version) 138 139 if (gdal_major_version, gdal_minor_version) < (3, 1): 140 raise SystemExit("ERROR: GDAL >= 3.1 is required for rasterio. " 141 "Please upgrade GDAL.") 142 143 # Conditionally copy the GDAL data. To be used in conjunction with 144 # the bdist_wheel command to make self-contained binary wheels. 145 if os.environ.get('PACKAGE_DATA'): 146 destdir = 'rasterio/gdal_data' 147 if gdal_output[2]: 148 log.info("Copying gdal data from %s" % gdal_output[2]) 149 copy_data_tree(gdal_output[2], destdir) 150 else: 151 # check to see if GDAL_DATA is defined 152 gdal_data = os.environ.get('GDAL_DATA', None) 153 if gdal_data: 154 log.info("Copying gdal_data from %s" % gdal_data) 155 copy_data_tree(gdal_data, destdir) 156 157 # Conditionally copy PROJ DATA. 158 projdatadir = os.environ.get('PROJ_DATA', os.environ.get('PROJ_LIB', '/usr/local/share/proj')) 159 if os.path.exists(projdatadir): 160 log.info("Copying proj_data from %s" % projdatadir) 161 copy_data_tree(projdatadir, 'rasterio/proj_data') 162 163 compile_time_env = { 164 "CTE_GDAL_MAJOR_VERSION": gdal_major_version, 165 "CTE_GDAL_MINOR_VERSION": gdal_minor_version, 166 "CTE_GDAL_PATCH_VERSION": gdal_patch_version, 167 } 168 169 ext_options = { 170 'include_dirs': include_dirs, 171 'library_dirs': library_dirs, 172 'libraries': libraries, 173 'extra_link_args': extra_link_args, 174 'define_macros': [], 175 'cython_compile_time_env': compile_time_env 176 } 177 178 if not os.name == "nt": 179 # These options fail on Windows if using Visual Studio 180 ext_options['extra_compile_args'] = ['-Wno-unused-parameter', 181 '-Wno-unused-function'] 182 183 # Copy extension options for cpp extension modules. 184 cpp_ext_options = copy.deepcopy(ext_options) 185 186 # Remove -std=c++11 from C extension options. 
187 try: 188 ext_options['extra_link_args'].remove('-std=c++11') 189 ext_options['extra_compile_args'].remove('-std=c++11') 190 except Exception: 191 pass 192 193 # GDAL 2.3 and newer requires C++11 194 if (gdal_major_version, gdal_minor_version) >= (2, 3): 195 cpp11_flag = '-std=c++11' 196 197 # 'extra_compile_args' may not be defined 198 eca = cpp_ext_options.get('extra_compile_args', []) 199 200 if platform.system() == 'Darwin': 201 202 if cpp11_flag not in eca: 203 eca.append(cpp11_flag) 204 205 eca += [cpp11_flag, '-mmacosx-version-min=10.9', '-stdlib=libc++'] 206 207 # TODO: Windows 208 209 elif cpp11_flag not in eca: 210 eca.append(cpp11_flag) 211 212 cpp_ext_options['extra_compile_args'] = eca 213 214 # Configure optional Cython coverage. 215 cythonize_options = {"language_level": sys.version_info[0]} 216 if os.environ.get('CYTHON_COVERAGE'): 217 cythonize_options['compiler_directives'] = {'linetrace': True} 218 cythonize_options['annotate'] = True 219 ext_options['define_macros'].extend( 220 [('CYTHON_TRACE', '1'), ('CYTHON_TRACE_NOGIL', '1')]) 221 222 log.debug('ext_options:\n%s', pprint.pformat(ext_options)) 223 224 ext_modules = None 225 if "clean" not in sys.argv: 226 extensions = [ 227 Extension( 228 'rasterio._base', ['rasterio/_base.pyx'], **ext_options), 229 Extension( 230 'rasterio._io', ['rasterio/_io.pyx'], **ext_options), 231 Extension( 232 'rasterio._features', ['rasterio/_features.pyx'], **ext_options), 233 Extension( 234 'rasterio._env', ['rasterio/_env.pyx'], **ext_options), 235 Extension( 236 'rasterio._warp', ['rasterio/_warp.pyx'], **cpp_ext_options), 237 Extension( 238 'rasterio._fill', ['rasterio/_fill.pyx'], **cpp_ext_options), 239 Extension( 240 'rasterio._err', ['rasterio/_err.pyx'], **ext_options), 241 Extension( 242 'rasterio._example', ['rasterio/_example.pyx'], **ext_options), 243 Extension( 244 'rasterio._version', ['rasterio/_version.pyx'], **ext_options), 245 Extension( 246 'rasterio.crs', ['rasterio/crs.pyx'], **ext_options), 247 Extension( 248 'rasterio.shutil', ['rasterio/shutil.pyx'], **ext_options), 249 Extension( 250 'rasterio._transform', ['rasterio/_transform.pyx'], **ext_options)] 251 if gdal_major_version >= 3: 252 # VSI Plugins are only 3.0+ 253 extensions.append( 254 Extension( 255 'rasterio._filepath', ['rasterio/_filepath.pyx'], **cpp_ext_options)) 256 ext_modules = cythonize( 257 extensions, quiet=True, compile_time_env=compile_time_env, **cythonize_options) 258 259 260 with open("README.rst", encoding="utf-8") as f: 261 readme = f.read() 262 263 # Runtime requirements. 
264 inst_reqs = [ 265 "affine", 266 "attrs", 267 "certifi", 268 "click>=4.0", 269 "cligj>=0.5", 270 "numpy>=1.18", 271 "snuggs>=1.4.1", 272 "click-plugins", 273 "setuptools", 274 ] 275 276 extra_reqs = { 277 "docs": [ 278 "ghp-import", 279 "numpydoc", 280 "sphinx", 281 "sphinx-click", 282 "sphinx-rtd-theme", 283 ], 284 "ipython": ["ipython>=2.0"], 285 "plot": ["matplotlib"], 286 "s3": ["boto3>=1.2.4"], 287 "test": [ 288 "boto3>=1.2.4", 289 "hypothesis", 290 "packaging", 291 "pytest-cov>=2.2.0", 292 "pytest>=2.8.2", 293 "shapely", 294 ], 295 } 296 297 # Add all extra requirements 298 extra_reqs["all"] = list(set(itertools.chain(*extra_reqs.values()))) 299 300 setup_args = dict( 301 name="rasterio", 302 version=version, 303 description="Fast and direct raster I/O for use with Numpy and SciPy", 304 long_description=readme, 305 classifiers=[ 306 "Development Status :: 5 - Production/Stable", 307 "Intended Audience :: Developers", 308 "Intended Audience :: Information Technology", 309 "Intended Audience :: Science/Research", 310 "License :: OSI Approved :: BSD License", 311 "Programming Language :: C", 312 "Programming Language :: Cython", 313 "Programming Language :: Python :: 3.8", 314 "Programming Language :: Python :: 3.9", 315 "Programming Language :: Python :: 3.10", 316 "Programming Language :: Python :: 3", 317 "Topic :: Multimedia :: Graphics :: Graphics Conversion", 318 "Topic :: Scientific/Engineering :: GIS", 319 ], 320 keywords="raster gdal", 321 author="Sean Gillies", 322 author_email="[email protected]", 323 url="https://github.com/rasterio/rasterio", 324 license="BSD", 325 package_dir={"": "."}, 326 packages=["rasterio", "rasterio.rio"], 327 include_package_data=True, 328 ext_modules=ext_modules, 329 zip_safe=False, 330 install_requires=inst_reqs, 331 extras_require=extra_reqs, 332 python_requires=">=3.8", 333 ) 334 335 if os.environ.get('PACKAGE_DATA'): 336 setup_args['package_data'] = {'rasterio': ['gdal_data/*', 'proj_data/*']} 337 338 setup(**setup_args) 339 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -136,8 +136,8 @@ gdal_minor_version = int(gdal_minor_version) gdal_patch_version = int(gdal_patch_version) - if (gdal_major_version, gdal_minor_version) < (3, 1): - raise SystemExit("ERROR: GDAL >= 3.1 is required for rasterio. " + if (gdal_major_version, gdal_minor_version) < (3, 3): + raise SystemExit("ERROR: GDAL >= 3.3 is required for rasterio. " "Please upgrade GDAL.") # Conditionally copy the GDAL data. To be used in conjunction with @@ -267,7 +267,7 @@ "certifi", "click>=4.0", "cligj>=0.5", - "numpy>=1.18", + "numpy>=1.21", "snuggs>=1.4.1", "click-plugins", "setuptools", @@ -310,7 +310,6 @@ "License :: OSI Approved :: BSD License", "Programming Language :: C", "Programming Language :: Cython", - "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3", @@ -329,7 +328,7 @@ zip_safe=False, install_requires=inst_reqs, extras_require=extra_reqs, - python_requires=">=3.8", + python_requires=">=3.9", ) if os.environ.get('PACKAGE_DATA'):
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -136,8 +136,8 @@\n gdal_minor_version = int(gdal_minor_version)\n gdal_patch_version = int(gdal_patch_version)\n \n- if (gdal_major_version, gdal_minor_version) < (3, 1):\n- raise SystemExit(\"ERROR: GDAL >= 3.1 is required for rasterio. \"\n+ if (gdal_major_version, gdal_minor_version) < (3, 3):\n+ raise SystemExit(\"ERROR: GDAL >= 3.3 is required for rasterio. \"\n \"Please upgrade GDAL.\")\n \n # Conditionally copy the GDAL data. To be used in conjunction with\n@@ -267,7 +267,7 @@\n \"certifi\",\n \"click>=4.0\",\n \"cligj>=0.5\",\n- \"numpy>=1.18\",\n+ \"numpy>=1.21\",\n \"snuggs>=1.4.1\",\n \"click-plugins\",\n \"setuptools\",\n@@ -310,7 +310,6 @@\n \"License :: OSI Approved :: BSD License\",\n \"Programming Language :: C\",\n \"Programming Language :: Cython\",\n- \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3\",\n@@ -329,7 +328,7 @@\n zip_safe=False,\n install_requires=inst_reqs,\n extras_require=extra_reqs,\n- python_requires=\">=3.8\",\n+ python_requires=\">=3.9\",\n )\n \n if os.environ.get('PACKAGE_DATA'):\n", "issue": "DEP: python, numpy, GDAL versions for rasterio 1.4\nRelated #2064\r\n\r\n- Python 3.9+ (https://numpy.org/neps/nep-0029-deprecation_policy.html)\r\n- Numpy 1.21+\r\n- GDAL 3.3+\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\n# Two environmental variables influence this script.\n#\n# GDAL_CONFIG: the path to a gdal-config program that points to GDAL headers,\n# libraries, and data files.\n#\n# PACKAGE_DATA: if defined, GDAL and PROJ4 data files will be copied into the\n# source or binary distribution. This is essential when creating self-contained\n# binary wheels.\n\nimport copy\nimport itertools\nimport logging\nimport os\nimport platform\nimport pprint\nimport shutil\nfrom subprocess import check_output\nimport sys\n\nfrom pkg_resources import parse_version\nfrom setuptools import setup\nfrom setuptools.extension import Extension\n\nlogging.basicConfig(stream=sys.stderr, level=logging.INFO)\nlog = logging.getLogger()\n\n\ndef copy_data_tree(datadir, destdir):\n try:\n shutil.rmtree(destdir)\n except OSError:\n pass\n shutil.copytree(datadir, destdir)\n\n\n# python -W all setup.py ...\nif \"all\" in sys.warnoptions:\n log.level = logging.DEBUG\n\n# Parse the version from the rasterio module.\nwith open(\"rasterio/__init__.py\") as f:\n for line in f:\n if line.find(\"__version__\") >= 0:\n version = line.split(\"=\")[1].strip()\n version = version.strip('\"')\n version = version.strip(\"'\")\n continue\n\nwith open(\"VERSION.txt\", \"w\") as f:\n f.write(version)\n\n# Use Cython if available.\ntry:\n from Cython.Build import cythonize\nexcept ImportError:\n raise SystemExit(\n \"ERROR: Cython.Build.cythonize not found. \"\n \"Cython is required to build rasterio.\")\n\n# By default we'll try to get options via gdal-config. 
On systems without,\n# options will need to be set in setup.cfg or on the setup command line.\ninclude_dirs = []\nlibrary_dirs = []\nlibraries = []\nextra_link_args = []\ngdal2plus = False\ngdal_output = [None] * 4\ngdalversion = None\ngdal_major_version = 0\ngdal_minor_version = 0\ngdal_patch_version = 0\n\ntry:\n import numpy as np\n\n include_dirs.append(np.get_include())\nexcept ImportError:\n raise SystemExit(\"ERROR: Numpy and its headers are required to run setup().\")\n\nif \"clean\" not in sys.argv:\n try:\n gdal_config = os.environ.get('GDAL_CONFIG', 'gdal-config')\n for i, flag in enumerate((\"--cflags\", \"--libs\", \"--datadir\", \"--version\")):\n gdal_output[i] = check_output([gdal_config, flag]).decode(\"utf-8\").strip()\n\n for item in gdal_output[0].split():\n if item.startswith(\"-I\"):\n include_dirs.extend(item[2:].split(\":\"))\n for item in gdal_output[1].split():\n if item.startswith(\"-L\"):\n library_dirs.extend(item[2:].split(\":\"))\n elif item.startswith(\"-l\"):\n libraries.append(item[2:])\n else:\n # e.g. -framework GDAL\n extra_link_args.append(item)\n # datadir, gdal_output[2] handled below\n\n gdalversion = gdal_output[3]\n if gdalversion:\n log.info(\"GDAL API version obtained from gdal-config: %s\",\n gdalversion)\n\n except Exception as e:\n if os.name == \"nt\":\n log.info(\"Building on Windows requires extra options to setup.py \"\n \"to locate needed GDAL files. More information is available \"\n \"in the README.\")\n else:\n log.warning(\"Failed to get options via gdal-config: %s\", str(e))\n\n # Get GDAL API version from environment variable.\n if 'GDAL_VERSION' in os.environ:\n gdalversion = os.environ['GDAL_VERSION']\n log.info(\"GDAL API version obtained from environment: %s\", gdalversion)\n\n # Get GDAL API version from the command line if specified there.\n if '--gdalversion' in sys.argv:\n index = sys.argv.index('--gdalversion')\n sys.argv.pop(index)\n gdalversion = sys.argv.pop(index)\n log.info(\"GDAL API version obtained from command line option: %s\",\n gdalversion)\n\n if not gdalversion:\n raise SystemExit(\"ERROR: A GDAL API version must be specified. Provide a path \"\n \"to gdal-config using a GDAL_CONFIG environment variable \"\n \"or use a GDAL_VERSION environment variable.\")\n\n gdal_major_version, gdal_minor_version, gdal_patch_version = parse_version(\n gdalversion\n ).base_version.split(\".\", maxsplit=3)\n gdal_major_version = int(gdal_major_version)\n gdal_minor_version = int(gdal_minor_version)\n gdal_patch_version = int(gdal_patch_version)\n\n if (gdal_major_version, gdal_minor_version) < (3, 1):\n raise SystemExit(\"ERROR: GDAL >= 3.1 is required for rasterio. \"\n \"Please upgrade GDAL.\")\n\n# Conditionally copy the GDAL data. 
To be used in conjunction with\n# the bdist_wheel command to make self-contained binary wheels.\nif os.environ.get('PACKAGE_DATA'):\n destdir = 'rasterio/gdal_data'\n if gdal_output[2]:\n log.info(\"Copying gdal data from %s\" % gdal_output[2])\n copy_data_tree(gdal_output[2], destdir)\n else:\n # check to see if GDAL_DATA is defined\n gdal_data = os.environ.get('GDAL_DATA', None)\n if gdal_data:\n log.info(\"Copying gdal_data from %s\" % gdal_data)\n copy_data_tree(gdal_data, destdir)\n\n # Conditionally copy PROJ DATA.\n projdatadir = os.environ.get('PROJ_DATA', os.environ.get('PROJ_LIB', '/usr/local/share/proj'))\n if os.path.exists(projdatadir):\n log.info(\"Copying proj_data from %s\" % projdatadir)\n copy_data_tree(projdatadir, 'rasterio/proj_data')\n\ncompile_time_env = {\n \"CTE_GDAL_MAJOR_VERSION\": gdal_major_version,\n \"CTE_GDAL_MINOR_VERSION\": gdal_minor_version,\n \"CTE_GDAL_PATCH_VERSION\": gdal_patch_version,\n}\n\next_options = {\n 'include_dirs': include_dirs,\n 'library_dirs': library_dirs,\n 'libraries': libraries,\n 'extra_link_args': extra_link_args,\n 'define_macros': [],\n 'cython_compile_time_env': compile_time_env\n}\n\nif not os.name == \"nt\":\n # These options fail on Windows if using Visual Studio\n ext_options['extra_compile_args'] = ['-Wno-unused-parameter',\n '-Wno-unused-function']\n\n# Copy extension options for cpp extension modules.\ncpp_ext_options = copy.deepcopy(ext_options)\n\n# Remove -std=c++11 from C extension options.\ntry:\n ext_options['extra_link_args'].remove('-std=c++11')\n ext_options['extra_compile_args'].remove('-std=c++11')\nexcept Exception:\n pass\n\n# GDAL 2.3 and newer requires C++11\nif (gdal_major_version, gdal_minor_version) >= (2, 3):\n cpp11_flag = '-std=c++11'\n\n # 'extra_compile_args' may not be defined\n eca = cpp_ext_options.get('extra_compile_args', [])\n\n if platform.system() == 'Darwin':\n\n if cpp11_flag not in eca:\n eca.append(cpp11_flag)\n\n eca += [cpp11_flag, '-mmacosx-version-min=10.9', '-stdlib=libc++']\n\n # TODO: Windows\n\n elif cpp11_flag not in eca:\n eca.append(cpp11_flag)\n\n cpp_ext_options['extra_compile_args'] = eca\n\n# Configure optional Cython coverage.\ncythonize_options = {\"language_level\": sys.version_info[0]}\nif os.environ.get('CYTHON_COVERAGE'):\n cythonize_options['compiler_directives'] = {'linetrace': True}\n cythonize_options['annotate'] = True\n ext_options['define_macros'].extend(\n [('CYTHON_TRACE', '1'), ('CYTHON_TRACE_NOGIL', '1')])\n\nlog.debug('ext_options:\\n%s', pprint.pformat(ext_options))\n\next_modules = None\nif \"clean\" not in sys.argv:\n extensions = [\n Extension(\n 'rasterio._base', ['rasterio/_base.pyx'], **ext_options),\n Extension(\n 'rasterio._io', ['rasterio/_io.pyx'], **ext_options),\n Extension(\n 'rasterio._features', ['rasterio/_features.pyx'], **ext_options),\n Extension(\n 'rasterio._env', ['rasterio/_env.pyx'], **ext_options),\n Extension(\n 'rasterio._warp', ['rasterio/_warp.pyx'], **cpp_ext_options),\n Extension(\n 'rasterio._fill', ['rasterio/_fill.pyx'], **cpp_ext_options),\n Extension(\n 'rasterio._err', ['rasterio/_err.pyx'], **ext_options),\n Extension(\n 'rasterio._example', ['rasterio/_example.pyx'], **ext_options),\n Extension(\n 'rasterio._version', ['rasterio/_version.pyx'], **ext_options),\n Extension(\n 'rasterio.crs', ['rasterio/crs.pyx'], **ext_options),\n Extension(\n 'rasterio.shutil', ['rasterio/shutil.pyx'], **ext_options),\n Extension(\n 'rasterio._transform', ['rasterio/_transform.pyx'], **ext_options)]\n if gdal_major_version >= 3:\n 
# VSI Plugins are only 3.0+\n extensions.append(\n Extension(\n 'rasterio._filepath', ['rasterio/_filepath.pyx'], **cpp_ext_options))\n ext_modules = cythonize(\n extensions, quiet=True, compile_time_env=compile_time_env, **cythonize_options)\n\n\nwith open(\"README.rst\", encoding=\"utf-8\") as f:\n readme = f.read()\n\n# Runtime requirements.\ninst_reqs = [\n \"affine\",\n \"attrs\",\n \"certifi\",\n \"click>=4.0\",\n \"cligj>=0.5\",\n \"numpy>=1.18\",\n \"snuggs>=1.4.1\",\n \"click-plugins\",\n \"setuptools\",\n]\n\nextra_reqs = {\n \"docs\": [\n \"ghp-import\",\n \"numpydoc\",\n \"sphinx\",\n \"sphinx-click\",\n \"sphinx-rtd-theme\",\n ],\n \"ipython\": [\"ipython>=2.0\"],\n \"plot\": [\"matplotlib\"],\n \"s3\": [\"boto3>=1.2.4\"],\n \"test\": [\n \"boto3>=1.2.4\",\n \"hypothesis\",\n \"packaging\",\n \"pytest-cov>=2.2.0\",\n \"pytest>=2.8.2\",\n \"shapely\",\n ],\n}\n\n# Add all extra requirements\nextra_reqs[\"all\"] = list(set(itertools.chain(*extra_reqs.values())))\n\nsetup_args = dict(\n name=\"rasterio\",\n version=version,\n description=\"Fast and direct raster I/O for use with Numpy and SciPy\",\n long_description=readme,\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Information Technology\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: BSD License\",\n \"Programming Language :: C\",\n \"Programming Language :: Cython\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3\",\n \"Topic :: Multimedia :: Graphics :: Graphics Conversion\",\n \"Topic :: Scientific/Engineering :: GIS\",\n ],\n keywords=\"raster gdal\",\n author=\"Sean Gillies\",\n author_email=\"[email protected]\",\n url=\"https://github.com/rasterio/rasterio\",\n license=\"BSD\",\n package_dir={\"\": \".\"},\n packages=[\"rasterio\", \"rasterio.rio\"],\n include_package_data=True,\n ext_modules=ext_modules,\n zip_safe=False,\n install_requires=inst_reqs,\n extras_require=extra_reqs,\n python_requires=\">=3.8\",\n)\n\nif os.environ.get('PACKAGE_DATA'):\n setup_args['package_data'] = {'rasterio': ['gdal_data/*', 'proj_data/*']}\n\nsetup(**setup_args)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\n# Two environmental variables influence this script.\n#\n# GDAL_CONFIG: the path to a gdal-config program that points to GDAL headers,\n# libraries, and data files.\n#\n# PACKAGE_DATA: if defined, GDAL and PROJ4 data files will be copied into the\n# source or binary distribution. 
This is essential when creating self-contained\n# binary wheels.\n\nimport copy\nimport itertools\nimport logging\nimport os\nimport platform\nimport pprint\nimport shutil\nfrom subprocess import check_output\nimport sys\n\nfrom pkg_resources import parse_version\nfrom setuptools import setup\nfrom setuptools.extension import Extension\n\nlogging.basicConfig(stream=sys.stderr, level=logging.INFO)\nlog = logging.getLogger()\n\n\ndef copy_data_tree(datadir, destdir):\n try:\n shutil.rmtree(destdir)\n except OSError:\n pass\n shutil.copytree(datadir, destdir)\n\n\n# python -W all setup.py ...\nif \"all\" in sys.warnoptions:\n log.level = logging.DEBUG\n\n# Parse the version from the rasterio module.\nwith open(\"rasterio/__init__.py\") as f:\n for line in f:\n if line.find(\"__version__\") >= 0:\n version = line.split(\"=\")[1].strip()\n version = version.strip('\"')\n version = version.strip(\"'\")\n continue\n\nwith open(\"VERSION.txt\", \"w\") as f:\n f.write(version)\n\n# Use Cython if available.\ntry:\n from Cython.Build import cythonize\nexcept ImportError:\n raise SystemExit(\n \"ERROR: Cython.Build.cythonize not found. \"\n \"Cython is required to build rasterio.\")\n\n# By default we'll try to get options via gdal-config. On systems without,\n# options will need to be set in setup.cfg or on the setup command line.\ninclude_dirs = []\nlibrary_dirs = []\nlibraries = []\nextra_link_args = []\ngdal2plus = False\ngdal_output = [None] * 4\ngdalversion = None\ngdal_major_version = 0\ngdal_minor_version = 0\ngdal_patch_version = 0\n\ntry:\n import numpy as np\n\n include_dirs.append(np.get_include())\nexcept ImportError:\n raise SystemExit(\"ERROR: Numpy and its headers are required to run setup().\")\n\nif \"clean\" not in sys.argv:\n try:\n gdal_config = os.environ.get('GDAL_CONFIG', 'gdal-config')\n for i, flag in enumerate((\"--cflags\", \"--libs\", \"--datadir\", \"--version\")):\n gdal_output[i] = check_output([gdal_config, flag]).decode(\"utf-8\").strip()\n\n for item in gdal_output[0].split():\n if item.startswith(\"-I\"):\n include_dirs.extend(item[2:].split(\":\"))\n for item in gdal_output[1].split():\n if item.startswith(\"-L\"):\n library_dirs.extend(item[2:].split(\":\"))\n elif item.startswith(\"-l\"):\n libraries.append(item[2:])\n else:\n # e.g. -framework GDAL\n extra_link_args.append(item)\n # datadir, gdal_output[2] handled below\n\n gdalversion = gdal_output[3]\n if gdalversion:\n log.info(\"GDAL API version obtained from gdal-config: %s\",\n gdalversion)\n\n except Exception as e:\n if os.name == \"nt\":\n log.info(\"Building on Windows requires extra options to setup.py \"\n \"to locate needed GDAL files. More information is available \"\n \"in the README.\")\n else:\n log.warning(\"Failed to get options via gdal-config: %s\", str(e))\n\n # Get GDAL API version from environment variable.\n if 'GDAL_VERSION' in os.environ:\n gdalversion = os.environ['GDAL_VERSION']\n log.info(\"GDAL API version obtained from environment: %s\", gdalversion)\n\n # Get GDAL API version from the command line if specified there.\n if '--gdalversion' in sys.argv:\n index = sys.argv.index('--gdalversion')\n sys.argv.pop(index)\n gdalversion = sys.argv.pop(index)\n log.info(\"GDAL API version obtained from command line option: %s\",\n gdalversion)\n\n if not gdalversion:\n raise SystemExit(\"ERROR: A GDAL API version must be specified. 
Provide a path \"\n \"to gdal-config using a GDAL_CONFIG environment variable \"\n \"or use a GDAL_VERSION environment variable.\")\n\n gdal_major_version, gdal_minor_version, gdal_patch_version = parse_version(\n gdalversion\n ).base_version.split(\".\", maxsplit=3)\n gdal_major_version = int(gdal_major_version)\n gdal_minor_version = int(gdal_minor_version)\n gdal_patch_version = int(gdal_patch_version)\n\n if (gdal_major_version, gdal_minor_version) < (3, 3):\n raise SystemExit(\"ERROR: GDAL >= 3.3 is required for rasterio. \"\n \"Please upgrade GDAL.\")\n\n# Conditionally copy the GDAL data. To be used in conjunction with\n# the bdist_wheel command to make self-contained binary wheels.\nif os.environ.get('PACKAGE_DATA'):\n destdir = 'rasterio/gdal_data'\n if gdal_output[2]:\n log.info(\"Copying gdal data from %s\" % gdal_output[2])\n copy_data_tree(gdal_output[2], destdir)\n else:\n # check to see if GDAL_DATA is defined\n gdal_data = os.environ.get('GDAL_DATA', None)\n if gdal_data:\n log.info(\"Copying gdal_data from %s\" % gdal_data)\n copy_data_tree(gdal_data, destdir)\n\n # Conditionally copy PROJ DATA.\n projdatadir = os.environ.get('PROJ_DATA', os.environ.get('PROJ_LIB', '/usr/local/share/proj'))\n if os.path.exists(projdatadir):\n log.info(\"Copying proj_data from %s\" % projdatadir)\n copy_data_tree(projdatadir, 'rasterio/proj_data')\n\ncompile_time_env = {\n \"CTE_GDAL_MAJOR_VERSION\": gdal_major_version,\n \"CTE_GDAL_MINOR_VERSION\": gdal_minor_version,\n \"CTE_GDAL_PATCH_VERSION\": gdal_patch_version,\n}\n\next_options = {\n 'include_dirs': include_dirs,\n 'library_dirs': library_dirs,\n 'libraries': libraries,\n 'extra_link_args': extra_link_args,\n 'define_macros': [],\n 'cython_compile_time_env': compile_time_env\n}\n\nif not os.name == \"nt\":\n # These options fail on Windows if using Visual Studio\n ext_options['extra_compile_args'] = ['-Wno-unused-parameter',\n '-Wno-unused-function']\n\n# Copy extension options for cpp extension modules.\ncpp_ext_options = copy.deepcopy(ext_options)\n\n# Remove -std=c++11 from C extension options.\ntry:\n ext_options['extra_link_args'].remove('-std=c++11')\n ext_options['extra_compile_args'].remove('-std=c++11')\nexcept Exception:\n pass\n\n# GDAL 2.3 and newer requires C++11\nif (gdal_major_version, gdal_minor_version) >= (2, 3):\n cpp11_flag = '-std=c++11'\n\n # 'extra_compile_args' may not be defined\n eca = cpp_ext_options.get('extra_compile_args', [])\n\n if platform.system() == 'Darwin':\n\n if cpp11_flag not in eca:\n eca.append(cpp11_flag)\n\n eca += [cpp11_flag, '-mmacosx-version-min=10.9', '-stdlib=libc++']\n\n # TODO: Windows\n\n elif cpp11_flag not in eca:\n eca.append(cpp11_flag)\n\n cpp_ext_options['extra_compile_args'] = eca\n\n# Configure optional Cython coverage.\ncythonize_options = {\"language_level\": sys.version_info[0]}\nif os.environ.get('CYTHON_COVERAGE'):\n cythonize_options['compiler_directives'] = {'linetrace': True}\n cythonize_options['annotate'] = True\n ext_options['define_macros'].extend(\n [('CYTHON_TRACE', '1'), ('CYTHON_TRACE_NOGIL', '1')])\n\nlog.debug('ext_options:\\n%s', pprint.pformat(ext_options))\n\next_modules = None\nif \"clean\" not in sys.argv:\n extensions = [\n Extension(\n 'rasterio._base', ['rasterio/_base.pyx'], **ext_options),\n Extension(\n 'rasterio._io', ['rasterio/_io.pyx'], **ext_options),\n Extension(\n 'rasterio._features', ['rasterio/_features.pyx'], **ext_options),\n Extension(\n 'rasterio._env', ['rasterio/_env.pyx'], **ext_options),\n Extension(\n 'rasterio._warp', 
['rasterio/_warp.pyx'], **cpp_ext_options),\n Extension(\n 'rasterio._fill', ['rasterio/_fill.pyx'], **cpp_ext_options),\n Extension(\n 'rasterio._err', ['rasterio/_err.pyx'], **ext_options),\n Extension(\n 'rasterio._example', ['rasterio/_example.pyx'], **ext_options),\n Extension(\n 'rasterio._version', ['rasterio/_version.pyx'], **ext_options),\n Extension(\n 'rasterio.crs', ['rasterio/crs.pyx'], **ext_options),\n Extension(\n 'rasterio.shutil', ['rasterio/shutil.pyx'], **ext_options),\n Extension(\n 'rasterio._transform', ['rasterio/_transform.pyx'], **ext_options)]\n if gdal_major_version >= 3:\n # VSI Plugins are only 3.0+\n extensions.append(\n Extension(\n 'rasterio._filepath', ['rasterio/_filepath.pyx'], **cpp_ext_options))\n ext_modules = cythonize(\n extensions, quiet=True, compile_time_env=compile_time_env, **cythonize_options)\n\n\nwith open(\"README.rst\", encoding=\"utf-8\") as f:\n readme = f.read()\n\n# Runtime requirements.\ninst_reqs = [\n \"affine\",\n \"attrs\",\n \"certifi\",\n \"click>=4.0\",\n \"cligj>=0.5\",\n \"numpy>=1.21\",\n \"snuggs>=1.4.1\",\n \"click-plugins\",\n \"setuptools\",\n]\n\nextra_reqs = {\n \"docs\": [\n \"ghp-import\",\n \"numpydoc\",\n \"sphinx\",\n \"sphinx-click\",\n \"sphinx-rtd-theme\",\n ],\n \"ipython\": [\"ipython>=2.0\"],\n \"plot\": [\"matplotlib\"],\n \"s3\": [\"boto3>=1.2.4\"],\n \"test\": [\n \"boto3>=1.2.4\",\n \"hypothesis\",\n \"packaging\",\n \"pytest-cov>=2.2.0\",\n \"pytest>=2.8.2\",\n \"shapely\",\n ],\n}\n\n# Add all extra requirements\nextra_reqs[\"all\"] = list(set(itertools.chain(*extra_reqs.values())))\n\nsetup_args = dict(\n name=\"rasterio\",\n version=version,\n description=\"Fast and direct raster I/O for use with Numpy and SciPy\",\n long_description=readme,\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: Information Technology\",\n \"Intended Audience :: Science/Research\",\n \"License :: OSI Approved :: BSD License\",\n \"Programming Language :: C\",\n \"Programming Language :: Cython\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Programming Language :: Python :: 3\",\n \"Topic :: Multimedia :: Graphics :: Graphics Conversion\",\n \"Topic :: Scientific/Engineering :: GIS\",\n ],\n keywords=\"raster gdal\",\n author=\"Sean Gillies\",\n author_email=\"[email protected]\",\n url=\"https://github.com/rasterio/rasterio\",\n license=\"BSD\",\n package_dir={\"\": \".\"},\n packages=[\"rasterio\", \"rasterio.rio\"],\n include_package_data=True,\n ext_modules=ext_modules,\n zip_safe=False,\n install_requires=inst_reqs,\n extras_require=extra_reqs,\n python_requires=\">=3.9\",\n)\n\nif os.environ.get('PACKAGE_DATA'):\n setup_args['package_data'] = {'rasterio': ['gdal_data/*', 'proj_data/*']}\n\nsetup(**setup_args)\n", "path": "setup.py"}]}
4,060
400
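The rasterio record above is essentially a set of minimum-version bumps in `setup.py` (GDAL 3.3+, NumPy 1.21+, Python 3.9+). A stripped-down sketch of the GDAL gate as it reads after the patch follows; in the real script the `gdalversion` string comes from `gdal-config`, a `GDAL_VERSION` environment variable, or a command-line flag, and the standalone entry point here is only for illustration.

```python
import sys

from pkg_resources import parse_version


def check_gdal_version(gdalversion):
    """Abort setup when the detected GDAL is older than the supported floor."""
    major, minor, patch = parse_version(gdalversion).base_version.split(".", maxsplit=3)
    if (int(major), int(minor)) < (3, 3):
        raise SystemExit(
            "ERROR: GDAL >= 3.3 is required for rasterio. Please upgrade GDAL."
        )
    return int(major), int(minor), int(patch)


if __name__ == "__main__":
    # Example: `python check_gdal.py 3.4.1` prints the parsed version tuple.
    print(check_gdal_version(sys.argv[1] if len(sys.argv) > 1 else "3.3.0"))
```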
gh_patches_debug_11561
rasdani/github-patches
git_diff
iterative__dvc-3675
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Secsh channel 10 open FAILED: open failed: Connect failed **Please provide information about your setup** DVC version(i.e. `dvc --version`), Platform and method of installation (pip, homebrew, pkg Mac, exe (Windows), DEB(Linux), RPM(Linux)) version 0.88.0 installed via pip on Linux and Mac. Syncing to a Ubuntu host via ssh. Everything seems to be working but I get a cryptic warning message every time I do anything. For example: dvc push 0% Querying cache in ssh://[email protected]/media/sda2/dvc/first_day| |0/42 Secsh channel 10 open FAILED: open failed: Connect failed Secsh channel 10 open FAILED: open failed: Connect failed Secsh channel 10 open FAILED: open failed: Connect failed 2% /media/sda2/dvc/first_day/4e/4b31f0c5784a2e185d88a3120cac19| |1/42 [00:02<0Secsh channel 10 open FAILED: open failed: Connect failed Everything is up to date. This is probably an edge case due to my setup but I'm not sure how to quiet the message or resolve the issue. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `dvc/remote/ssh/connection.py` Content: ``` 1 import errno 2 import logging 3 import os 4 import posixpath 5 import stat 6 from contextlib import suppress 7 8 from funcy import cached_property 9 10 try: 11 import paramiko 12 except ImportError: 13 paramiko = None 14 15 from dvc.utils import tmp_fname 16 from dvc.progress import Tqdm 17 from dvc.exceptions import DvcException 18 from dvc.remote.base import RemoteCmdError 19 20 21 logger = logging.getLogger(__name__) 22 23 24 def sizeof_fmt(num, suffix="B"): 25 """ Convert number of bytes to human-readable string """ 26 for unit in ["", "K", "M", "G", "T", "P", "E", "Z"]: 27 if abs(num) < 1024.0: 28 return "%3.1f%s%s" % (num, unit, suffix) 29 num /= 1024.0 30 return "%.1f%s%s" % (num, "Y", suffix) 31 32 33 class SSHConnection: 34 def __init__(self, host, *args, **kwargs): 35 logger.debug( 36 "Establishing ssh connection with '{host}' " 37 "through port '{port}' as user '{username}'".format( 38 host=host, **kwargs 39 ) 40 ) 41 self.timeout = kwargs.get("timeout", 1800) 42 43 self._ssh = paramiko.SSHClient() 44 self._ssh.load_system_host_keys() 45 self._ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) 46 47 self._ssh.connect(host, *args, **kwargs) 48 self._ssh.get_transport().set_keepalive(10) 49 self._sftp_channels = [] 50 51 @property 52 def sftp(self): 53 if not self._sftp_channels: 54 self._sftp_channels = [self._ssh.open_sftp()] 55 return self._sftp_channels[0] 56 57 def close(self): 58 for sftp in self._sftp_channels: 59 sftp.close() 60 self._ssh.close() 61 62 def st_mode(self, path): 63 with suppress(FileNotFoundError): 64 return self.sftp.lstat(path).st_mode 65 66 return 0 67 68 def getsize(self, path): 69 with suppress(FileNotFoundError): 70 return self.sftp.lstat(path).st_size 71 72 return 0 73 74 def exists(self, path, sftp=None): 75 return bool(self.st_mode(path)) 76 77 def isdir(self, path): 78 return stat.S_ISDIR(self.st_mode(path)) 79 80 def isfile(self, path): 81 return stat.S_ISREG(self.st_mode(path)) 82 83 def islink(self, path): 84 return stat.S_ISLNK(self.st_mode(path)) 85 86 def makedirs(self, path): 87 # Single stat call will say whether this is a dir, a file or a link 88 st_mode = self.st_mode(path) 89 90 if stat.S_ISDIR(st_mode): 91 return 92 93 if stat.S_ISREG(st_mode) or stat.S_ISLNK(st_mode): 94 raise 
DvcException( 95 "a file with the same name '{}' already exists".format(path) 96 ) 97 98 head, tail = posixpath.split(path) 99 100 if head: 101 self.makedirs(head) 102 103 if tail: 104 try: 105 self.sftp.mkdir(path) 106 except IOError as exc: 107 # Since paramiko errors are very vague we need to recheck 108 # whether it's because path already exists or something else 109 if exc.errno == errno.EACCES or not self.exists(path): 110 raise DvcException( 111 "unable to create remote directory '{}'".format(path) 112 ) from exc 113 114 def walk(self, directory, topdown=True): 115 # NOTE: original os.walk() implementation [1] with default options was 116 # used as a template. 117 # 118 # [1] https://github.com/python/cpython/blob/master/Lib/os.py 119 try: 120 dir_entries = self.sftp.listdir_attr(directory) 121 except IOError as exc: 122 raise DvcException( 123 "couldn't get the '{}' remote directory files list".format( 124 directory 125 ) 126 ) from exc 127 128 dirs = [] 129 nondirs = [] 130 for entry in dir_entries: 131 name = entry.filename 132 if stat.S_ISDIR(entry.st_mode): 133 dirs.append(name) 134 else: 135 nondirs.append(name) 136 137 if topdown: 138 yield directory, dirs, nondirs 139 140 for dname in dirs: 141 newpath = posixpath.join(directory, dname) 142 yield from self.walk(newpath, topdown=topdown) 143 144 if not topdown: 145 yield directory, dirs, nondirs 146 147 def walk_files(self, directory): 148 for root, dirs, files in self.walk(directory): 149 for fname in files: 150 yield posixpath.join(root, fname) 151 152 def _remove_file(self, path): 153 with suppress(FileNotFoundError): 154 self.sftp.remove(path) 155 156 def _remove_dir(self, path): 157 for root, dirs, files in self.walk(path, topdown=False): 158 for fname in files: 159 with suppress(FileNotFoundError): 160 self._remove_file(posixpath.join(root, fname)) 161 162 for dname in dirs: 163 with suppress(FileNotFoundError): 164 self.sftp.rmdir(posixpath.join(root, dname)) 165 166 with suppress(FileNotFoundError): 167 self.sftp.rmdir(path) 168 169 def remove(self, path): 170 if self.isdir(path): 171 self._remove_dir(path) 172 else: 173 self._remove_file(path) 174 175 def download(self, src, dest, no_progress_bar=False, progress_title=None): 176 with Tqdm( 177 desc=progress_title or os.path.basename(src), 178 disable=no_progress_bar, 179 bytes=True, 180 ) as pbar: 181 self.sftp.get(src, dest, callback=pbar.update_to) 182 183 def move(self, src, dst): 184 """Rename src to dst, if it is not possible (in case src and dst are 185 on different filesystems) and actual physical copying of data is 186 happening. 
187 """ 188 self.makedirs(posixpath.dirname(dst)) 189 190 try: 191 self.sftp.rename(src, dst) 192 except OSError: 193 self.atomic_copy(src, dst) 194 self.remove(src) 195 196 def atomic_copy(self, src, dst): 197 tmp = tmp_fname(dst) 198 199 try: 200 self.copy(src, tmp) 201 self.sftp.rename(tmp, dst) 202 finally: 203 self.remove(tmp) 204 205 def upload(self, src, dest, no_progress_bar=False, progress_title=None): 206 self.makedirs(posixpath.dirname(dest)) 207 tmp_file = tmp_fname(dest) 208 if not progress_title: 209 progress_title = posixpath.basename(dest) 210 211 with Tqdm( 212 desc=progress_title, disable=no_progress_bar, bytes=True 213 ) as pbar: 214 self.sftp.put(src, tmp_file, callback=pbar.update_to) 215 216 self.sftp.rename(tmp_file, dest) 217 218 def execute(self, cmd): 219 stdin, stdout, stderr = self._ssh.exec_command(cmd) 220 channel = stdout.channel 221 222 stdin.close() 223 channel.shutdown_write() 224 225 stdout_chunks = [] 226 stderr_chunks = [] 227 while ( 228 not channel.closed 229 or channel.recv_ready() 230 or channel.recv_stderr_ready() 231 ): 232 import select 233 234 got_chunk = False 235 readq, _, _ = select.select([stdout.channel], [], [], self.timeout) 236 for c in readq: 237 if c.recv_ready(): 238 stdout_chunks.append(stdout.channel.recv(len(c.in_buffer))) 239 got_chunk = True 240 241 if c.recv_stderr_ready(): 242 stderr_len = len(c.in_stderr_buffer) 243 s = stderr.channel.recv_stderr(stderr_len) 244 stderr_chunks.append(s) 245 got_chunk = True 246 247 if ( 248 not got_chunk 249 and stdout.channel.exit_status_ready() 250 and not stderr.channel.recv_stderr_ready() 251 and not stdout.channel.recv_ready() 252 ): 253 stdout.channel.shutdown_read() 254 stdout.channel.close() 255 break 256 257 stdout.close() 258 stderr.close() 259 260 ret = stdout.channel.recv_exit_status() 261 if ret != 0: 262 err = b"".join(stderr_chunks).decode("utf-8") 263 raise RemoteCmdError("ssh", cmd, ret, err) 264 265 return b"".join(stdout_chunks).decode("utf-8") 266 267 @cached_property 268 def uname(self): 269 return self.execute("uname").strip() 270 271 def md5(self, path): 272 """ 273 Use different md5 commands depending on the OS: 274 275 - Darwin's `md5` returns BSD-style checksums by default 276 - Linux's `md5sum` needs the `--tag` flag for a similar output 277 278 Example: 279 MD5 (foo.txt) = f3d220a856b52aabbf294351e8a24300 280 """ 281 if self.uname == "Linux": 282 md5 = self.execute("md5sum " + path).split()[0] 283 elif self.uname == "Darwin": 284 md5 = self.execute("md5 " + path).split()[-1] 285 else: 286 raise DvcException( 287 "'{}' is not supported as a SSH remote".format(self.uname) 288 ) 289 290 assert len(md5) == 32 291 return md5 292 293 def copy(self, src, dest): 294 self.execute("cp {} {}".format(src, dest)) 295 296 def open_max_sftp_channels(self): 297 # If there are more than 1 it means we've already opened max amount 298 if len(self._sftp_channels) <= 1: 299 while True: 300 try: 301 self._sftp_channels.append(self._ssh.open_sftp()) 302 except paramiko.ssh_exception.ChannelException: 303 if not self._sftp_channels: 304 raise 305 break 306 return self._sftp_channels 307 308 def open(self, *args, **kwargs): 309 return self.sftp.open(*args, **kwargs) 310 311 def symlink(self, src, dest): 312 self.sftp.symlink(src, dest) 313 314 def reflink(self, src, dest): 315 if self.uname == "Linux": 316 return self.execute("cp --reflink {} {}".format(src, dest)) 317 318 if self.uname == "Darwin": 319 return self.execute("cp -c {} {}".format(src, dest)) 320 321 raise DvcException( 322 
"'{}' is not supported as a SSH remote".format(self.uname) 323 ) 324 325 def hardlink(self, src, dest): 326 self.execute("ln {} {}".format(src, dest)) 327 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/dvc/remote/ssh/connection.py b/dvc/remote/ssh/connection.py --- a/dvc/remote/ssh/connection.py +++ b/dvc/remote/ssh/connection.py @@ -41,6 +41,14 @@ self.timeout = kwargs.get("timeout", 1800) self._ssh = paramiko.SSHClient() + + # Explicitly disable paramiko logger. Due to how paramiko dynamically + # loads loggers, it is not disabled by DVC disable_other_loggers(). + # See https://github.com/iterative/dvc/issues/3482 + self._ssh.set_log_channel("dvc.paramiko") + logging.getLogger("dvc.paramiko").disabled = True + logging.getLogger("dvc.paramiko.sftp").disabled = True + self._ssh.load_system_host_keys() self._ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
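The diff above silences paramiko's chatter ("Secsh channel 10 open FAILED: ...") by routing the client's dynamically created loggers onto a dedicated channel and disabling it. A minimal sketch of the same idea outside of DVC might look like the following; the channel name is a placeholder, not the one DVC uses.

```python
import logging

import paramiko


def make_quiet_ssh_client():
    """Create an SSHClient whose paramiko log output is suppressed."""
    client = paramiko.SSHClient()

    # Paramiko attaches its transport/sftp loggers under the client's log
    # channel at connect time, so pointing the client at a known channel and
    # disabling that channel keeps the "Secsh channel ... open FAILED"
    # messages out of the application's log handlers.
    client.set_log_channel("quiet.paramiko")
    logging.getLogger("quiet.paramiko").disabled = True
    logging.getLogger("quiet.paramiko.sftp").disabled = True

    client.load_system_host_keys()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    return client
```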
{"golden_diff": "diff --git a/dvc/remote/ssh/connection.py b/dvc/remote/ssh/connection.py\n--- a/dvc/remote/ssh/connection.py\n+++ b/dvc/remote/ssh/connection.py\n@@ -41,6 +41,14 @@\n self.timeout = kwargs.get(\"timeout\", 1800)\n \n self._ssh = paramiko.SSHClient()\n+\n+ # Explicitly disable paramiko logger. Due to how paramiko dynamically\n+ # loads loggers, it is not disabled by DVC disable_other_loggers().\n+ # See https://github.com/iterative/dvc/issues/3482\n+ self._ssh.set_log_channel(\"dvc.paramiko\")\n+ logging.getLogger(\"dvc.paramiko\").disabled = True\n+ logging.getLogger(\"dvc.paramiko.sftp\").disabled = True\n+\n self._ssh.load_system_host_keys()\n self._ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n", "issue": "Secsh channel 10 open FAILED: open failed: Connect failed\n**Please provide information about your setup**\r\nDVC version(i.e. `dvc --version`), Platform and method of installation (pip, homebrew, pkg Mac, exe (Windows), DEB(Linux), RPM(Linux))\r\n\r\nversion 0.88.0\r\ninstalled via pip on Linux and Mac.\r\nSyncing to a Ubuntu host via ssh.\r\n\r\nEverything seems to be working but I get a cryptic warning message every time I do anything. For example:\r\n\r\ndvc push\r\n 0% Querying cache in ssh://[email protected]/media/sda2/dvc/first_day| |0/42 Secsh channel 10 open FAILED: open failed: Connect failed\r\nSecsh channel 10 open FAILED: open failed: Connect failed\r\nSecsh channel 10 open FAILED: open failed: Connect failed\r\n 2% /media/sda2/dvc/first_day/4e/4b31f0c5784a2e185d88a3120cac19| |1/42 [00:02<0Secsh channel 10 open FAILED: open failed: Connect failed\r\nEverything is up to date. \r\n\r\nThis is probably an edge case due to my setup but I'm not sure how to quiet the message or resolve the issue.\n", "before_files": [{"content": "import errno\nimport logging\nimport os\nimport posixpath\nimport stat\nfrom contextlib import suppress\n\nfrom funcy import cached_property\n\ntry:\n import paramiko\nexcept ImportError:\n paramiko = None\n\nfrom dvc.utils import tmp_fname\nfrom dvc.progress import Tqdm\nfrom dvc.exceptions import DvcException\nfrom dvc.remote.base import RemoteCmdError\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef sizeof_fmt(num, suffix=\"B\"):\n \"\"\" Convert number of bytes to human-readable string \"\"\"\n for unit in [\"\", \"K\", \"M\", \"G\", \"T\", \"P\", \"E\", \"Z\"]:\n if abs(num) < 1024.0:\n return \"%3.1f%s%s\" % (num, unit, suffix)\n num /= 1024.0\n return \"%.1f%s%s\" % (num, \"Y\", suffix)\n\n\nclass SSHConnection:\n def __init__(self, host, *args, **kwargs):\n logger.debug(\n \"Establishing ssh connection with '{host}' \"\n \"through port '{port}' as user '{username}'\".format(\n host=host, **kwargs\n )\n )\n self.timeout = kwargs.get(\"timeout\", 1800)\n\n self._ssh = paramiko.SSHClient()\n self._ssh.load_system_host_keys()\n self._ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\n self._ssh.connect(host, *args, **kwargs)\n self._ssh.get_transport().set_keepalive(10)\n self._sftp_channels = []\n\n @property\n def sftp(self):\n if not self._sftp_channels:\n self._sftp_channels = [self._ssh.open_sftp()]\n return self._sftp_channels[0]\n\n def close(self):\n for sftp in self._sftp_channels:\n sftp.close()\n self._ssh.close()\n\n def st_mode(self, path):\n with suppress(FileNotFoundError):\n return self.sftp.lstat(path).st_mode\n\n return 0\n\n def getsize(self, path):\n with suppress(FileNotFoundError):\n return self.sftp.lstat(path).st_size\n\n return 0\n\n def exists(self, path, sftp=None):\n return 
bool(self.st_mode(path))\n\n def isdir(self, path):\n return stat.S_ISDIR(self.st_mode(path))\n\n def isfile(self, path):\n return stat.S_ISREG(self.st_mode(path))\n\n def islink(self, path):\n return stat.S_ISLNK(self.st_mode(path))\n\n def makedirs(self, path):\n # Single stat call will say whether this is a dir, a file or a link\n st_mode = self.st_mode(path)\n\n if stat.S_ISDIR(st_mode):\n return\n\n if stat.S_ISREG(st_mode) or stat.S_ISLNK(st_mode):\n raise DvcException(\n \"a file with the same name '{}' already exists\".format(path)\n )\n\n head, tail = posixpath.split(path)\n\n if head:\n self.makedirs(head)\n\n if tail:\n try:\n self.sftp.mkdir(path)\n except IOError as exc:\n # Since paramiko errors are very vague we need to recheck\n # whether it's because path already exists or something else\n if exc.errno == errno.EACCES or not self.exists(path):\n raise DvcException(\n \"unable to create remote directory '{}'\".format(path)\n ) from exc\n\n def walk(self, directory, topdown=True):\n # NOTE: original os.walk() implementation [1] with default options was\n # used as a template.\n #\n # [1] https://github.com/python/cpython/blob/master/Lib/os.py\n try:\n dir_entries = self.sftp.listdir_attr(directory)\n except IOError as exc:\n raise DvcException(\n \"couldn't get the '{}' remote directory files list\".format(\n directory\n )\n ) from exc\n\n dirs = []\n nondirs = []\n for entry in dir_entries:\n name = entry.filename\n if stat.S_ISDIR(entry.st_mode):\n dirs.append(name)\n else:\n nondirs.append(name)\n\n if topdown:\n yield directory, dirs, nondirs\n\n for dname in dirs:\n newpath = posixpath.join(directory, dname)\n yield from self.walk(newpath, topdown=topdown)\n\n if not topdown:\n yield directory, dirs, nondirs\n\n def walk_files(self, directory):\n for root, dirs, files in self.walk(directory):\n for fname in files:\n yield posixpath.join(root, fname)\n\n def _remove_file(self, path):\n with suppress(FileNotFoundError):\n self.sftp.remove(path)\n\n def _remove_dir(self, path):\n for root, dirs, files in self.walk(path, topdown=False):\n for fname in files:\n with suppress(FileNotFoundError):\n self._remove_file(posixpath.join(root, fname))\n\n for dname in dirs:\n with suppress(FileNotFoundError):\n self.sftp.rmdir(posixpath.join(root, dname))\n\n with suppress(FileNotFoundError):\n self.sftp.rmdir(path)\n\n def remove(self, path):\n if self.isdir(path):\n self._remove_dir(path)\n else:\n self._remove_file(path)\n\n def download(self, src, dest, no_progress_bar=False, progress_title=None):\n with Tqdm(\n desc=progress_title or os.path.basename(src),\n disable=no_progress_bar,\n bytes=True,\n ) as pbar:\n self.sftp.get(src, dest, callback=pbar.update_to)\n\n def move(self, src, dst):\n \"\"\"Rename src to dst, if it is not possible (in case src and dst are\n on different filesystems) and actual physical copying of data is\n happening.\n \"\"\"\n self.makedirs(posixpath.dirname(dst))\n\n try:\n self.sftp.rename(src, dst)\n except OSError:\n self.atomic_copy(src, dst)\n self.remove(src)\n\n def atomic_copy(self, src, dst):\n tmp = tmp_fname(dst)\n\n try:\n self.copy(src, tmp)\n self.sftp.rename(tmp, dst)\n finally:\n self.remove(tmp)\n\n def upload(self, src, dest, no_progress_bar=False, progress_title=None):\n self.makedirs(posixpath.dirname(dest))\n tmp_file = tmp_fname(dest)\n if not progress_title:\n progress_title = posixpath.basename(dest)\n\n with Tqdm(\n desc=progress_title, disable=no_progress_bar, bytes=True\n ) as pbar:\n self.sftp.put(src, tmp_file, 
callback=pbar.update_to)\n\n self.sftp.rename(tmp_file, dest)\n\n def execute(self, cmd):\n stdin, stdout, stderr = self._ssh.exec_command(cmd)\n channel = stdout.channel\n\n stdin.close()\n channel.shutdown_write()\n\n stdout_chunks = []\n stderr_chunks = []\n while (\n not channel.closed\n or channel.recv_ready()\n or channel.recv_stderr_ready()\n ):\n import select\n\n got_chunk = False\n readq, _, _ = select.select([stdout.channel], [], [], self.timeout)\n for c in readq:\n if c.recv_ready():\n stdout_chunks.append(stdout.channel.recv(len(c.in_buffer)))\n got_chunk = True\n\n if c.recv_stderr_ready():\n stderr_len = len(c.in_stderr_buffer)\n s = stderr.channel.recv_stderr(stderr_len)\n stderr_chunks.append(s)\n got_chunk = True\n\n if (\n not got_chunk\n and stdout.channel.exit_status_ready()\n and not stderr.channel.recv_stderr_ready()\n and not stdout.channel.recv_ready()\n ):\n stdout.channel.shutdown_read()\n stdout.channel.close()\n break\n\n stdout.close()\n stderr.close()\n\n ret = stdout.channel.recv_exit_status()\n if ret != 0:\n err = b\"\".join(stderr_chunks).decode(\"utf-8\")\n raise RemoteCmdError(\"ssh\", cmd, ret, err)\n\n return b\"\".join(stdout_chunks).decode(\"utf-8\")\n\n @cached_property\n def uname(self):\n return self.execute(\"uname\").strip()\n\n def md5(self, path):\n \"\"\"\n Use different md5 commands depending on the OS:\n\n - Darwin's `md5` returns BSD-style checksums by default\n - Linux's `md5sum` needs the `--tag` flag for a similar output\n\n Example:\n MD5 (foo.txt) = f3d220a856b52aabbf294351e8a24300\n \"\"\"\n if self.uname == \"Linux\":\n md5 = self.execute(\"md5sum \" + path).split()[0]\n elif self.uname == \"Darwin\":\n md5 = self.execute(\"md5 \" + path).split()[-1]\n else:\n raise DvcException(\n \"'{}' is not supported as a SSH remote\".format(self.uname)\n )\n\n assert len(md5) == 32\n return md5\n\n def copy(self, src, dest):\n self.execute(\"cp {} {}\".format(src, dest))\n\n def open_max_sftp_channels(self):\n # If there are more than 1 it means we've already opened max amount\n if len(self._sftp_channels) <= 1:\n while True:\n try:\n self._sftp_channels.append(self._ssh.open_sftp())\n except paramiko.ssh_exception.ChannelException:\n if not self._sftp_channels:\n raise\n break\n return self._sftp_channels\n\n def open(self, *args, **kwargs):\n return self.sftp.open(*args, **kwargs)\n\n def symlink(self, src, dest):\n self.sftp.symlink(src, dest)\n\n def reflink(self, src, dest):\n if self.uname == \"Linux\":\n return self.execute(\"cp --reflink {} {}\".format(src, dest))\n\n if self.uname == \"Darwin\":\n return self.execute(\"cp -c {} {}\".format(src, dest))\n\n raise DvcException(\n \"'{}' is not supported as a SSH remote\".format(self.uname)\n )\n\n def hardlink(self, src, dest):\n self.execute(\"ln {} {}\".format(src, dest))\n", "path": "dvc/remote/ssh/connection.py"}], "after_files": [{"content": "import errno\nimport logging\nimport os\nimport posixpath\nimport stat\nfrom contextlib import suppress\n\nfrom funcy import cached_property\n\ntry:\n import paramiko\nexcept ImportError:\n paramiko = None\n\nfrom dvc.utils import tmp_fname\nfrom dvc.progress import Tqdm\nfrom dvc.exceptions import DvcException\nfrom dvc.remote.base import RemoteCmdError\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef sizeof_fmt(num, suffix=\"B\"):\n \"\"\" Convert number of bytes to human-readable string \"\"\"\n for unit in [\"\", \"K\", \"M\", \"G\", \"T\", \"P\", \"E\", \"Z\"]:\n if abs(num) < 1024.0:\n return \"%3.1f%s%s\" % (num, unit, suffix)\n num 
/= 1024.0\n return \"%.1f%s%s\" % (num, \"Y\", suffix)\n\n\nclass SSHConnection:\n def __init__(self, host, *args, **kwargs):\n logger.debug(\n \"Establishing ssh connection with '{host}' \"\n \"through port '{port}' as user '{username}'\".format(\n host=host, **kwargs\n )\n )\n self.timeout = kwargs.get(\"timeout\", 1800)\n\n self._ssh = paramiko.SSHClient()\n\n # Explicitly disable paramiko logger. Due to how paramiko dynamically\n # loads loggers, it is not disabled by DVC disable_other_loggers().\n # See https://github.com/iterative/dvc/issues/3482\n self._ssh.set_log_channel(\"dvc.paramiko\")\n logging.getLogger(\"dvc.paramiko\").disabled = True\n logging.getLogger(\"dvc.paramiko.sftp\").disabled = True\n\n self._ssh.load_system_host_keys()\n self._ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n\n self._ssh.connect(host, *args, **kwargs)\n self._ssh.get_transport().set_keepalive(10)\n self._sftp_channels = []\n\n @property\n def sftp(self):\n if not self._sftp_channels:\n self._sftp_channels = [self._ssh.open_sftp()]\n return self._sftp_channels[0]\n\n def close(self):\n for sftp in self._sftp_channels:\n sftp.close()\n self._ssh.close()\n\n def st_mode(self, path):\n with suppress(FileNotFoundError):\n return self.sftp.lstat(path).st_mode\n\n return 0\n\n def getsize(self, path):\n with suppress(FileNotFoundError):\n return self.sftp.lstat(path).st_size\n\n return 0\n\n def exists(self, path, sftp=None):\n return bool(self.st_mode(path))\n\n def isdir(self, path):\n return stat.S_ISDIR(self.st_mode(path))\n\n def isfile(self, path):\n return stat.S_ISREG(self.st_mode(path))\n\n def islink(self, path):\n return stat.S_ISLNK(self.st_mode(path))\n\n def makedirs(self, path):\n # Single stat call will say whether this is a dir, a file or a link\n st_mode = self.st_mode(path)\n\n if stat.S_ISDIR(st_mode):\n return\n\n if stat.S_ISREG(st_mode) or stat.S_ISLNK(st_mode):\n raise DvcException(\n \"a file with the same name '{}' already exists\".format(path)\n )\n\n head, tail = posixpath.split(path)\n\n if head:\n self.makedirs(head)\n\n if tail:\n try:\n self.sftp.mkdir(path)\n except IOError as exc:\n # Since paramiko errors are very vague we need to recheck\n # whether it's because path already exists or something else\n if exc.errno == errno.EACCES or not self.exists(path):\n raise DvcException(\n \"unable to create remote directory '{}'\".format(path)\n ) from exc\n\n def walk(self, directory, topdown=True):\n # NOTE: original os.walk() implementation [1] with default options was\n # used as a template.\n #\n # [1] https://github.com/python/cpython/blob/master/Lib/os.py\n try:\n dir_entries = self.sftp.listdir_attr(directory)\n except IOError as exc:\n raise DvcException(\n \"couldn't get the '{}' remote directory files list\".format(\n directory\n )\n ) from exc\n\n dirs = []\n nondirs = []\n for entry in dir_entries:\n name = entry.filename\n if stat.S_ISDIR(entry.st_mode):\n dirs.append(name)\n else:\n nondirs.append(name)\n\n if topdown:\n yield directory, dirs, nondirs\n\n for dname in dirs:\n newpath = posixpath.join(directory, dname)\n yield from self.walk(newpath, topdown=topdown)\n\n if not topdown:\n yield directory, dirs, nondirs\n\n def walk_files(self, directory):\n for root, dirs, files in self.walk(directory):\n for fname in files:\n yield posixpath.join(root, fname)\n\n def _remove_file(self, path):\n with suppress(FileNotFoundError):\n self.sftp.remove(path)\n\n def _remove_dir(self, path):\n for root, dirs, files in self.walk(path, topdown=False):\n for 
fname in files:\n with suppress(FileNotFoundError):\n self._remove_file(posixpath.join(root, fname))\n\n for dname in dirs:\n with suppress(FileNotFoundError):\n self.sftp.rmdir(posixpath.join(root, dname))\n\n with suppress(FileNotFoundError):\n self.sftp.rmdir(path)\n\n def remove(self, path):\n if self.isdir(path):\n self._remove_dir(path)\n else:\n self._remove_file(path)\n\n def download(self, src, dest, no_progress_bar=False, progress_title=None):\n with Tqdm(\n desc=progress_title or os.path.basename(src),\n disable=no_progress_bar,\n bytes=True,\n ) as pbar:\n self.sftp.get(src, dest, callback=pbar.update_to)\n\n def move(self, src, dst):\n \"\"\"Rename src to dst, if it is not possible (in case src and dst are\n on different filesystems) and actual physical copying of data is\n happening.\n \"\"\"\n self.makedirs(posixpath.dirname(dst))\n\n try:\n self.sftp.rename(src, dst)\n except OSError:\n self.atomic_copy(src, dst)\n self.remove(src)\n\n def atomic_copy(self, src, dst):\n tmp = tmp_fname(dst)\n\n try:\n self.copy(src, tmp)\n self.sftp.rename(tmp, dst)\n finally:\n self.remove(tmp)\n\n def upload(self, src, dest, no_progress_bar=False, progress_title=None):\n self.makedirs(posixpath.dirname(dest))\n tmp_file = tmp_fname(dest)\n if not progress_title:\n progress_title = posixpath.basename(dest)\n\n with Tqdm(\n desc=progress_title, disable=no_progress_bar, bytes=True\n ) as pbar:\n self.sftp.put(src, tmp_file, callback=pbar.update_to)\n\n self.sftp.rename(tmp_file, dest)\n\n def execute(self, cmd):\n stdin, stdout, stderr = self._ssh.exec_command(cmd)\n channel = stdout.channel\n\n stdin.close()\n channel.shutdown_write()\n\n stdout_chunks = []\n stderr_chunks = []\n while (\n not channel.closed\n or channel.recv_ready()\n or channel.recv_stderr_ready()\n ):\n import select\n\n got_chunk = False\n readq, _, _ = select.select([stdout.channel], [], [], self.timeout)\n for c in readq:\n if c.recv_ready():\n stdout_chunks.append(stdout.channel.recv(len(c.in_buffer)))\n got_chunk = True\n\n if c.recv_stderr_ready():\n stderr_len = len(c.in_stderr_buffer)\n s = stderr.channel.recv_stderr(stderr_len)\n stderr_chunks.append(s)\n got_chunk = True\n\n if (\n not got_chunk\n and stdout.channel.exit_status_ready()\n and not stderr.channel.recv_stderr_ready()\n and not stdout.channel.recv_ready()\n ):\n stdout.channel.shutdown_read()\n stdout.channel.close()\n break\n\n stdout.close()\n stderr.close()\n\n ret = stdout.channel.recv_exit_status()\n if ret != 0:\n err = b\"\".join(stderr_chunks).decode(\"utf-8\")\n raise RemoteCmdError(\"ssh\", cmd, ret, err)\n\n return b\"\".join(stdout_chunks).decode(\"utf-8\")\n\n @cached_property\n def uname(self):\n return self.execute(\"uname\").strip()\n\n def md5(self, path):\n \"\"\"\n Use different md5 commands depending on the OS:\n\n - Darwin's `md5` returns BSD-style checksums by default\n - Linux's `md5sum` needs the `--tag` flag for a similar output\n\n Example:\n MD5 (foo.txt) = f3d220a856b52aabbf294351e8a24300\n \"\"\"\n if self.uname == \"Linux\":\n md5 = self.execute(\"md5sum \" + path).split()[0]\n elif self.uname == \"Darwin\":\n md5 = self.execute(\"md5 \" + path).split()[-1]\n else:\n raise DvcException(\n \"'{}' is not supported as a SSH remote\".format(self.uname)\n )\n\n assert len(md5) == 32\n return md5\n\n def copy(self, src, dest):\n self.execute(\"cp {} {}\".format(src, dest))\n\n def open_max_sftp_channels(self):\n # If there are more than 1 it means we've already opened max amount\n if len(self._sftp_channels) <= 1:\n while 
True:\n try:\n self._sftp_channels.append(self._ssh.open_sftp())\n except paramiko.ssh_exception.ChannelException:\n if not self._sftp_channels:\n raise\n break\n return self._sftp_channels\n\n def open(self, *args, **kwargs):\n return self.sftp.open(*args, **kwargs)\n\n def symlink(self, src, dest):\n self.sftp.symlink(src, dest)\n\n def reflink(self, src, dest):\n if self.uname == \"Linux\":\n return self.execute(\"cp --reflink {} {}\".format(src, dest))\n\n if self.uname == \"Darwin\":\n return self.execute(\"cp -c {} {}\".format(src, dest))\n\n raise DvcException(\n \"'{}' is not supported as a SSH remote\".format(self.uname)\n )\n\n def hardlink(self, src, dest):\n self.execute(\"ln {} {}\".format(src, dest))\n", "path": "dvc/remote/ssh/connection.py"}]}
3,708
210
gh_patches_debug_30707
rasdani/github-patches
git_diff
encode__starlette-1147
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Session cookie should use root path The session cookie currently uses '/'. It should really use the ASGI root path instead, in case the application is submounted. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `starlette/middleware/sessions.py` Content: ``` 1 import json 2 import typing 3 from base64 import b64decode, b64encode 4 5 import itsdangerous 6 from itsdangerous.exc import BadTimeSignature, SignatureExpired 7 8 from starlette.datastructures import MutableHeaders, Secret 9 from starlette.requests import HTTPConnection 10 from starlette.types import ASGIApp, Message, Receive, Scope, Send 11 12 13 class SessionMiddleware: 14 def __init__( 15 self, 16 app: ASGIApp, 17 secret_key: typing.Union[str, Secret], 18 session_cookie: str = "session", 19 max_age: int = 14 * 24 * 60 * 60, # 14 days, in seconds 20 same_site: str = "lax", 21 https_only: bool = False, 22 ) -> None: 23 self.app = app 24 self.signer = itsdangerous.TimestampSigner(str(secret_key)) 25 self.session_cookie = session_cookie 26 self.max_age = max_age 27 self.security_flags = "httponly; samesite=" + same_site 28 if https_only: # Secure flag can be used with HTTPS only 29 self.security_flags += "; secure" 30 31 async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None: 32 if scope["type"] not in ("http", "websocket"): # pragma: no cover 33 await self.app(scope, receive, send) 34 return 35 36 connection = HTTPConnection(scope) 37 initial_session_was_empty = True 38 39 if self.session_cookie in connection.cookies: 40 data = connection.cookies[self.session_cookie].encode("utf-8") 41 try: 42 data = self.signer.unsign(data, max_age=self.max_age) 43 scope["session"] = json.loads(b64decode(data)) 44 initial_session_was_empty = False 45 except (BadTimeSignature, SignatureExpired): 46 scope["session"] = {} 47 else: 48 scope["session"] = {} 49 50 async def send_wrapper(message: Message) -> None: 51 if message["type"] == "http.response.start": 52 if scope["session"]: 53 # We have session data to persist. 54 data = b64encode(json.dumps(scope["session"]).encode("utf-8")) 55 data = self.signer.sign(data) 56 headers = MutableHeaders(scope=message) 57 header_value = "%s=%s; path=/; Max-Age=%d; %s" % ( 58 self.session_cookie, 59 data.decode("utf-8"), 60 self.max_age, 61 self.security_flags, 62 ) 63 headers.append("Set-Cookie", header_value) 64 elif not initial_session_was_empty: 65 # The session has been cleared. 66 headers = MutableHeaders(scope=message) 67 header_value = "{}={}; {}".format( 68 self.session_cookie, 69 "null; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT;", 70 self.security_flags, 71 ) 72 headers.append("Set-Cookie", header_value) 73 await send(message) 74 75 await self.app(scope, receive, send_wrapper) 76 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/starlette/middleware/sessions.py b/starlette/middleware/sessions.py
--- a/starlette/middleware/sessions.py
+++ b/starlette/middleware/sessions.py
@@ -49,14 +49,16 @@
 
         async def send_wrapper(message: Message) -> None:
             if message["type"] == "http.response.start":
+                path = scope.get("root_path", "") or "/"
                 if scope["session"]:
                     # We have session data to persist.
                     data = b64encode(json.dumps(scope["session"]).encode("utf-8"))
                     data = self.signer.sign(data)
                     headers = MutableHeaders(scope=message)
-                    header_value = "%s=%s; path=/; Max-Age=%d; %s" % (
+                    header_value = "%s=%s; path=%s; Max-Age=%d; %s" % (
                         self.session_cookie,
                         data.decode("utf-8"),
+                        path,
                         self.max_age,
                         self.security_flags,
                     )
@@ -66,7 +68,7 @@
                     headers = MutableHeaders(scope=message)
                     header_value = "{}={}; {}".format(
                         self.session_cookie,
-                        "null; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT;",
+                        f"null; path={path}; expires=Thu, 01 Jan 1970 00:00:00 GMT;",
                         self.security_flags,
                     )
                     headers.append("Set-Cookie", header_value)
{"golden_diff": "diff --git a/starlette/middleware/sessions.py b/starlette/middleware/sessions.py\n--- a/starlette/middleware/sessions.py\n+++ b/starlette/middleware/sessions.py\n@@ -49,14 +49,16 @@\n \n async def send_wrapper(message: Message) -> None:\n if message[\"type\"] == \"http.response.start\":\n+ path = scope.get(\"root_path\", \"\") or \"/\"\n if scope[\"session\"]:\n # We have session data to persist.\n data = b64encode(json.dumps(scope[\"session\"]).encode(\"utf-8\"))\n data = self.signer.sign(data)\n headers = MutableHeaders(scope=message)\n- header_value = \"%s=%s; path=/; Max-Age=%d; %s\" % (\n+ header_value = \"%s=%s; path=%s; Max-Age=%d; %s\" % (\n self.session_cookie,\n data.decode(\"utf-8\"),\n+ path,\n self.max_age,\n self.security_flags,\n )\n@@ -66,7 +68,7 @@\n headers = MutableHeaders(scope=message)\n header_value = \"{}={}; {}\".format(\n self.session_cookie,\n- \"null; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT;\",\n+ f\"null; path={path}; expires=Thu, 01 Jan 1970 00:00:00 GMT;\",\n self.security_flags,\n )\n headers.append(\"Set-Cookie\", header_value)\n", "issue": "Session cookie should use root path\nThe session cookie currently uses '/'.\r\nIt should really use the ASGI root path instead, in case the application is submounted.\n", "before_files": [{"content": "import json\nimport typing\nfrom base64 import b64decode, b64encode\n\nimport itsdangerous\nfrom itsdangerous.exc import BadTimeSignature, SignatureExpired\n\nfrom starlette.datastructures import MutableHeaders, Secret\nfrom starlette.requests import HTTPConnection\nfrom starlette.types import ASGIApp, Message, Receive, Scope, Send\n\n\nclass SessionMiddleware:\n def __init__(\n self,\n app: ASGIApp,\n secret_key: typing.Union[str, Secret],\n session_cookie: str = \"session\",\n max_age: int = 14 * 24 * 60 * 60, # 14 days, in seconds\n same_site: str = \"lax\",\n https_only: bool = False,\n ) -> None:\n self.app = app\n self.signer = itsdangerous.TimestampSigner(str(secret_key))\n self.session_cookie = session_cookie\n self.max_age = max_age\n self.security_flags = \"httponly; samesite=\" + same_site\n if https_only: # Secure flag can be used with HTTPS only\n self.security_flags += \"; secure\"\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n if scope[\"type\"] not in (\"http\", \"websocket\"): # pragma: no cover\n await self.app(scope, receive, send)\n return\n\n connection = HTTPConnection(scope)\n initial_session_was_empty = True\n\n if self.session_cookie in connection.cookies:\n data = connection.cookies[self.session_cookie].encode(\"utf-8\")\n try:\n data = self.signer.unsign(data, max_age=self.max_age)\n scope[\"session\"] = json.loads(b64decode(data))\n initial_session_was_empty = False\n except (BadTimeSignature, SignatureExpired):\n scope[\"session\"] = {}\n else:\n scope[\"session\"] = {}\n\n async def send_wrapper(message: Message) -> None:\n if message[\"type\"] == \"http.response.start\":\n if scope[\"session\"]:\n # We have session data to persist.\n data = b64encode(json.dumps(scope[\"session\"]).encode(\"utf-8\"))\n data = self.signer.sign(data)\n headers = MutableHeaders(scope=message)\n header_value = \"%s=%s; path=/; Max-Age=%d; %s\" % (\n self.session_cookie,\n data.decode(\"utf-8\"),\n self.max_age,\n self.security_flags,\n )\n headers.append(\"Set-Cookie\", header_value)\n elif not initial_session_was_empty:\n # The session has been cleared.\n headers = MutableHeaders(scope=message)\n header_value = \"{}={}; {}\".format(\n 
self.session_cookie,\n \"null; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT;\",\n self.security_flags,\n )\n headers.append(\"Set-Cookie\", header_value)\n await send(message)\n\n await self.app(scope, receive, send_wrapper)\n", "path": "starlette/middleware/sessions.py"}], "after_files": [{"content": "import json\nimport typing\nfrom base64 import b64decode, b64encode\n\nimport itsdangerous\nfrom itsdangerous.exc import BadTimeSignature, SignatureExpired\n\nfrom starlette.datastructures import MutableHeaders, Secret\nfrom starlette.requests import HTTPConnection\nfrom starlette.types import ASGIApp, Message, Receive, Scope, Send\n\n\nclass SessionMiddleware:\n def __init__(\n self,\n app: ASGIApp,\n secret_key: typing.Union[str, Secret],\n session_cookie: str = \"session\",\n max_age: int = 14 * 24 * 60 * 60, # 14 days, in seconds\n same_site: str = \"lax\",\n https_only: bool = False,\n ) -> None:\n self.app = app\n self.signer = itsdangerous.TimestampSigner(str(secret_key))\n self.session_cookie = session_cookie\n self.max_age = max_age\n self.security_flags = \"httponly; samesite=\" + same_site\n if https_only: # Secure flag can be used with HTTPS only\n self.security_flags += \"; secure\"\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n if scope[\"type\"] not in (\"http\", \"websocket\"): # pragma: no cover\n await self.app(scope, receive, send)\n return\n\n connection = HTTPConnection(scope)\n initial_session_was_empty = True\n\n if self.session_cookie in connection.cookies:\n data = connection.cookies[self.session_cookie].encode(\"utf-8\")\n try:\n data = self.signer.unsign(data, max_age=self.max_age)\n scope[\"session\"] = json.loads(b64decode(data))\n initial_session_was_empty = False\n except (BadTimeSignature, SignatureExpired):\n scope[\"session\"] = {}\n else:\n scope[\"session\"] = {}\n\n async def send_wrapper(message: Message) -> None:\n if message[\"type\"] == \"http.response.start\":\n path = scope.get(\"root_path\", \"\") or \"/\"\n if scope[\"session\"]:\n # We have session data to persist.\n data = b64encode(json.dumps(scope[\"session\"]).encode(\"utf-8\"))\n data = self.signer.sign(data)\n headers = MutableHeaders(scope=message)\n header_value = \"%s=%s; path=%s; Max-Age=%d; %s\" % (\n self.session_cookie,\n data.decode(\"utf-8\"),\n path,\n self.max_age,\n self.security_flags,\n )\n headers.append(\"Set-Cookie\", header_value)\n elif not initial_session_was_empty:\n # The session has been cleared.\n headers = MutableHeaders(scope=message)\n header_value = \"{}={}; {}\".format(\n self.session_cookie,\n f\"null; path={path}; expires=Thu, 01 Jan 1970 00:00:00 GMT;\",\n self.security_flags,\n )\n headers.append(\"Set-Cookie\", header_value)\n await send(message)\n\n await self.app(scope, receive, send_wrapper)\n", "path": "starlette/middleware/sessions.py"}]}
1,096
340
gh_patches_debug_27519
rasdani/github-patches
git_diff
opsdroid__opsdroid-22
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Config locations Currently opsdroid looks for the `configuration.yaml` file in the current working directory. It should also look in `~/.opsdroid/configuration.yaml` and `/etc/opsdroid/configuration.yaml`. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `opsdroid/loader.py` Content: ``` 1 """Class for loading in modules to OpsDroid.""" 2 3 import logging 4 import os 5 import shutil 6 import subprocess 7 import importlib 8 import yaml 9 from opsdroid.const import ( 10 DEFAULT_GIT_URL, MODULES_DIRECTORY, DEFAULT_MODULE_BRANCH) 11 12 13 def import_module(config): 14 """Import module namespace as variable and return it.""" 15 try: 16 module = importlib.import_module( 17 config["path"] + "." + config["name"]) 18 logging.debug("Loading " + config["type"] + ": " + config["name"]) 19 return module 20 except ImportError as error: 21 logging.error("Failed to load " + config["type"] + 22 " " + config["name"]) 23 logging.error(error) 24 return None 25 26 27 def check_cache(config): 28 """Remove module if 'no-cache' set in config.""" 29 if "no-cache" in config \ 30 and config["no-cache"] \ 31 and os.path.isdir(config["install_path"]): 32 logging.debug("'no-cache' set, removing " + config["install_path"]) 33 shutil.rmtree(config["install_path"]) 34 35 36 def build_module_path(path_type, config): 37 """Generate the module path from name and type.""" 38 if path_type == "import": 39 return MODULES_DIRECTORY + "." + config["type"] + "." + config["name"] 40 elif path_type == "install": 41 return MODULES_DIRECTORY + "/" + config["type"] + "/" + config["name"] 42 43 44 def git_clone(git_url, install_path, branch): 45 """Clone a git repo to a location and wait for finish.""" 46 process = subprocess.Popen(["git", "clone", "-b", branch, 47 git_url, install_path], shell=False, 48 stdout=subprocess.PIPE, 49 stderr=subprocess.PIPE) 50 process.wait() 51 52 53 def pip_install_deps(requirements_path): 54 """Pip install a requirements.txt file and wait for finish.""" 55 process = subprocess.Popen(["pip", "install", "-r", requirements_path], 56 shell=False, 57 stdout=subprocess.PIPE, 58 stderr=subprocess.PIPE) 59 for output in process.communicate(): 60 if output != "": 61 for line in output.splitlines(): 62 logging.debug(str(line).strip()) 63 process.wait() 64 65 66 class Loader: 67 """Class to load in config and modules.""" 68 69 def __init__(self, opsdroid): 70 """Setup object with opsdroid instance.""" 71 self.opsdroid = opsdroid 72 logging.debug("Loaded loader") 73 74 def load_config_file(self, config_path): 75 """Load a yaml config file from path.""" 76 if not os.path.isfile(config_path): 77 self.opsdroid.critical("Config file " + config_path + 78 " not found", 1) 79 80 try: 81 with open(config_path, 'r') as stream: 82 return yaml.load(stream) 83 except yaml.YAMLError as error: 84 self.opsdroid.critical(error, 1) 85 except FileNotFoundError as error: 86 self.opsdroid.critical(str(error), 1) 87 88 def load_config(self, config): 89 """Load all module types based on config.""" 90 logging.debug("Loading modules from config") 91 92 if 'databases' in config.keys(): 93 self.opsdroid.start_databases( 94 self._load_modules('database', config['databases'])) 95 else: 96 logging.warning("No databases in configuration") 97 98 if 'skills' in config.keys(): 99 self._setup_modules( 100 self._load_modules('skill', 
config['skills']) 101 ) 102 else: 103 self.opsdroid.critical( 104 "No skills in configuration, at least 1 required", 1) 105 106 if 'connectors' in config.keys(): 107 self.opsdroid.start_connectors( 108 self._load_modules('connector', config['connectors'])) 109 else: 110 self.opsdroid.critical( 111 "No connectors in configuration, at least 1 required", 1) 112 113 def _load_modules(self, modules_type, modules): 114 """Install and load modules.""" 115 logging.debug("Loading " + modules_type + " modules") 116 loaded_modules = [] 117 118 # Create modules directory if doesn't exist 119 if not os.path.isdir(MODULES_DIRECTORY): 120 os.makedirs(MODULES_DIRECTORY) 121 122 for module_name in modules.keys(): 123 124 # Set up module config 125 config = modules[module_name] 126 config = {} if config is None else config 127 config["name"] = module_name 128 config["type"] = modules_type 129 config["path"] = build_module_path("import", config) 130 config["install_path"] = build_module_path("install", config) 131 if "branch" not in config: 132 config["branch"] = DEFAULT_MODULE_BRANCH 133 134 # Remove module for reinstall if no-cache set 135 check_cache(config) 136 137 # Install module 138 self._install_module(config) 139 140 # Import module 141 module = import_module(config) 142 if module is not None: 143 loaded_modules.append({ 144 "module": module, 145 "config": config}) 146 147 return loaded_modules 148 149 def _setup_modules(self, modules): 150 """Call the setup function on the passed in modules.""" 151 for module in modules: 152 module["module"].setup(self.opsdroid) 153 154 def _install_module(self, config): 155 # pylint: disable=R0201 156 """Install a module.""" 157 logging.debug("Installing " + config["name"]) 158 159 if os.path.isdir(config["install_path"]): 160 # TODO Allow for updating or reinstalling of modules 161 logging.debug("Module " + config["name"] + 162 " already installed, skipping") 163 else: 164 if config is not None and "repo" in config: 165 git_url = config["repo"] 166 else: 167 git_url = DEFAULT_GIT_URL + config["type"] + \ 168 "-" + config["name"] + ".git" 169 170 if any(prefix in git_url for prefix in ["http", "https", "ssh"]): 171 # TODO Test if url or ssh path exists 172 # TODO Handle github authentication 173 git_clone(git_url, config["install_path"], config["branch"]) 174 else: 175 if os.path.isdir(git_url): 176 git_clone(git_url, config["install_path"], 177 config["branch"]) 178 else: 179 logging.debug("Could not find local git repo " + git_url) 180 181 if os.path.isdir(config["install_path"]): 182 logging.debug("Installed " + config["name"] + 183 " to " + config["install_path"]) 184 else: 185 logging.debug("Install of " + config["name"] + " failed ") 186 187 # Install module dependancies 188 if os.path.isfile(config["install_path"] + "/requirements.txt"): 189 pip_install_deps(config["install_path"] + "/requirements.txt") 190 ``` Path: `opsdroid/__main__.py` Content: ``` 1 """Starts opsdroid.""" 2 3 import logging 4 5 from opsdroid.loader import Loader 6 from opsdroid.core import OpsDroid 7 from opsdroid.helper import set_logging_level 8 from opsdroid.const import LOG_FILENAME 9 10 11 def main(): 12 """The main function.""" 13 logging.basicConfig(filename=LOG_FILENAME, level=logging.INFO) 14 logging.info("="*40) 15 logging.info("Stated application") 16 with OpsDroid() as opsdroid: 17 loader = Loader(opsdroid) 18 opsdroid.config = loader.load_config_file("./configuration.yaml") 19 if "logging" in opsdroid.config: 20 set_logging_level(opsdroid.config['logging']) 21 
loader.load_config(opsdroid.config) 22 opsdroid.exit() 23 24 if __name__ == "__main__": 25 main() 26 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/opsdroid/__main__.py b/opsdroid/__main__.py
--- a/opsdroid/__main__.py
+++ b/opsdroid/__main__.py
@@ -15,7 +15,11 @@
     logging.info("Stated application")
     with OpsDroid() as opsdroid:
         loader = Loader(opsdroid)
-        opsdroid.config = loader.load_config_file("./configuration.yaml")
+        opsdroid.config = loader.load_config_file([
+            "./configuration.yaml",
+            "~/.opsdroid/configuration.yaml",
+            "/etc/opsdroid/configuration.yaml"
+        ])
         if "logging" in opsdroid.config:
             set_logging_level(opsdroid.config['logging'])
         loader.load_config(opsdroid.config)
diff --git a/opsdroid/loader.py b/opsdroid/loader.py
--- a/opsdroid/loader.py
+++ b/opsdroid/loader.py
@@ -71,11 +71,19 @@
         self.opsdroid = opsdroid
         logging.debug("Loaded loader")
 
-    def load_config_file(self, config_path):
+    def load_config_file(self, config_paths):
         """Load a yaml config file from path."""
-        if not os.path.isfile(config_path):
-            self.opsdroid.critical("Config file " + config_path +
-                                   " not found", 1)
+        config_path = ""
+        for possible_path in config_paths:
+            if not os.path.isfile(possible_path):
+                logging.warning("Config file " + possible_path +
+                                " not found", 1)
+            else:
+                config_path = possible_path
+                break
+
+        if not config_path:
+            self.opsdroid.critical("No configuration files found", 1)
 
         try:
             with open(config_path, 'r') as stream:
{"golden_diff": "diff --git a/opsdroid/__main__.py b/opsdroid/__main__.py\n--- a/opsdroid/__main__.py\n+++ b/opsdroid/__main__.py\n@@ -15,7 +15,11 @@\n logging.info(\"Stated application\")\n with OpsDroid() as opsdroid:\n loader = Loader(opsdroid)\n- opsdroid.config = loader.load_config_file(\"./configuration.yaml\")\n+ opsdroid.config = loader.load_config_file([\n+ \"./configuration.yaml\",\n+ \"~/.opsdroid/configuration.yaml\",\n+ \"/etc/opsdroid/configuration.yaml\"\n+ ])\n if \"logging\" in opsdroid.config:\n set_logging_level(opsdroid.config['logging'])\n loader.load_config(opsdroid.config)\ndiff --git a/opsdroid/loader.py b/opsdroid/loader.py\n--- a/opsdroid/loader.py\n+++ b/opsdroid/loader.py\n@@ -71,11 +71,19 @@\n self.opsdroid = opsdroid\n logging.debug(\"Loaded loader\")\n \n- def load_config_file(self, config_path):\n+ def load_config_file(self, config_paths):\n \"\"\"Load a yaml config file from path.\"\"\"\n- if not os.path.isfile(config_path):\n- self.opsdroid.critical(\"Config file \" + config_path +\n- \" not found\", 1)\n+ config_path = \"\"\n+ for possible_path in config_paths:\n+ if not os.path.isfile(possible_path):\n+ logging.warning(\"Config file \" + possible_path +\n+ \" not found\", 1)\n+ else:\n+ config_path = possible_path\n+ break\n+\n+ if not config_path:\n+ self.opsdroid.critical(\"No configuration files found\", 1)\n \n try:\n with open(config_path, 'r') as stream:\n", "issue": "Config locations\nCurrently opsdroid looks for the `configuration.yaml` file in the current working directory. It should also look in `~/.opsdroid/configuration.yaml` and `/etc/opsdroid/configuration.yaml`.\n\n", "before_files": [{"content": "\"\"\"Class for loading in modules to OpsDroid.\"\"\"\n\nimport logging\nimport os\nimport shutil\nimport subprocess\nimport importlib\nimport yaml\nfrom opsdroid.const import (\n DEFAULT_GIT_URL, MODULES_DIRECTORY, DEFAULT_MODULE_BRANCH)\n\n\ndef import_module(config):\n \"\"\"Import module namespace as variable and return it.\"\"\"\n try:\n module = importlib.import_module(\n config[\"path\"] + \".\" + config[\"name\"])\n logging.debug(\"Loading \" + config[\"type\"] + \": \" + config[\"name\"])\n return module\n except ImportError as error:\n logging.error(\"Failed to load \" + config[\"type\"] +\n \" \" + config[\"name\"])\n logging.error(error)\n return None\n\n\ndef check_cache(config):\n \"\"\"Remove module if 'no-cache' set in config.\"\"\"\n if \"no-cache\" in config \\\n and config[\"no-cache\"] \\\n and os.path.isdir(config[\"install_path\"]):\n logging.debug(\"'no-cache' set, removing \" + config[\"install_path\"])\n shutil.rmtree(config[\"install_path\"])\n\n\ndef build_module_path(path_type, config):\n \"\"\"Generate the module path from name and type.\"\"\"\n if path_type == \"import\":\n return MODULES_DIRECTORY + \".\" + config[\"type\"] + \".\" + config[\"name\"]\n elif path_type == \"install\":\n return MODULES_DIRECTORY + \"/\" + config[\"type\"] + \"/\" + config[\"name\"]\n\n\ndef git_clone(git_url, install_path, branch):\n \"\"\"Clone a git repo to a location and wait for finish.\"\"\"\n process = subprocess.Popen([\"git\", \"clone\", \"-b\", branch,\n git_url, install_path], shell=False,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n process.wait()\n\n\ndef pip_install_deps(requirements_path):\n \"\"\"Pip install a requirements.txt file and wait for finish.\"\"\"\n process = subprocess.Popen([\"pip\", \"install\", \"-r\", requirements_path],\n shell=False,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n for 
output in process.communicate():\n if output != \"\":\n for line in output.splitlines():\n logging.debug(str(line).strip())\n process.wait()\n\n\nclass Loader:\n \"\"\"Class to load in config and modules.\"\"\"\n\n def __init__(self, opsdroid):\n \"\"\"Setup object with opsdroid instance.\"\"\"\n self.opsdroid = opsdroid\n logging.debug(\"Loaded loader\")\n\n def load_config_file(self, config_path):\n \"\"\"Load a yaml config file from path.\"\"\"\n if not os.path.isfile(config_path):\n self.opsdroid.critical(\"Config file \" + config_path +\n \" not found\", 1)\n\n try:\n with open(config_path, 'r') as stream:\n return yaml.load(stream)\n except yaml.YAMLError as error:\n self.opsdroid.critical(error, 1)\n except FileNotFoundError as error:\n self.opsdroid.critical(str(error), 1)\n\n def load_config(self, config):\n \"\"\"Load all module types based on config.\"\"\"\n logging.debug(\"Loading modules from config\")\n\n if 'databases' in config.keys():\n self.opsdroid.start_databases(\n self._load_modules('database', config['databases']))\n else:\n logging.warning(\"No databases in configuration\")\n\n if 'skills' in config.keys():\n self._setup_modules(\n self._load_modules('skill', config['skills'])\n )\n else:\n self.opsdroid.critical(\n \"No skills in configuration, at least 1 required\", 1)\n\n if 'connectors' in config.keys():\n self.opsdroid.start_connectors(\n self._load_modules('connector', config['connectors']))\n else:\n self.opsdroid.critical(\n \"No connectors in configuration, at least 1 required\", 1)\n\n def _load_modules(self, modules_type, modules):\n \"\"\"Install and load modules.\"\"\"\n logging.debug(\"Loading \" + modules_type + \" modules\")\n loaded_modules = []\n\n # Create modules directory if doesn't exist\n if not os.path.isdir(MODULES_DIRECTORY):\n os.makedirs(MODULES_DIRECTORY)\n\n for module_name in modules.keys():\n\n # Set up module config\n config = modules[module_name]\n config = {} if config is None else config\n config[\"name\"] = module_name\n config[\"type\"] = modules_type\n config[\"path\"] = build_module_path(\"import\", config)\n config[\"install_path\"] = build_module_path(\"install\", config)\n if \"branch\" not in config:\n config[\"branch\"] = DEFAULT_MODULE_BRANCH\n\n # Remove module for reinstall if no-cache set\n check_cache(config)\n\n # Install module\n self._install_module(config)\n\n # Import module\n module = import_module(config)\n if module is not None:\n loaded_modules.append({\n \"module\": module,\n \"config\": config})\n\n return loaded_modules\n\n def _setup_modules(self, modules):\n \"\"\"Call the setup function on the passed in modules.\"\"\"\n for module in modules:\n module[\"module\"].setup(self.opsdroid)\n\n def _install_module(self, config):\n # pylint: disable=R0201\n \"\"\"Install a module.\"\"\"\n logging.debug(\"Installing \" + config[\"name\"])\n\n if os.path.isdir(config[\"install_path\"]):\n # TODO Allow for updating or reinstalling of modules\n logging.debug(\"Module \" + config[\"name\"] +\n \" already installed, skipping\")\n else:\n if config is not None and \"repo\" in config:\n git_url = config[\"repo\"]\n else:\n git_url = DEFAULT_GIT_URL + config[\"type\"] + \\\n \"-\" + config[\"name\"] + \".git\"\n\n if any(prefix in git_url for prefix in [\"http\", \"https\", \"ssh\"]):\n # TODO Test if url or ssh path exists\n # TODO Handle github authentication\n git_clone(git_url, config[\"install_path\"], config[\"branch\"])\n else:\n if os.path.isdir(git_url):\n git_clone(git_url, config[\"install_path\"],\n 
config[\"branch\"])\n else:\n logging.debug(\"Could not find local git repo \" + git_url)\n\n if os.path.isdir(config[\"install_path\"]):\n logging.debug(\"Installed \" + config[\"name\"] +\n \" to \" + config[\"install_path\"])\n else:\n logging.debug(\"Install of \" + config[\"name\"] + \" failed \")\n\n # Install module dependancies\n if os.path.isfile(config[\"install_path\"] + \"/requirements.txt\"):\n pip_install_deps(config[\"install_path\"] + \"/requirements.txt\")\n", "path": "opsdroid/loader.py"}, {"content": "\"\"\"Starts opsdroid.\"\"\"\n\nimport logging\n\nfrom opsdroid.loader import Loader\nfrom opsdroid.core import OpsDroid\nfrom opsdroid.helper import set_logging_level\nfrom opsdroid.const import LOG_FILENAME\n\n\ndef main():\n \"\"\"The main function.\"\"\"\n logging.basicConfig(filename=LOG_FILENAME, level=logging.INFO)\n logging.info(\"=\"*40)\n logging.info(\"Stated application\")\n with OpsDroid() as opsdroid:\n loader = Loader(opsdroid)\n opsdroid.config = loader.load_config_file(\"./configuration.yaml\")\n if \"logging\" in opsdroid.config:\n set_logging_level(opsdroid.config['logging'])\n loader.load_config(opsdroid.config)\n opsdroid.exit()\n\nif __name__ == \"__main__\":\n main()\n", "path": "opsdroid/__main__.py"}], "after_files": [{"content": "\"\"\"Class for loading in modules to OpsDroid.\"\"\"\n\nimport logging\nimport os\nimport shutil\nimport subprocess\nimport importlib\nimport yaml\nfrom opsdroid.const import (\n DEFAULT_GIT_URL, MODULES_DIRECTORY, DEFAULT_MODULE_BRANCH)\n\n\ndef import_module(config):\n \"\"\"Import module namespace as variable and return it.\"\"\"\n try:\n module = importlib.import_module(\n config[\"path\"] + \".\" + config[\"name\"])\n logging.debug(\"Loading \" + config[\"type\"] + \": \" + config[\"name\"])\n return module\n except ImportError as error:\n logging.error(\"Failed to load \" + config[\"type\"] +\n \" \" + config[\"name\"])\n logging.error(error)\n return None\n\n\ndef check_cache(config):\n \"\"\"Remove module if 'no-cache' set in config.\"\"\"\n if \"no-cache\" in config \\\n and config[\"no-cache\"] \\\n and os.path.isdir(config[\"install_path\"]):\n logging.debug(\"'no-cache' set, removing \" + config[\"install_path\"])\n shutil.rmtree(config[\"install_path\"])\n\n\ndef build_module_path(path_type, config):\n \"\"\"Generate the module path from name and type.\"\"\"\n if path_type == \"import\":\n return MODULES_DIRECTORY + \".\" + config[\"type\"] + \".\" + config[\"name\"]\n elif path_type == \"install\":\n return MODULES_DIRECTORY + \"/\" + config[\"type\"] + \"/\" + config[\"name\"]\n\n\ndef git_clone(git_url, install_path, branch):\n \"\"\"Clone a git repo to a location and wait for finish.\"\"\"\n process = subprocess.Popen([\"git\", \"clone\", \"-b\", branch,\n git_url, install_path], shell=False,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n process.wait()\n\n\ndef pip_install_deps(requirements_path):\n \"\"\"Pip install a requirements.txt file and wait for finish.\"\"\"\n process = subprocess.Popen([\"pip\", \"install\", \"-r\", requirements_path],\n shell=False,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n for output in process.communicate():\n if output != \"\":\n for line in output.splitlines():\n logging.debug(str(line).strip())\n process.wait()\n\n\nclass Loader:\n \"\"\"Class to load in config and modules.\"\"\"\n\n def __init__(self, opsdroid):\n \"\"\"Setup object with opsdroid instance.\"\"\"\n self.opsdroid = opsdroid\n logging.debug(\"Loaded loader\")\n\n def 
load_config_file(self, config_paths):\n \"\"\"Load a yaml config file from path.\"\"\"\n config_path = \"\"\n for possible_path in config_paths:\n if not os.path.isfile(possible_path):\n logging.warning(\"Config file \" + possible_path +\n \" not found\", 1)\n else:\n config_path = possible_path\n break\n\n if not config_path:\n self.opsdroid.critical(\"No configuration files found\", 1)\n\n try:\n with open(config_path, 'r') as stream:\n return yaml.load(stream)\n except yaml.YAMLError as error:\n self.opsdroid.critical(error, 1)\n except FileNotFoundError as error:\n self.opsdroid.critical(str(error), 1)\n\n def load_config(self, config):\n \"\"\"Load all module types based on config.\"\"\"\n logging.debug(\"Loading modules from config\")\n\n if 'databases' in config.keys():\n self.opsdroid.start_databases(\n self._load_modules('database', config['databases']))\n else:\n logging.warning(\"No databases in configuration\")\n\n if 'skills' in config.keys():\n self._setup_modules(\n self._load_modules('skill', config['skills'])\n )\n else:\n self.opsdroid.critical(\n \"No skills in configuration, at least 1 required\", 1)\n\n if 'connectors' in config.keys():\n self.opsdroid.start_connectors(\n self._load_modules('connector', config['connectors']))\n else:\n self.opsdroid.critical(\n \"No connectors in configuration, at least 1 required\", 1)\n\n def _load_modules(self, modules_type, modules):\n \"\"\"Install and load modules.\"\"\"\n logging.debug(\"Loading \" + modules_type + \" modules\")\n loaded_modules = []\n\n # Create modules directory if doesn't exist\n if not os.path.isdir(MODULES_DIRECTORY):\n os.makedirs(MODULES_DIRECTORY)\n\n for module_name in modules.keys():\n\n # Set up module config\n config = modules[module_name]\n config = {} if config is None else config\n config[\"name\"] = module_name\n config[\"type\"] = modules_type\n config[\"path\"] = build_module_path(\"import\", config)\n config[\"install_path\"] = build_module_path(\"install\", config)\n if \"branch\" not in config:\n config[\"branch\"] = DEFAULT_MODULE_BRANCH\n\n # Remove module for reinstall if no-cache set\n check_cache(config)\n\n # Install module\n self._install_module(config)\n\n # Import module\n module = import_module(config)\n if module is not None:\n loaded_modules.append({\n \"module\": module,\n \"config\": config})\n\n return loaded_modules\n\n def _setup_modules(self, modules):\n \"\"\"Call the setup function on the passed in modules.\"\"\"\n for module in modules:\n module[\"module\"].setup(self.opsdroid)\n\n def _install_module(self, config):\n # pylint: disable=R0201\n \"\"\"Install a module.\"\"\"\n logging.debug(\"Installing \" + config[\"name\"])\n\n if os.path.isdir(config[\"install_path\"]):\n # TODO Allow for updating or reinstalling of modules\n logging.debug(\"Module \" + config[\"name\"] +\n \" already installed, skipping\")\n else:\n if config is not None and \"repo\" in config:\n git_url = config[\"repo\"]\n else:\n git_url = DEFAULT_GIT_URL + config[\"type\"] + \\\n \"-\" + config[\"name\"] + \".git\"\n\n if any(prefix in git_url for prefix in [\"http\", \"https\", \"ssh\"]):\n # TODO Test if url or ssh path exists\n # TODO Handle github authentication\n git_clone(git_url, config[\"install_path\"], config[\"branch\"])\n else:\n if os.path.isdir(git_url):\n git_clone(git_url, config[\"install_path\"],\n config[\"branch\"])\n else:\n logging.debug(\"Could not find local git repo \" + git_url)\n\n if os.path.isdir(config[\"install_path\"]):\n logging.debug(\"Installed \" + 
config[\"name\"] +\n \" to \" + config[\"install_path\"])\n else:\n logging.debug(\"Install of \" + config[\"name\"] + \" failed \")\n\n # Install module dependancies\n if os.path.isfile(config[\"install_path\"] + \"/requirements.txt\"):\n pip_install_deps(config[\"install_path\"] + \"/requirements.txt\")\n", "path": "opsdroid/loader.py"}, {"content": "\"\"\"Starts opsdroid.\"\"\"\n\nimport logging\n\nfrom opsdroid.loader import Loader\nfrom opsdroid.core import OpsDroid\nfrom opsdroid.helper import set_logging_level\nfrom opsdroid.const import LOG_FILENAME\n\n\ndef main():\n \"\"\"The main function.\"\"\"\n logging.basicConfig(filename=LOG_FILENAME, level=logging.INFO)\n logging.info(\"=\"*40)\n logging.info(\"Stated application\")\n with OpsDroid() as opsdroid:\n loader = Loader(opsdroid)\n opsdroid.config = loader.load_config_file([\n \"./configuration.yaml\",\n \"~/.opsdroid/configuration.yaml\",\n \"/etc/opsdroid/configuration.yaml\"\n ])\n if \"logging\" in opsdroid.config:\n set_logging_level(opsdroid.config['logging'])\n loader.load_config(opsdroid.config)\n opsdroid.exit()\n\nif __name__ == \"__main__\":\n main()\n", "path": "opsdroid/__main__.py"}]}
2,426
414
gh_patches_debug_12217
rasdani/github-patches
git_diff
pallets__click-1872
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- `HelpFormatter.write_text()` is not using all the available line width `HelpFormatter.write_text()` uses the function `wrap_text(text, width, initial_indent, ...)` internally. This function expects `width` to be the line width **including** the eventual indentation. `HelpFormatter.write_text()` gets this wrong and passes `self.width - self.current_indent` instead of just `self.width`. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/click/formatting.py` Content: ``` 1 import typing as t 2 from contextlib import contextmanager 3 from gettext import gettext as _ 4 5 from ._compat import term_len 6 from .parser import split_opt 7 8 # Can force a width. This is used by the test system 9 FORCED_WIDTH: t.Optional[int] = None 10 11 12 def measure_table(rows: t.Iterable[t.Tuple[str, str]]) -> t.Tuple[int, ...]: 13 widths: t.Dict[int, int] = {} 14 15 for row in rows: 16 for idx, col in enumerate(row): 17 widths[idx] = max(widths.get(idx, 0), term_len(col)) 18 19 return tuple(y for x, y in sorted(widths.items())) 20 21 22 def iter_rows( 23 rows: t.Iterable[t.Tuple[str, str]], col_count: int 24 ) -> t.Iterator[t.Tuple[str, ...]]: 25 for row in rows: 26 yield row + ("",) * (col_count - len(row)) 27 28 29 def wrap_text( 30 text: str, 31 width: int = 78, 32 initial_indent: str = "", 33 subsequent_indent: str = "", 34 preserve_paragraphs: bool = False, 35 ) -> str: 36 """A helper function that intelligently wraps text. By default, it 37 assumes that it operates on a single paragraph of text but if the 38 `preserve_paragraphs` parameter is provided it will intelligently 39 handle paragraphs (defined by two empty lines). 40 41 If paragraphs are handled, a paragraph can be prefixed with an empty 42 line containing the ``\\b`` character (``\\x08``) to indicate that 43 no rewrapping should happen in that block. 44 45 :param text: the text that should be rewrapped. 46 :param width: the maximum width for the text. 47 :param initial_indent: the initial indent that should be placed on the 48 first line as a string. 49 :param subsequent_indent: the indent string that should be placed on 50 each consecutive line. 51 :param preserve_paragraphs: if this flag is set then the wrapping will 52 intelligently handle paragraphs. 
53 """ 54 from ._textwrap import TextWrapper 55 56 text = text.expandtabs() 57 wrapper = TextWrapper( 58 width, 59 initial_indent=initial_indent, 60 subsequent_indent=subsequent_indent, 61 replace_whitespace=False, 62 ) 63 if not preserve_paragraphs: 64 return wrapper.fill(text) 65 66 p: t.List[t.Tuple[int, bool, str]] = [] 67 buf: t.List[str] = [] 68 indent = None 69 70 def _flush_par() -> None: 71 if not buf: 72 return 73 if buf[0].strip() == "\b": 74 p.append((indent or 0, True, "\n".join(buf[1:]))) 75 else: 76 p.append((indent or 0, False, " ".join(buf))) 77 del buf[:] 78 79 for line in text.splitlines(): 80 if not line: 81 _flush_par() 82 indent = None 83 else: 84 if indent is None: 85 orig_len = term_len(line) 86 line = line.lstrip() 87 indent = orig_len - term_len(line) 88 buf.append(line) 89 _flush_par() 90 91 rv = [] 92 for indent, raw, text in p: 93 with wrapper.extra_indent(" " * indent): 94 if raw: 95 rv.append(wrapper.indent_only(text)) 96 else: 97 rv.append(wrapper.fill(text)) 98 99 return "\n\n".join(rv) 100 101 102 class HelpFormatter: 103 """This class helps with formatting text-based help pages. It's 104 usually just needed for very special internal cases, but it's also 105 exposed so that developers can write their own fancy outputs. 106 107 At present, it always writes into memory. 108 109 :param indent_increment: the additional increment for each level. 110 :param width: the width for the text. This defaults to the terminal 111 width clamped to a maximum of 78. 112 """ 113 114 def __init__( 115 self, 116 indent_increment: int = 2, 117 width: t.Optional[int] = None, 118 max_width: t.Optional[int] = None, 119 ) -> None: 120 import shutil 121 122 self.indent_increment = indent_increment 123 if max_width is None: 124 max_width = 80 125 if width is None: 126 width = FORCED_WIDTH 127 if width is None: 128 width = max(min(shutil.get_terminal_size().columns, max_width) - 2, 50) 129 self.width = width 130 self.current_indent = 0 131 self.buffer: t.List[str] = [] 132 133 def write(self, string: str) -> None: 134 """Writes a unicode string into the internal buffer.""" 135 self.buffer.append(string) 136 137 def indent(self) -> None: 138 """Increases the indentation.""" 139 self.current_indent += self.indent_increment 140 141 def dedent(self) -> None: 142 """Decreases the indentation.""" 143 self.current_indent -= self.indent_increment 144 145 def write_usage( 146 self, prog: str, args: str = "", prefix: t.Optional[str] = None 147 ) -> None: 148 """Writes a usage line into the buffer. 149 150 :param prog: the program name. 151 :param args: whitespace separated list of arguments. 152 :param prefix: The prefix for the first line. Defaults to 153 ``"Usage: "``. 154 """ 155 if prefix is None: 156 prefix = f"{_('Usage:')} " 157 158 usage_prefix = f"{prefix:>{self.current_indent}}{prog} " 159 text_width = self.width - self.current_indent 160 161 if text_width >= (term_len(usage_prefix) + 20): 162 # The arguments will fit to the right of the prefix. 163 indent = " " * term_len(usage_prefix) 164 self.write( 165 wrap_text( 166 args, 167 text_width, 168 initial_indent=usage_prefix, 169 subsequent_indent=indent, 170 ) 171 ) 172 else: 173 # The prefix is too long, put the arguments on the next line. 
174 self.write(usage_prefix) 175 self.write("\n") 176 indent = " " * (max(self.current_indent, term_len(prefix)) + 4) 177 self.write( 178 wrap_text( 179 args, text_width, initial_indent=indent, subsequent_indent=indent 180 ) 181 ) 182 183 self.write("\n") 184 185 def write_heading(self, heading: str) -> None: 186 """Writes a heading into the buffer.""" 187 self.write(f"{'':>{self.current_indent}}{heading}:\n") 188 189 def write_paragraph(self) -> None: 190 """Writes a paragraph into the buffer.""" 191 if self.buffer: 192 self.write("\n") 193 194 def write_text(self, text: str) -> None: 195 """Writes re-indented text into the buffer. This rewraps and 196 preserves paragraphs. 197 """ 198 text_width = max(self.width - self.current_indent, 11) 199 indent = " " * self.current_indent 200 self.write( 201 wrap_text( 202 text, 203 text_width, 204 initial_indent=indent, 205 subsequent_indent=indent, 206 preserve_paragraphs=True, 207 ) 208 ) 209 self.write("\n") 210 211 def write_dl( 212 self, 213 rows: t.Sequence[t.Tuple[str, str]], 214 col_max: int = 30, 215 col_spacing: int = 2, 216 ) -> None: 217 """Writes a definition list into the buffer. This is how options 218 and commands are usually formatted. 219 220 :param rows: a list of two item tuples for the terms and values. 221 :param col_max: the maximum width of the first column. 222 :param col_spacing: the number of spaces between the first and 223 second column. 224 """ 225 rows = list(rows) 226 widths = measure_table(rows) 227 if len(widths) != 2: 228 raise TypeError("Expected two columns for definition list") 229 230 first_col = min(widths[0], col_max) + col_spacing 231 232 for first, second in iter_rows(rows, len(widths)): 233 self.write(f"{'':>{self.current_indent}}{first}") 234 if not second: 235 self.write("\n") 236 continue 237 if term_len(first) <= first_col - col_spacing: 238 self.write(" " * (first_col - term_len(first))) 239 else: 240 self.write("\n") 241 self.write(" " * (first_col + self.current_indent)) 242 243 text_width = max(self.width - first_col - 2, 10) 244 wrapped_text = wrap_text(second, text_width, preserve_paragraphs=True) 245 lines = wrapped_text.splitlines() 246 247 if lines: 248 self.write(f"{lines[0]}\n") 249 250 for line in lines[1:]: 251 self.write(f"{'':>{first_col + self.current_indent}}{line}\n") 252 else: 253 self.write("\n") 254 255 @contextmanager 256 def section(self, name: str) -> t.Iterator[None]: 257 """Helpful context manager that writes a paragraph, a heading, 258 and the indents. 259 260 :param name: the section name that is written as heading. 261 """ 262 self.write_paragraph() 263 self.write_heading(name) 264 self.indent() 265 try: 266 yield 267 finally: 268 self.dedent() 269 270 @contextmanager 271 def indentation(self) -> t.Iterator[None]: 272 """A context manager that increases the indentation.""" 273 self.indent() 274 try: 275 yield 276 finally: 277 self.dedent() 278 279 def getvalue(self) -> str: 280 """Returns the buffer contents.""" 281 return "".join(self.buffer) 282 283 284 def join_options(options: t.Sequence[str]) -> t.Tuple[str, bool]: 285 """Given a list of option strings this joins them in the most appropriate 286 way and returns them in the form ``(formatted_string, 287 any_prefix_is_slash)`` where the second item in the tuple is a flag that 288 indicates if any of the option prefixes was a slash. 
289 """ 290 rv = [] 291 any_prefix_is_slash = False 292 293 for opt in options: 294 prefix = split_opt(opt)[0] 295 296 if prefix == "/": 297 any_prefix_is_slash = True 298 299 rv.append((len(prefix), opt)) 300 301 rv.sort(key=lambda x: x[0]) 302 return ", ".join(x[1] for x in rv), any_prefix_is_slash 303 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/click/formatting.py b/src/click/formatting.py --- a/src/click/formatting.py +++ b/src/click/formatting.py @@ -195,12 +195,11 @@ """Writes re-indented text into the buffer. This rewraps and preserves paragraphs. """ - text_width = max(self.width - self.current_indent, 11) indent = " " * self.current_indent self.write( wrap_text( text, - text_width, + self.width, initial_indent=indent, subsequent_indent=indent, preserve_paragraphs=True,
{"golden_diff": "diff --git a/src/click/formatting.py b/src/click/formatting.py\n--- a/src/click/formatting.py\n+++ b/src/click/formatting.py\n@@ -195,12 +195,11 @@\n \"\"\"Writes re-indented text into the buffer. This rewraps and\n preserves paragraphs.\n \"\"\"\n- text_width = max(self.width - self.current_indent, 11)\n indent = \" \" * self.current_indent\n self.write(\n wrap_text(\n text,\n- text_width,\n+ self.width,\n initial_indent=indent,\n subsequent_indent=indent,\n preserve_paragraphs=True,\n", "issue": "`HelpFormatter.write_text()` is not using all the available line width\n`HelpFormatter.write_text()` uses the function `wrap_text(text, width, initial_indent, ...)` internally. This function expects `width` to be the line width **including** the eventual indentation. `HelpFormatter.write_text()` gets this wrong and passes `self.width - self.current_indent` instead of just `self.width`.\r\n\r\n\n", "before_files": [{"content": "import typing as t\nfrom contextlib import contextmanager\nfrom gettext import gettext as _\n\nfrom ._compat import term_len\nfrom .parser import split_opt\n\n# Can force a width. This is used by the test system\nFORCED_WIDTH: t.Optional[int] = None\n\n\ndef measure_table(rows: t.Iterable[t.Tuple[str, str]]) -> t.Tuple[int, ...]:\n widths: t.Dict[int, int] = {}\n\n for row in rows:\n for idx, col in enumerate(row):\n widths[idx] = max(widths.get(idx, 0), term_len(col))\n\n return tuple(y for x, y in sorted(widths.items()))\n\n\ndef iter_rows(\n rows: t.Iterable[t.Tuple[str, str]], col_count: int\n) -> t.Iterator[t.Tuple[str, ...]]:\n for row in rows:\n yield row + (\"\",) * (col_count - len(row))\n\n\ndef wrap_text(\n text: str,\n width: int = 78,\n initial_indent: str = \"\",\n subsequent_indent: str = \"\",\n preserve_paragraphs: bool = False,\n) -> str:\n \"\"\"A helper function that intelligently wraps text. 
By default, it\n assumes that it operates on a single paragraph of text but if the\n `preserve_paragraphs` parameter is provided it will intelligently\n handle paragraphs (defined by two empty lines).\n\n If paragraphs are handled, a paragraph can be prefixed with an empty\n line containing the ``\\\\b`` character (``\\\\x08``) to indicate that\n no rewrapping should happen in that block.\n\n :param text: the text that should be rewrapped.\n :param width: the maximum width for the text.\n :param initial_indent: the initial indent that should be placed on the\n first line as a string.\n :param subsequent_indent: the indent string that should be placed on\n each consecutive line.\n :param preserve_paragraphs: if this flag is set then the wrapping will\n intelligently handle paragraphs.\n \"\"\"\n from ._textwrap import TextWrapper\n\n text = text.expandtabs()\n wrapper = TextWrapper(\n width,\n initial_indent=initial_indent,\n subsequent_indent=subsequent_indent,\n replace_whitespace=False,\n )\n if not preserve_paragraphs:\n return wrapper.fill(text)\n\n p: t.List[t.Tuple[int, bool, str]] = []\n buf: t.List[str] = []\n indent = None\n\n def _flush_par() -> None:\n if not buf:\n return\n if buf[0].strip() == \"\\b\":\n p.append((indent or 0, True, \"\\n\".join(buf[1:])))\n else:\n p.append((indent or 0, False, \" \".join(buf)))\n del buf[:]\n\n for line in text.splitlines():\n if not line:\n _flush_par()\n indent = None\n else:\n if indent is None:\n orig_len = term_len(line)\n line = line.lstrip()\n indent = orig_len - term_len(line)\n buf.append(line)\n _flush_par()\n\n rv = []\n for indent, raw, text in p:\n with wrapper.extra_indent(\" \" * indent):\n if raw:\n rv.append(wrapper.indent_only(text))\n else:\n rv.append(wrapper.fill(text))\n\n return \"\\n\\n\".join(rv)\n\n\nclass HelpFormatter:\n \"\"\"This class helps with formatting text-based help pages. It's\n usually just needed for very special internal cases, but it's also\n exposed so that developers can write their own fancy outputs.\n\n At present, it always writes into memory.\n\n :param indent_increment: the additional increment for each level.\n :param width: the width for the text. This defaults to the terminal\n width clamped to a maximum of 78.\n \"\"\"\n\n def __init__(\n self,\n indent_increment: int = 2,\n width: t.Optional[int] = None,\n max_width: t.Optional[int] = None,\n ) -> None:\n import shutil\n\n self.indent_increment = indent_increment\n if max_width is None:\n max_width = 80\n if width is None:\n width = FORCED_WIDTH\n if width is None:\n width = max(min(shutil.get_terminal_size().columns, max_width) - 2, 50)\n self.width = width\n self.current_indent = 0\n self.buffer: t.List[str] = []\n\n def write(self, string: str) -> None:\n \"\"\"Writes a unicode string into the internal buffer.\"\"\"\n self.buffer.append(string)\n\n def indent(self) -> None:\n \"\"\"Increases the indentation.\"\"\"\n self.current_indent += self.indent_increment\n\n def dedent(self) -> None:\n \"\"\"Decreases the indentation.\"\"\"\n self.current_indent -= self.indent_increment\n\n def write_usage(\n self, prog: str, args: str = \"\", prefix: t.Optional[str] = None\n ) -> None:\n \"\"\"Writes a usage line into the buffer.\n\n :param prog: the program name.\n :param args: whitespace separated list of arguments.\n :param prefix: The prefix for the first line. 
Defaults to\n ``\"Usage: \"``.\n \"\"\"\n if prefix is None:\n prefix = f\"{_('Usage:')} \"\n\n usage_prefix = f\"{prefix:>{self.current_indent}}{prog} \"\n text_width = self.width - self.current_indent\n\n if text_width >= (term_len(usage_prefix) + 20):\n # The arguments will fit to the right of the prefix.\n indent = \" \" * term_len(usage_prefix)\n self.write(\n wrap_text(\n args,\n text_width,\n initial_indent=usage_prefix,\n subsequent_indent=indent,\n )\n )\n else:\n # The prefix is too long, put the arguments on the next line.\n self.write(usage_prefix)\n self.write(\"\\n\")\n indent = \" \" * (max(self.current_indent, term_len(prefix)) + 4)\n self.write(\n wrap_text(\n args, text_width, initial_indent=indent, subsequent_indent=indent\n )\n )\n\n self.write(\"\\n\")\n\n def write_heading(self, heading: str) -> None:\n \"\"\"Writes a heading into the buffer.\"\"\"\n self.write(f\"{'':>{self.current_indent}}{heading}:\\n\")\n\n def write_paragraph(self) -> None:\n \"\"\"Writes a paragraph into the buffer.\"\"\"\n if self.buffer:\n self.write(\"\\n\")\n\n def write_text(self, text: str) -> None:\n \"\"\"Writes re-indented text into the buffer. This rewraps and\n preserves paragraphs.\n \"\"\"\n text_width = max(self.width - self.current_indent, 11)\n indent = \" \" * self.current_indent\n self.write(\n wrap_text(\n text,\n text_width,\n initial_indent=indent,\n subsequent_indent=indent,\n preserve_paragraphs=True,\n )\n )\n self.write(\"\\n\")\n\n def write_dl(\n self,\n rows: t.Sequence[t.Tuple[str, str]],\n col_max: int = 30,\n col_spacing: int = 2,\n ) -> None:\n \"\"\"Writes a definition list into the buffer. This is how options\n and commands are usually formatted.\n\n :param rows: a list of two item tuples for the terms and values.\n :param col_max: the maximum width of the first column.\n :param col_spacing: the number of spaces between the first and\n second column.\n \"\"\"\n rows = list(rows)\n widths = measure_table(rows)\n if len(widths) != 2:\n raise TypeError(\"Expected two columns for definition list\")\n\n first_col = min(widths[0], col_max) + col_spacing\n\n for first, second in iter_rows(rows, len(widths)):\n self.write(f\"{'':>{self.current_indent}}{first}\")\n if not second:\n self.write(\"\\n\")\n continue\n if term_len(first) <= first_col - col_spacing:\n self.write(\" \" * (first_col - term_len(first)))\n else:\n self.write(\"\\n\")\n self.write(\" \" * (first_col + self.current_indent))\n\n text_width = max(self.width - first_col - 2, 10)\n wrapped_text = wrap_text(second, text_width, preserve_paragraphs=True)\n lines = wrapped_text.splitlines()\n\n if lines:\n self.write(f\"{lines[0]}\\n\")\n\n for line in lines[1:]:\n self.write(f\"{'':>{first_col + self.current_indent}}{line}\\n\")\n else:\n self.write(\"\\n\")\n\n @contextmanager\n def section(self, name: str) -> t.Iterator[None]:\n \"\"\"Helpful context manager that writes a paragraph, a heading,\n and the indents.\n\n :param name: the section name that is written as heading.\n \"\"\"\n self.write_paragraph()\n self.write_heading(name)\n self.indent()\n try:\n yield\n finally:\n self.dedent()\n\n @contextmanager\n def indentation(self) -> t.Iterator[None]:\n \"\"\"A context manager that increases the indentation.\"\"\"\n self.indent()\n try:\n yield\n finally:\n self.dedent()\n\n def getvalue(self) -> str:\n \"\"\"Returns the buffer contents.\"\"\"\n return \"\".join(self.buffer)\n\n\ndef join_options(options: t.Sequence[str]) -> t.Tuple[str, bool]:\n \"\"\"Given a list of option strings this joins them in 
the most appropriate\n way and returns them in the form ``(formatted_string,\n any_prefix_is_slash)`` where the second item in the tuple is a flag that\n indicates if any of the option prefixes was a slash.\n \"\"\"\n rv = []\n any_prefix_is_slash = False\n\n for opt in options:\n prefix = split_opt(opt)[0]\n\n if prefix == \"/\":\n any_prefix_is_slash = True\n\n rv.append((len(prefix), opt))\n\n rv.sort(key=lambda x: x[0])\n return \", \".join(x[1] for x in rv), any_prefix_is_slash\n", "path": "src/click/formatting.py"}], "after_files": [{"content": "import typing as t\nfrom contextlib import contextmanager\nfrom gettext import gettext as _\n\nfrom ._compat import term_len\nfrom .parser import split_opt\n\n# Can force a width. This is used by the test system\nFORCED_WIDTH: t.Optional[int] = None\n\n\ndef measure_table(rows: t.Iterable[t.Tuple[str, str]]) -> t.Tuple[int, ...]:\n widths: t.Dict[int, int] = {}\n\n for row in rows:\n for idx, col in enumerate(row):\n widths[idx] = max(widths.get(idx, 0), term_len(col))\n\n return tuple(y for x, y in sorted(widths.items()))\n\n\ndef iter_rows(\n rows: t.Iterable[t.Tuple[str, str]], col_count: int\n) -> t.Iterator[t.Tuple[str, ...]]:\n for row in rows:\n yield row + (\"\",) * (col_count - len(row))\n\n\ndef wrap_text(\n text: str,\n width: int = 78,\n initial_indent: str = \"\",\n subsequent_indent: str = \"\",\n preserve_paragraphs: bool = False,\n) -> str:\n \"\"\"A helper function that intelligently wraps text. By default, it\n assumes that it operates on a single paragraph of text but if the\n `preserve_paragraphs` parameter is provided it will intelligently\n handle paragraphs (defined by two empty lines).\n\n If paragraphs are handled, a paragraph can be prefixed with an empty\n line containing the ``\\\\b`` character (``\\\\x08``) to indicate that\n no rewrapping should happen in that block.\n\n :param text: the text that should be rewrapped.\n :param width: the maximum width for the text.\n :param initial_indent: the initial indent that should be placed on the\n first line as a string.\n :param subsequent_indent: the indent string that should be placed on\n each consecutive line.\n :param preserve_paragraphs: if this flag is set then the wrapping will\n intelligently handle paragraphs.\n \"\"\"\n from ._textwrap import TextWrapper\n\n text = text.expandtabs()\n wrapper = TextWrapper(\n width,\n initial_indent=initial_indent,\n subsequent_indent=subsequent_indent,\n replace_whitespace=False,\n )\n if not preserve_paragraphs:\n return wrapper.fill(text)\n\n p: t.List[t.Tuple[int, bool, str]] = []\n buf: t.List[str] = []\n indent = None\n\n def _flush_par() -> None:\n if not buf:\n return\n if buf[0].strip() == \"\\b\":\n p.append((indent or 0, True, \"\\n\".join(buf[1:])))\n else:\n p.append((indent or 0, False, \" \".join(buf)))\n del buf[:]\n\n for line in text.splitlines():\n if not line:\n _flush_par()\n indent = None\n else:\n if indent is None:\n orig_len = term_len(line)\n line = line.lstrip()\n indent = orig_len - term_len(line)\n buf.append(line)\n _flush_par()\n\n rv = []\n for indent, raw, text in p:\n with wrapper.extra_indent(\" \" * indent):\n if raw:\n rv.append(wrapper.indent_only(text))\n else:\n rv.append(wrapper.fill(text))\n\n return \"\\n\\n\".join(rv)\n\n\nclass HelpFormatter:\n \"\"\"This class helps with formatting text-based help pages. 
It's\n usually just needed for very special internal cases, but it's also\n exposed so that developers can write their own fancy outputs.\n\n At present, it always writes into memory.\n\n :param indent_increment: the additional increment for each level.\n :param width: the width for the text. This defaults to the terminal\n width clamped to a maximum of 78.\n \"\"\"\n\n def __init__(\n self,\n indent_increment: int = 2,\n width: t.Optional[int] = None,\n max_width: t.Optional[int] = None,\n ) -> None:\n import shutil\n\n self.indent_increment = indent_increment\n if max_width is None:\n max_width = 80\n if width is None:\n width = FORCED_WIDTH\n if width is None:\n width = max(min(shutil.get_terminal_size().columns, max_width) - 2, 50)\n self.width = width\n self.current_indent = 0\n self.buffer: t.List[str] = []\n\n def write(self, string: str) -> None:\n \"\"\"Writes a unicode string into the internal buffer.\"\"\"\n self.buffer.append(string)\n\n def indent(self) -> None:\n \"\"\"Increases the indentation.\"\"\"\n self.current_indent += self.indent_increment\n\n def dedent(self) -> None:\n \"\"\"Decreases the indentation.\"\"\"\n self.current_indent -= self.indent_increment\n\n def write_usage(\n self, prog: str, args: str = \"\", prefix: t.Optional[str] = None\n ) -> None:\n \"\"\"Writes a usage line into the buffer.\n\n :param prog: the program name.\n :param args: whitespace separated list of arguments.\n :param prefix: The prefix for the first line. Defaults to\n ``\"Usage: \"``.\n \"\"\"\n if prefix is None:\n prefix = f\"{_('Usage:')} \"\n\n usage_prefix = f\"{prefix:>{self.current_indent}}{prog} \"\n text_width = self.width - self.current_indent\n\n if text_width >= (term_len(usage_prefix) + 20):\n # The arguments will fit to the right of the prefix.\n indent = \" \" * term_len(usage_prefix)\n self.write(\n wrap_text(\n args,\n text_width,\n initial_indent=usage_prefix,\n subsequent_indent=indent,\n )\n )\n else:\n # The prefix is too long, put the arguments on the next line.\n self.write(usage_prefix)\n self.write(\"\\n\")\n indent = \" \" * (max(self.current_indent, term_len(prefix)) + 4)\n self.write(\n wrap_text(\n args, text_width, initial_indent=indent, subsequent_indent=indent\n )\n )\n\n self.write(\"\\n\")\n\n def write_heading(self, heading: str) -> None:\n \"\"\"Writes a heading into the buffer.\"\"\"\n self.write(f\"{'':>{self.current_indent}}{heading}:\\n\")\n\n def write_paragraph(self) -> None:\n \"\"\"Writes a paragraph into the buffer.\"\"\"\n if self.buffer:\n self.write(\"\\n\")\n\n def write_text(self, text: str) -> None:\n \"\"\"Writes re-indented text into the buffer. This rewraps and\n preserves paragraphs.\n \"\"\"\n indent = \" \" * self.current_indent\n self.write(\n wrap_text(\n text,\n self.width,\n initial_indent=indent,\n subsequent_indent=indent,\n preserve_paragraphs=True,\n )\n )\n self.write(\"\\n\")\n\n def write_dl(\n self,\n rows: t.Sequence[t.Tuple[str, str]],\n col_max: int = 30,\n col_spacing: int = 2,\n ) -> None:\n \"\"\"Writes a definition list into the buffer. 
This is how options\n and commands are usually formatted.\n\n :param rows: a list of two item tuples for the terms and values.\n :param col_max: the maximum width of the first column.\n :param col_spacing: the number of spaces between the first and\n second column.\n \"\"\"\n rows = list(rows)\n widths = measure_table(rows)\n if len(widths) != 2:\n raise TypeError(\"Expected two columns for definition list\")\n\n first_col = min(widths[0], col_max) + col_spacing\n\n for first, second in iter_rows(rows, len(widths)):\n self.write(f\"{'':>{self.current_indent}}{first}\")\n if not second:\n self.write(\"\\n\")\n continue\n if term_len(first) <= first_col - col_spacing:\n self.write(\" \" * (first_col - term_len(first)))\n else:\n self.write(\"\\n\")\n self.write(\" \" * (first_col + self.current_indent))\n\n text_width = max(self.width - first_col - 2, 10)\n wrapped_text = wrap_text(second, text_width, preserve_paragraphs=True)\n lines = wrapped_text.splitlines()\n\n if lines:\n self.write(f\"{lines[0]}\\n\")\n\n for line in lines[1:]:\n self.write(f\"{'':>{first_col + self.current_indent}}{line}\\n\")\n else:\n self.write(\"\\n\")\n\n @contextmanager\n def section(self, name: str) -> t.Iterator[None]:\n \"\"\"Helpful context manager that writes a paragraph, a heading,\n and the indents.\n\n :param name: the section name that is written as heading.\n \"\"\"\n self.write_paragraph()\n self.write_heading(name)\n self.indent()\n try:\n yield\n finally:\n self.dedent()\n\n @contextmanager\n def indentation(self) -> t.Iterator[None]:\n \"\"\"A context manager that increases the indentation.\"\"\"\n self.indent()\n try:\n yield\n finally:\n self.dedent()\n\n def getvalue(self) -> str:\n \"\"\"Returns the buffer contents.\"\"\"\n return \"\".join(self.buffer)\n\n\ndef join_options(options: t.Sequence[str]) -> t.Tuple[str, bool]:\n \"\"\"Given a list of option strings this joins them in the most appropriate\n way and returns them in the form ``(formatted_string,\n any_prefix_is_slash)`` where the second item in the tuple is a flag that\n indicates if any of the option prefixes was a slash.\n \"\"\"\n rv = []\n any_prefix_is_slash = False\n\n for opt in options:\n prefix = split_opt(opt)[0]\n\n if prefix == \"/\":\n any_prefix_is_slash = True\n\n rv.append((len(prefix), opt))\n\n rv.sort(key=lambda x: x[0])\n return \", \".join(x[1] for x in rv), any_prefix_is_slash\n", "path": "src/click/formatting.py"}]}
3,370
142
gh_patches_debug_16146
rasdani/github-patches
git_diff
Nitrate__Nitrate-166
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Installing files under /etc yields SandboxViolation in virtualenv In one of my environments installing a newer version of Nitrate yields: ``` remote: error: Setup script exited with error: SandboxViolation: open('/etc/httpd/conf.d/nitrate-httpd.conf', 'wb') {} ``` this is due to the change made in ff4ecc75 adding the `data_files` attribute to `setup.py`. I propose installing these files under /etc via the RPM packages, not via pip. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 3 import os 4 from setuptools import setup, find_packages 5 6 import tcms 7 8 9 def get_install_requires(): 10 requires = [] 11 links = [] 12 with open('requirements/base.txt', 'r') as f: 13 for line in f: 14 dep_line = line.strip() 15 parts = dep_line.split('#egg=') 16 if len(parts) == 2: 17 links.append(dep_line) 18 requires.append(parts[1]) 19 else: 20 requires.append(dep_line) 21 return requires, links 22 23 install_requires, dependency_links = get_install_requires() 24 25 26 def get_long_description(): 27 with open('README.rst', 'r') as f: 28 return f.read() 29 30 31 setup( 32 name='nitrate', 33 version=tcms.__version__, 34 description='Test Case Management System', 35 long_description=get_long_description(), 36 author='Nitrate Team', 37 maintainer='Chenxiong Qi', 38 maintainer_email='[email protected]', 39 url='https://github.com/Nitrate/Nitrate/', 40 license='GPLv2+', 41 keywords='test case', 42 43 install_requires=install_requires, 44 dependency_links=dependency_links, 45 46 packages=find_packages(), 47 include_package_data=True, 48 data_files=[ 49 ('/etc/httpd/conf.d/', ['contrib/conf/nitrate-httpd.conf']), 50 ('/etc/init.d', ['contrib/script/celeryd']), 51 ], 52 53 classifiers=[ 54 'Intended Audience :: Developers', 55 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)', 56 'Programming Language :: Python :: 2', 57 'Programming Language :: Python :: 2.7', 58 'Topic :: Software Development :: Quality Assurance', 59 'Topic :: Software Development :: Testing', 60 ], 61 ) 62 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -1,6 +1,5 @@ # -*- coding: utf-8 -*- -import os from setuptools import setup, find_packages import tcms @@ -20,6 +19,7 @@ requires.append(dep_line) return requires, links + install_requires, dependency_links = get_install_requires() @@ -45,10 +45,6 @@ packages=find_packages(), include_package_data=True, - data_files=[ - ('/etc/httpd/conf.d/', ['contrib/conf/nitrate-httpd.conf']), - ('/etc/init.d', ['contrib/script/celeryd']), - ], classifiers=[ 'Intended Audience :: Developers',
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,6 +1,5 @@\n # -*- coding: utf-8 -*-\n \n-import os\n from setuptools import setup, find_packages\n \n import tcms\n@@ -20,6 +19,7 @@\n requires.append(dep_line)\n return requires, links\n \n+\n install_requires, dependency_links = get_install_requires()\n \n \n@@ -45,10 +45,6 @@\n \n packages=find_packages(),\n include_package_data=True,\n- data_files=[\n- ('/etc/httpd/conf.d/', ['contrib/conf/nitrate-httpd.conf']),\n- ('/etc/init.d', ['contrib/script/celeryd']),\n- ],\n \n classifiers=[\n 'Intended Audience :: Developers',\n", "issue": "Installing files under /etc yields SandboxViolation in virtualenv\nIn one of my environments installing a newer version of Nitrate yields:\r\n```\r\nremote: error: Setup script exited with error: SandboxViolation: open('/etc/httpd/conf.d/nitrate-httpd.conf', 'wb') {}\r\n```\r\n\r\nthis is due to the change made in ff4ecc75 adding the `data_files` attribute to `setup.py`. I propose installing these files under /etc via the RPM packages, not via pip. \n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\nimport os\nfrom setuptools import setup, find_packages\n\nimport tcms\n\n\ndef get_install_requires():\n requires = []\n links = []\n with open('requirements/base.txt', 'r') as f:\n for line in f:\n dep_line = line.strip()\n parts = dep_line.split('#egg=')\n if len(parts) == 2:\n links.append(dep_line)\n requires.append(parts[1])\n else:\n requires.append(dep_line)\n return requires, links\n\ninstall_requires, dependency_links = get_install_requires()\n\n\ndef get_long_description():\n with open('README.rst', 'r') as f:\n return f.read()\n\n\nsetup(\n name='nitrate',\n version=tcms.__version__,\n description='Test Case Management System',\n long_description=get_long_description(),\n author='Nitrate Team',\n maintainer='Chenxiong Qi',\n maintainer_email='[email protected]',\n url='https://github.com/Nitrate/Nitrate/',\n license='GPLv2+',\n keywords='test case',\n\n install_requires=install_requires,\n dependency_links=dependency_links,\n\n packages=find_packages(),\n include_package_data=True,\n data_files=[\n ('/etc/httpd/conf.d/', ['contrib/conf/nitrate-httpd.conf']),\n ('/etc/init.d', ['contrib/script/celeryd']),\n ],\n\n classifiers=[\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Software Development :: Testing',\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\nimport tcms\n\n\ndef get_install_requires():\n requires = []\n links = []\n with open('requirements/base.txt', 'r') as f:\n for line in f:\n dep_line = line.strip()\n parts = dep_line.split('#egg=')\n if len(parts) == 2:\n links.append(dep_line)\n requires.append(parts[1])\n else:\n requires.append(dep_line)\n return requires, links\n\n\ninstall_requires, dependency_links = get_install_requires()\n\n\ndef get_long_description():\n with open('README.rst', 'r') as f:\n return f.read()\n\n\nsetup(\n name='nitrate',\n version=tcms.__version__,\n description='Test Case Management System',\n long_description=get_long_description(),\n author='Nitrate Team',\n maintainer='Chenxiong Qi',\n maintainer_email='[email protected]',\n url='https://github.com/Nitrate/Nitrate/',\n license='GPLv2+',\n keywords='test 
case',\n\n install_requires=install_requires,\n dependency_links=dependency_links,\n\n packages=find_packages(),\n include_package_data=True,\n\n classifiers=[\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Topic :: Software Development :: Quality Assurance',\n 'Topic :: Software Development :: Testing',\n ],\n)\n", "path": "setup.py"}]}
860
171
gh_patches_debug_5746
rasdani/github-patches
git_diff
pyqtgraph__pyqtgraph-1173
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- an issue on ViewBox.mapToView Hi, I do love pyqtgraph, it really save my life, but there is a little problem in it. At first start of pg.GraphicsWindow, it seems that ViewBox return (0, 0) size by ViewBox.rect() method, which lead to ViewBox.updateMatrix get 0 size of bound and so that functions.invertQTransform fail. one of the ways to work around it is mannually resize the Window by setFixedSize() or adjustSize(). For example, I post the cross hair example of pyqtgraph below for illustration. ### Short description <!-- This should summarize the issue. --> GraphicsWindow.addPlot doesn't initialize correct ViewBox size, which lead to exception when use ViewBox.mapToView. My tricky is just resize GraphicsWindow by adjustSize() method, and it does work. ### Code to reproduce ``` """ Demonstrates some customized mouse interaction by drawing a crosshair that follows the mouse. """ #import initExample ## Add path to library (just for examples; you do not need this) import numpy as np import pyqtgraph as pg from pyqtgraph.Qt import QtGui, QtCore from pyqtgraph.Point import Point #generate layout app = QtGui.QApplication([]) #app = QtGui.QApplication() #win = pg.GraphicsLayoutWidget(show=True) #win = pg.GraphicsLayoutWidget(show=True, parent=None) win = pg.GraphicsWindow() # Using GraphicsWindow so can I run the file on PyCharm win.setWindowTitle('pyqtgraph example: crosshair') label = pg.LabelItem(justify='right') win.addItem(label) p1 = win.addPlot(row=1, col=0) p2 = win.addPlot(row=2, col=0) region = pg.LinearRegionItem() region.setZValue(10) #Add the LinearRegionItem to the ViewBox, but tell the ViewBox to exclude this #item when doing auto-range calculations. p2.addItem(region, ignoreBounds=True) #pg.dbg() ''' Set whether automatic range uses only visible data when determining the range to show. 
''' #p1.setAutoVisible(x=False, y=False) #create numpy arrays #make the numbers large to show that the xrange shows data from 10000 to all the way 0 data1 = 10000 + 15000 * pg.gaussianFilter(np.random.random(size=10000), 10) + 3000 * np.random.random(size=10000) data2 = 15000 + 15000 * pg.gaussianFilter(np.random.random(size=10000), 10) + 3000 * np.random.random(size=10000) p1.plot(data1, pen="r") p1.plot(data2, pen="g") p2.plot(data1, pen="w") '''*********The Issue*********''' #win.adjustSize() # my trick, please decomment the line and the program would work out print(p1.getViewBox().size()) # ->PyQt5.QtCore.QSizeF() print(p1.getViewBox().viewPixelSize()) # exception raised due to empty viewBox size '''*********The Issue*********''' def update(): region.setZValue(10) minX, maxX = region.getRegion() p1.setXRange(minX, maxX, padding=0) region.sigRegionChanged.connect(update) def updateRegion(window, viewRange): rgn = viewRange[0] region.setRegion(rgn) p1.sigRangeChanged.connect(updateRegion) region.setRegion([1000, 2000]) #cross hair vLine = pg.InfiniteLine(angle=90, movable=False) hLine = pg.InfiniteLine(angle=0, movable=False) p1.addItem(vLine, ignoreBounds=True) p1.addItem(hLine, ignoreBounds=True) vb = p1.vb def mouseMoved(evt): #print(evt) pos = evt[0] #using signal proxy turns original arguments into a tuple print(pos) if p1.sceneBoundingRect().contains(pos): mousePoint = vb.mapSceneToView(pos) print(mousePoint) index = int(mousePoint.x()) if index > 0 and index < len(data1): label.setText("<span style='font-size: 12pt'>x=%0.1f, <span style='color: red'>y1=%0.1f</span>, <span style='color: green'>y2=%0.1f</span>" % (mousePoint.x(), data1[index], data2[index])) vLine.setPos(mousePoint.x()) hLine.setPos(mousePoint.y()) proxy = pg.SignalProxy(p1.scene().sigMouseMoved, rateLimit=60, slot=mouseMoved) #p1.scene().sigMouseMoved.connect(mouseMoved) ##Start Qt event loop unless running in interactive mode or using pyside. 
if __name__ == '__main__': import sys if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'): QtGui.QApplication.instance().exec_() ``` ### Expected behavior p1.getViewBox().viewPixelSize() should work ### Real behavior but it raises an exception ``` Traceback (most recent call last): File "C:/Users/Lee King Batchelor/Desktop/PythonExcel4Securities/pyqtgraph/CrossHair.py", line 86, in <module> print(p1.getViewBox().viewPixelSize()) # exception raised due to empty viewBox size File "C:\Anaconda3\envs\pyqtgraph\lib\site-packages\pyqtgraph\graphicsItems\ViewBox\ViewBox.py", line 1176, in viewPixelSize o = self.mapToView(Point(0,0)) File "C:\Anaconda3\envs\pyqtgraph\lib\site-packages\pyqtgraph\graphicsItems\ViewBox\ViewBox.py", line 1142, in mapToView m = fn.invertQTransform(self.childTransform()) File "C:\Anaconda3\envs\pyqtgraph\lib\site-packages\pyqtgraph\functions.py", line 2199, in invertQTransform inv = numpy.linalg.inv(arr) File "<__array_function__ internals>", line 6, in inv File "C:\Anaconda3\envs\pyqtgraph\lib\site-packages\numpy\linalg\linalg.py", line 547, in inv ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj) File "C:\Anaconda3\envs\pyqtgraph\lib\site-packages\numpy\linalg\linalg.py", line 97, in _raise_linalgerror_singular raise LinAlgError("Singular matrix") numpy.linalg.LinAlgError: Singular matrix ``` ### Tested environment(s) * PyQtGraph version: 0.1 and 0.11 dev0 * Qt Python binding:PyQt5.9.2 and PySide2 5.13.1 * Python version: 3.7 * NumPy version: 1.8.1 * Operating system: Win 10 * Installation method: pip and system package ### Additional context --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pyqtgraph/graphicsItems/GraphicsLayout.py` Content: ``` 1 from ..Qt import QtGui, QtCore 2 from .. import functions as fn 3 from .GraphicsWidget import GraphicsWidget 4 ## Must be imported at the end to avoid cyclic-dependency hell: 5 from .ViewBox import ViewBox 6 from .PlotItem import PlotItem 7 from .LabelItem import LabelItem 8 9 __all__ = ['GraphicsLayout'] 10 class GraphicsLayout(GraphicsWidget): 11 """ 12 Used for laying out GraphicsWidgets in a grid. 13 This is usually created automatically as part of a :class:`GraphicsWindow <pyqtgraph.GraphicsWindow>` or :class:`GraphicsLayoutWidget <pyqtgraph.GraphicsLayoutWidget>`. 14 """ 15 16 17 def __init__(self, parent=None, border=None): 18 GraphicsWidget.__init__(self, parent) 19 if border is True: 20 border = (100,100,100) 21 self.border = border 22 self.layout = QtGui.QGraphicsGridLayout() 23 self.setLayout(self.layout) 24 self.items = {} ## item: [(row, col), (row, col), ...] lists all cells occupied by the item 25 self.rows = {} ## row: {col1: item1, col2: item2, ...} maps cell location to item 26 self.itemBorders = {} ## {item1: QtGui.QGraphicsRectItem, ...} border rects 27 self.currentRow = 0 28 self.currentCol = 0 29 self.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)) 30 31 #def resizeEvent(self, ev): 32 #ret = GraphicsWidget.resizeEvent(self, ev) 33 #print self.pos(), self.mapToDevice(self.rect().topLeft()) 34 #return ret 35 36 def setBorder(self, *args, **kwds): 37 """ 38 Set the pen used to draw border between cells. 39 40 See :func:`mkPen <pyqtgraph.mkPen>` for arguments. 
41 """ 42 self.border = fn.mkPen(*args, **kwds) 43 44 for borderRect in self.itemBorders.values(): 45 borderRect.setPen(self.border) 46 47 def nextRow(self): 48 """Advance to next row for automatic item placement""" 49 self.currentRow += 1 50 self.currentCol = -1 51 self.nextColumn() 52 53 def nextColumn(self): 54 """Advance to next available column 55 (generally only for internal use--called by addItem)""" 56 self.currentCol += 1 57 while self.getItem(self.currentRow, self.currentCol) is not None: 58 self.currentCol += 1 59 60 def nextCol(self, *args, **kargs): 61 """Alias of nextColumn""" 62 return self.nextColumn(*args, **kargs) 63 64 def addPlot(self, row=None, col=None, rowspan=1, colspan=1, **kargs): 65 """ 66 Create a PlotItem and place it in the next available cell (or in the cell specified) 67 All extra keyword arguments are passed to :func:`PlotItem.__init__ <pyqtgraph.PlotItem.__init__>` 68 Returns the created item. 69 """ 70 plot = PlotItem(**kargs) 71 self.addItem(plot, row, col, rowspan, colspan) 72 return plot 73 74 def addViewBox(self, row=None, col=None, rowspan=1, colspan=1, **kargs): 75 """ 76 Create a ViewBox and place it in the next available cell (or in the cell specified) 77 All extra keyword arguments are passed to :func:`ViewBox.__init__ <pyqtgraph.ViewBox.__init__>` 78 Returns the created item. 79 """ 80 vb = ViewBox(**kargs) 81 self.addItem(vb, row, col, rowspan, colspan) 82 return vb 83 84 def addLabel(self, text=' ', row=None, col=None, rowspan=1, colspan=1, **kargs): 85 """ 86 Create a LabelItem with *text* and place it in the next available cell (or in the cell specified) 87 All extra keyword arguments are passed to :func:`LabelItem.__init__ <pyqtgraph.LabelItem.__init__>` 88 Returns the created item. 89 90 To create a vertical label, use *angle* = -90. 91 """ 92 text = LabelItem(text, **kargs) 93 self.addItem(text, row, col, rowspan, colspan) 94 return text 95 96 def addLayout(self, row=None, col=None, rowspan=1, colspan=1, **kargs): 97 """ 98 Create an empty GraphicsLayout and place it in the next available cell (or in the cell specified) 99 All extra keyword arguments are passed to :func:`GraphicsLayout.__init__ <pyqtgraph.GraphicsLayout.__init__>` 100 Returns the created item. 101 """ 102 layout = GraphicsLayout(**kargs) 103 self.addItem(layout, row, col, rowspan, colspan) 104 return layout 105 106 def addItem(self, item, row=None, col=None, rowspan=1, colspan=1): 107 """ 108 Add an item to the layout and place it in the next available cell (or in the cell specified). 109 The item must be an instance of a QGraphicsWidget subclass. 110 """ 111 if row is None: 112 row = self.currentRow 113 if col is None: 114 col = self.currentCol 115 116 self.items[item] = [] 117 for i in range(rowspan): 118 for j in range(colspan): 119 row2 = row + i 120 col2 = col + j 121 if row2 not in self.rows: 122 self.rows[row2] = {} 123 self.rows[row2][col2] = item 124 self.items[item].append((row2, col2)) 125 126 borderRect = QtGui.QGraphicsRectItem() 127 128 borderRect.setParentItem(self) 129 borderRect.setZValue(1e3) 130 borderRect.setPen(fn.mkPen(self.border)) 131 132 self.itemBorders[item] = borderRect 133 134 item.geometryChanged.connect(self._updateItemBorder) 135 136 self.layout.addItem(item, row, col, rowspan, colspan) 137 self.nextColumn() 138 139 def getItem(self, row, col): 140 """Return the item in (*row*, *col*). 
If the cell is empty, return None.""" 141 return self.rows.get(row, {}).get(col, None) 142 143 def boundingRect(self): 144 return self.rect() 145 146 def itemIndex(self, item): 147 for i in range(self.layout.count()): 148 if self.layout.itemAt(i).graphicsItem() is item: 149 return i 150 raise Exception("Could not determine index of item " + str(item)) 151 152 def removeItem(self, item): 153 """Remove *item* from the layout.""" 154 ind = self.itemIndex(item) 155 self.layout.removeAt(ind) 156 self.scene().removeItem(item) 157 158 for r, c in self.items[item]: 159 del self.rows[r][c] 160 del self.items[item] 161 162 item.geometryChanged.disconnect(self._updateItemBorder) 163 del self.itemBorders[item] 164 165 self.update() 166 167 def clear(self): 168 for i in list(self.items.keys()): 169 self.removeItem(i) 170 self.currentRow = 0 171 self.currentCol = 0 172 173 def setContentsMargins(self, *args): 174 # Wrap calls to layout. This should happen automatically, but there 175 # seems to be a Qt bug: 176 # http://stackoverflow.com/questions/27092164/margins-in-pyqtgraphs-graphicslayout 177 self.layout.setContentsMargins(*args) 178 179 def setSpacing(self, *args): 180 self.layout.setSpacing(*args) 181 182 def _updateItemBorder(self): 183 if self.border is None: 184 return 185 186 item = self.sender() 187 if item is None: 188 return 189 190 r = item.mapRectToParent(item.boundingRect()) 191 self.itemBorders[item].setRect(r) 192 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pyqtgraph/graphicsItems/GraphicsLayout.py b/pyqtgraph/graphicsItems/GraphicsLayout.py --- a/pyqtgraph/graphicsItems/GraphicsLayout.py +++ b/pyqtgraph/graphicsItems/GraphicsLayout.py @@ -134,6 +134,9 @@ item.geometryChanged.connect(self._updateItemBorder) self.layout.addItem(item, row, col, rowspan, colspan) + self.layout.activate() # Update layout, recalculating bounds. + # Allows some PyQtGraph features to also work without Qt event loop. + self.nextColumn() def getItem(self, row, col):
{"golden_diff": "diff --git a/pyqtgraph/graphicsItems/GraphicsLayout.py b/pyqtgraph/graphicsItems/GraphicsLayout.py\n--- a/pyqtgraph/graphicsItems/GraphicsLayout.py\n+++ b/pyqtgraph/graphicsItems/GraphicsLayout.py\n@@ -134,6 +134,9 @@\n item.geometryChanged.connect(self._updateItemBorder)\n \n self.layout.addItem(item, row, col, rowspan, colspan)\n+ self.layout.activate() # Update layout, recalculating bounds.\n+ # Allows some PyQtGraph features to also work without Qt event loop.\n+ \n self.nextColumn()\n \n def getItem(self, row, col):\n", "issue": "an issue on ViewBox.mapToView\nHi, I do love pyqtgraph, it really save my life, but there is a little problem in it. \r\n\r\nAt first start of pg.GraphicsWindow, it seems that ViewBox return (0, 0) size by ViewBox.rect() method, which lead to ViewBox.updateMatrix get 0 size of bound and so that functions.invertQTransform fail.\r\n\r\none of the ways to work around it is mannually resize the Window by setFixedSize() or adjustSize().\r\n\r\nFor example, I post the cross hair example of pyqtgraph below for illustration. \r\n### Short description\r\n<!-- This should summarize the issue. -->\r\nGraphicsWindow.addPlot doesn't initialize correct ViewBox size, which lead to exception when use ViewBox.mapToView.\r\nMy tricky is just resize GraphicsWindow by adjustSize() method, and it does work.\r\n\r\n### Code to reproduce\r\n\r\n```\r\n\"\"\"\r\nDemonstrates some customized mouse interaction by drawing a crosshair that follows\r\nthe mouse.\r\n\"\"\"\r\n\r\n#import initExample ## Add path to library (just for examples; you do not need this)\r\nimport numpy as np\r\nimport pyqtgraph as pg\r\nfrom pyqtgraph.Qt import QtGui, QtCore\r\nfrom pyqtgraph.Point import Point\r\n\r\n\r\n\r\n#generate layout\r\napp = QtGui.QApplication([])\r\n#app = QtGui.QApplication()\r\n#win = pg.GraphicsLayoutWidget(show=True)\r\n#win = pg.GraphicsLayoutWidget(show=True, parent=None)\r\n\r\nwin = pg.GraphicsWindow() # Using GraphicsWindow so can I run the file on PyCharm\r\nwin.setWindowTitle('pyqtgraph example: crosshair')\r\n\r\nlabel = pg.LabelItem(justify='right')\r\nwin.addItem(label)\r\n\r\np1 = win.addPlot(row=1, col=0)\r\np2 = win.addPlot(row=2, col=0)\r\n\r\nregion = pg.LinearRegionItem()\r\nregion.setZValue(10)\r\n#Add the LinearRegionItem to the ViewBox, but tell the ViewBox to exclude this\r\n#item when doing auto-range calculations.\r\n\r\np2.addItem(region, ignoreBounds=True)\r\n\r\n#pg.dbg()\r\n'''\r\nSet whether automatic range uses only visible data when determining the range to show.\r\n'''\r\n#p1.setAutoVisible(x=False, y=False)\r\n\r\n\r\n#create numpy arrays\r\n#make the numbers large to show that the xrange shows data from 10000 to all the way 0\r\ndata1 = 10000 + 15000 * pg.gaussianFilter(np.random.random(size=10000), 10) + 3000 * np.random.random(size=10000)\r\ndata2 = 15000 + 15000 * pg.gaussianFilter(np.random.random(size=10000), 10) + 3000 * np.random.random(size=10000)\r\n\r\np1.plot(data1, pen=\"r\")\r\np1.plot(data2, pen=\"g\")\r\n\r\np2.plot(data1, pen=\"w\")\r\n\r\n'''*********The Issue*********'''\r\n#win.adjustSize() # my trick, please decomment the line and the program would work out\r\n\r\nprint(p1.getViewBox().size()) # ->PyQt5.QtCore.QSizeF()\r\nprint(p1.getViewBox().viewPixelSize()) # exception raised due to empty viewBox size\r\n'''*********The Issue*********'''\r\n\r\ndef update():\r\n region.setZValue(10)\r\n minX, maxX = region.getRegion()\r\n p1.setXRange(minX, maxX, 
padding=0)\r\n\r\n\r\nregion.sigRegionChanged.connect(update)\r\n\r\ndef updateRegion(window, viewRange):\r\n rgn = viewRange[0]\r\n region.setRegion(rgn)\r\n\r\n\r\np1.sigRangeChanged.connect(updateRegion)\r\n\r\n\r\n\r\nregion.setRegion([1000, 2000])\r\n\r\n#cross hair\r\nvLine = pg.InfiniteLine(angle=90, movable=False)\r\nhLine = pg.InfiniteLine(angle=0, movable=False)\r\np1.addItem(vLine, ignoreBounds=True)\r\np1.addItem(hLine, ignoreBounds=True)\r\n\r\n\r\nvb = p1.vb\r\n\r\n\r\n\r\n\r\ndef mouseMoved(evt):\r\n #print(evt)\r\n\r\n pos = evt[0] #using signal proxy turns original arguments into a tuple\r\n print(pos)\r\n\r\n if p1.sceneBoundingRect().contains(pos):\r\n mousePoint = vb.mapSceneToView(pos)\r\n print(mousePoint)\r\n index = int(mousePoint.x())\r\n if index > 0 and index < len(data1):\r\n label.setText(\"<span style='font-size: 12pt'>x=%0.1f, <span style='color: red'>y1=%0.1f</span>, <span style='color: green'>y2=%0.1f</span>\" % (mousePoint.x(), data1[index], data2[index]))\r\n vLine.setPos(mousePoint.x())\r\n hLine.setPos(mousePoint.y())\r\n\r\n\r\n\r\nproxy = pg.SignalProxy(p1.scene().sigMouseMoved, rateLimit=60, slot=mouseMoved)\r\n#p1.scene().sigMouseMoved.connect(mouseMoved)\r\n\r\n##Start Qt event loop unless running in interactive mode or using pyside.\r\nif __name__ == '__main__':\r\n import sys\r\n if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):\r\n QtGui.QApplication.instance().exec_()\r\n```\r\n\r\n### Expected behavior\r\np1.getViewBox().viewPixelSize() should work\r\n\r\n### Real behavior\r\nbut it raises an exception\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"C:/Users/Lee King Batchelor/Desktop/PythonExcel4Securities/pyqtgraph/CrossHair.py\", line 86, in <module>\r\n print(p1.getViewBox().viewPixelSize()) # exception raised due to empty viewBox size\r\n File \"C:\\Anaconda3\\envs\\pyqtgraph\\lib\\site-packages\\pyqtgraph\\graphicsItems\\ViewBox\\ViewBox.py\", line 1176, in viewPixelSize\r\n o = self.mapToView(Point(0,0))\r\n File \"C:\\Anaconda3\\envs\\pyqtgraph\\lib\\site-packages\\pyqtgraph\\graphicsItems\\ViewBox\\ViewBox.py\", line 1142, in mapToView\r\n m = fn.invertQTransform(self.childTransform())\r\n File \"C:\\Anaconda3\\envs\\pyqtgraph\\lib\\site-packages\\pyqtgraph\\functions.py\", line 2199, in invertQTransform\r\n inv = numpy.linalg.inv(arr)\r\n File \"<__array_function__ internals>\", line 6, in inv\r\n File \"C:\\Anaconda3\\envs\\pyqtgraph\\lib\\site-packages\\numpy\\linalg\\linalg.py\", line 547, in inv\r\n ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj)\r\n File \"C:\\Anaconda3\\envs\\pyqtgraph\\lib\\site-packages\\numpy\\linalg\\linalg.py\", line 97, in _raise_linalgerror_singular\r\n raise LinAlgError(\"Singular matrix\")\r\nnumpy.linalg.LinAlgError: Singular matrix\r\n```\r\n\r\n### Tested environment(s)\r\n\r\n * PyQtGraph version: 0.1 and 0.11 dev0\r\n * Qt Python binding:PyQt5.9.2 and PySide2 5.13.1\r\n * Python version: 3.7\r\n * NumPy version: 1.8.1\r\n * Operating system: Win 10\r\n * Installation method: pip and system package\r\n### Additional context\r\n\n", "before_files": [{"content": "from ..Qt import QtGui, QtCore\nfrom .. 
import functions as fn\nfrom .GraphicsWidget import GraphicsWidget\n## Must be imported at the end to avoid cyclic-dependency hell:\nfrom .ViewBox import ViewBox\nfrom .PlotItem import PlotItem\nfrom .LabelItem import LabelItem\n\n__all__ = ['GraphicsLayout']\nclass GraphicsLayout(GraphicsWidget):\n \"\"\"\n Used for laying out GraphicsWidgets in a grid.\n This is usually created automatically as part of a :class:`GraphicsWindow <pyqtgraph.GraphicsWindow>` or :class:`GraphicsLayoutWidget <pyqtgraph.GraphicsLayoutWidget>`.\n \"\"\"\n\n\n def __init__(self, parent=None, border=None):\n GraphicsWidget.__init__(self, parent)\n if border is True:\n border = (100,100,100)\n self.border = border\n self.layout = QtGui.QGraphicsGridLayout()\n self.setLayout(self.layout)\n self.items = {} ## item: [(row, col), (row, col), ...] lists all cells occupied by the item\n self.rows = {} ## row: {col1: item1, col2: item2, ...} maps cell location to item\n self.itemBorders = {} ## {item1: QtGui.QGraphicsRectItem, ...} border rects\n self.currentRow = 0\n self.currentCol = 0\n self.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding))\n \n #def resizeEvent(self, ev):\n #ret = GraphicsWidget.resizeEvent(self, ev)\n #print self.pos(), self.mapToDevice(self.rect().topLeft())\n #return ret\n\n def setBorder(self, *args, **kwds):\n \"\"\"\n Set the pen used to draw border between cells.\n \n See :func:`mkPen <pyqtgraph.mkPen>` for arguments. \n \"\"\"\n self.border = fn.mkPen(*args, **kwds)\n\n for borderRect in self.itemBorders.values():\n borderRect.setPen(self.border)\n\n def nextRow(self):\n \"\"\"Advance to next row for automatic item placement\"\"\"\n self.currentRow += 1\n self.currentCol = -1\n self.nextColumn()\n \n def nextColumn(self):\n \"\"\"Advance to next available column\n (generally only for internal use--called by addItem)\"\"\"\n self.currentCol += 1\n while self.getItem(self.currentRow, self.currentCol) is not None:\n self.currentCol += 1\n \n def nextCol(self, *args, **kargs):\n \"\"\"Alias of nextColumn\"\"\"\n return self.nextColumn(*args, **kargs)\n \n def addPlot(self, row=None, col=None, rowspan=1, colspan=1, **kargs):\n \"\"\"\n Create a PlotItem and place it in the next available cell (or in the cell specified)\n All extra keyword arguments are passed to :func:`PlotItem.__init__ <pyqtgraph.PlotItem.__init__>`\n Returns the created item.\n \"\"\"\n plot = PlotItem(**kargs)\n self.addItem(plot, row, col, rowspan, colspan)\n return plot\n \n def addViewBox(self, row=None, col=None, rowspan=1, colspan=1, **kargs):\n \"\"\"\n Create a ViewBox and place it in the next available cell (or in the cell specified)\n All extra keyword arguments are passed to :func:`ViewBox.__init__ <pyqtgraph.ViewBox.__init__>`\n Returns the created item.\n \"\"\"\n vb = ViewBox(**kargs)\n self.addItem(vb, row, col, rowspan, colspan)\n return vb\n \n def addLabel(self, text=' ', row=None, col=None, rowspan=1, colspan=1, **kargs):\n \"\"\"\n Create a LabelItem with *text* and place it in the next available cell (or in the cell specified)\n All extra keyword arguments are passed to :func:`LabelItem.__init__ <pyqtgraph.LabelItem.__init__>`\n Returns the created item.\n \n To create a vertical label, use *angle* = -90.\n \"\"\"\n text = LabelItem(text, **kargs)\n self.addItem(text, row, col, rowspan, colspan)\n return text\n \n def addLayout(self, row=None, col=None, rowspan=1, colspan=1, **kargs):\n \"\"\"\n Create an empty GraphicsLayout and place it in the next available cell (or 
in the cell specified)\n All extra keyword arguments are passed to :func:`GraphicsLayout.__init__ <pyqtgraph.GraphicsLayout.__init__>`\n Returns the created item.\n \"\"\"\n layout = GraphicsLayout(**kargs)\n self.addItem(layout, row, col, rowspan, colspan)\n return layout\n \n def addItem(self, item, row=None, col=None, rowspan=1, colspan=1):\n \"\"\"\n Add an item to the layout and place it in the next available cell (or in the cell specified).\n The item must be an instance of a QGraphicsWidget subclass.\n \"\"\"\n if row is None:\n row = self.currentRow\n if col is None:\n col = self.currentCol\n \n self.items[item] = []\n for i in range(rowspan):\n for j in range(colspan):\n row2 = row + i\n col2 = col + j\n if row2 not in self.rows:\n self.rows[row2] = {}\n self.rows[row2][col2] = item\n self.items[item].append((row2, col2))\n\n borderRect = QtGui.QGraphicsRectItem()\n\n borderRect.setParentItem(self)\n borderRect.setZValue(1e3)\n borderRect.setPen(fn.mkPen(self.border))\n\n self.itemBorders[item] = borderRect\n\n item.geometryChanged.connect(self._updateItemBorder)\n\n self.layout.addItem(item, row, col, rowspan, colspan)\n self.nextColumn()\n\n def getItem(self, row, col):\n \"\"\"Return the item in (*row*, *col*). If the cell is empty, return None.\"\"\"\n return self.rows.get(row, {}).get(col, None)\n\n def boundingRect(self):\n return self.rect()\n\n def itemIndex(self, item):\n for i in range(self.layout.count()):\n if self.layout.itemAt(i).graphicsItem() is item:\n return i\n raise Exception(\"Could not determine index of item \" + str(item))\n \n def removeItem(self, item):\n \"\"\"Remove *item* from the layout.\"\"\"\n ind = self.itemIndex(item)\n self.layout.removeAt(ind)\n self.scene().removeItem(item)\n \n for r, c in self.items[item]:\n del self.rows[r][c]\n del self.items[item]\n\n item.geometryChanged.disconnect(self._updateItemBorder)\n del self.itemBorders[item]\n\n self.update()\n \n def clear(self):\n for i in list(self.items.keys()):\n self.removeItem(i)\n self.currentRow = 0\n self.currentCol = 0\n\n def setContentsMargins(self, *args):\n # Wrap calls to layout. This should happen automatically, but there\n # seems to be a Qt bug:\n # http://stackoverflow.com/questions/27092164/margins-in-pyqtgraphs-graphicslayout\n self.layout.setContentsMargins(*args)\n\n def setSpacing(self, *args):\n self.layout.setSpacing(*args)\n\n def _updateItemBorder(self):\n if self.border is None:\n return\n\n item = self.sender()\n if item is None:\n return\n\n r = item.mapRectToParent(item.boundingRect())\n self.itemBorders[item].setRect(r)\n", "path": "pyqtgraph/graphicsItems/GraphicsLayout.py"}], "after_files": [{"content": "from ..Qt import QtGui, QtCore\nfrom .. import functions as fn\nfrom .GraphicsWidget import GraphicsWidget\n## Must be imported at the end to avoid cyclic-dependency hell:\nfrom .ViewBox import ViewBox\nfrom .PlotItem import PlotItem\nfrom .LabelItem import LabelItem\n\n__all__ = ['GraphicsLayout']\nclass GraphicsLayout(GraphicsWidget):\n \"\"\"\n Used for laying out GraphicsWidgets in a grid.\n This is usually created automatically as part of a :class:`GraphicsWindow <pyqtgraph.GraphicsWindow>` or :class:`GraphicsLayoutWidget <pyqtgraph.GraphicsLayoutWidget>`.\n \"\"\"\n\n\n def __init__(self, parent=None, border=None):\n GraphicsWidget.__init__(self, parent)\n if border is True:\n border = (100,100,100)\n self.border = border\n self.layout = QtGui.QGraphicsGridLayout()\n self.setLayout(self.layout)\n self.items = {} ## item: [(row, col), (row, col), ...] 
lists all cells occupied by the item\n self.rows = {} ## row: {col1: item1, col2: item2, ...} maps cell location to item\n self.itemBorders = {} ## {item1: QtGui.QGraphicsRectItem, ...} border rects\n self.currentRow = 0\n self.currentCol = 0\n self.setSizePolicy(QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding))\n \n #def resizeEvent(self, ev):\n #ret = GraphicsWidget.resizeEvent(self, ev)\n #print self.pos(), self.mapToDevice(self.rect().topLeft())\n #return ret\n\n def setBorder(self, *args, **kwds):\n \"\"\"\n Set the pen used to draw border between cells.\n \n See :func:`mkPen <pyqtgraph.mkPen>` for arguments. \n \"\"\"\n self.border = fn.mkPen(*args, **kwds)\n\n for borderRect in self.itemBorders.values():\n borderRect.setPen(self.border)\n\n def nextRow(self):\n \"\"\"Advance to next row for automatic item placement\"\"\"\n self.currentRow += 1\n self.currentCol = -1\n self.nextColumn()\n \n def nextColumn(self):\n \"\"\"Advance to next available column\n (generally only for internal use--called by addItem)\"\"\"\n self.currentCol += 1\n while self.getItem(self.currentRow, self.currentCol) is not None:\n self.currentCol += 1\n \n def nextCol(self, *args, **kargs):\n \"\"\"Alias of nextColumn\"\"\"\n return self.nextColumn(*args, **kargs)\n \n def addPlot(self, row=None, col=None, rowspan=1, colspan=1, **kargs):\n \"\"\"\n Create a PlotItem and place it in the next available cell (or in the cell specified)\n All extra keyword arguments are passed to :func:`PlotItem.__init__ <pyqtgraph.PlotItem.__init__>`\n Returns the created item.\n \"\"\"\n plot = PlotItem(**kargs)\n self.addItem(plot, row, col, rowspan, colspan)\n return plot\n \n def addViewBox(self, row=None, col=None, rowspan=1, colspan=1, **kargs):\n \"\"\"\n Create a ViewBox and place it in the next available cell (or in the cell specified)\n All extra keyword arguments are passed to :func:`ViewBox.__init__ <pyqtgraph.ViewBox.__init__>`\n Returns the created item.\n \"\"\"\n vb = ViewBox(**kargs)\n self.addItem(vb, row, col, rowspan, colspan)\n return vb\n \n def addLabel(self, text=' ', row=None, col=None, rowspan=1, colspan=1, **kargs):\n \"\"\"\n Create a LabelItem with *text* and place it in the next available cell (or in the cell specified)\n All extra keyword arguments are passed to :func:`LabelItem.__init__ <pyqtgraph.LabelItem.__init__>`\n Returns the created item.\n \n To create a vertical label, use *angle* = -90.\n \"\"\"\n text = LabelItem(text, **kargs)\n self.addItem(text, row, col, rowspan, colspan)\n return text\n \n def addLayout(self, row=None, col=None, rowspan=1, colspan=1, **kargs):\n \"\"\"\n Create an empty GraphicsLayout and place it in the next available cell (or in the cell specified)\n All extra keyword arguments are passed to :func:`GraphicsLayout.__init__ <pyqtgraph.GraphicsLayout.__init__>`\n Returns the created item.\n \"\"\"\n layout = GraphicsLayout(**kargs)\n self.addItem(layout, row, col, rowspan, colspan)\n return layout\n \n def addItem(self, item, row=None, col=None, rowspan=1, colspan=1):\n \"\"\"\n Add an item to the layout and place it in the next available cell (or in the cell specified).\n The item must be an instance of a QGraphicsWidget subclass.\n \"\"\"\n if row is None:\n row = self.currentRow\n if col is None:\n col = self.currentCol\n \n self.items[item] = []\n for i in range(rowspan):\n for j in range(colspan):\n row2 = row + i\n col2 = col + j\n if row2 not in self.rows:\n self.rows[row2] = {}\n self.rows[row2][col2] = item\n 
self.items[item].append((row2, col2))\n\n borderRect = QtGui.QGraphicsRectItem()\n\n borderRect.setParentItem(self)\n borderRect.setZValue(1e3)\n borderRect.setPen(fn.mkPen(self.border))\n\n self.itemBorders[item] = borderRect\n\n item.geometryChanged.connect(self._updateItemBorder)\n\n self.layout.addItem(item, row, col, rowspan, colspan)\n self.layout.activate() # Update layout, recalculating bounds.\n # Allows some PyQtGraph features to also work without Qt event loop.\n \n self.nextColumn()\n\n def getItem(self, row, col):\n \"\"\"Return the item in (*row*, *col*). If the cell is empty, return None.\"\"\"\n return self.rows.get(row, {}).get(col, None)\n\n def boundingRect(self):\n return self.rect()\n\n def itemIndex(self, item):\n for i in range(self.layout.count()):\n if self.layout.itemAt(i).graphicsItem() is item:\n return i\n raise Exception(\"Could not determine index of item \" + str(item))\n \n def removeItem(self, item):\n \"\"\"Remove *item* from the layout.\"\"\"\n ind = self.itemIndex(item)\n self.layout.removeAt(ind)\n self.scene().removeItem(item)\n \n for r, c in self.items[item]:\n del self.rows[r][c]\n del self.items[item]\n\n item.geometryChanged.disconnect(self._updateItemBorder)\n del self.itemBorders[item]\n\n self.update()\n \n def clear(self):\n for i in list(self.items.keys()):\n self.removeItem(i)\n self.currentRow = 0\n self.currentCol = 0\n\n def setContentsMargins(self, *args):\n # Wrap calls to layout. This should happen automatically, but there\n # seems to be a Qt bug:\n # http://stackoverflow.com/questions/27092164/margins-in-pyqtgraphs-graphicslayout\n self.layout.setContentsMargins(*args)\n\n def setSpacing(self, *args):\n self.layout.setSpacing(*args)\n\n def _updateItemBorder(self):\n if self.border is None:\n return\n\n item = self.sender()\n if item is None:\n return\n\n r = item.mapRectToParent(item.boundingRect())\n self.itemBorders[item].setRect(r)\n", "path": "pyqtgraph/graphicsItems/GraphicsLayout.py"}]}
num_tokens: 4,012
num_tokens_diff: 136
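For context on the pyqtgraph record that ends above: the patched GraphicsLayout keeps a per-item border rectangle in sync with each item's geometry. The following is a minimal usage sketch, not part of the record; GraphicsLayoutWidget, mkQApp, pg.exec and the .ci attribute are standard pyqtgraph conventions assumed here, while setBorder, addPlot, addLabel, nextRow and removeItem are the GraphicsLayout methods shown in the record.

```python
# Illustrative sketch only: a small grid with visible cell borders.
import pyqtgraph as pg

app = pg.mkQApp()
win = pg.GraphicsLayoutWidget(show=True)   # wraps a GraphicsLayout
layout = win.ci                            # the central GraphicsLayout (assumed attribute)

layout.setBorder(color="w", width=1)       # pen applied to every per-item border rect
layout.addLabel("demo grid", colspan=2)
layout.nextRow()
p1 = layout.addPlot(title="left")
p2 = layout.addPlot(title="right")
p1.plot([1, 4, 2, 3])

layout.removeItem(p2)                      # the item's border rect is cleaned up with it

pg.exec()                                  # start the Qt event loop (pyqtgraph >= 0.12)
```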
problem_id: gh_patches_debug_34419
source: rasdani/github-patches
task_type: git_diff
in_source_id: intel__dffml-567
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- docs: operations: model_predict example usage We need a doctestable example for the `model_predict`/`dffml.model.predict` operation. References: https://intel.github.io/dffml/master/plugins/dffml_operation.html#dffml-model-predict --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `dffml/operation/model.py` Content: ``` 1 from typing import Dict, Any 2 3 from ..record import Record 4 from ..base import config 5 from ..model import Model 6 from ..df.types import Definition 7 from ..df.base import op 8 9 10 @config 11 class ModelPredictConfig: 12 model: Model 13 14 def __post_init__(self): 15 if not isinstance(self.model, Model): 16 raise TypeError( 17 "model should be an instance of `dffml.model.model.Model`" 18 ) 19 20 21 @op( 22 name="dffml.model.predict", 23 inputs={ 24 "features": Definition( 25 name="record_features", primitive="Dict[str, Any]" 26 ) 27 }, 28 outputs={ 29 "prediction": Definition( 30 name="model_predictions", primitive="Dict[str, Any]" 31 ) 32 }, 33 config_cls=ModelPredictConfig, 34 imp_enter={"model": (lambda self: self.config.model)}, 35 ctx_enter={"mctx": (lambda self: self.parent.model())}, 36 ) 37 async def model_predict(self, features: Dict[str, Any]) -> Dict[str, Any]: 38 async def records(): 39 yield Record("", data={"features": features}) 40 41 async for record in self.mctx.predict(records()): 42 return {"prediction": record.predictions()} 43 ``` Path: `docs/doctest_header.py` Content: ``` 1 # This file is used as a header in every file that is created to run each 2 # example when the doctests are run. 3 import os 4 import sys 5 import shutil 6 import atexit 7 import inspect 8 import asyncio 9 import tempfile 10 import builtins 11 import functools 12 from unittest import mock 13 14 # Create a temporary directory for test to run in 15 DOCTEST_TEMPDIR = tempfile.mkdtemp() 16 # Remove it when the test exits 17 atexit.register(functools.partial(shutil.rmtree, DOCTEST_TEMPDIR)) 18 # Change the current working directory to the temporary directory 19 os.chdir(DOCTEST_TEMPDIR) 20 21 from dffml import * 22 from dffml.base import * 23 from dffml.record import * 24 from dffml.df.base import * 25 from dffml.df.types import * 26 from dffml.util.net import * 27 from dffml.df.memory import * 28 from dffml_model_scikit import * 29 from dffml.operation.io import * 30 from dffml.source.memory import * 31 from dffml.operation.output import * 32 from dffml.operation.dataflow import * 33 from dffml.operation.preprocess import * 34 from dffml.operation.mapping import * 35 36 # Used for mocking input() for AcceptUserInput operation. 37 mock.patch("builtins.input", return_value="Data flow is awesome").start() 38 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/dffml/operation/model.py b/dffml/operation/model.py --- a/dffml/operation/model.py +++ b/dffml/operation/model.py @@ -35,6 +35,62 @@ ctx_enter={"mctx": (lambda self: self.parent.model())}, ) async def model_predict(self, features: Dict[str, Any]) -> Dict[str, Any]: + """ + Predict using dffml models. + + Parameters + ++++++++++ + features : dict + A dictionary contaning feature name and feature value. + + Returns + +++++++ + dict + A dictionary containing prediction. + + Examples + ++++++++ + + The following example shows how to use model_predict. + + >>> slr_model = SLRModel( + ... features=Features(DefFeature("Years", int, 1)), + ... predict=DefFeature("Salary", int, 1), + ... ) + >>> dataflow = DataFlow( + ... operations={ + ... "prediction_using_model": model_predict, + ... "get_single": GetSingle, + ... }, + ... configs={"prediction_using_model": ModelPredictConfig(model=slr_model)}, + ... ) + >>> dataflow.seed.append( + ... Input( + ... value=[model_predict.op.outputs["prediction"].name], + ... definition=GetSingle.op.inputs["spec"], + ... ) + ... ) + >>> + >>> async def main(): + ... await train( + ... slr_model, + ... {"Years": 0, "Salary": 10}, + ... {"Years": 1, "Salary": 20}, + ... {"Years": 2, "Salary": 30}, + ... {"Years": 3, "Salary": 40}, + ... ) + ... inputs = [ + ... Input( + ... value={"Years": 4}, definition=model_predict.op.inputs["features"], + ... ) + ... ] + ... async for ctx, results in MemoryOrchestrator.run(dataflow, inputs): + ... print(results) + >>> + >>> asyncio.run(main()) + {'model_predictions': {'Salary': {'confidence': 1.0, 'value': 50.0}}} + """ + async def records(): yield Record("", data={"features": features}) diff --git a/docs/doctest_header.py b/docs/doctest_header.py --- a/docs/doctest_header.py +++ b/docs/doctest_header.py @@ -25,9 +25,11 @@ from dffml.df.types import * from dffml.util.net import * from dffml.df.memory import * +from dffml.model.slr import * from dffml_model_scikit import * from dffml.operation.io import * from dffml.source.memory import * +from dffml.operation.model import * from dffml.operation.output import * from dffml.operation.dataflow import * from dffml.operation.preprocess import *
{"golden_diff": "diff --git a/dffml/operation/model.py b/dffml/operation/model.py\n--- a/dffml/operation/model.py\n+++ b/dffml/operation/model.py\n@@ -35,6 +35,62 @@\n ctx_enter={\"mctx\": (lambda self: self.parent.model())},\n )\n async def model_predict(self, features: Dict[str, Any]) -> Dict[str, Any]:\n+ \"\"\"\n+ Predict using dffml models.\n+\n+ Parameters\n+ ++++++++++\n+ features : dict\n+ A dictionary contaning feature name and feature value.\n+\n+ Returns\n+ +++++++\n+ dict\n+ A dictionary containing prediction.\n+\n+ Examples\n+ ++++++++\n+\n+ The following example shows how to use model_predict.\n+\n+ >>> slr_model = SLRModel(\n+ ... features=Features(DefFeature(\"Years\", int, 1)),\n+ ... predict=DefFeature(\"Salary\", int, 1),\n+ ... )\n+ >>> dataflow = DataFlow(\n+ ... operations={\n+ ... \"prediction_using_model\": model_predict,\n+ ... \"get_single\": GetSingle,\n+ ... },\n+ ... configs={\"prediction_using_model\": ModelPredictConfig(model=slr_model)},\n+ ... )\n+ >>> dataflow.seed.append(\n+ ... Input(\n+ ... value=[model_predict.op.outputs[\"prediction\"].name],\n+ ... definition=GetSingle.op.inputs[\"spec\"],\n+ ... )\n+ ... )\n+ >>>\n+ >>> async def main():\n+ ... await train(\n+ ... slr_model,\n+ ... {\"Years\": 0, \"Salary\": 10},\n+ ... {\"Years\": 1, \"Salary\": 20},\n+ ... {\"Years\": 2, \"Salary\": 30},\n+ ... {\"Years\": 3, \"Salary\": 40},\n+ ... )\n+ ... inputs = [\n+ ... Input(\n+ ... value={\"Years\": 4}, definition=model_predict.op.inputs[\"features\"],\n+ ... )\n+ ... ]\n+ ... async for ctx, results in MemoryOrchestrator.run(dataflow, inputs):\n+ ... print(results)\n+ >>>\n+ >>> asyncio.run(main())\n+ {'model_predictions': {'Salary': {'confidence': 1.0, 'value': 50.0}}}\n+ \"\"\"\n+\n async def records():\n yield Record(\"\", data={\"features\": features})\n \ndiff --git a/docs/doctest_header.py b/docs/doctest_header.py\n--- a/docs/doctest_header.py\n+++ b/docs/doctest_header.py\n@@ -25,9 +25,11 @@\n from dffml.df.types import *\n from dffml.util.net import *\n from dffml.df.memory import *\n+from dffml.model.slr import *\n from dffml_model_scikit import *\n from dffml.operation.io import *\n from dffml.source.memory import *\n+from dffml.operation.model import *\n from dffml.operation.output import *\n from dffml.operation.dataflow import *\n from dffml.operation.preprocess import *\n", "issue": "docs: operations: model_predict example usage\nWe need a doctestable example for the `model_predict`/`dffml.model.predict` operation.\r\n\r\nReferences: https://intel.github.io/dffml/master/plugins/dffml_operation.html#dffml-model-predict\n", "before_files": [{"content": "from typing import Dict, Any\n\nfrom ..record import Record\nfrom ..base import config\nfrom ..model import Model\nfrom ..df.types import Definition\nfrom ..df.base import op\n\n\n@config\nclass ModelPredictConfig:\n model: Model\n\n def __post_init__(self):\n if not isinstance(self.model, Model):\n raise TypeError(\n \"model should be an instance of `dffml.model.model.Model`\"\n )\n\n\n@op(\n name=\"dffml.model.predict\",\n inputs={\n \"features\": Definition(\n name=\"record_features\", primitive=\"Dict[str, Any]\"\n )\n },\n outputs={\n \"prediction\": Definition(\n name=\"model_predictions\", primitive=\"Dict[str, Any]\"\n )\n },\n config_cls=ModelPredictConfig,\n imp_enter={\"model\": (lambda self: self.config.model)},\n ctx_enter={\"mctx\": (lambda self: self.parent.model())},\n)\nasync def model_predict(self, features: Dict[str, Any]) -> Dict[str, Any]:\n async def records():\n yield 
Record(\"\", data={\"features\": features})\n\n async for record in self.mctx.predict(records()):\n return {\"prediction\": record.predictions()}\n", "path": "dffml/operation/model.py"}, {"content": "# This file is used as a header in every file that is created to run each\n# example when the doctests are run.\nimport os\nimport sys\nimport shutil\nimport atexit\nimport inspect\nimport asyncio\nimport tempfile\nimport builtins\nimport functools\nfrom unittest import mock\n\n# Create a temporary directory for test to run in\nDOCTEST_TEMPDIR = tempfile.mkdtemp()\n# Remove it when the test exits\natexit.register(functools.partial(shutil.rmtree, DOCTEST_TEMPDIR))\n# Change the current working directory to the temporary directory\nos.chdir(DOCTEST_TEMPDIR)\n\nfrom dffml import *\nfrom dffml.base import *\nfrom dffml.record import *\nfrom dffml.df.base import *\nfrom dffml.df.types import *\nfrom dffml.util.net import *\nfrom dffml.df.memory import *\nfrom dffml_model_scikit import *\nfrom dffml.operation.io import *\nfrom dffml.source.memory import *\nfrom dffml.operation.output import *\nfrom dffml.operation.dataflow import *\nfrom dffml.operation.preprocess import *\nfrom dffml.operation.mapping import *\n\n# Used for mocking input() for AcceptUserInput operation.\nmock.patch(\"builtins.input\", return_value=\"Data flow is awesome\").start()\n", "path": "docs/doctest_header.py"}], "after_files": [{"content": "from typing import Dict, Any\n\nfrom ..record import Record\nfrom ..base import config\nfrom ..model import Model\nfrom ..df.types import Definition\nfrom ..df.base import op\n\n\n@config\nclass ModelPredictConfig:\n model: Model\n\n def __post_init__(self):\n if not isinstance(self.model, Model):\n raise TypeError(\n \"model should be an instance of `dffml.model.model.Model`\"\n )\n\n\n@op(\n name=\"dffml.model.predict\",\n inputs={\n \"features\": Definition(\n name=\"record_features\", primitive=\"Dict[str, Any]\"\n )\n },\n outputs={\n \"prediction\": Definition(\n name=\"model_predictions\", primitive=\"Dict[str, Any]\"\n )\n },\n config_cls=ModelPredictConfig,\n imp_enter={\"model\": (lambda self: self.config.model)},\n ctx_enter={\"mctx\": (lambda self: self.parent.model())},\n)\nasync def model_predict(self, features: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"\n Predict using dffml models.\n\n Parameters\n ++++++++++\n features : dict\n A dictionary contaning feature name and feature value.\n\n Returns\n +++++++\n dict\n A dictionary containing prediction.\n\n Examples\n ++++++++\n\n The following example shows how to use model_predict.\n\n >>> slr_model = SLRModel(\n ... features=Features(DefFeature(\"Years\", int, 1)),\n ... predict=DefFeature(\"Salary\", int, 1),\n ... )\n >>> dataflow = DataFlow(\n ... operations={\n ... \"prediction_using_model\": model_predict,\n ... \"get_single\": GetSingle,\n ... },\n ... configs={\"prediction_using_model\": ModelPredictConfig(model=slr_model)},\n ... )\n >>> dataflow.seed.append(\n ... Input(\n ... value=[model_predict.op.outputs[\"prediction\"].name],\n ... definition=GetSingle.op.inputs[\"spec\"],\n ... )\n ... )\n >>>\n >>> async def main():\n ... await train(\n ... slr_model,\n ... {\"Years\": 0, \"Salary\": 10},\n ... {\"Years\": 1, \"Salary\": 20},\n ... {\"Years\": 2, \"Salary\": 30},\n ... {\"Years\": 3, \"Salary\": 40},\n ... )\n ... inputs = [\n ... Input(\n ... value={\"Years\": 4}, definition=model_predict.op.inputs[\"features\"],\n ... )\n ... ]\n ... 
async for ctx, results in MemoryOrchestrator.run(dataflow, inputs):\n ... print(results)\n >>>\n >>> asyncio.run(main())\n {'model_predictions': {'Salary': {'confidence': 1.0, 'value': 50.0}}}\n \"\"\"\n\n async def records():\n yield Record(\"\", data={\"features\": features})\n\n async for record in self.mctx.predict(records()):\n return {\"prediction\": record.predictions()}\n", "path": "dffml/operation/model.py"}, {"content": "# This file is used as a header in every file that is created to run each\n# example when the doctests are run.\nimport os\nimport sys\nimport shutil\nimport atexit\nimport inspect\nimport asyncio\nimport tempfile\nimport builtins\nimport functools\nfrom unittest import mock\n\n# Create a temporary directory for test to run in\nDOCTEST_TEMPDIR = tempfile.mkdtemp()\n# Remove it when the test exits\natexit.register(functools.partial(shutil.rmtree, DOCTEST_TEMPDIR))\n# Change the current working directory to the temporary directory\nos.chdir(DOCTEST_TEMPDIR)\n\nfrom dffml import *\nfrom dffml.base import *\nfrom dffml.record import *\nfrom dffml.df.base import *\nfrom dffml.df.types import *\nfrom dffml.util.net import *\nfrom dffml.df.memory import *\nfrom dffml.model.slr import *\nfrom dffml_model_scikit import *\nfrom dffml.operation.io import *\nfrom dffml.source.memory import *\nfrom dffml.operation.model import *\nfrom dffml.operation.output import *\nfrom dffml.operation.dataflow import *\nfrom dffml.operation.preprocess import *\nfrom dffml.operation.mapping import *\n\n# Used for mocking input() for AcceptUserInput operation.\nmock.patch(\"builtins.input\", return_value=\"Data flow is awesome\").start()\n", "path": "docs/doctest_header.py"}]}
1,001
707
gh_patches_debug_13068
rasdani/github-patches
git_diff
internetarchive__openlibrary-6846
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add notification bubble on Main Nav for super-librarians # pending MRs <!-- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] --> ### Describe the problem that you'd like solved <!-- A clear and concise description of what you want to happen. --> Options are to add a bubble to the avatar and then a entry + bubble into the sub-navigation within the hamburger ![testing openlibrary org_merges_mode=closed](https://user-images.githubusercontent.com/978325/182037307-3ba26478-0b2e-4fc3-9676-a63fd4e87dba.png) Or to add a bubble directly to the black IA topbar which when clicked goes directly to /merges ![testing openlibrary org_merges_mode=closed (1)](https://user-images.githubusercontent.com/978325/182037463-db0a2ca7-45c7-44ad-9238-146a5ea37daa.png) ``` .mr-notifications { position: absolute; z-index: 4; background: #02598b; color: white; border-radius: 8px; padding: 3px 7px; font-size: 12px; margin-left: 9px; margin-top: 35px; font-weight: bold; } ``` ### Proposal & Constraints <!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? --> <!-- Which suggestions or requirements should be considered for how feature needs to appear or be implemented? --> ### Additional context <!-- Add any other context or screenshots about the feature request here. --> ### Stakeholders <!-- @ tag stakeholders of this bug --> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `openlibrary/core/edits.py` Content: ``` 1 import datetime 2 import json 3 from typing import Optional 4 import web 5 6 from infogami.utils.view import public 7 8 from openlibrary.i18n import gettext as _ 9 10 from . 
import db 11 12 13 @public 14 def get_status_for_view(status_code: int) -> str: 15 """Returns localized status string that corresponds with the given status code.""" 16 if status_code == CommunityEditsQueue.STATUS['DECLINED']: 17 return _('Declined') 18 if status_code == CommunityEditsQueue.STATUS['PENDING']: 19 return _('Pending') 20 if status_code == CommunityEditsQueue.STATUS['MERGED']: 21 return _('Merged') 22 return _('Unknown') 23 24 25 class CommunityEditsQueue: 26 27 """Schema 28 id: Primary identifier 29 submitter: username of person that made the request 30 reviewer: The username of the person who reviewed the request 31 url: URL of the merge request 32 status: Either "Pending", "Merged", or "Declined" 33 comment: Short note from reviewer (json blobs (can store timestamps, etc)) 34 created: created timestamp 35 updated: update timestamp 36 """ 37 38 STATUS = { 39 'DECLINED': 0, 40 'PENDING': 1, 41 'MERGED': 2, 42 } 43 44 MODES = { 45 'all': [STATUS['DECLINED'], STATUS['PENDING'], STATUS['MERGED']], 46 'open': [STATUS['PENDING']], 47 'closed': [STATUS['DECLINED'], STATUS['MERGED']], 48 } 49 50 @classmethod 51 def get_requests( 52 cls, 53 limit: int = 50, 54 page: int = 1, 55 mode: str = 'all', 56 order: str = None, 57 **kwargs, 58 ): 59 oldb = db.get_db() 60 61 query_kwargs = { 62 "limit": limit, 63 "offset": limit * (page - 1), 64 "vars": {**kwargs}, 65 } 66 67 query_kwargs['where'] = cls.where_clause(mode, **kwargs) 68 69 if order: 70 query_kwargs['order'] = order 71 return oldb.select("community_edits_queue", **query_kwargs) 72 73 @classmethod 74 def get_counts_by_mode(cls, mode='all', **kwargs): 75 oldb = db.get_db() 76 77 query = 'SELECT count(*) from community_edits_queue' 78 79 where_clause = cls.where_clause(mode, **kwargs) 80 if where_clause: 81 query = f'{query} WHERE {where_clause}' 82 return oldb.query(query, vars=kwargs)[0]['count'] 83 84 @classmethod 85 def where_clause(cls, mode, **kwargs): 86 wheres = [] 87 88 if kwargs.get('reviewer') is not None: 89 wheres.append( 90 # if reviewer="" then get all unassigned MRs 91 "reviewer IS NULL" 92 if not kwargs.get('reviewer') 93 else "reviewer=$reviewer" 94 ) 95 if "submitter" in kwargs: 96 wheres.append( 97 # If submitter not specified, default to any 98 "submitter IS NOT NULL" 99 if kwargs.get("submitter") is None 100 else "submitter=$submitter" 101 ) 102 if "url" in kwargs: 103 wheres.append("url=$url") 104 if "id" in kwargs: 105 wheres.append("id=$id") 106 107 status_list = ( 108 [f'status={status}' for status in cls.MODES[mode]] if mode != 'all' else [] 109 ) 110 111 where_clause = '' 112 113 if wheres: 114 where_clause = f'{" AND ".join(wheres)}' 115 if status_list: 116 status_query = f'({" OR ".join(status_list)})' 117 if where_clause: 118 where_clause = f'{where_clause} AND {status_query}' 119 else: 120 where_clause = status_query 121 122 return where_clause 123 124 @classmethod 125 def submit_work_merge_request( 126 cls, 127 work_ids: list[str], 128 submitter: str, 129 comment: str = None, 130 reviewer: str = None, 131 status: int = STATUS['PENDING'], 132 ): 133 """ 134 Creates new work merge requests with the given work olids. 135 136 Precondition: OLIDs in work_ids list must be sanitized and normalized. 
137 """ 138 url = f"/works/merge?records={','.join(work_ids)}" 139 if not cls.exists(url): 140 return cls.submit_request( 141 url, 142 submitter=submitter, 143 comment=comment, 144 reviewer=reviewer, 145 status=status, 146 title=cls.get_work_merge_title(work_ids), 147 ) 148 149 @staticmethod 150 def get_work_merge_title(olids): 151 title = None 152 for olid in olids: 153 book = web.ctx.site.get(f'/works/{olid}') 154 if book and book.title: 155 title = book.title 156 break 157 return title 158 159 @classmethod 160 def submit_author_merge_request(cls, author_ids, submitter, comment=None): 161 if not comment: 162 # some default note from submitter 163 pass 164 # XXX IDs should be santiized & normalized 165 url = f"/authors/merge?key={'&key='.join(author_ids)}" 166 cls.submit_request(url, submitter=submitter, comment=comment) 167 168 @classmethod 169 def submit_delete_request(cls, olid, submitter, comment=None): 170 if not comment: 171 # some default note from submitter 172 pass 173 url = f"{olid}/-/edit?m=delete" 174 cls.submit_request(cls, url, submitter=submitter, comment=comment) 175 176 @classmethod 177 def submit_request( 178 cls, 179 url: str, 180 submitter: str, 181 reviewer: str = None, 182 status: int = STATUS['PENDING'], 183 comment: str = None, 184 title: str = None, 185 ): 186 """ 187 Inserts a new record into the table. 188 189 Preconditions: All data validations should be completed before calling this method. 190 """ 191 oldb = db.get_db() 192 193 comments = [cls.create_comment(submitter, comment)] if comment else [] 194 json_comment = json.dumps({"comments": comments}) 195 196 return oldb.insert( 197 "community_edits_queue", 198 submitter=submitter, 199 reviewer=reviewer, 200 url=url, 201 status=status, 202 comments=json_comment, 203 title=title, 204 ) 205 206 @classmethod 207 def assign_request( 208 cls, rid: int, reviewer: Optional[str] 209 ) -> dict[str, Optional[str]]: 210 """Changes assignees to the request with the given ID. 211 212 This method only modifies requests that are not closed. 213 214 If the given reviewer is the same as the request's reviewer, nothing is 215 modified 216 """ 217 request = cls.find_by_id(rid) 218 219 if request['status'] not in cls.MODES['closed']: 220 if request['reviewer'] == reviewer: 221 return { 222 'status': 'error', 223 'error': f'{reviewer} is already assigned to this request', 224 } 225 oldb = db.get_db() 226 227 oldb.update( 228 "community_edits_queue", 229 where="id=$rid", 230 reviewer=reviewer, 231 status=cls.STATUS['PENDING'], 232 updated=datetime.datetime.utcnow(), 233 vars={"rid": rid}, 234 ) 235 return { 236 'reviewer': reviewer, 237 'newStatus': get_status_for_view(cls.STATUS['PENDING']), 238 } 239 return {'status': 'error', 'error': 'This request has already been closed'} 240 241 @classmethod 242 def unassign_request(cls, rid: int): 243 """ 244 Changes status of given request to "Pending", and sets reviewer to None. 245 """ 246 oldb = db.get_db() 247 oldb.update( 248 "community_edits_queue", 249 where="id=$rid", 250 status=cls.STATUS['PENDING'], 251 reviewer=None, 252 updated=datetime.datetime.utcnow(), 253 vars={"rid": rid}, 254 ) 255 256 @classmethod 257 def update_request_status( 258 cls, rid: int, status: int, reviewer: str, comment: str = None 259 ) -> int: 260 """ 261 Changes the status of the request with the given rid. 262 263 If a comment is included, existing comments list for this request are fetched and 264 the new comment is appended. 
265 """ 266 oldb = db.get_db() 267 268 update_kwargs = {} 269 270 # XXX Trim whitespace from comment first 271 if comment: 272 comments = cls.get_comments(rid) 273 comments['comments'].append(cls.create_comment(reviewer, comment)) 274 update_kwargs['comments'] = json.dumps(comments) 275 276 return oldb.update( 277 "community_edits_queue", 278 where="id=$rid", 279 status=status, 280 reviewer=reviewer, 281 updated=datetime.datetime.utcnow(), 282 vars={"rid": rid}, 283 **update_kwargs, 284 ) 285 286 @classmethod 287 def comment_request(cls, rid: int, username: str, comment: str) -> int: 288 oldb = db.get_db() 289 290 comments = cls.get_comments(rid) 291 comments['comments'].append(cls.create_comment(username, comment)) 292 293 return oldb.update( 294 "community_edits_queue", 295 where="id=$rid", 296 comments=json.dumps(comments), 297 updated=datetime.datetime.utcnow(), 298 vars={"rid": rid}, 299 ) 300 301 @classmethod 302 def find_by_id(cls, rid: int): 303 """Returns the record with the given ID.""" 304 return cls.get_requests(id=rid)[0] or None 305 306 @classmethod 307 def exists(cls, url: str) -> bool: 308 """Returns True if a request with the given URL exists in the table.""" 309 return len(cls.get_requests(limit=1, url=url)) > 0 310 311 @classmethod 312 def get_comments(cls, rid: int): 313 """Fetches the comments for the given request, or an empty comments object.""" 314 return cls.get_requests(id=rid)[0]['comments'] or {'comments': []} 315 316 @classmethod 317 def create_comment(cls, username: str, message: str) -> dict[str, str]: 318 """Creates and returns a new comment with the given name and message. 319 Timestamp set as current time. 320 """ 321 return { 322 # isoformat to avoid to-json issues 323 "timestamp": datetime.datetime.utcnow().isoformat(), 324 "username": username, 325 "message": message, 326 # XXX It may be easier to update these comments if they had IDs 327 } 328 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/openlibrary/core/edits.py b/openlibrary/core/edits.py --- a/openlibrary/core/edits.py +++ b/openlibrary/core/edits.py @@ -6,6 +6,8 @@ from infogami.utils.view import public from openlibrary.i18n import gettext as _ +from openlibrary.core import cache +from openlibrary.utils import dateutil from . import db @@ -325,3 +327,12 @@ "message": message, # XXX It may be easier to update these comments if they had IDs } + + +@public +def cached_get_counts_by_mode(mode='all', **kwargs): + return cache.memcache_memoize( + CommunityEditsQueue.get_counts_by_mode, + f"librarian_queue_counts_{mode}", + timeout=dateutil.MINUTE_SECS, + )(mode, **kwargs)
{"golden_diff": "diff --git a/openlibrary/core/edits.py b/openlibrary/core/edits.py\n--- a/openlibrary/core/edits.py\n+++ b/openlibrary/core/edits.py\n@@ -6,6 +6,8 @@\n from infogami.utils.view import public\n \n from openlibrary.i18n import gettext as _\n+from openlibrary.core import cache\n+from openlibrary.utils import dateutil\n \n from . import db\n \n@@ -325,3 +327,12 @@\n \"message\": message,\n # XXX It may be easier to update these comments if they had IDs\n }\n+\n+\n+@public\n+def cached_get_counts_by_mode(mode='all', **kwargs):\n+ return cache.memcache_memoize(\n+ CommunityEditsQueue.get_counts_by_mode,\n+ f\"librarian_queue_counts_{mode}\",\n+ timeout=dateutil.MINUTE_SECS,\n+ )(mode, **kwargs)\n", "issue": "Add notification bubble on Main Nav for super-librarians # pending MRs\n<!-- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->\r\n\r\n### Describe the problem that you'd like solved\r\n<!-- A clear and concise description of what you want to happen. -->\r\n\r\nOptions are to add a bubble to the avatar and then a entry + bubble into the sub-navigation within the hamburger\r\n\r\n![testing openlibrary org_merges_mode=closed](https://user-images.githubusercontent.com/978325/182037307-3ba26478-0b2e-4fc3-9676-a63fd4e87dba.png)\r\n\r\nOr to add a bubble directly to the black IA topbar which when clicked goes directly to /merges\r\n\r\n![testing openlibrary org_merges_mode=closed (1)](https://user-images.githubusercontent.com/978325/182037463-db0a2ca7-45c7-44ad-9238-146a5ea37daa.png)\r\n\r\n```\r\n.mr-notifications {\r\n position: absolute;\r\n z-index: 4;\r\n background: #02598b;\r\n color: white;\r\n border-radius: 8px;\r\n padding: 3px 7px;\r\n font-size: 12px;\r\n margin-left: 9px;\r\n margin-top: 35px;\r\n font-weight: bold;\r\n }\r\n```\r\n\r\n### Proposal & Constraints\r\n<!-- What is the proposed solution / implementation? Is there a precedent of this approach succeeding elsewhere? -->\r\n\r\n<!-- Which suggestions or requirements should be considered for how feature needs to appear or be implemented? -->\r\n\r\n### Additional context\r\n<!-- Add any other context or screenshots about the feature request here. -->\r\n\r\n### Stakeholders\r\n<!-- @ tag stakeholders of this bug -->\r\n\r\n\r\n\n", "before_files": [{"content": "import datetime\nimport json\nfrom typing import Optional\nimport web\n\nfrom infogami.utils.view import public\n\nfrom openlibrary.i18n import gettext as _\n\nfrom . 
import db\n\n\n@public\ndef get_status_for_view(status_code: int) -> str:\n \"\"\"Returns localized status string that corresponds with the given status code.\"\"\"\n if status_code == CommunityEditsQueue.STATUS['DECLINED']:\n return _('Declined')\n if status_code == CommunityEditsQueue.STATUS['PENDING']:\n return _('Pending')\n if status_code == CommunityEditsQueue.STATUS['MERGED']:\n return _('Merged')\n return _('Unknown')\n\n\nclass CommunityEditsQueue:\n\n \"\"\"Schema\n id: Primary identifier\n submitter: username of person that made the request\n reviewer: The username of the person who reviewed the request\n url: URL of the merge request\n status: Either \"Pending\", \"Merged\", or \"Declined\"\n comment: Short note from reviewer (json blobs (can store timestamps, etc))\n created: created timestamp\n updated: update timestamp\n \"\"\"\n\n STATUS = {\n 'DECLINED': 0,\n 'PENDING': 1,\n 'MERGED': 2,\n }\n\n MODES = {\n 'all': [STATUS['DECLINED'], STATUS['PENDING'], STATUS['MERGED']],\n 'open': [STATUS['PENDING']],\n 'closed': [STATUS['DECLINED'], STATUS['MERGED']],\n }\n\n @classmethod\n def get_requests(\n cls,\n limit: int = 50,\n page: int = 1,\n mode: str = 'all',\n order: str = None,\n **kwargs,\n ):\n oldb = db.get_db()\n\n query_kwargs = {\n \"limit\": limit,\n \"offset\": limit * (page - 1),\n \"vars\": {**kwargs},\n }\n\n query_kwargs['where'] = cls.where_clause(mode, **kwargs)\n\n if order:\n query_kwargs['order'] = order\n return oldb.select(\"community_edits_queue\", **query_kwargs)\n\n @classmethod\n def get_counts_by_mode(cls, mode='all', **kwargs):\n oldb = db.get_db()\n\n query = 'SELECT count(*) from community_edits_queue'\n\n where_clause = cls.where_clause(mode, **kwargs)\n if where_clause:\n query = f'{query} WHERE {where_clause}'\n return oldb.query(query, vars=kwargs)[0]['count']\n\n @classmethod\n def where_clause(cls, mode, **kwargs):\n wheres = []\n\n if kwargs.get('reviewer') is not None:\n wheres.append(\n # if reviewer=\"\" then get all unassigned MRs\n \"reviewer IS NULL\"\n if not kwargs.get('reviewer')\n else \"reviewer=$reviewer\"\n )\n if \"submitter\" in kwargs:\n wheres.append(\n # If submitter not specified, default to any\n \"submitter IS NOT NULL\"\n if kwargs.get(\"submitter\") is None\n else \"submitter=$submitter\"\n )\n if \"url\" in kwargs:\n wheres.append(\"url=$url\")\n if \"id\" in kwargs:\n wheres.append(\"id=$id\")\n\n status_list = (\n [f'status={status}' for status in cls.MODES[mode]] if mode != 'all' else []\n )\n\n where_clause = ''\n\n if wheres:\n where_clause = f'{\" AND \".join(wheres)}'\n if status_list:\n status_query = f'({\" OR \".join(status_list)})'\n if where_clause:\n where_clause = f'{where_clause} AND {status_query}'\n else:\n where_clause = status_query\n\n return where_clause\n\n @classmethod\n def submit_work_merge_request(\n cls,\n work_ids: list[str],\n submitter: str,\n comment: str = None,\n reviewer: str = None,\n status: int = STATUS['PENDING'],\n ):\n \"\"\"\n Creates new work merge requests with the given work olids.\n\n Precondition: OLIDs in work_ids list must be sanitized and normalized.\n \"\"\"\n url = f\"/works/merge?records={','.join(work_ids)}\"\n if not cls.exists(url):\n return cls.submit_request(\n url,\n submitter=submitter,\n comment=comment,\n reviewer=reviewer,\n status=status,\n title=cls.get_work_merge_title(work_ids),\n )\n\n @staticmethod\n def get_work_merge_title(olids):\n title = None\n for olid in olids:\n book = web.ctx.site.get(f'/works/{olid}')\n if book and book.title:\n title = 
book.title\n break\n return title\n\n @classmethod\n def submit_author_merge_request(cls, author_ids, submitter, comment=None):\n if not comment:\n # some default note from submitter\n pass\n # XXX IDs should be santiized & normalized\n url = f\"/authors/merge?key={'&key='.join(author_ids)}\"\n cls.submit_request(url, submitter=submitter, comment=comment)\n\n @classmethod\n def submit_delete_request(cls, olid, submitter, comment=None):\n if not comment:\n # some default note from submitter\n pass\n url = f\"{olid}/-/edit?m=delete\"\n cls.submit_request(cls, url, submitter=submitter, comment=comment)\n\n @classmethod\n def submit_request(\n cls,\n url: str,\n submitter: str,\n reviewer: str = None,\n status: int = STATUS['PENDING'],\n comment: str = None,\n title: str = None,\n ):\n \"\"\"\n Inserts a new record into the table.\n\n Preconditions: All data validations should be completed before calling this method.\n \"\"\"\n oldb = db.get_db()\n\n comments = [cls.create_comment(submitter, comment)] if comment else []\n json_comment = json.dumps({\"comments\": comments})\n\n return oldb.insert(\n \"community_edits_queue\",\n submitter=submitter,\n reviewer=reviewer,\n url=url,\n status=status,\n comments=json_comment,\n title=title,\n )\n\n @classmethod\n def assign_request(\n cls, rid: int, reviewer: Optional[str]\n ) -> dict[str, Optional[str]]:\n \"\"\"Changes assignees to the request with the given ID.\n\n This method only modifies requests that are not closed.\n\n If the given reviewer is the same as the request's reviewer, nothing is\n modified\n \"\"\"\n request = cls.find_by_id(rid)\n\n if request['status'] not in cls.MODES['closed']:\n if request['reviewer'] == reviewer:\n return {\n 'status': 'error',\n 'error': f'{reviewer} is already assigned to this request',\n }\n oldb = db.get_db()\n\n oldb.update(\n \"community_edits_queue\",\n where=\"id=$rid\",\n reviewer=reviewer,\n status=cls.STATUS['PENDING'],\n updated=datetime.datetime.utcnow(),\n vars={\"rid\": rid},\n )\n return {\n 'reviewer': reviewer,\n 'newStatus': get_status_for_view(cls.STATUS['PENDING']),\n }\n return {'status': 'error', 'error': 'This request has already been closed'}\n\n @classmethod\n def unassign_request(cls, rid: int):\n \"\"\"\n Changes status of given request to \"Pending\", and sets reviewer to None.\n \"\"\"\n oldb = db.get_db()\n oldb.update(\n \"community_edits_queue\",\n where=\"id=$rid\",\n status=cls.STATUS['PENDING'],\n reviewer=None,\n updated=datetime.datetime.utcnow(),\n vars={\"rid\": rid},\n )\n\n @classmethod\n def update_request_status(\n cls, rid: int, status: int, reviewer: str, comment: str = None\n ) -> int:\n \"\"\"\n Changes the status of the request with the given rid.\n\n If a comment is included, existing comments list for this request are fetched and\n the new comment is appended.\n \"\"\"\n oldb = db.get_db()\n\n update_kwargs = {}\n\n # XXX Trim whitespace from comment first\n if comment:\n comments = cls.get_comments(rid)\n comments['comments'].append(cls.create_comment(reviewer, comment))\n update_kwargs['comments'] = json.dumps(comments)\n\n return oldb.update(\n \"community_edits_queue\",\n where=\"id=$rid\",\n status=status,\n reviewer=reviewer,\n updated=datetime.datetime.utcnow(),\n vars={\"rid\": rid},\n **update_kwargs,\n )\n\n @classmethod\n def comment_request(cls, rid: int, username: str, comment: str) -> int:\n oldb = db.get_db()\n\n comments = cls.get_comments(rid)\n comments['comments'].append(cls.create_comment(username, comment))\n\n return oldb.update(\n 
\"community_edits_queue\",\n where=\"id=$rid\",\n comments=json.dumps(comments),\n updated=datetime.datetime.utcnow(),\n vars={\"rid\": rid},\n )\n\n @classmethod\n def find_by_id(cls, rid: int):\n \"\"\"Returns the record with the given ID.\"\"\"\n return cls.get_requests(id=rid)[0] or None\n\n @classmethod\n def exists(cls, url: str) -> bool:\n \"\"\"Returns True if a request with the given URL exists in the table.\"\"\"\n return len(cls.get_requests(limit=1, url=url)) > 0\n\n @classmethod\n def get_comments(cls, rid: int):\n \"\"\"Fetches the comments for the given request, or an empty comments object.\"\"\"\n return cls.get_requests(id=rid)[0]['comments'] or {'comments': []}\n\n @classmethod\n def create_comment(cls, username: str, message: str) -> dict[str, str]:\n \"\"\"Creates and returns a new comment with the given name and message.\n Timestamp set as current time.\n \"\"\"\n return {\n # isoformat to avoid to-json issues\n \"timestamp\": datetime.datetime.utcnow().isoformat(),\n \"username\": username,\n \"message\": message,\n # XXX It may be easier to update these comments if they had IDs\n }\n", "path": "openlibrary/core/edits.py"}], "after_files": [{"content": "import datetime\nimport json\nfrom typing import Optional\nimport web\n\nfrom infogami.utils.view import public\n\nfrom openlibrary.i18n import gettext as _\nfrom openlibrary.core import cache\nfrom openlibrary.utils import dateutil\n\nfrom . import db\n\n\n@public\ndef get_status_for_view(status_code: int) -> str:\n \"\"\"Returns localized status string that corresponds with the given status code.\"\"\"\n if status_code == CommunityEditsQueue.STATUS['DECLINED']:\n return _('Declined')\n if status_code == CommunityEditsQueue.STATUS['PENDING']:\n return _('Pending')\n if status_code == CommunityEditsQueue.STATUS['MERGED']:\n return _('Merged')\n return _('Unknown')\n\n\nclass CommunityEditsQueue:\n\n \"\"\"Schema\n id: Primary identifier\n submitter: username of person that made the request\n reviewer: The username of the person who reviewed the request\n url: URL of the merge request\n status: Either \"Pending\", \"Merged\", or \"Declined\"\n comment: Short note from reviewer (json blobs (can store timestamps, etc))\n created: created timestamp\n updated: update timestamp\n \"\"\"\n\n STATUS = {\n 'DECLINED': 0,\n 'PENDING': 1,\n 'MERGED': 2,\n }\n\n MODES = {\n 'all': [STATUS['DECLINED'], STATUS['PENDING'], STATUS['MERGED']],\n 'open': [STATUS['PENDING']],\n 'closed': [STATUS['DECLINED'], STATUS['MERGED']],\n }\n\n @classmethod\n def get_requests(\n cls,\n limit: int = 50,\n page: int = 1,\n mode: str = 'all',\n order: str = None,\n **kwargs,\n ):\n oldb = db.get_db()\n\n query_kwargs = {\n \"limit\": limit,\n \"offset\": limit * (page - 1),\n \"vars\": {**kwargs},\n }\n\n query_kwargs['where'] = cls.where_clause(mode, **kwargs)\n\n if order:\n query_kwargs['order'] = order\n return oldb.select(\"community_edits_queue\", **query_kwargs)\n\n @classmethod\n def get_counts_by_mode(cls, mode='all', **kwargs):\n oldb = db.get_db()\n\n query = 'SELECT count(*) from community_edits_queue'\n\n where_clause = cls.where_clause(mode, **kwargs)\n if where_clause:\n query = f'{query} WHERE {where_clause}'\n return oldb.query(query, vars=kwargs)[0]['count']\n\n @classmethod\n def where_clause(cls, mode, **kwargs):\n wheres = []\n\n if kwargs.get('reviewer') is not None:\n wheres.append(\n # if reviewer=\"\" then get all unassigned MRs\n \"reviewer IS NULL\"\n if not kwargs.get('reviewer')\n else \"reviewer=$reviewer\"\n )\n if 
\"submitter\" in kwargs:\n wheres.append(\n # If submitter not specified, default to any\n \"submitter IS NOT NULL\"\n if kwargs.get(\"submitter\") is None\n else \"submitter=$submitter\"\n )\n if \"url\" in kwargs:\n wheres.append(\"url=$url\")\n if \"id\" in kwargs:\n wheres.append(\"id=$id\")\n\n status_list = (\n [f'status={status}' for status in cls.MODES[mode]] if mode != 'all' else []\n )\n\n where_clause = ''\n\n if wheres:\n where_clause = f'{\" AND \".join(wheres)}'\n if status_list:\n status_query = f'({\" OR \".join(status_list)})'\n if where_clause:\n where_clause = f'{where_clause} AND {status_query}'\n else:\n where_clause = status_query\n\n return where_clause\n\n @classmethod\n def submit_work_merge_request(\n cls,\n work_ids: list[str],\n submitter: str,\n comment: str = None,\n reviewer: str = None,\n status: int = STATUS['PENDING'],\n ):\n \"\"\"\n Creates new work merge requests with the given work olids.\n\n Precondition: OLIDs in work_ids list must be sanitized and normalized.\n \"\"\"\n url = f\"/works/merge?records={','.join(work_ids)}\"\n if not cls.exists(url):\n return cls.submit_request(\n url,\n submitter=submitter,\n comment=comment,\n reviewer=reviewer,\n status=status,\n title=cls.get_work_merge_title(work_ids),\n )\n\n @staticmethod\n def get_work_merge_title(olids):\n title = None\n for olid in olids:\n book = web.ctx.site.get(f'/works/{olid}')\n if book and book.title:\n title = book.title\n break\n return title\n\n @classmethod\n def submit_author_merge_request(cls, author_ids, submitter, comment=None):\n if not comment:\n # some default note from submitter\n pass\n # XXX IDs should be santiized & normalized\n url = f\"/authors/merge?key={'&key='.join(author_ids)}\"\n cls.submit_request(url, submitter=submitter, comment=comment)\n\n @classmethod\n def submit_delete_request(cls, olid, submitter, comment=None):\n if not comment:\n # some default note from submitter\n pass\n url = f\"{olid}/-/edit?m=delete\"\n cls.submit_request(cls, url, submitter=submitter, comment=comment)\n\n @classmethod\n def submit_request(\n cls,\n url: str,\n submitter: str,\n reviewer: str = None,\n status: int = STATUS['PENDING'],\n comment: str = None,\n title: str = None,\n ):\n \"\"\"\n Inserts a new record into the table.\n\n Preconditions: All data validations should be completed before calling this method.\n \"\"\"\n oldb = db.get_db()\n\n comments = [cls.create_comment(submitter, comment)] if comment else []\n json_comment = json.dumps({\"comments\": comments})\n\n return oldb.insert(\n \"community_edits_queue\",\n submitter=submitter,\n reviewer=reviewer,\n url=url,\n status=status,\n comments=json_comment,\n title=title,\n )\n\n @classmethod\n def assign_request(\n cls, rid: int, reviewer: Optional[str]\n ) -> dict[str, Optional[str]]:\n \"\"\"Changes assignees to the request with the given ID.\n\n This method only modifies requests that are not closed.\n\n If the given reviewer is the same as the request's reviewer, nothing is\n modified\n \"\"\"\n request = cls.find_by_id(rid)\n\n if request['status'] not in cls.MODES['closed']:\n if request['reviewer'] == reviewer:\n return {\n 'status': 'error',\n 'error': f'{reviewer} is already assigned to this request',\n }\n oldb = db.get_db()\n\n oldb.update(\n \"community_edits_queue\",\n where=\"id=$rid\",\n reviewer=reviewer,\n status=cls.STATUS['PENDING'],\n updated=datetime.datetime.utcnow(),\n vars={\"rid\": rid},\n )\n return {\n 'reviewer': reviewer,\n 'newStatus': get_status_for_view(cls.STATUS['PENDING']),\n }\n 
return {'status': 'error', 'error': 'This request has already been closed'}\n\n @classmethod\n def unassign_request(cls, rid: int):\n \"\"\"\n Changes status of given request to \"Pending\", and sets reviewer to None.\n \"\"\"\n oldb = db.get_db()\n oldb.update(\n \"community_edits_queue\",\n where=\"id=$rid\",\n status=cls.STATUS['PENDING'],\n reviewer=None,\n updated=datetime.datetime.utcnow(),\n vars={\"rid\": rid},\n )\n\n @classmethod\n def update_request_status(\n cls, rid: int, status: int, reviewer: str, comment: str = None\n ) -> int:\n \"\"\"\n Changes the status of the request with the given rid.\n\n If a comment is included, existing comments list for this request are fetched and\n the new comment is appended.\n \"\"\"\n oldb = db.get_db()\n\n update_kwargs = {}\n\n # XXX Trim whitespace from comment first\n if comment:\n comments = cls.get_comments(rid)\n comments['comments'].append(cls.create_comment(reviewer, comment))\n update_kwargs['comments'] = json.dumps(comments)\n\n return oldb.update(\n \"community_edits_queue\",\n where=\"id=$rid\",\n status=status,\n reviewer=reviewer,\n updated=datetime.datetime.utcnow(),\n vars={\"rid\": rid},\n **update_kwargs,\n )\n\n @classmethod\n def comment_request(cls, rid: int, username: str, comment: str) -> int:\n oldb = db.get_db()\n\n comments = cls.get_comments(rid)\n comments['comments'].append(cls.create_comment(username, comment))\n\n return oldb.update(\n \"community_edits_queue\",\n where=\"id=$rid\",\n comments=json.dumps(comments),\n updated=datetime.datetime.utcnow(),\n vars={\"rid\": rid},\n )\n\n @classmethod\n def find_by_id(cls, rid: int):\n \"\"\"Returns the record with the given ID.\"\"\"\n return cls.get_requests(id=rid)[0] or None\n\n @classmethod\n def exists(cls, url: str) -> bool:\n \"\"\"Returns True if a request with the given URL exists in the table.\"\"\"\n return len(cls.get_requests(limit=1, url=url)) > 0\n\n @classmethod\n def get_comments(cls, rid: int):\n \"\"\"Fetches the comments for the given request, or an empty comments object.\"\"\"\n return cls.get_requests(id=rid)[0]['comments'] or {'comments': []}\n\n @classmethod\n def create_comment(cls, username: str, message: str) -> dict[str, str]:\n \"\"\"Creates and returns a new comment with the given name and message.\n Timestamp set as current time.\n \"\"\"\n return {\n # isoformat to avoid to-json issues\n \"timestamp\": datetime.datetime.utcnow().isoformat(),\n \"username\": username,\n \"message\": message,\n # XXX It may be easier to update these comments if they had IDs\n }\n\n\n@public\ndef cached_get_counts_by_mode(mode='all', **kwargs):\n return cache.memcache_memoize(\n CommunityEditsQueue.get_counts_by_mode,\n f\"librarian_queue_counts_{mode}\",\n timeout=dateutil.MINUTE_SECS,\n )(mode, **kwargs)\n", "path": "openlibrary/core/edits.py"}]}
3,786
201
gh_patches_debug_16332
rasdani/github-patches
git_diff
microsoft__botbuilder-python-1395
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- SkillDialog doesn't call SkillConversationIdFactory.DeleteConversationReference when using ExpectReplies See [parent](https://github.com/microsoft/botframework-sdk/issues/6019) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `libraries/botbuilder-dialogs/botbuilder/dialogs/skills/skill_dialog.py` Content: ``` 1 # Copyright (c) Microsoft Corporation. All rights reserved. 2 # Licensed under the MIT License. 3 4 from copy import deepcopy 5 from typing import List 6 7 from botframework.connector.token_api.models import TokenExchangeRequest 8 from botbuilder.schema import ( 9 Activity, 10 ActivityTypes, 11 ExpectedReplies, 12 DeliveryModes, 13 SignInConstants, 14 TokenExchangeInvokeRequest, 15 ) 16 from botbuilder.core import BotAdapter, TurnContext, ExtendedUserTokenProvider 17 from botbuilder.core.card_factory import ContentTypes 18 from botbuilder.core.skills import SkillConversationIdFactoryOptions 19 from botbuilder.dialogs import ( 20 Dialog, 21 DialogContext, 22 DialogEvents, 23 DialogReason, 24 DialogInstance, 25 ) 26 27 from .begin_skill_dialog_options import BeginSkillDialogOptions 28 from .skill_dialog_options import SkillDialogOptions 29 30 31 class SkillDialog(Dialog): 32 SKILLCONVERSATIONIDSTATEKEY = ( 33 "Microsoft.Bot.Builder.Dialogs.SkillDialog.SkillConversationId" 34 ) 35 36 def __init__(self, dialog_options: SkillDialogOptions, dialog_id: str): 37 super().__init__(dialog_id) 38 if not dialog_options: 39 raise TypeError("SkillDialog.__init__(): dialog_options cannot be None.") 40 41 self.dialog_options = dialog_options 42 self._deliver_mode_state_key = "deliverymode" 43 44 async def begin_dialog(self, dialog_context: DialogContext, options: object = None): 45 """ 46 Method called when a new dialog has been pushed onto the stack and is being activated. 47 :param dialog_context: The dialog context for the current turn of conversation. 48 :param options: (Optional) additional argument(s) to pass to the dialog being started. 49 """ 50 dialog_args = self._validate_begin_dialog_args(options) 51 52 await dialog_context.context.send_trace_activity( 53 f"{SkillDialog.__name__}.BeginDialogAsync()", 54 label=f"Using activity of type: {dialog_args.activity.type}", 55 ) 56 57 # Create deep clone of the original activity to avoid altering it before forwarding it. 58 skill_activity: Activity = deepcopy(dialog_args.activity) 59 60 # Apply conversation reference and common properties from incoming activity before sending. 61 TurnContext.apply_conversation_reference( 62 skill_activity, 63 TurnContext.get_conversation_reference(dialog_context.context.activity), 64 is_incoming=True, 65 ) 66 67 # Store delivery mode in dialog state for later use. 68 dialog_context.active_dialog.state[ 69 self._deliver_mode_state_key 70 ] = dialog_args.activity.delivery_mode 71 72 # Create the conversationId and store it in the dialog context state so we can use it later 73 skill_conversation_id = await self._create_skill_conversation_id( 74 dialog_context.context, dialog_context.context.activity 75 ) 76 dialog_context.active_dialog.state[ 77 SkillDialog.SKILLCONVERSATIONIDSTATEKEY 78 ] = skill_conversation_id 79 80 # Send the activity to the skill. 
81 eoc_activity = await self._send_to_skill( 82 dialog_context.context, skill_activity, skill_conversation_id 83 ) 84 if eoc_activity: 85 return await dialog_context.end_dialog(eoc_activity.value) 86 87 return self.end_of_turn 88 89 async def continue_dialog(self, dialog_context: DialogContext): 90 if not self._on_validate_activity(dialog_context.context.activity): 91 return self.end_of_turn 92 93 await dialog_context.context.send_trace_activity( 94 f"{SkillDialog.__name__}.continue_dialog()", 95 label=f"ActivityType: {dialog_context.context.activity.type}", 96 ) 97 98 # Handle EndOfConversation from the skill (this will be sent to the this dialog by the SkillHandler if 99 # received from the Skill) 100 if dialog_context.context.activity.type == ActivityTypes.end_of_conversation: 101 await dialog_context.context.send_trace_activity( 102 f"{SkillDialog.__name__}.continue_dialog()", 103 label=f"Got {ActivityTypes.end_of_conversation}", 104 ) 105 106 return await dialog_context.end_dialog( 107 dialog_context.context.activity.value 108 ) 109 110 # Create deep clone of the original activity to avoid altering it before forwarding it. 111 skill_activity = deepcopy(dialog_context.context.activity) 112 113 skill_activity.delivery_mode = dialog_context.active_dialog.state[ 114 self._deliver_mode_state_key 115 ] 116 117 # Just forward to the remote skill 118 skill_conversation_id = dialog_context.active_dialog.state[ 119 SkillDialog.SKILLCONVERSATIONIDSTATEKEY 120 ] 121 eoc_activity = await self._send_to_skill( 122 dialog_context.context, skill_activity, skill_conversation_id 123 ) 124 if eoc_activity: 125 return await dialog_context.end_dialog(eoc_activity.value) 126 127 return self.end_of_turn 128 129 async def reprompt_dialog( # pylint: disable=unused-argument 130 self, context: TurnContext, instance: DialogInstance 131 ): 132 # Create and send an event to the skill so it can resume the dialog. 133 reprompt_event = Activity( 134 type=ActivityTypes.event, name=DialogEvents.reprompt_dialog 135 ) 136 137 # Apply conversation reference and common properties from incoming activity before sending. 138 TurnContext.apply_conversation_reference( 139 reprompt_event, 140 TurnContext.get_conversation_reference(context.activity), 141 is_incoming=True, 142 ) 143 144 # connection Name is not applicable for a RePrompt, as we don't expect as OAuthCard in response. 145 skill_conversation_id = instance.state[SkillDialog.SKILLCONVERSATIONIDSTATEKEY] 146 await self._send_to_skill(context, reprompt_event, skill_conversation_id) 147 148 async def resume_dialog( # pylint: disable=unused-argument 149 self, dialog_context: "DialogContext", reason: DialogReason, result: object 150 ): 151 await self.reprompt_dialog(dialog_context.context, dialog_context.active_dialog) 152 return self.end_of_turn 153 154 async def end_dialog( 155 self, context: TurnContext, instance: DialogInstance, reason: DialogReason 156 ): 157 # Send of of conversation to the skill if the dialog has been cancelled. 158 if reason in (DialogReason.CancelCalled, DialogReason.ReplaceCalled): 159 await context.send_trace_activity( 160 f"{SkillDialog.__name__}.end_dialog()", 161 label=f"ActivityType: {context.activity.type}", 162 ) 163 activity = Activity(type=ActivityTypes.end_of_conversation) 164 165 # Apply conversation reference and common properties from incoming activity before sending. 
166 TurnContext.apply_conversation_reference( 167 activity, 168 TurnContext.get_conversation_reference(context.activity), 169 is_incoming=True, 170 ) 171 activity.channel_data = context.activity.channel_data 172 activity.additional_properties = context.activity.additional_properties 173 174 # connection Name is not applicable for an EndDialog, as we don't expect as OAuthCard in response. 175 skill_conversation_id = instance.state[ 176 SkillDialog.SKILLCONVERSATIONIDSTATEKEY 177 ] 178 await self._send_to_skill(context, activity, skill_conversation_id) 179 180 await super().end_dialog(context, instance, reason) 181 182 def _validate_begin_dialog_args(self, options: object) -> BeginSkillDialogOptions: 183 if not options: 184 raise TypeError("options cannot be None.") 185 186 dialog_args = BeginSkillDialogOptions.from_object(options) 187 188 if not dialog_args: 189 raise TypeError( 190 "SkillDialog: options object not valid as BeginSkillDialogOptions." 191 ) 192 193 if not dialog_args.activity: 194 raise TypeError( 195 "SkillDialog: activity object in options as BeginSkillDialogOptions cannot be None." 196 ) 197 198 return dialog_args 199 200 def _on_validate_activity( 201 self, activity: Activity # pylint: disable=unused-argument 202 ) -> bool: 203 """ 204 Validates the activity sent during continue_dialog. 205 206 Override this method to implement a custom validator for the activity being sent during continue_dialog. 207 This method can be used to ignore activities of a certain type if needed. 208 If this method returns false, the dialog will end the turn without processing the activity. 209 """ 210 return True 211 212 async def _send_to_skill( 213 self, context: TurnContext, activity: Activity, skill_conversation_id: str 214 ) -> Activity: 215 if activity.type == ActivityTypes.invoke: 216 # Force ExpectReplies for invoke activities so we can get the replies right away and send 217 # them back to the channel if needed. This makes sure that the dialog will receive the Invoke 218 # response from the skill and any other activities sent, including EoC. 219 activity.delivery_mode = DeliveryModes.expect_replies 220 221 # Always save state before forwarding 222 # (the dialog stack won't get updated with the skillDialog and things won't work if you don't) 223 await self.dialog_options.conversation_state.save_changes(context, True) 224 225 skill_info = self.dialog_options.skill 226 response = await self.dialog_options.skill_client.post_activity( 227 self.dialog_options.bot_id, 228 skill_info.app_id, 229 skill_info.skill_endpoint, 230 self.dialog_options.skill_host_endpoint, 231 skill_conversation_id, 232 activity, 233 ) 234 235 # Inspect the skill response status 236 if not 200 <= response.status <= 299: 237 raise Exception( 238 f'Error invoking the skill id: "{skill_info.id}" at "{skill_info.skill_endpoint}"' 239 f" (status is {response.status}). \r\n {response.body}" 240 ) 241 242 eoc_activity: Activity = None 243 if activity.delivery_mode == DeliveryModes.expect_replies and response.body: 244 # Process replies in the response.Body. 
245 response.body: List[Activity] 246 response.body = ExpectedReplies().deserialize(response.body).activities 247 248 for from_skill_activity in response.body: 249 if from_skill_activity.type == ActivityTypes.end_of_conversation: 250 # Capture the EndOfConversation activity if it was sent from skill 251 eoc_activity = from_skill_activity 252 elif await self._intercept_oauth_cards( 253 context, from_skill_activity, self.dialog_options.connection_name 254 ): 255 # do nothing. Token exchange succeeded, so no oauthcard needs to be shown to the user 256 pass 257 else: 258 # Send the response back to the channel. 259 await context.send_activity(from_skill_activity) 260 261 return eoc_activity 262 263 async def _create_skill_conversation_id( 264 self, context: TurnContext, activity: Activity 265 ) -> str: 266 # Create a conversationId to interact with the skill and send the activity 267 conversation_id_factory_options = SkillConversationIdFactoryOptions( 268 from_bot_oauth_scope=context.turn_state.get(BotAdapter.BOT_OAUTH_SCOPE_KEY), 269 from_bot_id=self.dialog_options.bot_id, 270 activity=activity, 271 bot_framework_skill=self.dialog_options.skill, 272 ) 273 skill_conversation_id = await self.dialog_options.conversation_id_factory.create_skill_conversation_id( 274 conversation_id_factory_options 275 ) 276 return skill_conversation_id 277 278 async def _intercept_oauth_cards( 279 self, context: TurnContext, activity: Activity, connection_name: str 280 ): 281 """ 282 Tells is if we should intercept the OAuthCard message. 283 """ 284 if not connection_name or not isinstance( 285 context.adapter, ExtendedUserTokenProvider 286 ): 287 # The adapter may choose not to support token exchange, in which case we fallback to 288 # showing an oauth card to the user. 289 return False 290 291 oauth_card_attachment = next( 292 attachment 293 for attachment in activity.attachments 294 if attachment.content_type == ContentTypes.oauth_card 295 ) 296 if oauth_card_attachment: 297 oauth_card = oauth_card_attachment.content 298 if ( 299 oauth_card 300 and oauth_card.token_exchange_resource 301 and oauth_card.token_exchange_resource.uri 302 ): 303 try: 304 result = await context.adapter.exchange_token( 305 turn_context=context, 306 connection_name=connection_name, 307 user_id=context.activity.from_property.id, 308 exchange_request=TokenExchangeRequest( 309 uri=oauth_card.token_exchange_resource.uri 310 ), 311 ) 312 313 if result and result.token: 314 # If token above is null, then SSO has failed and hence we return false. 315 # If not, send an invoke to the skill with the token. 316 return await self._send_token_exchange_invoke_to_skill( 317 activity, 318 oauth_card.token_exchange_resource.id, 319 oauth_card.connection_name, 320 result.token, 321 ) 322 except: 323 # Failures in token exchange are not fatal. They simply mean that the user needs 324 # to be shown the OAuth card. 
325 return False 326 327 return False 328 329 async def _send_token_exchange_invoke_to_skill( 330 self, 331 incoming_activity: Activity, 332 request_id: str, 333 connection_name: str, 334 token: str, 335 ): 336 activity = incoming_activity.create_reply() 337 activity.type = ActivityTypes.invoke 338 activity.name = SignInConstants.token_exchange_operation_name 339 activity.value = TokenExchangeInvokeRequest( 340 id=request_id, token=token, connection_name=connection_name, 341 ) 342 343 # route the activity to the skill 344 skill_info = self.dialog_options.skill 345 response = await self.dialog_options.skill_client.post_activity( 346 self.dialog_options.bot_id, 347 skill_info.app_id, 348 skill_info.skill_endpoint, 349 self.dialog_options.skill_host_endpoint, 350 incoming_activity.conversation.id, 351 activity, 352 ) 353 354 # Check response status: true if success, false if failure 355 return response.is_successful_status_code() 356 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/libraries/botbuilder-dialogs/botbuilder/dialogs/skills/skill_dialog.py b/libraries/botbuilder-dialogs/botbuilder/dialogs/skills/skill_dialog.py --- a/libraries/botbuilder-dialogs/botbuilder/dialogs/skills/skill_dialog.py +++ b/libraries/botbuilder-dialogs/botbuilder/dialogs/skills/skill_dialog.py @@ -249,6 +249,11 @@ if from_skill_activity.type == ActivityTypes.end_of_conversation: # Capture the EndOfConversation activity if it was sent from skill eoc_activity = from_skill_activity + + # The conversation has ended, so cleanup the conversation id + await self.dialog_options.conversation_id_factory.delete_conversation_reference( + skill_conversation_id + ) elif await self._intercept_oauth_cards( context, from_skill_activity, self.dialog_options.connection_name ):
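The patch above releases the skill conversation id as soon as the skill returns an EndOfConversation activity inside an expect-replies exchange. A minimal sketch of why that cleanup matters is below; the `InMemoryConversationIdMap` class and its method names are illustrative stand-ins for a dictionary-backed id factory, not the actual botbuilder `SkillConversationIdFactoryBase` API.

```python
import asyncio
import uuid


class InMemoryConversationIdMap:
    """Illustrative, dictionary-backed stand-in for a skill conversation id factory."""

    def __init__(self) -> None:
        self._references = {}

    async def create_skill_conversation_id(self, options) -> str:
        # Remember whatever the host needs to route skill replies back to the caller.
        skill_conversation_id = str(uuid.uuid4())
        self._references[skill_conversation_id] = options
        return skill_conversation_id

    async def get_conversation_reference(self, skill_conversation_id: str):
        return self._references.get(skill_conversation_id)

    async def delete_conversation_reference(self, skill_conversation_id: str) -> None:
        # The call the patch adds; without it each finished skill conversation
        # leaves a stale entry behind in the factory's storage.
        self._references.pop(skill_conversation_id, None)


async def _demo() -> None:
    factory = InMemoryConversationIdMap()
    conversation_id = await factory.create_skill_conversation_id({"from_bot_id": "host-bot"})
    # ... the skill answers an expect-replies request with EndOfConversation ...
    await factory.delete_conversation_reference(conversation_id)
    assert await factory.get_conversation_reference(conversation_id) is None


if __name__ == "__main__":
    asyncio.run(_demo())
```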
{"golden_diff": "diff --git a/libraries/botbuilder-dialogs/botbuilder/dialogs/skills/skill_dialog.py b/libraries/botbuilder-dialogs/botbuilder/dialogs/skills/skill_dialog.py\n--- a/libraries/botbuilder-dialogs/botbuilder/dialogs/skills/skill_dialog.py\n+++ b/libraries/botbuilder-dialogs/botbuilder/dialogs/skills/skill_dialog.py\n@@ -249,6 +249,11 @@\n if from_skill_activity.type == ActivityTypes.end_of_conversation:\n # Capture the EndOfConversation activity if it was sent from skill\n eoc_activity = from_skill_activity\n+\n+ # The conversation has ended, so cleanup the conversation id\n+ await self.dialog_options.conversation_id_factory.delete_conversation_reference(\n+ skill_conversation_id\n+ )\n elif await self._intercept_oauth_cards(\n context, from_skill_activity, self.dialog_options.connection_name\n ):\n", "issue": "SkillDialog doesn't call SkillConversationIdFactory.DeleteConversationReference when using ExpectReplies\nSee [parent](https://github.com/microsoft/botframework-sdk/issues/6019)\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\nfrom copy import deepcopy\nfrom typing import List\n\nfrom botframework.connector.token_api.models import TokenExchangeRequest\nfrom botbuilder.schema import (\n Activity,\n ActivityTypes,\n ExpectedReplies,\n DeliveryModes,\n SignInConstants,\n TokenExchangeInvokeRequest,\n)\nfrom botbuilder.core import BotAdapter, TurnContext, ExtendedUserTokenProvider\nfrom botbuilder.core.card_factory import ContentTypes\nfrom botbuilder.core.skills import SkillConversationIdFactoryOptions\nfrom botbuilder.dialogs import (\n Dialog,\n DialogContext,\n DialogEvents,\n DialogReason,\n DialogInstance,\n)\n\nfrom .begin_skill_dialog_options import BeginSkillDialogOptions\nfrom .skill_dialog_options import SkillDialogOptions\n\n\nclass SkillDialog(Dialog):\n SKILLCONVERSATIONIDSTATEKEY = (\n \"Microsoft.Bot.Builder.Dialogs.SkillDialog.SkillConversationId\"\n )\n\n def __init__(self, dialog_options: SkillDialogOptions, dialog_id: str):\n super().__init__(dialog_id)\n if not dialog_options:\n raise TypeError(\"SkillDialog.__init__(): dialog_options cannot be None.\")\n\n self.dialog_options = dialog_options\n self._deliver_mode_state_key = \"deliverymode\"\n\n async def begin_dialog(self, dialog_context: DialogContext, options: object = None):\n \"\"\"\n Method called when a new dialog has been pushed onto the stack and is being activated.\n :param dialog_context: The dialog context for the current turn of conversation.\n :param options: (Optional) additional argument(s) to pass to the dialog being started.\n \"\"\"\n dialog_args = self._validate_begin_dialog_args(options)\n\n await dialog_context.context.send_trace_activity(\n f\"{SkillDialog.__name__}.BeginDialogAsync()\",\n label=f\"Using activity of type: {dialog_args.activity.type}\",\n )\n\n # Create deep clone of the original activity to avoid altering it before forwarding it.\n skill_activity: Activity = deepcopy(dialog_args.activity)\n\n # Apply conversation reference and common properties from incoming activity before sending.\n TurnContext.apply_conversation_reference(\n skill_activity,\n TurnContext.get_conversation_reference(dialog_context.context.activity),\n is_incoming=True,\n )\n\n # Store delivery mode in dialog state for later use.\n dialog_context.active_dialog.state[\n self._deliver_mode_state_key\n ] = dialog_args.activity.delivery_mode\n\n # Create the conversationId and store it in the dialog context state so we 
can use it later\n skill_conversation_id = await self._create_skill_conversation_id(\n dialog_context.context, dialog_context.context.activity\n )\n dialog_context.active_dialog.state[\n SkillDialog.SKILLCONVERSATIONIDSTATEKEY\n ] = skill_conversation_id\n\n # Send the activity to the skill.\n eoc_activity = await self._send_to_skill(\n dialog_context.context, skill_activity, skill_conversation_id\n )\n if eoc_activity:\n return await dialog_context.end_dialog(eoc_activity.value)\n\n return self.end_of_turn\n\n async def continue_dialog(self, dialog_context: DialogContext):\n if not self._on_validate_activity(dialog_context.context.activity):\n return self.end_of_turn\n\n await dialog_context.context.send_trace_activity(\n f\"{SkillDialog.__name__}.continue_dialog()\",\n label=f\"ActivityType: {dialog_context.context.activity.type}\",\n )\n\n # Handle EndOfConversation from the skill (this will be sent to the this dialog by the SkillHandler if\n # received from the Skill)\n if dialog_context.context.activity.type == ActivityTypes.end_of_conversation:\n await dialog_context.context.send_trace_activity(\n f\"{SkillDialog.__name__}.continue_dialog()\",\n label=f\"Got {ActivityTypes.end_of_conversation}\",\n )\n\n return await dialog_context.end_dialog(\n dialog_context.context.activity.value\n )\n\n # Create deep clone of the original activity to avoid altering it before forwarding it.\n skill_activity = deepcopy(dialog_context.context.activity)\n\n skill_activity.delivery_mode = dialog_context.active_dialog.state[\n self._deliver_mode_state_key\n ]\n\n # Just forward to the remote skill\n skill_conversation_id = dialog_context.active_dialog.state[\n SkillDialog.SKILLCONVERSATIONIDSTATEKEY\n ]\n eoc_activity = await self._send_to_skill(\n dialog_context.context, skill_activity, skill_conversation_id\n )\n if eoc_activity:\n return await dialog_context.end_dialog(eoc_activity.value)\n\n return self.end_of_turn\n\n async def reprompt_dialog( # pylint: disable=unused-argument\n self, context: TurnContext, instance: DialogInstance\n ):\n # Create and send an event to the skill so it can resume the dialog.\n reprompt_event = Activity(\n type=ActivityTypes.event, name=DialogEvents.reprompt_dialog\n )\n\n # Apply conversation reference and common properties from incoming activity before sending.\n TurnContext.apply_conversation_reference(\n reprompt_event,\n TurnContext.get_conversation_reference(context.activity),\n is_incoming=True,\n )\n\n # connection Name is not applicable for a RePrompt, as we don't expect as OAuthCard in response.\n skill_conversation_id = instance.state[SkillDialog.SKILLCONVERSATIONIDSTATEKEY]\n await self._send_to_skill(context, reprompt_event, skill_conversation_id)\n\n async def resume_dialog( # pylint: disable=unused-argument\n self, dialog_context: \"DialogContext\", reason: DialogReason, result: object\n ):\n await self.reprompt_dialog(dialog_context.context, dialog_context.active_dialog)\n return self.end_of_turn\n\n async def end_dialog(\n self, context: TurnContext, instance: DialogInstance, reason: DialogReason\n ):\n # Send of of conversation to the skill if the dialog has been cancelled.\n if reason in (DialogReason.CancelCalled, DialogReason.ReplaceCalled):\n await context.send_trace_activity(\n f\"{SkillDialog.__name__}.end_dialog()\",\n label=f\"ActivityType: {context.activity.type}\",\n )\n activity = Activity(type=ActivityTypes.end_of_conversation)\n\n # Apply conversation reference and common properties from incoming activity before sending.\n 
TurnContext.apply_conversation_reference(\n activity,\n TurnContext.get_conversation_reference(context.activity),\n is_incoming=True,\n )\n activity.channel_data = context.activity.channel_data\n activity.additional_properties = context.activity.additional_properties\n\n # connection Name is not applicable for an EndDialog, as we don't expect as OAuthCard in response.\n skill_conversation_id = instance.state[\n SkillDialog.SKILLCONVERSATIONIDSTATEKEY\n ]\n await self._send_to_skill(context, activity, skill_conversation_id)\n\n await super().end_dialog(context, instance, reason)\n\n def _validate_begin_dialog_args(self, options: object) -> BeginSkillDialogOptions:\n if not options:\n raise TypeError(\"options cannot be None.\")\n\n dialog_args = BeginSkillDialogOptions.from_object(options)\n\n if not dialog_args:\n raise TypeError(\n \"SkillDialog: options object not valid as BeginSkillDialogOptions.\"\n )\n\n if not dialog_args.activity:\n raise TypeError(\n \"SkillDialog: activity object in options as BeginSkillDialogOptions cannot be None.\"\n )\n\n return dialog_args\n\n def _on_validate_activity(\n self, activity: Activity # pylint: disable=unused-argument\n ) -> bool:\n \"\"\"\n Validates the activity sent during continue_dialog.\n\n Override this method to implement a custom validator for the activity being sent during continue_dialog.\n This method can be used to ignore activities of a certain type if needed.\n If this method returns false, the dialog will end the turn without processing the activity.\n \"\"\"\n return True\n\n async def _send_to_skill(\n self, context: TurnContext, activity: Activity, skill_conversation_id: str\n ) -> Activity:\n if activity.type == ActivityTypes.invoke:\n # Force ExpectReplies for invoke activities so we can get the replies right away and send\n # them back to the channel if needed. This makes sure that the dialog will receive the Invoke\n # response from the skill and any other activities sent, including EoC.\n activity.delivery_mode = DeliveryModes.expect_replies\n\n # Always save state before forwarding\n # (the dialog stack won't get updated with the skillDialog and things won't work if you don't)\n await self.dialog_options.conversation_state.save_changes(context, True)\n\n skill_info = self.dialog_options.skill\n response = await self.dialog_options.skill_client.post_activity(\n self.dialog_options.bot_id,\n skill_info.app_id,\n skill_info.skill_endpoint,\n self.dialog_options.skill_host_endpoint,\n skill_conversation_id,\n activity,\n )\n\n # Inspect the skill response status\n if not 200 <= response.status <= 299:\n raise Exception(\n f'Error invoking the skill id: \"{skill_info.id}\" at \"{skill_info.skill_endpoint}\"'\n f\" (status is {response.status}). \\r\\n {response.body}\"\n )\n\n eoc_activity: Activity = None\n if activity.delivery_mode == DeliveryModes.expect_replies and response.body:\n # Process replies in the response.Body.\n response.body: List[Activity]\n response.body = ExpectedReplies().deserialize(response.body).activities\n\n for from_skill_activity in response.body:\n if from_skill_activity.type == ActivityTypes.end_of_conversation:\n # Capture the EndOfConversation activity if it was sent from skill\n eoc_activity = from_skill_activity\n elif await self._intercept_oauth_cards(\n context, from_skill_activity, self.dialog_options.connection_name\n ):\n # do nothing. 
Token exchange succeeded, so no oauthcard needs to be shown to the user\n pass\n else:\n # Send the response back to the channel.\n await context.send_activity(from_skill_activity)\n\n return eoc_activity\n\n async def _create_skill_conversation_id(\n self, context: TurnContext, activity: Activity\n ) -> str:\n # Create a conversationId to interact with the skill and send the activity\n conversation_id_factory_options = SkillConversationIdFactoryOptions(\n from_bot_oauth_scope=context.turn_state.get(BotAdapter.BOT_OAUTH_SCOPE_KEY),\n from_bot_id=self.dialog_options.bot_id,\n activity=activity,\n bot_framework_skill=self.dialog_options.skill,\n )\n skill_conversation_id = await self.dialog_options.conversation_id_factory.create_skill_conversation_id(\n conversation_id_factory_options\n )\n return skill_conversation_id\n\n async def _intercept_oauth_cards(\n self, context: TurnContext, activity: Activity, connection_name: str\n ):\n \"\"\"\n Tells is if we should intercept the OAuthCard message.\n \"\"\"\n if not connection_name or not isinstance(\n context.adapter, ExtendedUserTokenProvider\n ):\n # The adapter may choose not to support token exchange, in which case we fallback to\n # showing an oauth card to the user.\n return False\n\n oauth_card_attachment = next(\n attachment\n for attachment in activity.attachments\n if attachment.content_type == ContentTypes.oauth_card\n )\n if oauth_card_attachment:\n oauth_card = oauth_card_attachment.content\n if (\n oauth_card\n and oauth_card.token_exchange_resource\n and oauth_card.token_exchange_resource.uri\n ):\n try:\n result = await context.adapter.exchange_token(\n turn_context=context,\n connection_name=connection_name,\n user_id=context.activity.from_property.id,\n exchange_request=TokenExchangeRequest(\n uri=oauth_card.token_exchange_resource.uri\n ),\n )\n\n if result and result.token:\n # If token above is null, then SSO has failed and hence we return false.\n # If not, send an invoke to the skill with the token.\n return await self._send_token_exchange_invoke_to_skill(\n activity,\n oauth_card.token_exchange_resource.id,\n oauth_card.connection_name,\n result.token,\n )\n except:\n # Failures in token exchange are not fatal. They simply mean that the user needs\n # to be shown the OAuth card.\n return False\n\n return False\n\n async def _send_token_exchange_invoke_to_skill(\n self,\n incoming_activity: Activity,\n request_id: str,\n connection_name: str,\n token: str,\n ):\n activity = incoming_activity.create_reply()\n activity.type = ActivityTypes.invoke\n activity.name = SignInConstants.token_exchange_operation_name\n activity.value = TokenExchangeInvokeRequest(\n id=request_id, token=token, connection_name=connection_name,\n )\n\n # route the activity to the skill\n skill_info = self.dialog_options.skill\n response = await self.dialog_options.skill_client.post_activity(\n self.dialog_options.bot_id,\n skill_info.app_id,\n skill_info.skill_endpoint,\n self.dialog_options.skill_host_endpoint,\n incoming_activity.conversation.id,\n activity,\n )\n\n # Check response status: true if success, false if failure\n return response.is_successful_status_code()\n", "path": "libraries/botbuilder-dialogs/botbuilder/dialogs/skills/skill_dialog.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\nfrom copy import deepcopy\nfrom typing import List\n\nfrom botframework.connector.token_api.models import TokenExchangeRequest\nfrom botbuilder.schema import (\n Activity,\n ActivityTypes,\n ExpectedReplies,\n DeliveryModes,\n SignInConstants,\n TokenExchangeInvokeRequest,\n)\nfrom botbuilder.core import BotAdapter, TurnContext, ExtendedUserTokenProvider\nfrom botbuilder.core.card_factory import ContentTypes\nfrom botbuilder.core.skills import SkillConversationIdFactoryOptions\nfrom botbuilder.dialogs import (\n Dialog,\n DialogContext,\n DialogEvents,\n DialogReason,\n DialogInstance,\n)\n\nfrom .begin_skill_dialog_options import BeginSkillDialogOptions\nfrom .skill_dialog_options import SkillDialogOptions\n\n\nclass SkillDialog(Dialog):\n SKILLCONVERSATIONIDSTATEKEY = (\n \"Microsoft.Bot.Builder.Dialogs.SkillDialog.SkillConversationId\"\n )\n\n def __init__(self, dialog_options: SkillDialogOptions, dialog_id: str):\n super().__init__(dialog_id)\n if not dialog_options:\n raise TypeError(\"SkillDialog.__init__(): dialog_options cannot be None.\")\n\n self.dialog_options = dialog_options\n self._deliver_mode_state_key = \"deliverymode\"\n\n async def begin_dialog(self, dialog_context: DialogContext, options: object = None):\n \"\"\"\n Method called when a new dialog has been pushed onto the stack and is being activated.\n :param dialog_context: The dialog context for the current turn of conversation.\n :param options: (Optional) additional argument(s) to pass to the dialog being started.\n \"\"\"\n dialog_args = self._validate_begin_dialog_args(options)\n\n await dialog_context.context.send_trace_activity(\n f\"{SkillDialog.__name__}.BeginDialogAsync()\",\n label=f\"Using activity of type: {dialog_args.activity.type}\",\n )\n\n # Create deep clone of the original activity to avoid altering it before forwarding it.\n skill_activity: Activity = deepcopy(dialog_args.activity)\n\n # Apply conversation reference and common properties from incoming activity before sending.\n TurnContext.apply_conversation_reference(\n skill_activity,\n TurnContext.get_conversation_reference(dialog_context.context.activity),\n is_incoming=True,\n )\n\n # Store delivery mode in dialog state for later use.\n dialog_context.active_dialog.state[\n self._deliver_mode_state_key\n ] = dialog_args.activity.delivery_mode\n\n # Create the conversationId and store it in the dialog context state so we can use it later\n skill_conversation_id = await self._create_skill_conversation_id(\n dialog_context.context, dialog_context.context.activity\n )\n dialog_context.active_dialog.state[\n SkillDialog.SKILLCONVERSATIONIDSTATEKEY\n ] = skill_conversation_id\n\n # Send the activity to the skill.\n eoc_activity = await self._send_to_skill(\n dialog_context.context, skill_activity, skill_conversation_id\n )\n if eoc_activity:\n return await dialog_context.end_dialog(eoc_activity.value)\n\n return self.end_of_turn\n\n async def continue_dialog(self, dialog_context: DialogContext):\n if not self._on_validate_activity(dialog_context.context.activity):\n return self.end_of_turn\n\n await dialog_context.context.send_trace_activity(\n f\"{SkillDialog.__name__}.continue_dialog()\",\n label=f\"ActivityType: {dialog_context.context.activity.type}\",\n )\n\n # Handle EndOfConversation from the skill (this will be sent to the this dialog by the SkillHandler if\n # received from the Skill)\n if dialog_context.context.activity.type == ActivityTypes.end_of_conversation:\n await 
dialog_context.context.send_trace_activity(\n f\"{SkillDialog.__name__}.continue_dialog()\",\n label=f\"Got {ActivityTypes.end_of_conversation}\",\n )\n\n return await dialog_context.end_dialog(\n dialog_context.context.activity.value\n )\n\n # Create deep clone of the original activity to avoid altering it before forwarding it.\n skill_activity = deepcopy(dialog_context.context.activity)\n\n skill_activity.delivery_mode = dialog_context.active_dialog.state[\n self._deliver_mode_state_key\n ]\n\n # Just forward to the remote skill\n skill_conversation_id = dialog_context.active_dialog.state[\n SkillDialog.SKILLCONVERSATIONIDSTATEKEY\n ]\n eoc_activity = await self._send_to_skill(\n dialog_context.context, skill_activity, skill_conversation_id\n )\n if eoc_activity:\n return await dialog_context.end_dialog(eoc_activity.value)\n\n return self.end_of_turn\n\n async def reprompt_dialog( # pylint: disable=unused-argument\n self, context: TurnContext, instance: DialogInstance\n ):\n # Create and send an event to the skill so it can resume the dialog.\n reprompt_event = Activity(\n type=ActivityTypes.event, name=DialogEvents.reprompt_dialog\n )\n\n # Apply conversation reference and common properties from incoming activity before sending.\n TurnContext.apply_conversation_reference(\n reprompt_event,\n TurnContext.get_conversation_reference(context.activity),\n is_incoming=True,\n )\n\n # connection Name is not applicable for a RePrompt, as we don't expect as OAuthCard in response.\n skill_conversation_id = instance.state[SkillDialog.SKILLCONVERSATIONIDSTATEKEY]\n await self._send_to_skill(context, reprompt_event, skill_conversation_id)\n\n async def resume_dialog( # pylint: disable=unused-argument\n self, dialog_context: \"DialogContext\", reason: DialogReason, result: object\n ):\n await self.reprompt_dialog(dialog_context.context, dialog_context.active_dialog)\n return self.end_of_turn\n\n async def end_dialog(\n self, context: TurnContext, instance: DialogInstance, reason: DialogReason\n ):\n # Send of of conversation to the skill if the dialog has been cancelled.\n if reason in (DialogReason.CancelCalled, DialogReason.ReplaceCalled):\n await context.send_trace_activity(\n f\"{SkillDialog.__name__}.end_dialog()\",\n label=f\"ActivityType: {context.activity.type}\",\n )\n activity = Activity(type=ActivityTypes.end_of_conversation)\n\n # Apply conversation reference and common properties from incoming activity before sending.\n TurnContext.apply_conversation_reference(\n activity,\n TurnContext.get_conversation_reference(context.activity),\n is_incoming=True,\n )\n activity.channel_data = context.activity.channel_data\n activity.additional_properties = context.activity.additional_properties\n\n # connection Name is not applicable for an EndDialog, as we don't expect as OAuthCard in response.\n skill_conversation_id = instance.state[\n SkillDialog.SKILLCONVERSATIONIDSTATEKEY\n ]\n await self._send_to_skill(context, activity, skill_conversation_id)\n\n await super().end_dialog(context, instance, reason)\n\n def _validate_begin_dialog_args(self, options: object) -> BeginSkillDialogOptions:\n if not options:\n raise TypeError(\"options cannot be None.\")\n\n dialog_args = BeginSkillDialogOptions.from_object(options)\n\n if not dialog_args:\n raise TypeError(\n \"SkillDialog: options object not valid as BeginSkillDialogOptions.\"\n )\n\n if not dialog_args.activity:\n raise TypeError(\n \"SkillDialog: activity object in options as BeginSkillDialogOptions cannot be None.\"\n )\n\n return 
dialog_args\n\n def _on_validate_activity(\n self, activity: Activity # pylint: disable=unused-argument\n ) -> bool:\n \"\"\"\n Validates the activity sent during continue_dialog.\n\n Override this method to implement a custom validator for the activity being sent during continue_dialog.\n This method can be used to ignore activities of a certain type if needed.\n If this method returns false, the dialog will end the turn without processing the activity.\n \"\"\"\n return True\n\n async def _send_to_skill(\n self, context: TurnContext, activity: Activity, skill_conversation_id: str\n ) -> Activity:\n if activity.type == ActivityTypes.invoke:\n # Force ExpectReplies for invoke activities so we can get the replies right away and send\n # them back to the channel if needed. This makes sure that the dialog will receive the Invoke\n # response from the skill and any other activities sent, including EoC.\n activity.delivery_mode = DeliveryModes.expect_replies\n\n # Always save state before forwarding\n # (the dialog stack won't get updated with the skillDialog and things won't work if you don't)\n await self.dialog_options.conversation_state.save_changes(context, True)\n\n skill_info = self.dialog_options.skill\n response = await self.dialog_options.skill_client.post_activity(\n self.dialog_options.bot_id,\n skill_info.app_id,\n skill_info.skill_endpoint,\n self.dialog_options.skill_host_endpoint,\n skill_conversation_id,\n activity,\n )\n\n # Inspect the skill response status\n if not 200 <= response.status <= 299:\n raise Exception(\n f'Error invoking the skill id: \"{skill_info.id}\" at \"{skill_info.skill_endpoint}\"'\n f\" (status is {response.status}). \\r\\n {response.body}\"\n )\n\n eoc_activity: Activity = None\n if activity.delivery_mode == DeliveryModes.expect_replies and response.body:\n # Process replies in the response.Body.\n response.body: List[Activity]\n response.body = ExpectedReplies().deserialize(response.body).activities\n\n for from_skill_activity in response.body:\n if from_skill_activity.type == ActivityTypes.end_of_conversation:\n # Capture the EndOfConversation activity if it was sent from skill\n eoc_activity = from_skill_activity\n\n # The conversation has ended, so cleanup the conversation id\n await self.dialog_options.conversation_id_factory.delete_conversation_reference(\n skill_conversation_id\n )\n elif await self._intercept_oauth_cards(\n context, from_skill_activity, self.dialog_options.connection_name\n ):\n # do nothing. 
Token exchange succeeded, so no oauthcard needs to be shown to the user\n pass\n else:\n # Send the response back to the channel.\n await context.send_activity(from_skill_activity)\n\n return eoc_activity\n\n async def _create_skill_conversation_id(\n self, context: TurnContext, activity: Activity\n ) -> str:\n # Create a conversationId to interact with the skill and send the activity\n conversation_id_factory_options = SkillConversationIdFactoryOptions(\n from_bot_oauth_scope=context.turn_state.get(BotAdapter.BOT_OAUTH_SCOPE_KEY),\n from_bot_id=self.dialog_options.bot_id,\n activity=activity,\n bot_framework_skill=self.dialog_options.skill,\n )\n skill_conversation_id = await self.dialog_options.conversation_id_factory.create_skill_conversation_id(\n conversation_id_factory_options\n )\n return skill_conversation_id\n\n async def _intercept_oauth_cards(\n self, context: TurnContext, activity: Activity, connection_name: str\n ):\n \"\"\"\n Tells is if we should intercept the OAuthCard message.\n \"\"\"\n if not connection_name or not isinstance(\n context.adapter, ExtendedUserTokenProvider\n ):\n # The adapter may choose not to support token exchange, in which case we fallback to\n # showing an oauth card to the user.\n return False\n\n oauth_card_attachment = next(\n attachment\n for attachment in activity.attachments\n if attachment.content_type == ContentTypes.oauth_card\n )\n if oauth_card_attachment:\n oauth_card = oauth_card_attachment.content\n if (\n oauth_card\n and oauth_card.token_exchange_resource\n and oauth_card.token_exchange_resource.uri\n ):\n try:\n result = await context.adapter.exchange_token(\n turn_context=context,\n connection_name=connection_name,\n user_id=context.activity.from_property.id,\n exchange_request=TokenExchangeRequest(\n uri=oauth_card.token_exchange_resource.uri\n ),\n )\n\n if result and result.token:\n # If token above is null, then SSO has failed and hence we return false.\n # If not, send an invoke to the skill with the token.\n return await self._send_token_exchange_invoke_to_skill(\n activity,\n oauth_card.token_exchange_resource.id,\n oauth_card.connection_name,\n result.token,\n )\n except:\n # Failures in token exchange are not fatal. They simply mean that the user needs\n # to be shown the OAuth card.\n return False\n\n return False\n\n async def _send_token_exchange_invoke_to_skill(\n self,\n incoming_activity: Activity,\n request_id: str,\n connection_name: str,\n token: str,\n ):\n activity = incoming_activity.create_reply()\n activity.type = ActivityTypes.invoke\n activity.name = SignInConstants.token_exchange_operation_name\n activity.value = TokenExchangeInvokeRequest(\n id=request_id, token=token, connection_name=connection_name,\n )\n\n # route the activity to the skill\n skill_info = self.dialog_options.skill\n response = await self.dialog_options.skill_client.post_activity(\n self.dialog_options.bot_id,\n skill_info.app_id,\n skill_info.skill_endpoint,\n self.dialog_options.skill_host_endpoint,\n incoming_activity.conversation.id,\n activity,\n )\n\n # Check response status: true if success, false if failure\n return response.is_successful_status_code()\n", "path": "libraries/botbuilder-dialogs/botbuilder/dialogs/skills/skill_dialog.py"}]}
4,073
202
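The control flow changed by the SkillDialog patch above can also be summarized with a toy model; `forward_to_skill`, the dictionary-shaped response, and the lowercase `endOfConversation` literal are invented for illustration and do not mirror the botbuilder types.

```python
async def forward_to_skill(activity, skill_client, id_factory):
    """Toy model of the patched _send_to_skill flow (not botbuilder code)."""
    skill_conversation_id = await id_factory.create_skill_conversation_id(activity)
    response = await skill_client.post_activity(skill_conversation_id, activity)

    eoc_activity = None
    for reply in response.get("activities", []):
        if reply.get("type") == "endOfConversation":
            eoc_activity = reply
            # Mirrors the added behaviour: the conversation reference is removed
            # as soon as the skill signals that the conversation is over.
            await id_factory.delete_conversation_reference(skill_conversation_id)
    return eoc_activity
```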
gh_patches_debug_20979
rasdani/github-patches
git_diff
pre-commit__pre-commit-1399
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- On a Windows system, in either win7 or win2012R2, performing a pre-commit reports an error All: It looks like the pre-commit is using virtualenv I used pre-commit on a Python project. Is there any way to solve this problem? The following log is excerpted from pre-committee.log **Log** ``` An unexpected error has occurred: CalledProcessError: command: ('C:\\Users\\Administrator\\.cache\\pre-commit\\repop9_0qne0\\py_env-default\\Scripts\\pip.EXE', 'install', '.') return code: 1 expected return code: 0 stdout: Looking in indexes: http://pypi.douban.com/simple Processing c:\users\administrator\.cache\pre-commit\repop9_0qne0 Collecting six Downloading http://pypi.doubanio.com/packages/65/eb/1f97cb97bfc2390a276969c6fae16075da282f5058082d4cb10c6c5c1dba/six-1.14.0-py2.py3-none-any.whl (10 kB) Collecting pip-tools==3.6.1 Downloading http://pypi.doubanio.com/packages/06/96/89872db07ae70770fba97205b0737c17ef013d0d1c790899c16bb8bac419/pip_tools-3.6.1-py2.py3-none-any.whl (35 kB) Collecting pip==19.1 Downloading http://pypi.doubanio.com/packages/f9/fb/863012b13912709c13cf5cfdbfb304fa6c727659d6290438e1a88df9d848/pip-19.1-py2.py3-none-any.whl (1.4 MB) Collecting click>=6 Downloading http://pypi.doubanio.com/packages/dd/c0/4d8f43a9b16e289f36478422031b8a63b54b6ac3b1ba605d602f10dd54d6/click-7.1.1-py2.py3-none-any.whl (82 kB) Building wheels for collected packages: pip-tools-compile Building wheel for pip-tools-compile (setup.py): started Building wheel for pip-tools-compile (setup.py): finished with status 'done' Created wheel for pip-tools-compile: filename=pip_tools_compile-1.0-py3-none-any.whl size=17905 sha256=30321f831b5ac147be919304dee138139d055f2bdb52a5511317bc718b29b76d Stored in directory: C:\Users\ADMINI~1\AppData\Local\Temp\pip-ephem-wheel-cache-b67alovp\wheels\f1\34\3c\bed42474e4aeb415aa0bfd1e28124cde97604fa12005eed65b Successfully built pip-tools-compile Installing collected packages: six, click, pip-tools, pip, pip-tools-compile Attempting uninstall: pip Found existing installation: pip 20.0.2 Uninstalling pip-20.0.2: Successfully uninstalled pip-20.0.2 stderr: ERROR: Could not install packages due to an EnvironmentError: [WinError 5] 拒绝访问。: 'C:\\Users\\ADMINI~1\\AppData\\Local\\Temp\\pip-uninstall-8wsztgaa\\pip.exe' Consider using the `--user` option or check the permissions. 
``` ``` Traceback (most recent call last): File "c:\program files\python37\lib\site-packages\pre_commit\error_handler.py", line 56, in error_handler yield File "c:\program files\python37\lib\site-packages\pre_commit\main.py", line 372, in main args=args.rest[1:], File "c:\program files\python37\lib\site-packages\pre_commit\commands\hook_impl.py", line 187, in hook_impl return retv | run(config, store, ns) File "c:\program files\python37\lib\site-packages\pre_commit\commands\run.py", line 355, in run install_hook_envs(hooks, store) File "c:\program files\python37\lib\site-packages\pre_commit\repository.py", line 200, in install_hook_envs _hook_install(hook) File "c:\program files\python37\lib\site-packages\pre_commit\repository.py", line 83, in _hook_install hook.prefix, hook.language_version, hook.additional_dependencies, File "c:\program files\python37\lib\site-packages\pre_commit\languages\python.py", line 197, in install_environment prefix, ('pip', 'install', '.') + additional_dependencies, File "c:\program files\python37\lib\site-packages\pre_commit\languages\helpers.py", line 25, in run_setup_cmd cmd_output_b(*cmd, cwd=prefix.prefix_dir) File "c:\program files\python37\lib\site-packages\pre_commit\util.py", line 156, in cmd_output_b raise CalledProcessError(returncode, cmd, retcode, stdout_b, stderr_b) pre_commit.util.CalledProcessError: <unprintable CalledProcessError object> ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pre_commit/languages/python.py` Content: ``` 1 import contextlib 2 import functools 3 import os 4 import sys 5 from typing import Callable 6 from typing import ContextManager 7 from typing import Generator 8 from typing import Optional 9 from typing import Sequence 10 from typing import Tuple 11 12 import pre_commit.constants as C 13 from pre_commit.envcontext import envcontext 14 from pre_commit.envcontext import PatchesT 15 from pre_commit.envcontext import UNSET 16 from pre_commit.envcontext import Var 17 from pre_commit.hook import Hook 18 from pre_commit.languages import helpers 19 from pre_commit.parse_shebang import find_executable 20 from pre_commit.prefix import Prefix 21 from pre_commit.util import CalledProcessError 22 from pre_commit.util import clean_path_on_failure 23 from pre_commit.util import cmd_output 24 from pre_commit.util import cmd_output_b 25 26 ENVIRONMENT_DIR = 'py_env' 27 28 29 def bin_dir(venv: str) -> str: 30 """On windows there's a different directory for the virtualenv""" 31 bin_part = 'Scripts' if os.name == 'nt' else 'bin' 32 return os.path.join(venv, bin_part) 33 34 35 def get_env_patch(venv: str) -> PatchesT: 36 return ( 37 ('PYTHONHOME', UNSET), 38 ('VIRTUAL_ENV', venv), 39 ('PATH', (bin_dir(venv), os.pathsep, Var('PATH'))), 40 ) 41 42 43 def _find_by_py_launcher( 44 version: str, 45 ) -> Optional[str]: # pragma: no cover (windows only) 46 if version.startswith('python'): 47 num = version[len('python'):] 48 try: 49 cmd = ('py', f'-{num}', '-c', 'import sys; print(sys.executable)') 50 return cmd_output(*cmd)[1].strip() 51 except CalledProcessError: 52 pass 53 return None 54 55 56 def _find_by_sys_executable() -> Optional[str]: 57 def _norm(path: str) -> Optional[str]: 58 _, exe = os.path.split(path.lower()) 59 exe, _, _ = exe.partition('.exe') 60 if exe not in {'python', 'pythonw'} and find_executable(exe): 61 return exe 62 return None 63 64 # On linux, I see these common sys.executables: 65 # 66 # system `python`: 
/usr/bin/python -> python2.7 67 # system `python2`: /usr/bin/python2 -> python2.7 68 # virtualenv v: v/bin/python (will not return from this loop) 69 # virtualenv v -ppython2: v/bin/python -> python2 70 # virtualenv v -ppython2.7: v/bin/python -> python2.7 71 # virtualenv v -ppypy: v/bin/python -> v/bin/pypy 72 for path in (sys.executable, os.path.realpath(sys.executable)): 73 exe = _norm(path) 74 if exe: 75 return exe 76 return None 77 78 79 @functools.lru_cache(maxsize=1) 80 def get_default_version() -> str: # pragma: no cover (platform dependent) 81 # First attempt from `sys.executable` (or the realpath) 82 exe = _find_by_sys_executable() 83 if exe: 84 return exe 85 86 # Next try the `pythonX.X` executable 87 exe = f'python{sys.version_info[0]}.{sys.version_info[1]}' 88 if find_executable(exe): 89 return exe 90 91 if _find_by_py_launcher(exe): 92 return exe 93 94 # Give a best-effort try for windows 95 default_folder_name = exe.replace('.', '') 96 if os.path.exists(fr'C:\{default_folder_name}\python.exe'): 97 return exe 98 99 # We tried! 100 return C.DEFAULT 101 102 103 def _sys_executable_matches(version: str) -> bool: 104 if version == 'python': 105 return True 106 elif not version.startswith('python'): 107 return False 108 109 try: 110 info = tuple(int(p) for p in version[len('python'):].split('.')) 111 except ValueError: 112 return False 113 114 return sys.version_info[:len(info)] == info 115 116 117 def norm_version(version: str) -> str: 118 # first see if our current executable is appropriate 119 if _sys_executable_matches(version): 120 return sys.executable 121 122 if os.name == 'nt': # pragma: no cover (windows) 123 version_exec = _find_by_py_launcher(version) 124 if version_exec: 125 return version_exec 126 127 # Try looking up by name 128 version_exec = find_executable(version) 129 if version_exec and version_exec != version: 130 return version_exec 131 132 # If it is in the form pythonx.x search in the default 133 # place on windows 134 if version.startswith('python'): 135 default_folder_name = version.replace('.', '') 136 return fr'C:\{default_folder_name}\python.exe' 137 138 # Otherwise assume it is a path 139 return os.path.expanduser(version) 140 141 142 def py_interface( 143 _dir: str, 144 _make_venv: Callable[[str, str], None], 145 ) -> Tuple[ 146 Callable[[Prefix, str], ContextManager[None]], 147 Callable[[Prefix, str], bool], 148 Callable[[Hook, Sequence[str], bool], Tuple[int, bytes]], 149 Callable[[Prefix, str, Sequence[str]], None], 150 ]: 151 @contextlib.contextmanager 152 def in_env( 153 prefix: Prefix, 154 language_version: str, 155 ) -> Generator[None, None, None]: 156 envdir = prefix.path(helpers.environment_dir(_dir, language_version)) 157 with envcontext(get_env_patch(envdir)): 158 yield 159 160 def healthy(prefix: Prefix, language_version: str) -> bool: 161 envdir = helpers.environment_dir(_dir, language_version) 162 exe_name = 'python.exe' if sys.platform == 'win32' else 'python' 163 py_exe = prefix.path(bin_dir(envdir), exe_name) 164 with in_env(prefix, language_version): 165 retcode, _, _ = cmd_output_b( 166 py_exe, '-c', 'import ctypes, datetime, io, os, ssl, weakref', 167 cwd='/', 168 retcode=None, 169 ) 170 return retcode == 0 171 172 def run_hook( 173 hook: Hook, 174 file_args: Sequence[str], 175 color: bool, 176 ) -> Tuple[int, bytes]: 177 with in_env(hook.prefix, hook.language_version): 178 return helpers.run_xargs(hook, hook.cmd, file_args, color=color) 179 180 def install_environment( 181 prefix: Prefix, 182 version: str, 183 
additional_dependencies: Sequence[str], 184 ) -> None: 185 additional_dependencies = tuple(additional_dependencies) 186 directory = helpers.environment_dir(_dir, version) 187 188 env_dir = prefix.path(directory) 189 with clean_path_on_failure(env_dir): 190 if version != C.DEFAULT: 191 python = norm_version(version) 192 else: 193 python = os.path.realpath(sys.executable) 194 _make_venv(env_dir, python) 195 with in_env(prefix, version): 196 helpers.run_setup_cmd( 197 prefix, ('pip', 'install', '.') + additional_dependencies, 198 ) 199 200 return in_env, healthy, run_hook, install_environment 201 202 203 def make_venv(envdir: str, python: str) -> None: 204 env = dict(os.environ, VIRTUALENV_NO_DOWNLOAD='1') 205 cmd = (sys.executable, '-mvirtualenv', envdir, '-p', python) 206 cmd_output_b(*cmd, env=env, cwd='/') 207 208 209 _interface = py_interface(ENVIRONMENT_DIR, make_venv) 210 in_env, healthy, run_hook, install_environment = _interface 211 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pre_commit/languages/python.py b/pre_commit/languages/python.py --- a/pre_commit/languages/python.py +++ b/pre_commit/languages/python.py @@ -182,8 +182,8 @@ version: str, additional_dependencies: Sequence[str], ) -> None: - additional_dependencies = tuple(additional_dependencies) directory = helpers.environment_dir(_dir, version) + install = ('python', '-mpip', 'install', '.', *additional_dependencies) env_dir = prefix.path(directory) with clean_path_on_failure(env_dir): @@ -193,9 +193,7 @@ python = os.path.realpath(sys.executable) _make_venv(env_dir, python) with in_env(prefix, version): - helpers.run_setup_cmd( - prefix, ('pip', 'install', '.') + additional_dependencies, - ) + helpers.run_setup_cmd(prefix, install) return in_env, healthy, run_hook, install_environment
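The patch above swaps the environment's `pip` console script for `python -mpip install ...`. The standalone snippet below illustrates the same idea outside of pre-commit (the function name and arguments are assumptions for the example): because no `pip.EXE` process is running, pip is free to uninstall or replace its own launcher when a dependency pins a different pip version, which is the step that raised the WinError 5 access-denied error in the report.

```python
import subprocess
import sys


def install_into_env(python_exe: str, *packages: str) -> None:
    """Install packages by running pip as a module rather than as pip.exe."""
    # On Windows the pip.exe launcher cannot overwrite itself while it is the
    # running process; ``python -m pip`` sidesteps that because the interpreter,
    # not the launcher, owns the process.
    subprocess.check_call([python_exe, "-m", "pip", "install", *packages])


if __name__ == "__main__":
    # Example: install the current project into the interpreter running this script.
    install_into_env(sys.executable, ".")
```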
{"golden_diff": "diff --git a/pre_commit/languages/python.py b/pre_commit/languages/python.py\n--- a/pre_commit/languages/python.py\n+++ b/pre_commit/languages/python.py\n@@ -182,8 +182,8 @@\n version: str,\n additional_dependencies: Sequence[str],\n ) -> None:\n- additional_dependencies = tuple(additional_dependencies)\n directory = helpers.environment_dir(_dir, version)\n+ install = ('python', '-mpip', 'install', '.', *additional_dependencies)\n \n env_dir = prefix.path(directory)\n with clean_path_on_failure(env_dir):\n@@ -193,9 +193,7 @@\n python = os.path.realpath(sys.executable)\n _make_venv(env_dir, python)\n with in_env(prefix, version):\n- helpers.run_setup_cmd(\n- prefix, ('pip', 'install', '.') + additional_dependencies,\n- )\n+ helpers.run_setup_cmd(prefix, install)\n \n return in_env, healthy, run_hook, install_environment\n", "issue": "On a Windows system, in either win7 or win2012R2, performing a pre-commit reports an error\n All:\r\nIt looks like the pre-commit is using virtualenv\r\n\r\nI used pre-commit on a Python project. \r\n\r\nIs there any way to solve this problem?\r\n\r\n\r\nThe following log is excerpted from pre-committee.log \r\n**Log**\r\n```\r\nAn unexpected error has occurred: CalledProcessError: command: ('C:\\\\Users\\\\Administrator\\\\.cache\\\\pre-commit\\\\repop9_0qne0\\\\py_env-default\\\\Scripts\\\\pip.EXE', 'install', '.')\r\nreturn code: 1\r\nexpected return code: 0\r\nstdout:\r\n Looking in indexes: http://pypi.douban.com/simple\r\n Processing c:\\users\\administrator\\.cache\\pre-commit\\repop9_0qne0\r\n Collecting six\r\n Downloading http://pypi.doubanio.com/packages/65/eb/1f97cb97bfc2390a276969c6fae16075da282f5058082d4cb10c6c5c1dba/six-1.14.0-py2.py3-none-any.whl (10 kB)\r\n Collecting pip-tools==3.6.1\r\n Downloading http://pypi.doubanio.com/packages/06/96/89872db07ae70770fba97205b0737c17ef013d0d1c790899c16bb8bac419/pip_tools-3.6.1-py2.py3-none-any.whl (35 kB)\r\n Collecting pip==19.1\r\n Downloading http://pypi.doubanio.com/packages/f9/fb/863012b13912709c13cf5cfdbfb304fa6c727659d6290438e1a88df9d848/pip-19.1-py2.py3-none-any.whl (1.4 MB)\r\n Collecting click>=6\r\n Downloading http://pypi.doubanio.com/packages/dd/c0/4d8f43a9b16e289f36478422031b8a63b54b6ac3b1ba605d602f10dd54d6/click-7.1.1-py2.py3-none-any.whl (82 kB)\r\n Building wheels for collected packages: pip-tools-compile\r\n Building wheel for pip-tools-compile (setup.py): started\r\n Building wheel for pip-tools-compile (setup.py): finished with status 'done'\r\n Created wheel for pip-tools-compile: filename=pip_tools_compile-1.0-py3-none-any.whl size=17905 sha256=30321f831b5ac147be919304dee138139d055f2bdb52a5511317bc718b29b76d\r\n Stored in directory: C:\\Users\\ADMINI~1\\AppData\\Local\\Temp\\pip-ephem-wheel-cache-b67alovp\\wheels\\f1\\34\\3c\\bed42474e4aeb415aa0bfd1e28124cde97604fa12005eed65b\r\n Successfully built pip-tools-compile\r\n Installing collected packages: six, click, pip-tools, pip, pip-tools-compile\r\n Attempting uninstall: pip\r\n Found existing installation: pip 20.0.2\r\n Uninstalling pip-20.0.2:\r\n Successfully uninstalled pip-20.0.2\r\n \r\nstderr:\r\n ERROR: Could not install packages due to an EnvironmentError: [WinError 5] \u62d2\u7edd\u8bbf\u95ee\u3002: 'C:\\\\Users\\\\ADMINI~1\\\\AppData\\\\Local\\\\Temp\\\\pip-uninstall-8wsztgaa\\\\pip.exe'\r\n Consider using the `--user` option or check the permissions.\r\n \r\n \r\n``` \r\n \r\n```\r\nTraceback (most recent call last):\r\n File \"c:\\program 
files\\python37\\lib\\site-packages\\pre_commit\\error_handler.py\", line 56, in error_handler\r\n yield\r\n File \"c:\\program files\\python37\\lib\\site-packages\\pre_commit\\main.py\", line 372, in main\r\n args=args.rest[1:],\r\n File \"c:\\program files\\python37\\lib\\site-packages\\pre_commit\\commands\\hook_impl.py\", line 187, in hook_impl\r\n return retv | run(config, store, ns)\r\n File \"c:\\program files\\python37\\lib\\site-packages\\pre_commit\\commands\\run.py\", line 355, in run\r\n install_hook_envs(hooks, store)\r\n File \"c:\\program files\\python37\\lib\\site-packages\\pre_commit\\repository.py\", line 200, in install_hook_envs\r\n _hook_install(hook)\r\n File \"c:\\program files\\python37\\lib\\site-packages\\pre_commit\\repository.py\", line 83, in _hook_install\r\n hook.prefix, hook.language_version, hook.additional_dependencies,\r\n File \"c:\\program files\\python37\\lib\\site-packages\\pre_commit\\languages\\python.py\", line 197, in install_environment\r\n prefix, ('pip', 'install', '.') + additional_dependencies,\r\n File \"c:\\program files\\python37\\lib\\site-packages\\pre_commit\\languages\\helpers.py\", line 25, in run_setup_cmd\r\n cmd_output_b(*cmd, cwd=prefix.prefix_dir)\r\n File \"c:\\program files\\python37\\lib\\site-packages\\pre_commit\\util.py\", line 156, in cmd_output_b\r\n raise CalledProcessError(returncode, cmd, retcode, stdout_b, stderr_b)\r\npre_commit.util.CalledProcessError: <unprintable CalledProcessError object>\r\n\r\n```\n", "before_files": [{"content": "import contextlib\nimport functools\nimport os\nimport sys\nfrom typing import Callable\nfrom typing import ContextManager\nfrom typing import Generator\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Tuple\n\nimport pre_commit.constants as C\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import PatchesT\nfrom pre_commit.envcontext import UNSET\nfrom pre_commit.envcontext import Var\nfrom pre_commit.hook import Hook\nfrom pre_commit.languages import helpers\nfrom pre_commit.parse_shebang import find_executable\nfrom pre_commit.prefix import Prefix\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import cmd_output_b\n\nENVIRONMENT_DIR = 'py_env'\n\n\ndef bin_dir(venv: str) -> str:\n \"\"\"On windows there's a different directory for the virtualenv\"\"\"\n bin_part = 'Scripts' if os.name == 'nt' else 'bin'\n return os.path.join(venv, bin_part)\n\n\ndef get_env_patch(venv: str) -> PatchesT:\n return (\n ('PYTHONHOME', UNSET),\n ('VIRTUAL_ENV', venv),\n ('PATH', (bin_dir(venv), os.pathsep, Var('PATH'))),\n )\n\n\ndef _find_by_py_launcher(\n version: str,\n) -> Optional[str]: # pragma: no cover (windows only)\n if version.startswith('python'):\n num = version[len('python'):]\n try:\n cmd = ('py', f'-{num}', '-c', 'import sys; print(sys.executable)')\n return cmd_output(*cmd)[1].strip()\n except CalledProcessError:\n pass\n return None\n\n\ndef _find_by_sys_executable() -> Optional[str]:\n def _norm(path: str) -> Optional[str]:\n _, exe = os.path.split(path.lower())\n exe, _, _ = exe.partition('.exe')\n if exe not in {'python', 'pythonw'} and find_executable(exe):\n return exe\n return None\n\n # On linux, I see these common sys.executables:\n #\n # system `python`: /usr/bin/python -> python2.7\n # system `python2`: /usr/bin/python2 -> python2.7\n # virtualenv v: v/bin/python (will not return from this loop)\n # virtualenv v 
-ppython2: v/bin/python -> python2\n # virtualenv v -ppython2.7: v/bin/python -> python2.7\n # virtualenv v -ppypy: v/bin/python -> v/bin/pypy\n for path in (sys.executable, os.path.realpath(sys.executable)):\n exe = _norm(path)\n if exe:\n return exe\n return None\n\n\[email protected]_cache(maxsize=1)\ndef get_default_version() -> str: # pragma: no cover (platform dependent)\n # First attempt from `sys.executable` (or the realpath)\n exe = _find_by_sys_executable()\n if exe:\n return exe\n\n # Next try the `pythonX.X` executable\n exe = f'python{sys.version_info[0]}.{sys.version_info[1]}'\n if find_executable(exe):\n return exe\n\n if _find_by_py_launcher(exe):\n return exe\n\n # Give a best-effort try for windows\n default_folder_name = exe.replace('.', '')\n if os.path.exists(fr'C:\\{default_folder_name}\\python.exe'):\n return exe\n\n # We tried!\n return C.DEFAULT\n\n\ndef _sys_executable_matches(version: str) -> bool:\n if version == 'python':\n return True\n elif not version.startswith('python'):\n return False\n\n try:\n info = tuple(int(p) for p in version[len('python'):].split('.'))\n except ValueError:\n return False\n\n return sys.version_info[:len(info)] == info\n\n\ndef norm_version(version: str) -> str:\n # first see if our current executable is appropriate\n if _sys_executable_matches(version):\n return sys.executable\n\n if os.name == 'nt': # pragma: no cover (windows)\n version_exec = _find_by_py_launcher(version)\n if version_exec:\n return version_exec\n\n # Try looking up by name\n version_exec = find_executable(version)\n if version_exec and version_exec != version:\n return version_exec\n\n # If it is in the form pythonx.x search in the default\n # place on windows\n if version.startswith('python'):\n default_folder_name = version.replace('.', '')\n return fr'C:\\{default_folder_name}\\python.exe'\n\n # Otherwise assume it is a path\n return os.path.expanduser(version)\n\n\ndef py_interface(\n _dir: str,\n _make_venv: Callable[[str, str], None],\n) -> Tuple[\n Callable[[Prefix, str], ContextManager[None]],\n Callable[[Prefix, str], bool],\n Callable[[Hook, Sequence[str], bool], Tuple[int, bytes]],\n Callable[[Prefix, str, Sequence[str]], None],\n]:\n @contextlib.contextmanager\n def in_env(\n prefix: Prefix,\n language_version: str,\n ) -> Generator[None, None, None]:\n envdir = prefix.path(helpers.environment_dir(_dir, language_version))\n with envcontext(get_env_patch(envdir)):\n yield\n\n def healthy(prefix: Prefix, language_version: str) -> bool:\n envdir = helpers.environment_dir(_dir, language_version)\n exe_name = 'python.exe' if sys.platform == 'win32' else 'python'\n py_exe = prefix.path(bin_dir(envdir), exe_name)\n with in_env(prefix, language_version):\n retcode, _, _ = cmd_output_b(\n py_exe, '-c', 'import ctypes, datetime, io, os, ssl, weakref',\n cwd='/',\n retcode=None,\n )\n return retcode == 0\n\n def run_hook(\n hook: Hook,\n file_args: Sequence[str],\n color: bool,\n ) -> Tuple[int, bytes]:\n with in_env(hook.prefix, hook.language_version):\n return helpers.run_xargs(hook, hook.cmd, file_args, color=color)\n\n def install_environment(\n prefix: Prefix,\n version: str,\n additional_dependencies: Sequence[str],\n ) -> None:\n additional_dependencies = tuple(additional_dependencies)\n directory = helpers.environment_dir(_dir, version)\n\n env_dir = prefix.path(directory)\n with clean_path_on_failure(env_dir):\n if version != C.DEFAULT:\n python = norm_version(version)\n else:\n python = os.path.realpath(sys.executable)\n _make_venv(env_dir, python)\n 
with in_env(prefix, version):\n helpers.run_setup_cmd(\n prefix, ('pip', 'install', '.') + additional_dependencies,\n )\n\n return in_env, healthy, run_hook, install_environment\n\n\ndef make_venv(envdir: str, python: str) -> None:\n env = dict(os.environ, VIRTUALENV_NO_DOWNLOAD='1')\n cmd = (sys.executable, '-mvirtualenv', envdir, '-p', python)\n cmd_output_b(*cmd, env=env, cwd='/')\n\n\n_interface = py_interface(ENVIRONMENT_DIR, make_venv)\nin_env, healthy, run_hook, install_environment = _interface\n", "path": "pre_commit/languages/python.py"}], "after_files": [{"content": "import contextlib\nimport functools\nimport os\nimport sys\nfrom typing import Callable\nfrom typing import ContextManager\nfrom typing import Generator\nfrom typing import Optional\nfrom typing import Sequence\nfrom typing import Tuple\n\nimport pre_commit.constants as C\nfrom pre_commit.envcontext import envcontext\nfrom pre_commit.envcontext import PatchesT\nfrom pre_commit.envcontext import UNSET\nfrom pre_commit.envcontext import Var\nfrom pre_commit.hook import Hook\nfrom pre_commit.languages import helpers\nfrom pre_commit.parse_shebang import find_executable\nfrom pre_commit.prefix import Prefix\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import clean_path_on_failure\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import cmd_output_b\n\nENVIRONMENT_DIR = 'py_env'\n\n\ndef bin_dir(venv: str) -> str:\n \"\"\"On windows there's a different directory for the virtualenv\"\"\"\n bin_part = 'Scripts' if os.name == 'nt' else 'bin'\n return os.path.join(venv, bin_part)\n\n\ndef get_env_patch(venv: str) -> PatchesT:\n return (\n ('PYTHONHOME', UNSET),\n ('VIRTUAL_ENV', venv),\n ('PATH', (bin_dir(venv), os.pathsep, Var('PATH'))),\n )\n\n\ndef _find_by_py_launcher(\n version: str,\n) -> Optional[str]: # pragma: no cover (windows only)\n if version.startswith('python'):\n num = version[len('python'):]\n try:\n cmd = ('py', f'-{num}', '-c', 'import sys; print(sys.executable)')\n return cmd_output(*cmd)[1].strip()\n except CalledProcessError:\n pass\n return None\n\n\ndef _find_by_sys_executable() -> Optional[str]:\n def _norm(path: str) -> Optional[str]:\n _, exe = os.path.split(path.lower())\n exe, _, _ = exe.partition('.exe')\n if exe not in {'python', 'pythonw'} and find_executable(exe):\n return exe\n return None\n\n # On linux, I see these common sys.executables:\n #\n # system `python`: /usr/bin/python -> python2.7\n # system `python2`: /usr/bin/python2 -> python2.7\n # virtualenv v: v/bin/python (will not return from this loop)\n # virtualenv v -ppython2: v/bin/python -> python2\n # virtualenv v -ppython2.7: v/bin/python -> python2.7\n # virtualenv v -ppypy: v/bin/python -> v/bin/pypy\n for path in (sys.executable, os.path.realpath(sys.executable)):\n exe = _norm(path)\n if exe:\n return exe\n return None\n\n\[email protected]_cache(maxsize=1)\ndef get_default_version() -> str: # pragma: no cover (platform dependent)\n # First attempt from `sys.executable` (or the realpath)\n exe = _find_by_sys_executable()\n if exe:\n return exe\n\n # Next try the `pythonX.X` executable\n exe = f'python{sys.version_info[0]}.{sys.version_info[1]}'\n if find_executable(exe):\n return exe\n\n if _find_by_py_launcher(exe):\n return exe\n\n # Give a best-effort try for windows\n default_folder_name = exe.replace('.', '')\n if os.path.exists(fr'C:\\{default_folder_name}\\python.exe'):\n return exe\n\n # We tried!\n return C.DEFAULT\n\n\ndef _sys_executable_matches(version: str) -> bool:\n if 
version == 'python':\n return True\n elif not version.startswith('python'):\n return False\n\n try:\n info = tuple(int(p) for p in version[len('python'):].split('.'))\n except ValueError:\n return False\n\n return sys.version_info[:len(info)] == info\n\n\ndef norm_version(version: str) -> str:\n # first see if our current executable is appropriate\n if _sys_executable_matches(version):\n return sys.executable\n\n if os.name == 'nt': # pragma: no cover (windows)\n version_exec = _find_by_py_launcher(version)\n if version_exec:\n return version_exec\n\n # Try looking up by name\n version_exec = find_executable(version)\n if version_exec and version_exec != version:\n return version_exec\n\n # If it is in the form pythonx.x search in the default\n # place on windows\n if version.startswith('python'):\n default_folder_name = version.replace('.', '')\n return fr'C:\\{default_folder_name}\\python.exe'\n\n # Otherwise assume it is a path\n return os.path.expanduser(version)\n\n\ndef py_interface(\n _dir: str,\n _make_venv: Callable[[str, str], None],\n) -> Tuple[\n Callable[[Prefix, str], ContextManager[None]],\n Callable[[Prefix, str], bool],\n Callable[[Hook, Sequence[str], bool], Tuple[int, bytes]],\n Callable[[Prefix, str, Sequence[str]], None],\n]:\n @contextlib.contextmanager\n def in_env(\n prefix: Prefix,\n language_version: str,\n ) -> Generator[None, None, None]:\n envdir = prefix.path(helpers.environment_dir(_dir, language_version))\n with envcontext(get_env_patch(envdir)):\n yield\n\n def healthy(prefix: Prefix, language_version: str) -> bool:\n envdir = helpers.environment_dir(_dir, language_version)\n exe_name = 'python.exe' if sys.platform == 'win32' else 'python'\n py_exe = prefix.path(bin_dir(envdir), exe_name)\n with in_env(prefix, language_version):\n retcode, _, _ = cmd_output_b(\n py_exe, '-c', 'import ctypes, datetime, io, os, ssl, weakref',\n cwd='/',\n retcode=None,\n )\n return retcode == 0\n\n def run_hook(\n hook: Hook,\n file_args: Sequence[str],\n color: bool,\n ) -> Tuple[int, bytes]:\n with in_env(hook.prefix, hook.language_version):\n return helpers.run_xargs(hook, hook.cmd, file_args, color=color)\n\n def install_environment(\n prefix: Prefix,\n version: str,\n additional_dependencies: Sequence[str],\n ) -> None:\n directory = helpers.environment_dir(_dir, version)\n install = ('python', '-mpip', 'install', '.', *additional_dependencies)\n\n env_dir = prefix.path(directory)\n with clean_path_on_failure(env_dir):\n if version != C.DEFAULT:\n python = norm_version(version)\n else:\n python = os.path.realpath(sys.executable)\n _make_venv(env_dir, python)\n with in_env(prefix, version):\n helpers.run_setup_cmd(prefix, install)\n\n return in_env, healthy, run_hook, install_environment\n\n\ndef make_venv(envdir: str, python: str) -> None:\n env = dict(os.environ, VIRTUALENV_NO_DOWNLOAD='1')\n cmd = (sys.executable, '-mvirtualenv', envdir, '-p', python)\n cmd_output_b(*cmd, env=env, cwd='/')\n\n\n_interface = py_interface(ENVIRONMENT_DIR, make_venv)\nin_env, healthy, run_hook, install_environment = _interface\n", "path": "pre_commit/languages/python.py"}]}
3,809
217
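The file shown in this record is pre-commit's Python language plugin: `make_venv` shells out to `virtualenv` (with wheel downloads disabled) to build the hook environment, and `install_environment` then pip-installs the hook repository plus any additional dependencies into it. As a rough, standalone sketch of those two steps — the paths and the extra dependency below are made-up placeholders, and plain `subprocess` stands in for pre-commit's `cmd_output_b`/`run_setup_cmd` helpers:

```python
import os
import subprocess
import sys


def make_venv(envdir: str, python: str) -> None:
    # Same idea as the record's make_venv: stop virtualenv from downloading
    # pip/setuptools wheels, then create the environment with the requested
    # interpreter.
    env = dict(os.environ, VIRTUALENV_NO_DOWNLOAD='1')
    subprocess.run(
        (sys.executable, '-mvirtualenv', envdir, '-p', python),
        env=env, cwd='/', check=True,
    )


def install_hook(envdir: str, repo_dir: str, extra_deps: tuple = ()) -> None:
    # Equivalent of install_environment's
    # ('python', '-mpip', 'install', '.', *additional_dependencies),
    # but invoked through the venv's own interpreter instead of patching
    # PATH/VIRTUAL_ENV the way in_env() does.
    bin_part = 'Scripts' if os.name == 'nt' else 'bin'
    venv_python = os.path.join(envdir, bin_part, 'python')
    subprocess.run(
        (venv_python, '-mpip', 'install', '.', *extra_deps),
        cwd=repo_dir, check=True,
    )


if __name__ == '__main__':
    # Hypothetical locations, for illustration only.
    make_venv('/tmp/py_env-default', sys.executable)
    install_hook('/tmp/py_env-default', '/tmp/some-hook-repo', ('flake8',))
```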
gh_patches_debug_4622
rasdani/github-patches
git_diff
scikit-hep__pyhf-1083
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Migrate from bumpversion to bump2version # Description @dguest has brought to my attention that [`bumpversion` is no longer maintained](https://github.com/peritus/bumpversion) (as of apparently November 2019). Given this we should probably take the project's advice > 🎬 If you want to start using `bumpversion`, you're best advised to install one of the maintained forks, e.g. ➡ @ c4urself's [`bump2version`](https://github.com/c4urself/bump2version/#installation). given that it seems that [transferring ownership and maintainers is taking a very long time/might not happen](https://github.com/c4urself/bump2version/issues/86). --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 from setuptools import setup 2 3 extras_require = { 4 'shellcomplete': ['click_completion'], 5 'tensorflow': [ 6 'tensorflow~=2.2.0', # TensorFlow minor releases are as volatile as major 7 'tensorflow-probability~=0.10.0', 8 ], 9 'torch': ['torch~=1.2'], 10 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'], 11 'xmlio': ['uproot~=3.6'], # Future proof against uproot4 API changes 12 'minuit': ['iminuit~=1.4.3'], # v1.5.0 breaks pyhf for 32b TensorFlow and PyTorch 13 } 14 extras_require['backends'] = sorted( 15 set( 16 extras_require['tensorflow'] 17 + extras_require['torch'] 18 + extras_require['jax'] 19 + extras_require['minuit'] 20 ) 21 ) 22 extras_require['contrib'] = sorted(set(['matplotlib'])) 23 extras_require['lint'] = sorted(set(['pyflakes', 'black'])) 24 25 extras_require['test'] = sorted( 26 set( 27 extras_require['backends'] 28 + extras_require['xmlio'] 29 + extras_require['contrib'] 30 + extras_require['shellcomplete'] 31 + [ 32 'pytest~=6.0', 33 'pytest-cov>=2.5.1', 34 'pytest-mock', 35 'pytest-benchmark[histogram]', 36 'pytest-console-scripts', 37 'pytest-mpl', 38 'pydocstyle', 39 'coverage>=4.0', # coveralls 40 'papermill~=2.0', 41 'nteract-scrapbook~=0.2', 42 'jupyter', 43 'uproot~=3.3', 44 'graphviz', 45 'jsonpatch', 46 ] 47 ) 48 ) 49 extras_require['docs'] = sorted( 50 set( 51 [ 52 'sphinx>=3.1.2', 53 'sphinxcontrib-bibtex', 54 'sphinx-click', 55 'sphinx_rtd_theme', 56 'nbsphinx', 57 'ipywidgets', 58 'sphinx-issues', 59 'sphinx-copybutton>0.2.9', 60 ] 61 ) 62 ) 63 extras_require['develop'] = sorted( 64 set( 65 extras_require['docs'] 66 + extras_require['lint'] 67 + extras_require['test'] 68 + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'check-manifest', 'twine'] 69 ) 70 ) 71 extras_require['complete'] = sorted(set(sum(extras_require.values(), []))) 72 73 74 setup( 75 extras_require=extras_require, 76 use_scm_version=lambda: {'local_scheme': lambda version: ''}, 77 ) 78 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -65,7 +65,7 @@ extras_require['docs'] + extras_require['lint'] + extras_require['test'] - + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'check-manifest', 'twine'] + + ['nbdime', 'bump2version', 'ipython', 'pre-commit', 'check-manifest', 'twine'] ) ) extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -65,7 +65,7 @@\n extras_require['docs']\n + extras_require['lint']\n + extras_require['test']\n- + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'check-manifest', 'twine']\n+ + ['nbdime', 'bump2version', 'ipython', 'pre-commit', 'check-manifest', 'twine']\n )\n )\n extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n", "issue": "Migrate from bumpversion to bump2version\n# Description\r\n\r\n@dguest has brought to my attention that [`bumpversion` is no longer maintained](https://github.com/peritus/bumpversion) (as of apparently November 2019). Given this we should probably take the project's advice\r\n\r\n> \ud83c\udfac If you want to start using `bumpversion`, you're best advised to install one of the maintained forks, e.g. \u27a1 @ c4urself's [`bump2version`](https://github.com/c4urself/bump2version/#installation).\r\n\r\ngiven that it seems that [transferring ownership and maintainers is taking a very long time/might not happen](https://github.com/c4urself/bump2version/issues/86).\n", "before_files": [{"content": "from setuptools import setup\n\nextras_require = {\n 'shellcomplete': ['click_completion'],\n 'tensorflow': [\n 'tensorflow~=2.2.0', # TensorFlow minor releases are as volatile as major\n 'tensorflow-probability~=0.10.0',\n ],\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],\n 'xmlio': ['uproot~=3.6'], # Future proof against uproot4 API changes\n 'minuit': ['iminuit~=1.4.3'], # v1.5.0 breaks pyhf for 32b TensorFlow and PyTorch\n}\nextras_require['backends'] = sorted(\n set(\n extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted(set(['matplotlib']))\nextras_require['lint'] = sorted(set(['pyflakes', 'black']))\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + extras_require['shellcomplete']\n + [\n 'pytest~=6.0',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'coverage>=4.0', # coveralls\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'jupyter',\n 'uproot~=3.3',\n 'graphviz',\n 'jsonpatch',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n [\n 'sphinx>=3.1.2',\n 'sphinxcontrib-bibtex',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>0.2.9',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['lint']\n + extras_require['test']\n + ['nbdime', 'bumpversion', 'ipython', 'pre-commit', 'check-manifest', 'twine']\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n extras_require=extras_require,\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import setup\n\nextras_require = {\n 'shellcomplete': ['click_completion'],\n 'tensorflow': [\n 'tensorflow~=2.2.0', # TensorFlow minor releases are as volatile as major\n 'tensorflow-probability~=0.10.0',\n ],\n 'torch': ['torch~=1.2'],\n 'jax': ['jax~=0.1,>0.1.51', 'jaxlib~=0.1,>0.1.33'],\n 'xmlio': ['uproot~=3.6'], # Future proof against uproot4 API changes\n 'minuit': ['iminuit~=1.4.3'], # v1.5.0 breaks pyhf for 32b TensorFlow and PyTorch\n}\nextras_require['backends'] = sorted(\n set(\n 
extras_require['tensorflow']\n + extras_require['torch']\n + extras_require['jax']\n + extras_require['minuit']\n )\n)\nextras_require['contrib'] = sorted(set(['matplotlib']))\nextras_require['lint'] = sorted(set(['pyflakes', 'black']))\n\nextras_require['test'] = sorted(\n set(\n extras_require['backends']\n + extras_require['xmlio']\n + extras_require['contrib']\n + extras_require['shellcomplete']\n + [\n 'pytest~=6.0',\n 'pytest-cov>=2.5.1',\n 'pytest-mock',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'pytest-mpl',\n 'pydocstyle',\n 'coverage>=4.0', # coveralls\n 'papermill~=2.0',\n 'nteract-scrapbook~=0.2',\n 'jupyter',\n 'uproot~=3.3',\n 'graphviz',\n 'jsonpatch',\n ]\n )\n)\nextras_require['docs'] = sorted(\n set(\n [\n 'sphinx>=3.1.2',\n 'sphinxcontrib-bibtex',\n 'sphinx-click',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'ipywidgets',\n 'sphinx-issues',\n 'sphinx-copybutton>0.2.9',\n ]\n )\n)\nextras_require['develop'] = sorted(\n set(\n extras_require['docs']\n + extras_require['lint']\n + extras_require['test']\n + ['nbdime', 'bump2version', 'ipython', 'pre-commit', 'check-manifest', 'twine']\n )\n)\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\n\nsetup(\n extras_require=extras_require,\n use_scm_version=lambda: {'local_scheme': lambda version: ''},\n)\n", "path": "setup.py"}]}
1,158
132
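The golden diff for this record is a one-word swap inside the `develop` extra of `setup.py`; reproduced here in context (an excerpt of the patched file, taken from the diff rather than written fresh):

```python
# setup.py (excerpt) -- develop extra after replacing the unmaintained
# bumpversion package with its maintained fork, bump2version.
extras_require['develop'] = sorted(
    set(
        extras_require['docs']
        + extras_require['lint']
        + extras_require['test']
        + ['nbdime', 'bump2version', 'ipython', 'pre-commit', 'check-manifest', 'twine']
    )
)
```

Because bump2version is advertised as a drop-in fork, the release workflow should still look roughly like `bump2version patch` / `bump2version minor` against the existing bumpversion configuration, though that usage is an assumption and not part of this record.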
gh_patches_debug_38627
rasdani/github-patches
git_diff
sopel-irc__sopel-1439
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Sopel's web lookups should not work with internal addresses like http://192.168.1.1 Example (also reproduced in freenode.net's official sopel bot via /msg command): [01:51pm] <Ant> http://192.168.1.1 01:51PM <URL> [ 401 Unauthorized ] - 192.168.1.1 [01:51pm] <Ant> http://192.168.100.1 01:51PM <URL> [ Touchstone Status ] - 192.168.100.1 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `sopel/modules/url.py` Content: ``` 1 # coding=utf-8 2 """ 3 url.py - Sopel URL Title Module 4 Copyright 2010-2011, Michael Yanovich (yanovich.net) & Kenneth Sham 5 Copyright 2012-2013, Elsie Powell 6 Copyright 2013, Lior Ramati <[email protected]> 7 Copyright 2014, Elad Alfassa <[email protected]> 8 Licensed under the Eiffel Forum License 2. 9 10 https://sopel.chat 11 """ 12 from __future__ import unicode_literals, absolute_import, print_function, division 13 14 import re 15 16 import requests 17 18 from sopel import __version__, module, tools, web 19 from sopel.config.types import ListAttribute, StaticSection, ValidatedAttribute 20 21 USER_AGENT = 'Sopel/{} (https://sopel.chat)'.format(__version__) 22 default_headers = {'User-Agent': USER_AGENT} 23 # These are used to clean up the title tag before actually parsing it. Not the 24 # world's best way to do this, but it'll do for now. 25 title_tag_data = re.compile('<(/?)title( [^>]+)?>', re.IGNORECASE) 26 quoted_title = re.compile('[\'"]<title>[\'"]', re.IGNORECASE) 27 # This is another regex that presumably does something important. 28 re_dcc = re.compile(r'(?i)dcc\ssend') 29 # This sets the maximum number of bytes that should be read in order to find 30 # the title. We don't want it too high, or a link to a big file/stream will 31 # just keep downloading until there's no more memory. 640k ought to be enough 32 # for anybody. 33 max_bytes = 655360 34 35 36 class UrlSection(StaticSection): 37 # TODO some validation rules maybe? 38 exclude = ListAttribute('exclude') 39 """A list of regular expressions to match URLs for which the title should not be shown.""" 40 exclusion_char = ValidatedAttribute('exclusion_char', default='!') 41 """A character (or string) which, when immediately preceding a URL, will stop that URL's title from being shown.""" 42 shorten_url_length = ValidatedAttribute( 43 'shorten_url_length', int, default=0) 44 """If greater than 0, the title fetcher will include a TinyURL version of links longer than this many characters.""" 45 46 47 def configure(config): 48 """ 49 | name | example | purpose | 50 | ---- | ------- | ------- | 51 | exclude | https?://git\\\\.io/.* | A list of regular expressions for URLs for which the title should not be shown. | 52 | exclusion\\_char | ! | A character (or string) which, when immediately preceding a URL, will stop the URL's title from being shown. | 53 | shorten\\_url\\_length | 72 | If greater than 0, the title fetcher will include a TinyURL version of links longer than this many characters. | 54 """ 55 config.define_section('url', UrlSection) 56 config.url.configure_setting( 57 'exclude', 58 'Enter regular expressions for each URL you would like to exclude.' 
59 ) 60 config.url.configure_setting( 61 'exclusion_char', 62 'Enter a character which can be prefixed to suppress URL titling' 63 ) 64 config.url.configure_setting( 65 'shorten_url_length', 66 'Enter how many characters a URL should be before the bot puts a' 67 ' shorter version of the URL in the title as a TinyURL link' 68 ' (0 to disable)' 69 ) 70 71 72 def setup(bot): 73 bot.config.define_section('url', UrlSection) 74 75 if bot.config.url.exclude: 76 regexes = [re.compile(s) for s in bot.config.url.exclude] 77 else: 78 regexes = [] 79 80 # We're keeping these in their own list, rather than putting then in the 81 # callbacks list because 1, it's easier to deal with modules that are still 82 # using this list, and not the newer callbacks list and 2, having a lambda 83 # just to pass is kinda ugly. 84 if 'url_exclude' not in bot.memory: 85 bot.memory['url_exclude'] = regexes 86 else: 87 exclude = bot.memory['url_exclude'] 88 if regexes: 89 exclude.extend(regexes) 90 bot.memory['url_exclude'] = exclude 91 92 # Ensure last_seen_url is in memory 93 if 'last_seen_url' not in bot.memory: 94 bot.memory['last_seen_url'] = tools.SopelMemory() 95 96 # Initialize shortened_urls as a dict if it doesn't exist. 97 if 'shortened_urls' not in bot.memory: 98 bot.memory['shortened_urls'] = tools.SopelMemory() 99 100 101 @module.commands('title') 102 @module.example( 103 '.title https://www.google.com', 104 '[ Google ] - www.google.com', 105 online=True) 106 def title_command(bot, trigger): 107 """ 108 Show the title or URL information for the given URL, or the last URL seen 109 in this channel. 110 """ 111 if not trigger.group(2): 112 if trigger.sender not in bot.memory['last_seen_url']: 113 return 114 matched = check_callbacks( 115 bot, bot.memory['last_seen_url'][trigger.sender]) 116 if matched: 117 return 118 else: 119 urls = [bot.memory['last_seen_url'][trigger.sender]] 120 else: 121 urls = web.search_urls( 122 trigger, 123 exclusion_char=bot.config.url.exclusion_char) 124 125 for url, title, domain, tinyurl in process_urls(bot, trigger, urls): 126 message = '[ %s ] - %s' % (title, domain) 127 if tinyurl: 128 message += ' ( %s )' % tinyurl 129 bot.reply(message) 130 bot.memory['last_seen_url'][trigger.sender] = url 131 132 133 @module.rule(r'(?u).*(https?://\S+).*') 134 def title_auto(bot, trigger): 135 """ 136 Automatically show titles for URLs. For shortened URLs/redirects, find 137 where the URL redirects to and show the title for that (or call a function 138 from another module to give more information). 139 """ 140 if re.match(bot.config.core.prefix + 'title', trigger): 141 return 142 143 # Avoid fetching known malicious links 144 if 'safety_cache' in bot.memory and trigger in bot.memory['safety_cache']: 145 if bot.memory['safety_cache'][trigger]['positives'] > 1: 146 return 147 148 urls = web.search_urls( 149 trigger, exclusion_char=bot.config.url.exclusion_char, clean=True) 150 151 for url, title, domain, tinyurl in process_urls(bot, trigger, urls): 152 message = '[ %s ] - %s' % (title, domain) 153 if tinyurl: 154 message += ' ( %s )' % tinyurl 155 # Guard against responding to other instances of this bot. 156 if message != trigger: 157 bot.say(message) 158 bot.memory['last_seen_url'][trigger.sender] = url 159 160 161 def process_urls(bot, trigger, urls): 162 """ 163 For each URL in the list, ensure that it isn't handled by another module. 164 If not, find where it redirects to, if anywhere. If that redirected URL 165 should be handled by another module, dispatch the callback for it. 
166 Return a list of (title, hostname) tuples for each URL which is not handled 167 by another module. 168 """ 169 shorten_url_length = bot.config.url.shorten_url_length 170 for url in urls: 171 # Exclude URLs that start with the exclusion char 172 if url.startswith(bot.config.url.exclusion_char): 173 continue 174 175 # Check the URL does not match an existing URL callback 176 if check_callbacks(bot, url): 177 continue 178 179 # Call the URL to get a title, if possible 180 title = find_title(url, verify=bot.config.core.verify_ssl) 181 if not title: 182 # No title found: don't handle this URL 183 continue 184 185 # If the URL is over bot.config.url.shorten_url_length, shorten the URL 186 tinyurl = None 187 if (shorten_url_length > 0) and (len(url) > shorten_url_length): 188 tinyurl = get_or_create_shorturl(bot, url) 189 190 yield (url, title, get_hostname(url), tinyurl) 191 192 193 def check_callbacks(bot, url): 194 """Check if ``url`` is excluded or matches any URL callback patterns. 195 196 :param bot: Sopel instance 197 :param str url: URL to check 198 :return: True if ``url`` is excluded or matches any URL Callback pattern 199 200 This function looks at the ``bot.memory`` for ``url_exclude`` patterns and 201 it returns ``True`` if any matches the given ``url``. Otherwise, it looks 202 at the ``bot``'s URL Callback patterns, and it returns ``True`` if any 203 matches, ``False`` otherwise. 204 205 .. seealso:: 206 207 The :func:`~sopel.modules.url.setup` function that defines the 208 ``url_exclude`` in ``bot.memory``. 209 210 .. versionchanged:: 7.0 211 212 This function **does not** trigger URL callbacks anymore when ``url`` 213 matches a pattern. 214 215 """ 216 # Check if it matches the exclusion list first 217 matched = any(regex.search(url) for regex in bot.memory['url_exclude']) 218 return matched or any(bot.search_url_callbacks(url)) 219 220 221 def find_title(url, verify=True): 222 """Return the title for the given URL.""" 223 try: 224 response = requests.get(url, stream=True, verify=verify, 225 headers=default_headers) 226 content = b'' 227 for byte in response.iter_content(chunk_size=512): 228 content += byte 229 if b'</title>' in content or len(content) > max_bytes: 230 break 231 content = content.decode('utf-8', errors='ignore') 232 # Need to close the connection because we have not read all 233 # the data 234 response.close() 235 except requests.exceptions.ConnectionError: 236 return None 237 238 # Some cleanup that I don't really grok, but was in the original, so 239 # we'll keep it (with the compiled regexes made global) for now. 240 content = title_tag_data.sub(r'<\1title>', content) 241 content = quoted_title.sub('', content) 242 243 start = content.rfind('<title>') 244 end = content.rfind('</title>') 245 if start == -1 or end == -1: 246 return 247 title = web.decode(content[start + 7:end]) 248 title = title.strip()[:200] 249 250 title = ' '.join(title.split()) # cleanly remove multiple spaces 251 252 # More cryptic regex substitutions. This one looks to be myano's invention. 
253 title = re_dcc.sub('', title) 254 255 return title or None 256 257 258 def get_hostname(url): 259 idx = 7 260 if url.startswith('https://'): 261 idx = 8 262 elif url.startswith('ftp://'): 263 idx = 6 264 hostname = url[idx:] 265 slash = hostname.find('/') 266 if slash != -1: 267 hostname = hostname[:slash] 268 return hostname 269 270 271 def get_or_create_shorturl(bot, url): 272 """Get or create a short URL for ``url`` 273 274 :param bot: Sopel instance 275 :param str url: URL to get or create a short URL for 276 :return: A short URL 277 :rtype: str 278 279 It gets the short URL for ``url`` from the bot's memory if it exists. 280 Otherwise, it creates a short URL (see :func:`get_tinyurl`), stores it 281 into the bot's memory, then returns it. 282 """ 283 # Check bot memory to see if the shortened URL is already in 284 # memory 285 if url in bot.memory['shortened_urls']: 286 return bot.memory['shortened_urls'][url] 287 288 tinyurl = get_tinyurl(url) 289 bot.memory['shortened_urls'][url] = tinyurl 290 return tinyurl 291 292 293 def get_tinyurl(url): 294 """Returns a shortened tinyURL link of the URL""" 295 base_url = "https://tinyurl.com/api-create.php" 296 tinyurl = "%s?%s" % (base_url, web.urlencode({'url': url})) 297 try: 298 res = requests.get(tinyurl) 299 res.raise_for_status() 300 except requests.exceptions.RequestException: 301 return None 302 # Replace text output with https instead of http to make the 303 # result an HTTPS link. 304 return res.text.replace("http://", "https://") 305 306 307 if __name__ == "__main__": 308 from sopel.test_tools import run_example_tests 309 run_example_tests(__file__) 310 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sopel/modules/url.py b/sopel/modules/url.py --- a/sopel/modules/url.py +++ b/sopel/modules/url.py @@ -13,11 +13,19 @@ import re +import dns.resolver +import ipaddress import requests from sopel import __version__, module, tools, web from sopel.config.types import ListAttribute, StaticSection, ValidatedAttribute +# Python3 vs Python2 +try: + from urllib.parse import urlparse +except ImportError: + from urlparse import urlparse + USER_AGENT = 'Sopel/{} (https://sopel.chat)'.format(__version__) default_headers = {'User-Agent': USER_AGENT} # These are used to clean up the title tag before actually parsing it. Not the @@ -42,6 +50,10 @@ shorten_url_length = ValidatedAttribute( 'shorten_url_length', int, default=0) """If greater than 0, the title fetcher will include a TinyURL version of links longer than this many characters.""" + enable_private_resolution = ValidatedAttribute( + 'enable_private_resolution', bool, default=False) + enable_dns_resolution = ValidatedAttribute( + 'enable_dns_resolution', bool, default=False) def configure(config): @@ -67,6 +79,14 @@ ' shorter version of the URL in the title as a TinyURL link' ' (0 to disable)' ) + config.url.configure_setting( + 'enable_private_resolution', + 'Enable URL lookups for RFC1918 addresses?' + ) + config.url.configure_setting( + 'enable_dns_resolution', + 'Enable DNS resolution for all domains to validate if there are RFC1918 resolutions?' + ) def setup(bot): @@ -173,6 +193,26 @@ if check_callbacks(bot, url): continue + # Prevent private addresses form being queried if enable_private_resolution is False + if not bot.config.url.enable_private_resolution: + parsed = urlparse(url) + # Check if it's an address like http://192.168.1.1 + try: + if ipaddress.ip_address(parsed.hostname).is_private or ipaddress.ip_address(parsed.hostname).is_loopback: + continue + except ValueError: + pass + + # Check if domains are RFC1918 addresses if enable_dns_resolutions is set + if bot.config.url.enable_dns_resolution: + private = False + for result in dns.resolver.query(parsed.hostname): + if ipaddress.ip_address(result).is_private: + private = True + break + if private: + continue + # Call the URL to get a title, if possible title = find_title(url, verify=bot.config.core.verify_ssl) if not title:
{"golden_diff": "diff --git a/sopel/modules/url.py b/sopel/modules/url.py\n--- a/sopel/modules/url.py\n+++ b/sopel/modules/url.py\n@@ -13,11 +13,19 @@\n \n import re\n \n+import dns.resolver\n+import ipaddress\n import requests\n \n from sopel import __version__, module, tools, web\n from sopel.config.types import ListAttribute, StaticSection, ValidatedAttribute\n \n+# Python3 vs Python2\n+try:\n+ from urllib.parse import urlparse\n+except ImportError:\n+ from urlparse import urlparse\n+\n USER_AGENT = 'Sopel/{} (https://sopel.chat)'.format(__version__)\n default_headers = {'User-Agent': USER_AGENT}\n # These are used to clean up the title tag before actually parsing it. Not the\n@@ -42,6 +50,10 @@\n shorten_url_length = ValidatedAttribute(\n 'shorten_url_length', int, default=0)\n \"\"\"If greater than 0, the title fetcher will include a TinyURL version of links longer than this many characters.\"\"\"\n+ enable_private_resolution = ValidatedAttribute(\n+ 'enable_private_resolution', bool, default=False)\n+ enable_dns_resolution = ValidatedAttribute(\n+ 'enable_dns_resolution', bool, default=False)\n \n \n def configure(config):\n@@ -67,6 +79,14 @@\n ' shorter version of the URL in the title as a TinyURL link'\n ' (0 to disable)'\n )\n+ config.url.configure_setting(\n+ 'enable_private_resolution',\n+ 'Enable URL lookups for RFC1918 addresses?'\n+ )\n+ config.url.configure_setting(\n+ 'enable_dns_resolution',\n+ 'Enable DNS resolution for all domains to validate if there are RFC1918 resolutions?'\n+ )\n \n \n def setup(bot):\n@@ -173,6 +193,26 @@\n if check_callbacks(bot, url):\n continue\n \n+ # Prevent private addresses form being queried if enable_private_resolution is False\n+ if not bot.config.url.enable_private_resolution:\n+ parsed = urlparse(url)\n+ # Check if it's an address like http://192.168.1.1\n+ try:\n+ if ipaddress.ip_address(parsed.hostname).is_private or ipaddress.ip_address(parsed.hostname).is_loopback:\n+ continue\n+ except ValueError:\n+ pass\n+\n+ # Check if domains are RFC1918 addresses if enable_dns_resolutions is set\n+ if bot.config.url.enable_dns_resolution:\n+ private = False\n+ for result in dns.resolver.query(parsed.hostname):\n+ if ipaddress.ip_address(result).is_private:\n+ private = True\n+ break\n+ if private:\n+ continue\n+\n # Call the URL to get a title, if possible\n title = find_title(url, verify=bot.config.core.verify_ssl)\n if not title:\n", "issue": "Sopel's web lookups should not work with internal addresses like http://192.168.1.1\nExample (also reproduced in freenode.net's official sopel bot via /msg command):\r\n[01:51pm] <Ant> http://192.168.1.1\r\n01:51PM <URL> [ 401 Unauthorized ] - 192.168.1.1\r\n[01:51pm] <Ant> http://192.168.100.1\r\n01:51PM <URL> [ Touchstone Status ] - 192.168.100.1\n", "before_files": [{"content": "# coding=utf-8\n\"\"\"\nurl.py - Sopel URL Title Module\nCopyright 2010-2011, Michael Yanovich (yanovich.net) & Kenneth Sham\nCopyright 2012-2013, Elsie Powell\nCopyright 2013, Lior Ramati <[email protected]>\nCopyright 2014, Elad Alfassa <[email protected]>\nLicensed under the Eiffel Forum License 2.\n\nhttps://sopel.chat\n\"\"\"\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport re\n\nimport requests\n\nfrom sopel import __version__, module, tools, web\nfrom sopel.config.types import ListAttribute, StaticSection, ValidatedAttribute\n\nUSER_AGENT = 'Sopel/{} (https://sopel.chat)'.format(__version__)\ndefault_headers = {'User-Agent': USER_AGENT}\n# These are used to clean up the 
title tag before actually parsing it. Not the\n# world's best way to do this, but it'll do for now.\ntitle_tag_data = re.compile('<(/?)title( [^>]+)?>', re.IGNORECASE)\nquoted_title = re.compile('[\\'\"]<title>[\\'\"]', re.IGNORECASE)\n# This is another regex that presumably does something important.\nre_dcc = re.compile(r'(?i)dcc\\ssend')\n# This sets the maximum number of bytes that should be read in order to find\n# the title. We don't want it too high, or a link to a big file/stream will\n# just keep downloading until there's no more memory. 640k ought to be enough\n# for anybody.\nmax_bytes = 655360\n\n\nclass UrlSection(StaticSection):\n # TODO some validation rules maybe?\n exclude = ListAttribute('exclude')\n \"\"\"A list of regular expressions to match URLs for which the title should not be shown.\"\"\"\n exclusion_char = ValidatedAttribute('exclusion_char', default='!')\n \"\"\"A character (or string) which, when immediately preceding a URL, will stop that URL's title from being shown.\"\"\"\n shorten_url_length = ValidatedAttribute(\n 'shorten_url_length', int, default=0)\n \"\"\"If greater than 0, the title fetcher will include a TinyURL version of links longer than this many characters.\"\"\"\n\n\ndef configure(config):\n \"\"\"\n | name | example | purpose |\n | ---- | ------- | ------- |\n | exclude | https?://git\\\\\\\\.io/.* | A list of regular expressions for URLs for which the title should not be shown. |\n | exclusion\\\\_char | ! | A character (or string) which, when immediately preceding a URL, will stop the URL's title from being shown. |\n | shorten\\\\_url\\\\_length | 72 | If greater than 0, the title fetcher will include a TinyURL version of links longer than this many characters. |\n \"\"\"\n config.define_section('url', UrlSection)\n config.url.configure_setting(\n 'exclude',\n 'Enter regular expressions for each URL you would like to exclude.'\n )\n config.url.configure_setting(\n 'exclusion_char',\n 'Enter a character which can be prefixed to suppress URL titling'\n )\n config.url.configure_setting(\n 'shorten_url_length',\n 'Enter how many characters a URL should be before the bot puts a'\n ' shorter version of the URL in the title as a TinyURL link'\n ' (0 to disable)'\n )\n\n\ndef setup(bot):\n bot.config.define_section('url', UrlSection)\n\n if bot.config.url.exclude:\n regexes = [re.compile(s) for s in bot.config.url.exclude]\n else:\n regexes = []\n\n # We're keeping these in their own list, rather than putting then in the\n # callbacks list because 1, it's easier to deal with modules that are still\n # using this list, and not the newer callbacks list and 2, having a lambda\n # just to pass is kinda ugly.\n if 'url_exclude' not in bot.memory:\n bot.memory['url_exclude'] = regexes\n else:\n exclude = bot.memory['url_exclude']\n if regexes:\n exclude.extend(regexes)\n bot.memory['url_exclude'] = exclude\n\n # Ensure last_seen_url is in memory\n if 'last_seen_url' not in bot.memory:\n bot.memory['last_seen_url'] = tools.SopelMemory()\n\n # Initialize shortened_urls as a dict if it doesn't exist.\n if 'shortened_urls' not in bot.memory:\n bot.memory['shortened_urls'] = tools.SopelMemory()\n\n\[email protected]('title')\[email protected](\n '.title https://www.google.com',\n '[ Google ] - www.google.com',\n online=True)\ndef title_command(bot, trigger):\n \"\"\"\n Show the title or URL information for the given URL, or the last URL seen\n in this channel.\n \"\"\"\n if not trigger.group(2):\n if trigger.sender not in bot.memory['last_seen_url']:\n return\n 
matched = check_callbacks(\n bot, bot.memory['last_seen_url'][trigger.sender])\n if matched:\n return\n else:\n urls = [bot.memory['last_seen_url'][trigger.sender]]\n else:\n urls = web.search_urls(\n trigger,\n exclusion_char=bot.config.url.exclusion_char)\n\n for url, title, domain, tinyurl in process_urls(bot, trigger, urls):\n message = '[ %s ] - %s' % (title, domain)\n if tinyurl:\n message += ' ( %s )' % tinyurl\n bot.reply(message)\n bot.memory['last_seen_url'][trigger.sender] = url\n\n\[email protected](r'(?u).*(https?://\\S+).*')\ndef title_auto(bot, trigger):\n \"\"\"\n Automatically show titles for URLs. For shortened URLs/redirects, find\n where the URL redirects to and show the title for that (or call a function\n from another module to give more information).\n \"\"\"\n if re.match(bot.config.core.prefix + 'title', trigger):\n return\n\n # Avoid fetching known malicious links\n if 'safety_cache' in bot.memory and trigger in bot.memory['safety_cache']:\n if bot.memory['safety_cache'][trigger]['positives'] > 1:\n return\n\n urls = web.search_urls(\n trigger, exclusion_char=bot.config.url.exclusion_char, clean=True)\n\n for url, title, domain, tinyurl in process_urls(bot, trigger, urls):\n message = '[ %s ] - %s' % (title, domain)\n if tinyurl:\n message += ' ( %s )' % tinyurl\n # Guard against responding to other instances of this bot.\n if message != trigger:\n bot.say(message)\n bot.memory['last_seen_url'][trigger.sender] = url\n\n\ndef process_urls(bot, trigger, urls):\n \"\"\"\n For each URL in the list, ensure that it isn't handled by another module.\n If not, find where it redirects to, if anywhere. If that redirected URL\n should be handled by another module, dispatch the callback for it.\n Return a list of (title, hostname) tuples for each URL which is not handled\n by another module.\n \"\"\"\n shorten_url_length = bot.config.url.shorten_url_length\n for url in urls:\n # Exclude URLs that start with the exclusion char\n if url.startswith(bot.config.url.exclusion_char):\n continue\n\n # Check the URL does not match an existing URL callback\n if check_callbacks(bot, url):\n continue\n\n # Call the URL to get a title, if possible\n title = find_title(url, verify=bot.config.core.verify_ssl)\n if not title:\n # No title found: don't handle this URL\n continue\n\n # If the URL is over bot.config.url.shorten_url_length, shorten the URL\n tinyurl = None\n if (shorten_url_length > 0) and (len(url) > shorten_url_length):\n tinyurl = get_or_create_shorturl(bot, url)\n\n yield (url, title, get_hostname(url), tinyurl)\n\n\ndef check_callbacks(bot, url):\n \"\"\"Check if ``url`` is excluded or matches any URL callback patterns.\n\n :param bot: Sopel instance\n :param str url: URL to check\n :return: True if ``url`` is excluded or matches any URL Callback pattern\n\n This function looks at the ``bot.memory`` for ``url_exclude`` patterns and\n it returns ``True`` if any matches the given ``url``. Otherwise, it looks\n at the ``bot``'s URL Callback patterns, and it returns ``True`` if any\n matches, ``False`` otherwise.\n\n .. seealso::\n\n The :func:`~sopel.modules.url.setup` function that defines the\n ``url_exclude`` in ``bot.memory``.\n\n .. 
versionchanged:: 7.0\n\n This function **does not** trigger URL callbacks anymore when ``url``\n matches a pattern.\n\n \"\"\"\n # Check if it matches the exclusion list first\n matched = any(regex.search(url) for regex in bot.memory['url_exclude'])\n return matched or any(bot.search_url_callbacks(url))\n\n\ndef find_title(url, verify=True):\n \"\"\"Return the title for the given URL.\"\"\"\n try:\n response = requests.get(url, stream=True, verify=verify,\n headers=default_headers)\n content = b''\n for byte in response.iter_content(chunk_size=512):\n content += byte\n if b'</title>' in content or len(content) > max_bytes:\n break\n content = content.decode('utf-8', errors='ignore')\n # Need to close the connection because we have not read all\n # the data\n response.close()\n except requests.exceptions.ConnectionError:\n return None\n\n # Some cleanup that I don't really grok, but was in the original, so\n # we'll keep it (with the compiled regexes made global) for now.\n content = title_tag_data.sub(r'<\\1title>', content)\n content = quoted_title.sub('', content)\n\n start = content.rfind('<title>')\n end = content.rfind('</title>')\n if start == -1 or end == -1:\n return\n title = web.decode(content[start + 7:end])\n title = title.strip()[:200]\n\n title = ' '.join(title.split()) # cleanly remove multiple spaces\n\n # More cryptic regex substitutions. This one looks to be myano's invention.\n title = re_dcc.sub('', title)\n\n return title or None\n\n\ndef get_hostname(url):\n idx = 7\n if url.startswith('https://'):\n idx = 8\n elif url.startswith('ftp://'):\n idx = 6\n hostname = url[idx:]\n slash = hostname.find('/')\n if slash != -1:\n hostname = hostname[:slash]\n return hostname\n\n\ndef get_or_create_shorturl(bot, url):\n \"\"\"Get or create a short URL for ``url``\n\n :param bot: Sopel instance\n :param str url: URL to get or create a short URL for\n :return: A short URL\n :rtype: str\n\n It gets the short URL for ``url`` from the bot's memory if it exists.\n Otherwise, it creates a short URL (see :func:`get_tinyurl`), stores it\n into the bot's memory, then returns it.\n \"\"\"\n # Check bot memory to see if the shortened URL is already in\n # memory\n if url in bot.memory['shortened_urls']:\n return bot.memory['shortened_urls'][url]\n\n tinyurl = get_tinyurl(url)\n bot.memory['shortened_urls'][url] = tinyurl\n return tinyurl\n\n\ndef get_tinyurl(url):\n \"\"\"Returns a shortened tinyURL link of the URL\"\"\"\n base_url = \"https://tinyurl.com/api-create.php\"\n tinyurl = \"%s?%s\" % (base_url, web.urlencode({'url': url}))\n try:\n res = requests.get(tinyurl)\n res.raise_for_status()\n except requests.exceptions.RequestException:\n return None\n # Replace text output with https instead of http to make the\n # result an HTTPS link.\n return res.text.replace(\"http://\", \"https://\")\n\n\nif __name__ == \"__main__\":\n from sopel.test_tools import run_example_tests\n run_example_tests(__file__)\n", "path": "sopel/modules/url.py"}], "after_files": [{"content": "# coding=utf-8\n\"\"\"\nurl.py - Sopel URL Title Module\nCopyright 2010-2011, Michael Yanovich (yanovich.net) & Kenneth Sham\nCopyright 2012-2013, Elsie Powell\nCopyright 2013, Lior Ramati <[email protected]>\nCopyright 2014, Elad Alfassa <[email protected]>\nLicensed under the Eiffel Forum License 2.\n\nhttps://sopel.chat\n\"\"\"\nfrom __future__ import unicode_literals, absolute_import, print_function, division\n\nimport re\n\nimport dns.resolver\nimport ipaddress\nimport requests\n\nfrom sopel import __version__, 
module, tools, web\nfrom sopel.config.types import ListAttribute, StaticSection, ValidatedAttribute\n\n# Python3 vs Python2\ntry:\n from urllib.parse import urlparse\nexcept ImportError:\n from urlparse import urlparse\n\nUSER_AGENT = 'Sopel/{} (https://sopel.chat)'.format(__version__)\ndefault_headers = {'User-Agent': USER_AGENT}\n# These are used to clean up the title tag before actually parsing it. Not the\n# world's best way to do this, but it'll do for now.\ntitle_tag_data = re.compile('<(/?)title( [^>]+)?>', re.IGNORECASE)\nquoted_title = re.compile('[\\'\"]<title>[\\'\"]', re.IGNORECASE)\n# This is another regex that presumably does something important.\nre_dcc = re.compile(r'(?i)dcc\\ssend')\n# This sets the maximum number of bytes that should be read in order to find\n# the title. We don't want it too high, or a link to a big file/stream will\n# just keep downloading until there's no more memory. 640k ought to be enough\n# for anybody.\nmax_bytes = 655360\n\n\nclass UrlSection(StaticSection):\n # TODO some validation rules maybe?\n exclude = ListAttribute('exclude')\n \"\"\"A list of regular expressions to match URLs for which the title should not be shown.\"\"\"\n exclusion_char = ValidatedAttribute('exclusion_char', default='!')\n \"\"\"A character (or string) which, when immediately preceding a URL, will stop that URL's title from being shown.\"\"\"\n shorten_url_length = ValidatedAttribute(\n 'shorten_url_length', int, default=0)\n \"\"\"If greater than 0, the title fetcher will include a TinyURL version of links longer than this many characters.\"\"\"\n enable_private_resolution = ValidatedAttribute(\n 'enable_private_resolution', bool, default=False)\n enable_dns_resolution = ValidatedAttribute(\n 'enable_dns_resolution', bool, default=False)\n\n\ndef configure(config):\n \"\"\"\n | name | example | purpose |\n | ---- | ------- | ------- |\n | exclude | https?://git\\\\\\\\.io/.* | A list of regular expressions for URLs for which the title should not be shown. |\n | exclusion\\\\_char | ! | A character (or string) which, when immediately preceding a URL, will stop the URL's title from being shown. |\n | shorten\\\\_url\\\\_length | 72 | If greater than 0, the title fetcher will include a TinyURL version of links longer than this many characters. 
|\n \"\"\"\n config.define_section('url', UrlSection)\n config.url.configure_setting(\n 'exclude',\n 'Enter regular expressions for each URL you would like to exclude.'\n )\n config.url.configure_setting(\n 'exclusion_char',\n 'Enter a character which can be prefixed to suppress URL titling'\n )\n config.url.configure_setting(\n 'shorten_url_length',\n 'Enter how many characters a URL should be before the bot puts a'\n ' shorter version of the URL in the title as a TinyURL link'\n ' (0 to disable)'\n )\n config.url.configure_setting(\n 'enable_private_resolution',\n 'Enable URL lookups for RFC1918 addresses?'\n )\n config.url.configure_setting(\n 'enable_dns_resolution',\n 'Enable DNS resolution for all domains to validate if there are RFC1918 resolutions?'\n )\n\n\ndef setup(bot):\n bot.config.define_section('url', UrlSection)\n\n if bot.config.url.exclude:\n regexes = [re.compile(s) for s in bot.config.url.exclude]\n else:\n regexes = []\n\n # We're keeping these in their own list, rather than putting then in the\n # callbacks list because 1, it's easier to deal with modules that are still\n # using this list, and not the newer callbacks list and 2, having a lambda\n # just to pass is kinda ugly.\n if 'url_exclude' not in bot.memory:\n bot.memory['url_exclude'] = regexes\n else:\n exclude = bot.memory['url_exclude']\n if regexes:\n exclude.extend(regexes)\n bot.memory['url_exclude'] = exclude\n\n # Ensure last_seen_url is in memory\n if 'last_seen_url' not in bot.memory:\n bot.memory['last_seen_url'] = tools.SopelMemory()\n\n # Initialize shortened_urls as a dict if it doesn't exist.\n if 'shortened_urls' not in bot.memory:\n bot.memory['shortened_urls'] = tools.SopelMemory()\n\n\[email protected]('title')\[email protected]('.title https://www.google.com', '[ Google ] - www.google.com')\ndef title_command(bot, trigger):\n \"\"\"\n Show the title or URL information for the given URL, or the last URL seen\n in this channel.\n \"\"\"\n if not trigger.group(2):\n if trigger.sender not in bot.memory['last_seen_url']:\n return\n matched = check_callbacks(\n bot, bot.memory['last_seen_url'][trigger.sender])\n if matched:\n return\n else:\n urls = [bot.memory['last_seen_url'][trigger.sender]]\n else:\n urls = web.search_urls(\n trigger,\n exclusion_char=bot.config.url.exclusion_char)\n\n for url, title, domain, tinyurl in process_urls(bot, trigger, urls):\n message = '[ %s ] - %s' % (title, domain)\n if tinyurl:\n message += ' ( %s )' % tinyurl\n bot.reply(message)\n bot.memory['last_seen_url'][trigger.sender] = url\n\n\[email protected](r'(?u).*(https?://\\S+).*')\ndef title_auto(bot, trigger):\n \"\"\"\n Automatically show titles for URLs. 
For shortened URLs/redirects, find\n where the URL redirects to and show the title for that (or call a function\n from another module to give more information).\n \"\"\"\n if re.match(bot.config.core.prefix + 'title', trigger):\n return\n\n # Avoid fetching known malicious links\n if 'safety_cache' in bot.memory and trigger in bot.memory['safety_cache']:\n if bot.memory['safety_cache'][trigger]['positives'] > 1:\n return\n\n urls = web.search_urls(\n trigger, exclusion_char=bot.config.url.exclusion_char, clean=True)\n\n for url, title, domain, tinyurl in process_urls(bot, trigger, urls):\n message = '[ %s ] - %s' % (title, domain)\n if tinyurl:\n message += ' ( %s )' % tinyurl\n # Guard against responding to other instances of this bot.\n if message != trigger:\n bot.say(message)\n bot.memory['last_seen_url'][trigger.sender] = url\n\n\ndef process_urls(bot, trigger, urls):\n \"\"\"\n For each URL in the list, ensure that it isn't handled by another module.\n If not, find where it redirects to, if anywhere. If that redirected URL\n should be handled by another module, dispatch the callback for it.\n Return a list of (title, hostname) tuples for each URL which is not handled\n by another module.\n \"\"\"\n shorten_url_length = bot.config.url.shorten_url_length\n for url in urls:\n # Exclude URLs that start with the exclusion char\n if url.startswith(bot.config.url.exclusion_char):\n continue\n\n # Check the URL does not match an existing URL callback\n if check_callbacks(bot, url):\n continue\n\n # Prevent private addresses form being queried if enable_private_resolution is False\n if not bot.config.url.enable_private_resolution:\n parsed = urlparse(url)\n # Check if it's an address like http://192.168.1.1\n try:\n if ipaddress.ip_address(parsed.hostname).is_private or ipaddress.ip_address(parsed.hostname).is_loopback:\n continue\n except ValueError:\n pass\n\n # Check if domains are RFC1918 addresses if enable_dns_resolutions is set\n if bot.config.url.enable_dns_resolution:\n private = False\n for result in dns.resolver.query(parsed.hostname):\n if ipaddress.ip_address(result).is_private:\n private = True\n break\n if private:\n continue\n\n # Call the URL to get a title, if possible\n title = find_title(url, verify=bot.config.core.verify_ssl)\n if not title:\n # No title found: don't handle this URL\n continue\n\n # If the URL is over bot.config.url.shorten_url_length, shorten the URL\n tinyurl = None\n if (shorten_url_length > 0) and (len(url) > shorten_url_length):\n tinyurl = get_or_create_shorturl(bot, url)\n\n yield (url, title, get_hostname(url), tinyurl)\n\n\ndef check_callbacks(bot, url):\n \"\"\"Check if ``url`` is excluded or matches any URL callback patterns.\n\n :param bot: Sopel instance\n :param str url: URL to check\n :return: True if ``url`` is excluded or matches any URL Callback pattern\n\n This function looks at the ``bot.memory`` for ``url_exclude`` patterns and\n it returns ``True`` if any matches the given ``url``. Otherwise, it looks\n at the ``bot``'s URL Callback patterns, and it returns ``True`` if any\n matches, ``False`` otherwise.\n\n .. seealso::\n\n The :func:`~sopel.modules.url.setup` function that defines the\n ``url_exclude`` in ``bot.memory``.\n\n .. 
versionchanged:: 7.0\n\n This function **does not** trigger URL callbacks anymore when ``url``\n matches a pattern.\n\n \"\"\"\n # Check if it matches the exclusion list first\n matched = any(regex.search(url) for regex in bot.memory['url_exclude'])\n return matched or any(bot.search_url_callbacks(url))\n\n\ndef find_title(url, verify=True):\n \"\"\"Return the title for the given URL.\"\"\"\n try:\n response = requests.get(url, stream=True, verify=verify,\n headers=default_headers)\n content = b''\n for byte in response.iter_content(chunk_size=512):\n content += byte\n if b'</title>' in content or len(content) > max_bytes:\n break\n content = content.decode('utf-8', errors='ignore')\n # Need to close the connection because we have not read all\n # the data\n response.close()\n except requests.exceptions.ConnectionError:\n return None\n\n # Some cleanup that I don't really grok, but was in the original, so\n # we'll keep it (with the compiled regexes made global) for now.\n content = title_tag_data.sub(r'<\\1title>', content)\n content = quoted_title.sub('', content)\n\n start = content.rfind('<title>')\n end = content.rfind('</title>')\n if start == -1 or end == -1:\n return\n title = web.decode(content[start + 7:end])\n title = title.strip()[:200]\n\n title = ' '.join(title.split()) # cleanly remove multiple spaces\n\n # More cryptic regex substitutions. This one looks to be myano's invention.\n title = re_dcc.sub('', title)\n\n return title or None\n\n\ndef get_hostname(url):\n idx = 7\n if url.startswith('https://'):\n idx = 8\n elif url.startswith('ftp://'):\n idx = 6\n hostname = url[idx:]\n slash = hostname.find('/')\n if slash != -1:\n hostname = hostname[:slash]\n return hostname\n\n\ndef get_or_create_shorturl(bot, url):\n \"\"\"Get or create a short URL for ``url``\n\n :param bot: Sopel instance\n :param str url: URL to get or create a short URL for\n :return: A short URL\n :rtype: str\n\n It gets the short URL for ``url`` from the bot's memory if it exists.\n Otherwise, it creates a short URL (see :func:`get_tinyurl`), stores it\n into the bot's memory, then returns it.\n \"\"\"\n # Check bot memory to see if the shortened URL is already in\n # memory\n if url in bot.memory['shortened_urls']:\n return bot.memory['shortened_urls'][url]\n\n tinyurl = get_tinyurl(url)\n bot.memory['shortened_urls'][url] = tinyurl\n return tinyurl\n\n\ndef get_tinyurl(url):\n \"\"\"Returns a shortened tinyURL link of the URL\"\"\"\n base_url = \"https://tinyurl.com/api-create.php\"\n tinyurl = \"%s?%s\" % (base_url, web.urlencode({'url': url}))\n try:\n res = requests.get(tinyurl)\n res.raise_for_status()\n except requests.exceptions.RequestException:\n return None\n # Replace text output with https instead of http to make the\n # result an HTTPS link.\n return res.text.replace(\"http://\", \"https://\")\n\n\nif __name__ == \"__main__\":\n from sopel.test_tools import run_example_tests\n run_example_tests(__file__)\n", "path": "sopel/modules/url.py"}]}
3,986
642
gh_patches_debug_14290
rasdani/github-patches
git_diff
open-telemetry__opentelemetry-python-2759
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add min/max fields to Histogram in otlp metrics exporter From proto https://github.com/open-telemetry/opentelemetry-proto/pull/279 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/metric_exporter/__init__.py` Content: ``` 1 # Copyright The OpenTelemetry Authors 2 # Licensed under the Apache License, Version 2.0 (the "License"); 3 # you may not use this file except in compliance with the License. 4 # You may obtain a copy of the License at 5 # 6 # http://www.apache.org/licenses/LICENSE-2.0 7 # 8 # Unless required by applicable law or agreed to in writing, software 9 # distributed under the License is distributed on an "AS IS" BASIS, 10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 11 # See the License for the specific language governing permissions and 12 # limitations under the License. 13 14 from logging import getLogger 15 from os import environ 16 from typing import Optional, Sequence 17 from grpc import ChannelCredentials, Compression 18 from opentelemetry.exporter.otlp.proto.grpc.exporter import ( 19 OTLPExporterMixin, 20 get_resource_data, 21 ) 22 from opentelemetry.proto.collector.metrics.v1.metrics_service_pb2 import ( 23 ExportMetricsServiceRequest, 24 ) 25 from opentelemetry.proto.collector.metrics.v1.metrics_service_pb2_grpc import ( 26 MetricsServiceStub, 27 ) 28 from opentelemetry.proto.common.v1.common_pb2 import InstrumentationScope 29 from opentelemetry.proto.metrics.v1 import metrics_pb2 as pb2 30 from opentelemetry.sdk.environment_variables import ( 31 OTEL_EXPORTER_OTLP_METRICS_INSECURE, 32 ) 33 from opentelemetry.sdk.metrics.export import ( 34 Gauge, 35 Histogram, 36 Metric, 37 Sum, 38 ) 39 40 from opentelemetry.sdk.metrics.export import ( 41 MetricExporter, 42 MetricExportResult, 43 MetricsData, 44 ) 45 46 _logger = getLogger(__name__) 47 48 49 class OTLPMetricExporter( 50 MetricExporter, 51 OTLPExporterMixin[Metric, ExportMetricsServiceRequest, MetricExportResult], 52 ): 53 _result = MetricExportResult 54 _stub = MetricsServiceStub 55 56 def __init__( 57 self, 58 endpoint: Optional[str] = None, 59 insecure: Optional[bool] = None, 60 credentials: Optional[ChannelCredentials] = None, 61 headers: Optional[Sequence] = None, 62 timeout: Optional[int] = None, 63 compression: Optional[Compression] = None, 64 ): 65 66 if insecure is None: 67 insecure = environ.get(OTEL_EXPORTER_OTLP_METRICS_INSECURE) 68 if insecure is not None: 69 insecure = insecure.lower() == "true" 70 71 super().__init__( 72 **{ 73 "endpoint": endpoint, 74 "insecure": insecure, 75 "credentials": credentials, 76 "headers": headers, 77 "timeout": timeout, 78 "compression": compression, 79 } 80 ) 81 82 def _translate_data( 83 self, data: MetricsData 84 ) -> ExportMetricsServiceRequest: 85 86 resource_metrics_dict = {} 87 88 for resource_metrics in data.resource_metrics: 89 90 resource = resource_metrics.resource 91 92 # It is safe to assume that each entry in data.resource_metrics is 93 # associated with an unique resource. 
94 scope_metrics_dict = {} 95 96 resource_metrics_dict[resource] = scope_metrics_dict 97 98 for scope_metrics in resource_metrics.scope_metrics: 99 100 instrumentation_scope = scope_metrics.scope 101 102 # The SDK groups metrics in instrumentation scopes already so 103 # there is no need to check for existing instrumentation scopes 104 # here. 105 pb2_scope_metrics = pb2.ScopeMetrics( 106 scope=InstrumentationScope( 107 name=instrumentation_scope.name, 108 version=instrumentation_scope.version, 109 ) 110 ) 111 112 scope_metrics_dict[instrumentation_scope] = pb2_scope_metrics 113 114 for metric in scope_metrics.metrics: 115 pb2_metric = pb2.Metric( 116 name=metric.name, 117 description=metric.description, 118 unit=metric.unit, 119 ) 120 121 if isinstance(metric.data, Gauge): 122 for data_point in metric.data.data_points: 123 pt = pb2.NumberDataPoint( 124 attributes=self._translate_attributes( 125 data_point.attributes 126 ), 127 time_unix_nano=data_point.time_unix_nano, 128 ) 129 if isinstance(data_point.value, int): 130 pt.as_int = data_point.value 131 else: 132 pt.as_double = data_point.value 133 pb2_metric.gauge.data_points.append(pt) 134 135 elif isinstance(metric.data, Histogram): 136 for data_point in metric.data.data_points: 137 pt = pb2.HistogramDataPoint( 138 attributes=self._translate_attributes( 139 data_point.attributes 140 ), 141 time_unix_nano=data_point.time_unix_nano, 142 start_time_unix_nano=( 143 data_point.start_time_unix_nano 144 ), 145 count=data_point.count, 146 sum=data_point.sum, 147 bucket_counts=data_point.bucket_counts, 148 explicit_bounds=data_point.explicit_bounds, 149 ) 150 pb2_metric.histogram.aggregation_temporality = ( 151 metric.data.aggregation_temporality 152 ) 153 pb2_metric.histogram.data_points.append(pt) 154 155 elif isinstance(metric.data, Sum): 156 for data_point in metric.data.data_points: 157 pt = pb2.NumberDataPoint( 158 attributes=self._translate_attributes( 159 data_point.attributes 160 ), 161 start_time_unix_nano=( 162 data_point.start_time_unix_nano 163 ), 164 time_unix_nano=data_point.time_unix_nano, 165 ) 166 if isinstance(data_point.value, int): 167 pt.as_int = data_point.value 168 else: 169 pt.as_double = data_point.value 170 # note that because sum is a message type, the 171 # fields must be set individually rather than 172 # instantiating a pb2.Sum and setting it once 173 pb2_metric.sum.aggregation_temporality = ( 174 metric.data.aggregation_temporality 175 ) 176 pb2_metric.sum.is_monotonic = ( 177 metric.data.is_monotonic 178 ) 179 pb2_metric.sum.data_points.append(pt) 180 else: 181 _logger.warn( 182 "unsupported datapoint type %s", metric.point 183 ) 184 continue 185 186 pb2_scope_metrics.metrics.append(pb2_metric) 187 188 return ExportMetricsServiceRequest( 189 resource_metrics=get_resource_data( 190 resource_metrics_dict, 191 pb2.ResourceMetrics, 192 "metrics", 193 ) 194 ) 195 196 def export( 197 self, 198 metrics_data: MetricsData, 199 timeout_millis: float = 10_000, 200 **kwargs, 201 ) -> MetricExportResult: 202 # TODO(#2663): OTLPExporterMixin should pass timeout to gRPC 203 return self._export(metrics_data) 204 205 def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None: 206 pass 207 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/metric_exporter/__init__.py b/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/metric_exporter/__init__.py --- a/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/metric_exporter/__init__.py +++ b/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/metric_exporter/__init__.py @@ -146,6 +146,8 @@ sum=data_point.sum, bucket_counts=data_point.bucket_counts, explicit_bounds=data_point.explicit_bounds, + max=data_point.max, + min=data_point.min, ) pb2_metric.histogram.aggregation_temporality = ( metric.data.aggregation_temporality
{"golden_diff": "diff --git a/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/metric_exporter/__init__.py b/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/metric_exporter/__init__.py\n--- a/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/metric_exporter/__init__.py\n+++ b/exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/metric_exporter/__init__.py\n@@ -146,6 +146,8 @@\n sum=data_point.sum,\n bucket_counts=data_point.bucket_counts,\n explicit_bounds=data_point.explicit_bounds,\n+ max=data_point.max,\n+ min=data_point.min,\n )\n pb2_metric.histogram.aggregation_temporality = (\n metric.data.aggregation_temporality\n", "issue": "Add min/max fields to Histogram in otlp metrics exporter\nFrom proto https://github.com/open-telemetry/opentelemetry-proto/pull/279\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom logging import getLogger\nfrom os import environ\nfrom typing import Optional, Sequence\nfrom grpc import ChannelCredentials, Compression\nfrom opentelemetry.exporter.otlp.proto.grpc.exporter import (\n OTLPExporterMixin,\n get_resource_data,\n)\nfrom opentelemetry.proto.collector.metrics.v1.metrics_service_pb2 import (\n ExportMetricsServiceRequest,\n)\nfrom opentelemetry.proto.collector.metrics.v1.metrics_service_pb2_grpc import (\n MetricsServiceStub,\n)\nfrom opentelemetry.proto.common.v1.common_pb2 import InstrumentationScope\nfrom opentelemetry.proto.metrics.v1 import metrics_pb2 as pb2\nfrom opentelemetry.sdk.environment_variables import (\n OTEL_EXPORTER_OTLP_METRICS_INSECURE,\n)\nfrom opentelemetry.sdk.metrics.export import (\n Gauge,\n Histogram,\n Metric,\n Sum,\n)\n\nfrom opentelemetry.sdk.metrics.export import (\n MetricExporter,\n MetricExportResult,\n MetricsData,\n)\n\n_logger = getLogger(__name__)\n\n\nclass OTLPMetricExporter(\n MetricExporter,\n OTLPExporterMixin[Metric, ExportMetricsServiceRequest, MetricExportResult],\n):\n _result = MetricExportResult\n _stub = MetricsServiceStub\n\n def __init__(\n self,\n endpoint: Optional[str] = None,\n insecure: Optional[bool] = None,\n credentials: Optional[ChannelCredentials] = None,\n headers: Optional[Sequence] = None,\n timeout: Optional[int] = None,\n compression: Optional[Compression] = None,\n ):\n\n if insecure is None:\n insecure = environ.get(OTEL_EXPORTER_OTLP_METRICS_INSECURE)\n if insecure is not None:\n insecure = insecure.lower() == \"true\"\n\n super().__init__(\n **{\n \"endpoint\": endpoint,\n \"insecure\": insecure,\n \"credentials\": credentials,\n \"headers\": headers,\n \"timeout\": timeout,\n \"compression\": compression,\n }\n )\n\n def _translate_data(\n self, data: MetricsData\n ) -> ExportMetricsServiceRequest:\n\n resource_metrics_dict = {}\n\n for resource_metrics in data.resource_metrics:\n\n resource = resource_metrics.resource\n\n # It is safe to assume 
that each entry in data.resource_metrics is\n # associated with an unique resource.\n scope_metrics_dict = {}\n\n resource_metrics_dict[resource] = scope_metrics_dict\n\n for scope_metrics in resource_metrics.scope_metrics:\n\n instrumentation_scope = scope_metrics.scope\n\n # The SDK groups metrics in instrumentation scopes already so\n # there is no need to check for existing instrumentation scopes\n # here.\n pb2_scope_metrics = pb2.ScopeMetrics(\n scope=InstrumentationScope(\n name=instrumentation_scope.name,\n version=instrumentation_scope.version,\n )\n )\n\n scope_metrics_dict[instrumentation_scope] = pb2_scope_metrics\n\n for metric in scope_metrics.metrics:\n pb2_metric = pb2.Metric(\n name=metric.name,\n description=metric.description,\n unit=metric.unit,\n )\n\n if isinstance(metric.data, Gauge):\n for data_point in metric.data.data_points:\n pt = pb2.NumberDataPoint(\n attributes=self._translate_attributes(\n data_point.attributes\n ),\n time_unix_nano=data_point.time_unix_nano,\n )\n if isinstance(data_point.value, int):\n pt.as_int = data_point.value\n else:\n pt.as_double = data_point.value\n pb2_metric.gauge.data_points.append(pt)\n\n elif isinstance(metric.data, Histogram):\n for data_point in metric.data.data_points:\n pt = pb2.HistogramDataPoint(\n attributes=self._translate_attributes(\n data_point.attributes\n ),\n time_unix_nano=data_point.time_unix_nano,\n start_time_unix_nano=(\n data_point.start_time_unix_nano\n ),\n count=data_point.count,\n sum=data_point.sum,\n bucket_counts=data_point.bucket_counts,\n explicit_bounds=data_point.explicit_bounds,\n )\n pb2_metric.histogram.aggregation_temporality = (\n metric.data.aggregation_temporality\n )\n pb2_metric.histogram.data_points.append(pt)\n\n elif isinstance(metric.data, Sum):\n for data_point in metric.data.data_points:\n pt = pb2.NumberDataPoint(\n attributes=self._translate_attributes(\n data_point.attributes\n ),\n start_time_unix_nano=(\n data_point.start_time_unix_nano\n ),\n time_unix_nano=data_point.time_unix_nano,\n )\n if isinstance(data_point.value, int):\n pt.as_int = data_point.value\n else:\n pt.as_double = data_point.value\n # note that because sum is a message type, the\n # fields must be set individually rather than\n # instantiating a pb2.Sum and setting it once\n pb2_metric.sum.aggregation_temporality = (\n metric.data.aggregation_temporality\n )\n pb2_metric.sum.is_monotonic = (\n metric.data.is_monotonic\n )\n pb2_metric.sum.data_points.append(pt)\n else:\n _logger.warn(\n \"unsupported datapoint type %s\", metric.point\n )\n continue\n\n pb2_scope_metrics.metrics.append(pb2_metric)\n\n return ExportMetricsServiceRequest(\n resource_metrics=get_resource_data(\n resource_metrics_dict,\n pb2.ResourceMetrics,\n \"metrics\",\n )\n )\n\n def export(\n self,\n metrics_data: MetricsData,\n timeout_millis: float = 10_000,\n **kwargs,\n ) -> MetricExportResult:\n # TODO(#2663): OTLPExporterMixin should pass timeout to gRPC\n return self._export(metrics_data)\n\n def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None:\n pass\n", "path": "exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/metric_exporter/__init__.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or 
agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom logging import getLogger\nfrom os import environ\nfrom typing import Optional, Sequence\nfrom grpc import ChannelCredentials, Compression\nfrom opentelemetry.exporter.otlp.proto.grpc.exporter import (\n OTLPExporterMixin,\n get_resource_data,\n)\nfrom opentelemetry.proto.collector.metrics.v1.metrics_service_pb2 import (\n ExportMetricsServiceRequest,\n)\nfrom opentelemetry.proto.collector.metrics.v1.metrics_service_pb2_grpc import (\n MetricsServiceStub,\n)\nfrom opentelemetry.proto.common.v1.common_pb2 import InstrumentationScope\nfrom opentelemetry.proto.metrics.v1 import metrics_pb2 as pb2\nfrom opentelemetry.sdk.environment_variables import (\n OTEL_EXPORTER_OTLP_METRICS_INSECURE,\n)\nfrom opentelemetry.sdk.metrics.export import (\n Gauge,\n Histogram,\n Metric,\n Sum,\n)\n\nfrom opentelemetry.sdk.metrics.export import (\n MetricExporter,\n MetricExportResult,\n MetricsData,\n)\n\n_logger = getLogger(__name__)\n\n\nclass OTLPMetricExporter(\n MetricExporter,\n OTLPExporterMixin[Metric, ExportMetricsServiceRequest, MetricExportResult],\n):\n _result = MetricExportResult\n _stub = MetricsServiceStub\n\n def __init__(\n self,\n endpoint: Optional[str] = None,\n insecure: Optional[bool] = None,\n credentials: Optional[ChannelCredentials] = None,\n headers: Optional[Sequence] = None,\n timeout: Optional[int] = None,\n compression: Optional[Compression] = None,\n ):\n\n if insecure is None:\n insecure = environ.get(OTEL_EXPORTER_OTLP_METRICS_INSECURE)\n if insecure is not None:\n insecure = insecure.lower() == \"true\"\n\n super().__init__(\n **{\n \"endpoint\": endpoint,\n \"insecure\": insecure,\n \"credentials\": credentials,\n \"headers\": headers,\n \"timeout\": timeout,\n \"compression\": compression,\n }\n )\n\n def _translate_data(\n self, data: MetricsData\n ) -> ExportMetricsServiceRequest:\n\n resource_metrics_dict = {}\n\n for resource_metrics in data.resource_metrics:\n\n resource = resource_metrics.resource\n\n # It is safe to assume that each entry in data.resource_metrics is\n # associated with an unique resource.\n scope_metrics_dict = {}\n\n resource_metrics_dict[resource] = scope_metrics_dict\n\n for scope_metrics in resource_metrics.scope_metrics:\n\n instrumentation_scope = scope_metrics.scope\n\n # The SDK groups metrics in instrumentation scopes already so\n # there is no need to check for existing instrumentation scopes\n # here.\n pb2_scope_metrics = pb2.ScopeMetrics(\n scope=InstrumentationScope(\n name=instrumentation_scope.name,\n version=instrumentation_scope.version,\n )\n )\n\n scope_metrics_dict[instrumentation_scope] = pb2_scope_metrics\n\n for metric in scope_metrics.metrics:\n pb2_metric = pb2.Metric(\n name=metric.name,\n description=metric.description,\n unit=metric.unit,\n )\n\n if isinstance(metric.data, Gauge):\n for data_point in metric.data.data_points:\n pt = pb2.NumberDataPoint(\n attributes=self._translate_attributes(\n data_point.attributes\n ),\n time_unix_nano=data_point.time_unix_nano,\n )\n if isinstance(data_point.value, int):\n pt.as_int = data_point.value\n else:\n pt.as_double = data_point.value\n pb2_metric.gauge.data_points.append(pt)\n\n elif isinstance(metric.data, Histogram):\n for data_point in metric.data.data_points:\n pt = 
pb2.HistogramDataPoint(\n attributes=self._translate_attributes(\n data_point.attributes\n ),\n time_unix_nano=data_point.time_unix_nano,\n start_time_unix_nano=(\n data_point.start_time_unix_nano\n ),\n count=data_point.count,\n sum=data_point.sum,\n bucket_counts=data_point.bucket_counts,\n explicit_bounds=data_point.explicit_bounds,\n max=data_point.max,\n min=data_point.min,\n )\n pb2_metric.histogram.aggregation_temporality = (\n metric.data.aggregation_temporality\n )\n pb2_metric.histogram.data_points.append(pt)\n\n elif isinstance(metric.data, Sum):\n for data_point in metric.data.data_points:\n pt = pb2.NumberDataPoint(\n attributes=self._translate_attributes(\n data_point.attributes\n ),\n start_time_unix_nano=(\n data_point.start_time_unix_nano\n ),\n time_unix_nano=data_point.time_unix_nano,\n )\n if isinstance(data_point.value, int):\n pt.as_int = data_point.value\n else:\n pt.as_double = data_point.value\n # note that because sum is a message type, the\n # fields must be set individually rather than\n # instantiating a pb2.Sum and setting it once\n pb2_metric.sum.aggregation_temporality = (\n metric.data.aggregation_temporality\n )\n pb2_metric.sum.is_monotonic = (\n metric.data.is_monotonic\n )\n pb2_metric.sum.data_points.append(pt)\n else:\n _logger.warn(\n \"unsupported datapoint type %s\", metric.point\n )\n continue\n\n pb2_scope_metrics.metrics.append(pb2_metric)\n\n return ExportMetricsServiceRequest(\n resource_metrics=get_resource_data(\n resource_metrics_dict,\n pb2.ResourceMetrics,\n \"metrics\",\n )\n )\n\n def export(\n self,\n metrics_data: MetricsData,\n timeout_millis: float = 10_000,\n **kwargs,\n ) -> MetricExportResult:\n # TODO(#2663): OTLPExporterMixin should pass timeout to gRPC\n return self._export(metrics_data)\n\n def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None:\n pass\n", "path": "exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/metric_exporter/__init__.py"}]}
2,210
218
gh_patches_debug_12004
rasdani/github-patches
git_diff
opsdroid__opsdroid-1432
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Opsdroid logger returning errors # Description Hi, I have been using `opsdroid` for a few days now, and while it is a good framework, I have been having a lot of trouble. Most recently I have been getting a error `formatter refrenced before use`. I have linted my configuration.yaml and none of my python files have errors. The error message below only shows errors in opsdroid library files. Even so, I am probably doing something wrong. Any help is greatly appreciated! ## Steps to Reproduce I just linted and built my config. Neither of those actions returned errors. ## Expected Functionality My bot should have run on Telegram and in bash. ## Experienced Functionality ```bash Traceback (most recent call last): File "/home/gideongrinberg/.local/bin/opsdroid", line 8, in <module> sys.exit(cli()) File "/usr/local/lib/python3.6/dist-packages/click/core.py", line 764, in __call__ return self.main(*args, **kwargs) File "/usr/local/lib/python3.6/dist-packages/click/core.py", line 717, in main rv = self.invoke(ctx) File "/usr/local/lib/python3.6/dist-packages/click/core.py", line 1137, in invoke return _process_result(sub_ctx.command.invoke(sub_ctx)) File "/usr/local/lib/python3.6/dist-packages/click/core.py", line 956, in invoke return ctx.invoke(self.callback, **ctx.params) File "/usr/local/lib/python3.6/dist-packages/click/core.py", line 555, in invoke return callback(*args, **kwargs) File "/home/gideongrinberg/.local/lib/python3.6/site-packages/opsdroid/cli/start.py", line 38, in start configure_logging(config) File "/home/gideongrinberg/.local/lib/python3.6/site-packages/opsdroid/logging.py", line 93, in configure_logging console_handler.setFormatter(formatter) UnboundLocalError: local variable 'formatter' referenced before assignment ``` ## Versions - **Opsdroid version:** 0.17.1 - **Python version:** Python 3.6.9 - **OS/Docker version:** Ubuntu 18.04 on Window Subsystem Linux (Windows 10) ## Configuration File My config.yaml is to large to include, but this is the only line I've change from the example (other than adding tokens) ```yaml recycle-nlp: path: '~/opdroid_recycle/skill-recycle-nlp' ``` Again, that file returns no errors when I run `opsdroid config -f [PATH] lint` or `opsdroid config -f [PATH] build`. Additionally, the python file: ```python from opsdroid.skill import Skill from opsdroid.matchers import match_luisai_intent class recycle-nlp(Skill): @match_luisai_intent('recycle') async def recycle-nlp(self, message): if message.luisai["topScoringIntent"]["intent"]=="recycle": await message.respond(str(message.luisai)) ``` My directory structure (/home for WSL, not windows): ``` | /home |____ opsdroid_recycle | |_____ config.yaml |_____skill-recycle-nlp | |____ __init__.py |______ README.md |______ LICENSE | |___.local/lib/python3.6/site-packages ``` ## Additional Details Interestingly, my bot worked fine with the example config Any help is much appreciated! --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `opsdroid/logging.py` Content: ``` 1 """Class for Filter logs and logging logic.""" 2 3 import os 4 import logging 5 import contextlib 6 7 from logging.handlers import RotatingFileHandler 8 from opsdroid.const import DEFAULT_LOG_FILENAME, __version__ 9 10 _LOGGER = logging.getLogger(__name__) 11 12 13 class ParsingFilter(logging.Filter): 14 """Class that filters logs.""" 15 16 def __init__(self, config, *parse_list): 17 """Create object to implement filtering.""" 18 super(ParsingFilter, self).__init__() 19 self.config = config["logging"] 20 try: 21 if ( 22 self.config["filter"]["whitelist"] 23 and self.config["filter"]["blacklist"] 24 ): 25 _LOGGER.warning( 26 _( 27 "Both whitelist and blacklist filters found in configuration. " 28 "Only one can be used at a time - only the whitelist filter will be used." 29 ) 30 ) 31 self.parse_list = [ 32 logging.Filter(name) for name in parse_list[0]["whitelist"] 33 ] 34 except KeyError: 35 self.parse_list = parse_list[0].get("whitelist") or parse_list[0].get( 36 "blacklist" 37 ) 38 39 self.parse_list = [logging.Filter(name) for name in self.parse_list] 40 41 def filter(self, record): 42 """Apply filter to the log message. 43 44 This is a subset of Logger.filter, this method applies the logger 45 filters and returns a bool. If the value is true the record will 46 be passed to the handlers and the log shown. If the value is 47 false it will be ignored. 48 49 Args: 50 record: a log record containing the log message and the 51 name of the log - example: opsdroid.core. 52 53 Returns: 54 Boolean: If True - pass the log to handler. 55 56 """ 57 58 if self.config["filter"].get("whitelist"): 59 return any(name.filter(record) for name in self.parse_list) 60 return not any(name.filter(record) for name in self.parse_list) 61 62 63 def configure_logging(config): 64 """Configure the root logger based on user config.""" 65 rootlogger = logging.getLogger() 66 logging_config = config or {} 67 68 while rootlogger.handlers: 69 rootlogger.handlers.pop() 70 71 try: 72 if config["logging"]["path"]: 73 logfile_path = os.path.expanduser(config["logging"]["path"]) 74 else: 75 logfile_path = config["logging"]["path"] 76 except KeyError: 77 logfile_path = DEFAULT_LOG_FILENAME 78 79 try: 80 log_level = get_logging_level(config["logging"]["level"]) 81 except KeyError: 82 log_level = logging.INFO 83 84 rootlogger.setLevel(log_level) 85 86 try: 87 if config["logging"]["extended"]: 88 formatter = logging.Formatter( 89 "%(levelname)s %(name)s.%(funcName)s(): %(message)s" 90 ) 91 except KeyError: 92 formatter = logging.Formatter("%(levelname)s %(name)s: %(message)s") 93 94 console_handler = logging.StreamHandler() 95 console_handler.setLevel(log_level) 96 console_handler.setFormatter(formatter) 97 98 with contextlib.suppress(KeyError): 99 console_handler.addFilter(ParsingFilter(config, config["logging"]["filter"])) 100 101 rootlogger.addHandler(console_handler) 102 103 with contextlib.suppress(KeyError): 104 if not config["logging"]["console"]: 105 console_handler.setLevel(logging.CRITICAL) 106 107 if logfile_path: 108 logdir = os.path.dirname(os.path.realpath(logfile_path)) 109 if not os.path.isdir(logdir): 110 os.makedirs(logdir) 111 112 file_handler = RotatingFileHandler( 113 logfile_path, maxBytes=logging_config.get("file-size", 50e6) 114 ) 115 116 file_handler.setLevel(log_level) 117 file_handler.setFormatter(formatter) 118 119 with contextlib.suppress(KeyError): 120 file_handler.addFilter(ParsingFilter(config, config["logging"]["filter"])) 121 122 
rootlogger.addHandler(file_handler) 123 _LOGGER.info("=" * 40) 124 _LOGGER.info(_("Started opsdroid %s."), __version__) 125 126 127 def get_logging_level(logging_level): 128 """Get the logger level based on the user configuration. 129 130 Args: 131 logging_level: logging level from config file 132 133 Returns: 134 logging LEVEL -> 135 CRITICAL = 50 136 FATAL = CRITICAL 137 ERROR = 40 138 WARNING = 30 139 WARN = WARNING 140 INFO = 20 141 DEBUG = 10 142 NOTSET = 0 143 144 """ 145 if logging_level == "critical": 146 return logging.CRITICAL 147 148 if logging_level == "error": 149 return logging.ERROR 150 if logging_level == "warning": 151 return logging.WARNING 152 153 if logging_level == "debug": 154 return logging.DEBUG 155 156 return logging.INFO 157 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/opsdroid/logging.py b/opsdroid/logging.py --- a/opsdroid/logging.py +++ b/opsdroid/logging.py @@ -83,13 +83,13 @@ rootlogger.setLevel(log_level) - try: + formatter = logging.Formatter("%(levelname)s %(name)s: %(message)s") + + with contextlib.suppress(KeyError): if config["logging"]["extended"]: formatter = logging.Formatter( "%(levelname)s %(name)s.%(funcName)s(): %(message)s" ) - except KeyError: - formatter = logging.Formatter("%(levelname)s %(name)s: %(message)s") console_handler = logging.StreamHandler() console_handler.setLevel(log_level)
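The failure in the traceback above is easy to reproduce in isolation: when `config["logging"]["extended"]` is present but falsy (for example `extended: false` in the YAML), the original `try`/`except KeyError` never assigns `formatter`, and the later `setFormatter(formatter)` call raises the UnboundLocalError shown. A standalone sketch (the helper names are invented here; the format strings are copied from the opsdroid file) contrasting the broken and patched flow:

```python
import contextlib
import logging

# The case the original code missed: the key exists, so no KeyError fires,
# but the value is falsy, so the if-body never assigns formatter.
config = {"logging": {"extended": False}}

def broken_formatter(config):
    try:
        if config["logging"]["extended"]:
            formatter = logging.Formatter(
                "%(levelname)s %(name)s.%(funcName)s(): %(message)s"
            )
    except KeyError:
        formatter = logging.Formatter("%(levelname)s %(name)s: %(message)s")
    return formatter  # UnboundLocalError when "extended" is present but falsy

def fixed_formatter(config):
    # Mirrors the patch: assign the default first, then optionally upgrade it.
    formatter = logging.Formatter("%(levelname)s %(name)s: %(message)s")
    with contextlib.suppress(KeyError):
        if config["logging"]["extended"]:
            formatter = logging.Formatter(
                "%(levelname)s %(name)s.%(funcName)s(): %(message)s"
            )
    return formatter

print(fixed_formatter(config))  # returns the default formatter
try:
    broken_formatter(config)
except UnboundLocalError as err:
    print("reproduced:", err)
```

The patch keeps behaviour identical for the cases that already worked (missing key, `extended: true`) and only changes the previously crashing one.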
{"golden_diff": "diff --git a/opsdroid/logging.py b/opsdroid/logging.py\n--- a/opsdroid/logging.py\n+++ b/opsdroid/logging.py\n@@ -83,13 +83,13 @@\n \n rootlogger.setLevel(log_level)\n \n- try:\n+ formatter = logging.Formatter(\"%(levelname)s %(name)s: %(message)s\")\n+\n+ with contextlib.suppress(KeyError):\n if config[\"logging\"][\"extended\"]:\n formatter = logging.Formatter(\n \"%(levelname)s %(name)s.%(funcName)s(): %(message)s\"\n )\n- except KeyError:\n- formatter = logging.Formatter(\"%(levelname)s %(name)s: %(message)s\")\n \n console_handler = logging.StreamHandler()\n console_handler.setLevel(log_level)\n", "issue": "Opsdroid logger returning errors\n# Description\r\nHi, I have been using `opsdroid` for a few days now, and while it is a good framework, I have been having a lot of trouble. Most recently I have been getting a error `formatter refrenced before use`. I have linted my configuration.yaml and none of my python files have errors. The error message below only shows errors in opsdroid library files. Even so, I am probably doing something wrong. Any help is greatly appreciated!\r\n\r\n## Steps to Reproduce\r\nI just linted and built my config. Neither of those actions returned errors.\r\n\r\n## Expected Functionality\r\nMy bot should have run on Telegram and in bash.\r\n\r\n## Experienced Functionality\r\n```bash\r\nTraceback (most recent call last):\r\n File \"/home/gideongrinberg/.local/bin/opsdroid\", line 8, in <module>\r\n sys.exit(cli())\r\n File \"/usr/local/lib/python3.6/dist-packages/click/core.py\", line 764, in __call__\r\n return self.main(*args, **kwargs)\r\n File \"/usr/local/lib/python3.6/dist-packages/click/core.py\", line 717, in main\r\n rv = self.invoke(ctx)\r\n File \"/usr/local/lib/python3.6/dist-packages/click/core.py\", line 1137, in invoke\r\n return _process_result(sub_ctx.command.invoke(sub_ctx))\r\n File \"/usr/local/lib/python3.6/dist-packages/click/core.py\", line 956, in invoke\r\n return ctx.invoke(self.callback, **ctx.params)\r\n File \"/usr/local/lib/python3.6/dist-packages/click/core.py\", line 555, in invoke\r\n return callback(*args, **kwargs)\r\n File \"/home/gideongrinberg/.local/lib/python3.6/site-packages/opsdroid/cli/start.py\", line 38, in start\r\n configure_logging(config)\r\n File \"/home/gideongrinberg/.local/lib/python3.6/site-packages/opsdroid/logging.py\", line 93, in configure_logging\r\n console_handler.setFormatter(formatter)\r\nUnboundLocalError: local variable 'formatter' referenced before assignment\r\n```\r\n## Versions\r\n- **Opsdroid version:** 0.17.1\r\n- **Python version:** Python 3.6.9\r\n- **OS/Docker version:** Ubuntu 18.04 on Window Subsystem Linux (Windows 10)\r\n\r\n## Configuration File\r\nMy config.yaml is to large to include, but this is the only line I've change from the example (other than adding tokens)\r\n```yaml\r\n\r\n recycle-nlp:\r\n path: '~/opdroid_recycle/skill-recycle-nlp'\r\n```\r\nAgain, that file returns no errors when I run `opsdroid config -f [PATH] lint` or `opsdroid config -f [PATH] build`.\r\n\r\nAdditionally, the python file:\r\n```python\r\nfrom opsdroid.skill import Skill\r\nfrom opsdroid.matchers import match_luisai_intent\r\n\r\nclass recycle-nlp(Skill):\r\n@match_luisai_intent('recycle')\r\n async def recycle-nlp(self, message):\r\n if message.luisai[\"topScoringIntent\"][\"intent\"]==\"recycle\":\r\n await message.respond(str(message.luisai))\r\n```\r\nMy directory structure (/home for WSL, not windows):\r\n```\r\n| /home\r\n |____ opsdroid_recycle\r\n |\r\n |_____ 
config.yaml\r\n |_____skill-recycle-nlp\r\n |\r\n |____ __init__.py\r\n |______ README.md\r\n |______ LICENSE\r\n |\r\n |___.local/lib/python3.6/site-packages\r\n```\r\n## Additional Details\r\nInterestingly, my bot worked fine with the example config\r\n\r\nAny help is much appreciated! \r\n\n", "before_files": [{"content": "\"\"\"Class for Filter logs and logging logic.\"\"\"\n\nimport os\nimport logging\nimport contextlib\n\nfrom logging.handlers import RotatingFileHandler\nfrom opsdroid.const import DEFAULT_LOG_FILENAME, __version__\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass ParsingFilter(logging.Filter):\n \"\"\"Class that filters logs.\"\"\"\n\n def __init__(self, config, *parse_list):\n \"\"\"Create object to implement filtering.\"\"\"\n super(ParsingFilter, self).__init__()\n self.config = config[\"logging\"]\n try:\n if (\n self.config[\"filter\"][\"whitelist\"]\n and self.config[\"filter\"][\"blacklist\"]\n ):\n _LOGGER.warning(\n _(\n \"Both whitelist and blacklist filters found in configuration. \"\n \"Only one can be used at a time - only the whitelist filter will be used.\"\n )\n )\n self.parse_list = [\n logging.Filter(name) for name in parse_list[0][\"whitelist\"]\n ]\n except KeyError:\n self.parse_list = parse_list[0].get(\"whitelist\") or parse_list[0].get(\n \"blacklist\"\n )\n\n self.parse_list = [logging.Filter(name) for name in self.parse_list]\n\n def filter(self, record):\n \"\"\"Apply filter to the log message.\n\n This is a subset of Logger.filter, this method applies the logger\n filters and returns a bool. If the value is true the record will\n be passed to the handlers and the log shown. If the value is\n false it will be ignored.\n\n Args:\n record: a log record containing the log message and the\n name of the log - example: opsdroid.core.\n\n Returns:\n Boolean: If True - pass the log to handler.\n\n \"\"\"\n\n if self.config[\"filter\"].get(\"whitelist\"):\n return any(name.filter(record) for name in self.parse_list)\n return not any(name.filter(record) for name in self.parse_list)\n\n\ndef configure_logging(config):\n \"\"\"Configure the root logger based on user config.\"\"\"\n rootlogger = logging.getLogger()\n logging_config = config or {}\n\n while rootlogger.handlers:\n rootlogger.handlers.pop()\n\n try:\n if config[\"logging\"][\"path\"]:\n logfile_path = os.path.expanduser(config[\"logging\"][\"path\"])\n else:\n logfile_path = config[\"logging\"][\"path\"]\n except KeyError:\n logfile_path = DEFAULT_LOG_FILENAME\n\n try:\n log_level = get_logging_level(config[\"logging\"][\"level\"])\n except KeyError:\n log_level = logging.INFO\n\n rootlogger.setLevel(log_level)\n\n try:\n if config[\"logging\"][\"extended\"]:\n formatter = logging.Formatter(\n \"%(levelname)s %(name)s.%(funcName)s(): %(message)s\"\n )\n except KeyError:\n formatter = logging.Formatter(\"%(levelname)s %(name)s: %(message)s\")\n\n console_handler = logging.StreamHandler()\n console_handler.setLevel(log_level)\n console_handler.setFormatter(formatter)\n\n with contextlib.suppress(KeyError):\n console_handler.addFilter(ParsingFilter(config, config[\"logging\"][\"filter\"]))\n\n rootlogger.addHandler(console_handler)\n\n with contextlib.suppress(KeyError):\n if not config[\"logging\"][\"console\"]:\n console_handler.setLevel(logging.CRITICAL)\n\n if logfile_path:\n logdir = os.path.dirname(os.path.realpath(logfile_path))\n if not os.path.isdir(logdir):\n os.makedirs(logdir)\n\n file_handler = RotatingFileHandler(\n logfile_path, maxBytes=logging_config.get(\"file-size\", 
50e6)\n )\n\n file_handler.setLevel(log_level)\n file_handler.setFormatter(formatter)\n\n with contextlib.suppress(KeyError):\n file_handler.addFilter(ParsingFilter(config, config[\"logging\"][\"filter\"]))\n\n rootlogger.addHandler(file_handler)\n _LOGGER.info(\"=\" * 40)\n _LOGGER.info(_(\"Started opsdroid %s.\"), __version__)\n\n\ndef get_logging_level(logging_level):\n \"\"\"Get the logger level based on the user configuration.\n\n Args:\n logging_level: logging level from config file\n\n Returns:\n logging LEVEL ->\n CRITICAL = 50\n FATAL = CRITICAL\n ERROR = 40\n WARNING = 30\n WARN = WARNING\n INFO = 20\n DEBUG = 10\n NOTSET = 0\n\n \"\"\"\n if logging_level == \"critical\":\n return logging.CRITICAL\n\n if logging_level == \"error\":\n return logging.ERROR\n if logging_level == \"warning\":\n return logging.WARNING\n\n if logging_level == \"debug\":\n return logging.DEBUG\n\n return logging.INFO\n", "path": "opsdroid/logging.py"}], "after_files": [{"content": "\"\"\"Class for Filter logs and logging logic.\"\"\"\n\nimport os\nimport logging\nimport contextlib\n\nfrom logging.handlers import RotatingFileHandler\nfrom opsdroid.const import DEFAULT_LOG_FILENAME, __version__\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass ParsingFilter(logging.Filter):\n \"\"\"Class that filters logs.\"\"\"\n\n def __init__(self, config, *parse_list):\n \"\"\"Create object to implement filtering.\"\"\"\n super(ParsingFilter, self).__init__()\n self.config = config[\"logging\"]\n try:\n if (\n self.config[\"filter\"][\"whitelist\"]\n and self.config[\"filter\"][\"blacklist\"]\n ):\n _LOGGER.warning(\n _(\n \"Both whitelist and blacklist filters found in configuration. \"\n \"Only one can be used at a time - only the whitelist filter will be used.\"\n )\n )\n self.parse_list = [\n logging.Filter(name) for name in parse_list[0][\"whitelist\"]\n ]\n except KeyError:\n self.parse_list = parse_list[0].get(\"whitelist\") or parse_list[0].get(\n \"blacklist\"\n )\n\n self.parse_list = [logging.Filter(name) for name in self.parse_list]\n\n def filter(self, record):\n \"\"\"Apply filter to the log message.\n\n This is a subset of Logger.filter, this method applies the logger\n filters and returns a bool. If the value is true the record will\n be passed to the handlers and the log shown. 
If the value is\n false it will be ignored.\n\n Args:\n record: a log record containing the log message and the\n name of the log - example: opsdroid.core.\n\n Returns:\n Boolean: If True - pass the log to handler.\n\n \"\"\"\n\n if self.config[\"filter\"].get(\"whitelist\"):\n return any(name.filter(record) for name in self.parse_list)\n return not any(name.filter(record) for name in self.parse_list)\n\n\ndef configure_logging(config):\n \"\"\"Configure the root logger based on user config.\"\"\"\n rootlogger = logging.getLogger()\n logging_config = config or {}\n\n while rootlogger.handlers:\n rootlogger.handlers.pop()\n\n try:\n if config[\"logging\"][\"path\"]:\n logfile_path = os.path.expanduser(config[\"logging\"][\"path\"])\n else:\n logfile_path = config[\"logging\"][\"path\"]\n except KeyError:\n logfile_path = DEFAULT_LOG_FILENAME\n\n try:\n log_level = get_logging_level(config[\"logging\"][\"level\"])\n except KeyError:\n log_level = logging.INFO\n\n rootlogger.setLevel(log_level)\n\n formatter = logging.Formatter(\"%(levelname)s %(name)s: %(message)s\")\n\n with contextlib.suppress(KeyError):\n if config[\"logging\"][\"extended\"]:\n formatter = logging.Formatter(\n \"%(levelname)s %(name)s.%(funcName)s(): %(message)s\"\n )\n\n console_handler = logging.StreamHandler()\n console_handler.setLevel(log_level)\n console_handler.setFormatter(formatter)\n\n with contextlib.suppress(KeyError):\n console_handler.addFilter(ParsingFilter(config, config[\"logging\"][\"filter\"]))\n\n rootlogger.addHandler(console_handler)\n\n with contextlib.suppress(KeyError):\n if not config[\"logging\"][\"console\"]:\n console_handler.setLevel(logging.CRITICAL)\n\n if logfile_path:\n logdir = os.path.dirname(os.path.realpath(logfile_path))\n if not os.path.isdir(logdir):\n os.makedirs(logdir)\n\n file_handler = RotatingFileHandler(\n logfile_path, maxBytes=logging_config.get(\"file-size\", 50e6)\n )\n\n file_handler.setLevel(log_level)\n file_handler.setFormatter(formatter)\n\n with contextlib.suppress(KeyError):\n file_handler.addFilter(ParsingFilter(config, config[\"logging\"][\"filter\"]))\n\n rootlogger.addHandler(file_handler)\n _LOGGER.info(\"=\" * 40)\n _LOGGER.info(_(\"Started opsdroid %s.\"), __version__)\n\n\ndef get_logging_level(logging_level):\n \"\"\"Get the logger level based on the user configuration.\n\n Args:\n logging_level: logging level from config file\n\n Returns:\n logging LEVEL ->\n CRITICAL = 50\n FATAL = CRITICAL\n ERROR = 40\n WARNING = 30\n WARN = WARNING\n INFO = 20\n DEBUG = 10\n NOTSET = 0\n\n \"\"\"\n if logging_level == \"critical\":\n return logging.CRITICAL\n\n if logging_level == \"error\":\n return logging.ERROR\n if logging_level == \"warning\":\n return logging.WARNING\n\n if logging_level == \"debug\":\n return logging.DEBUG\n\n return logging.INFO\n", "path": "opsdroid/logging.py"}]}
2,426
165
gh_patches_debug_3312
rasdani/github-patches
git_diff
sktime__sktime-5422
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [BUG] MCDCNNClassifier.fit runs only 1 training epoch Despite n_epochs being set to 200, only one training epoch will be executed here: ```python from sktime.classification.deep_learning.mcdcnn import MCDCNNClassifier from keras.losses import binary_crossentropy model = MCDCNNClassifier(n_epochs=200, batch_size=64, loss=binary_crossentropy, random_state=42, metrics=['binary_accuracy']) model.fit(x_train, y_train) ``` The problem is that model.n_epochs is not passed to self.model_.fit in MCDCNNClassifier._fit (mcdcnn.py): ```python def _fit(self, X, y): ... self.history = self.model_.fit( X, y_onehot, epochs=self.n_epochs, **#<<<-----------------THIS LINS IS MISSING #** batch_size=self.batch_size, verbose=self.verbose, callbacks=self.callbacks_, ) return self ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `sktime/classification/deep_learning/mcdcnn.py` Content: ``` 1 """Multi Channel Deep Convolutional Neural Classifier (MCDCNN).""" 2 3 __author__ = [ 4 "JamesLarge", 5 ] 6 7 from copy import deepcopy 8 9 import numpy as np 10 from sklearn.utils import check_random_state 11 12 from sktime.classification.deep_learning.base import BaseDeepClassifier 13 from sktime.networks.mcdcnn import MCDCNNNetwork 14 from sktime.utils.validation._dependencies import _check_dl_dependencies 15 16 17 class MCDCNNClassifier(BaseDeepClassifier): 18 """Multi Channel Deep Convolutional Neural Classifier, as described in [1]_. 19 20 Parameters 21 ---------- 22 n_epochs : int, optional (default=120) 23 The number of epochs to train the model. 24 batch_size : int, optional (default=16) 25 The number of samples per gradient update. 26 kernel_size : int, optional (default=5) 27 The size of kernel in Conv1D layer. 28 pool_size : int, optional (default=2) 29 The size of kernel in (Max) Pool layer. 30 filter_sizes : tuple, optional (default=(8, 8)) 31 The sizes of filter for Conv1D layer corresponding 32 to each Conv1D in the block. 33 dense_units : int, optional (default=732) 34 The number of output units of the final Dense 35 layer of this Network. This is NOT the final layer 36 but the penultimate layer. 37 conv_padding : str or None, optional (default="same") 38 The type of padding to be applied to convolutional 39 layers. 40 pool_padding : str or None, optional (default="same") 41 The type of padding to be applied to pooling layers. 42 loss : str, optional (default="categorical_crossentropy") 43 The name of the loss function to be used during training, 44 should be supported by keras. 45 activation : str, optional (default="sigmoid") 46 The activation function to apply at the output. It should be 47 "software" if response variable has more than two types. 48 use_bias : bool, optional (default=True) 49 Whether bias should be included in the output layer. 50 metrics : None or string, optional (default=None) 51 The string which will be used during model compilation. If left as None, 52 then "accuracy" is passed to `model.compile()`. 53 optimizer: None or keras.optimizers.Optimizer instance, optional (default=None) 54 The optimizer that is used for model compiltation. If left as None, 55 then `keras.optimizers.SGD` is used with the following parameters - 56 `learning_rate=0.01, momentum=0.9, weight_decay=0.0005`. 
57 callbacks : None or list of keras.callbacks.Callback, optinal (default=None) 58 The callback(s) to use during training. 59 random_state : int, optional (default=0) 60 The seed to any random action. 61 62 Notes 63 ----- 64 Adapted from the implementation of Fawaz et. al 65 https://github.com/hfawaz/dl-4-tsc/blob/master/classifiers/mcdcnn.py 66 67 References 68 ---------- 69 .. [1] Zheng et. al, Time series classification using multi-channels deep 70 convolutional neural networks, International Conference on 71 Web-Age Information Management, Pages 298-310, year 2014, organization: Springer. 72 73 Examples 74 -------- 75 >>> from sktime.classification.deep_learning.mcdcnn import MCDCNNClassifier 76 >>> from sktime.datasets import load_unit_test 77 >>> X_train, y_tain = load_unit_test(split="train") 78 >>> mcdcnn = MCDCNNClassifier() # doctest: +SKIP 79 >>> mcdcnn.fit(X_train, y_train) # doctest: +SKIP 80 MCDCNNClassifier(...) 81 """ 82 83 _tags = {"python_dependencies": "tensorflow"} 84 85 def __init__( 86 self, 87 n_epochs=120, 88 batch_size=16, 89 kernel_size=5, 90 pool_size=2, 91 filter_sizes=(8, 8), 92 dense_units=732, 93 conv_padding="same", 94 pool_padding="same", 95 loss="categorical_crossentropy", 96 activation="sigmoid", 97 use_bias=True, 98 callbacks=None, 99 metrics=None, 100 optimizer=None, 101 verbose=False, 102 random_state=0, 103 ): 104 _check_dl_dependencies(severity="error") 105 super().__init__() 106 107 self.n_epochs = n_epochs 108 self.batch_size = batch_size 109 self.kernel_size = kernel_size 110 self.pool_size = pool_size 111 self.filter_sizes = filter_sizes 112 self.dense_units = dense_units 113 self.conv_padding = conv_padding 114 self.pool_padding = pool_padding 115 self.loss = loss 116 self.activation = activation 117 self.use_bias = use_bias 118 self.callbacks = callbacks 119 self.metrics = metrics 120 self.optimizer = optimizer 121 self.verbose = verbose 122 self.random_state = random_state 123 self.history = None 124 self._network = MCDCNNNetwork( 125 kernel_size=self.kernel_size, 126 pool_size=self.pool_size, 127 filter_sizes=self.filter_sizes, 128 dense_units=self.dense_units, 129 conv_padding=self.conv_padding, 130 pool_padding=self.pool_padding, 131 random_state=self.random_state, 132 ) 133 134 def build_model(self, input_shape, n_classes, **kwargs): 135 """Construct a compiled, un-trained, keras model that is ready for training. 136 137 In sktime, time series are stored in numpy arrays of shape (d,m), where d 138 is the number of dimensions, m is the series length. Keras/tensorflow assume 139 data is in shape (m,d). This method also assumes (m,d). Transpose should 140 happen in fit. 
141 142 Parameters 143 ---------- 144 input_shape : tuple 145 The shape of the data fed into the input layer, should be (m,d) 146 n_classes: int 147 The number of classes, which becomes the size of the output layer 148 149 Returns 150 ------- 151 output : a compiled Keras Model 152 """ 153 import tensorflow as tf 154 from tensorflow import keras 155 156 tf.random.set_seed(self.random_state) 157 158 metrics = ["accuracy"] if self.metrics is None else self.metrics 159 160 input_layers, output_layer = self._network.build_network(input_shape, **kwargs) 161 162 output_layer = keras.layers.Dense( 163 units=n_classes, 164 activation=self.activation, 165 use_bias=self.use_bias, 166 )(output_layer) 167 168 self.optimizer_ = ( 169 keras.optimizers.SGD( 170 learning_rate=0.01, 171 momentum=0.9, 172 weight_decay=0.0005, 173 ) 174 if self.optimizer is None 175 else self.optimizer 176 ) 177 178 model = keras.models.Model(inputs=input_layers, outputs=output_layer) 179 180 model.compile( 181 loss=self.loss, 182 optimizer=self.optimizer_, 183 metrics=metrics, 184 ) 185 186 return model 187 188 def _fit(self, X, y): 189 """Fit the classifier on the training set (X, y). 190 191 Parameters 192 ---------- 193 X : np.ndarray of shape = (n_instances (n), n_dimensions (d), series_length (m)) 194 The training input samples. 195 y : np.ndarray of shape n 196 The training data class labels. 197 198 Returns 199 ------- 200 self : object 201 """ 202 y_onehot = self.convert_y_to_keras(y) 203 X = X.transpose(0, 2, 1) 204 self.input_shape = X.shape[1:] 205 X = self._network._prepare_input(X) 206 207 check_random_state(self.random_state) 208 209 self.model_ = self.build_model(self.input_shape, self.n_classes_) 210 self.callbacks_ = deepcopy(self.callbacks) 211 212 if self.verbose: 213 self.model_.summary() 214 215 self.history = self.model_.fit( 216 X, 217 y_onehot, 218 batch_size=self.batch_size, 219 verbose=self.verbose, 220 callbacks=self.callbacks_, 221 ) 222 223 return self 224 225 def _predict_proba(self, X, **kwargs): 226 """Find probability estimates for each class for all cases in X. 227 228 Parameters 229 ---------- 230 X : an np.ndarray of shape = (n_instances, n_dimensions, series_length) 231 The training input samples. 232 233 Returns 234 ------- 235 output : array of shape = [n_instances, n_classes] of probabilities 236 """ 237 self.check_is_fitted() 238 X = X.transpose([0, 2, 1]) 239 X = self._network._prepare_input(X) 240 241 probs = self.model_.predict(X, self.batch_size, **kwargs) 242 243 if probs.shape[1] == 1: 244 probs = np.hstack([1 - probs, probs]) 245 probs = probs / probs.sum(axis=1, keepdims=1) 246 return probs 247 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sktime/classification/deep_learning/mcdcnn.py b/sktime/classification/deep_learning/mcdcnn.py --- a/sktime/classification/deep_learning/mcdcnn.py +++ b/sktime/classification/deep_learning/mcdcnn.py @@ -215,6 +215,7 @@ self.history = self.model_.fit( X, y_onehot, + epochs=self.n_epochs, batch_size=self.batch_size, verbose=self.verbose, callbacks=self.callbacks_,
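The single added line matters because `keras.Model.fit` falls back to its default of `epochs=1` whenever the argument is omitted, so `n_epochs` was silently ignored. A rough check in the spirit of the classifier's own docstring example (it assumes tensorflow is installed and uses sktime's small bundled dataset; the epoch and batch numbers are arbitrary, and this is not part of the sktime test suite):

```python
# Sketch only: tensorflow/keras must be installed for MCDCNNClassifier to run.
from sktime.classification.deep_learning.mcdcnn import MCDCNNClassifier
from sktime.datasets import load_unit_test

X_train, y_train = load_unit_test(split="train")

clf = MCDCNNClassifier(n_epochs=3, batch_size=4)
clf.fit(X_train, y_train)

# Keras appends one loss value per completed epoch, so the history length
# reveals how many epochs actually ran. Before the patch only one entry
# appears; with epochs=self.n_epochs forwarded, the assertion holds.
assert len(clf.history.history["loss"]) == clf.n_epochs
```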
{"golden_diff": "diff --git a/sktime/classification/deep_learning/mcdcnn.py b/sktime/classification/deep_learning/mcdcnn.py\n--- a/sktime/classification/deep_learning/mcdcnn.py\n+++ b/sktime/classification/deep_learning/mcdcnn.py\n@@ -215,6 +215,7 @@\n self.history = self.model_.fit(\r\n X,\r\n y_onehot,\r\n+ epochs=self.n_epochs,\r\n batch_size=self.batch_size,\r\n verbose=self.verbose,\r\n callbacks=self.callbacks_,\n", "issue": "[BUG] MCDCNNClassifier.fit runs only 1 training epoch\nDespite n_epochs being set to 200, only one training epoch will be executed here:\r\n\r\n```python\r\nfrom sktime.classification.deep_learning.mcdcnn import MCDCNNClassifier\r\nfrom keras.losses import binary_crossentropy\r\n\r\nmodel = MCDCNNClassifier(n_epochs=200, batch_size=64, loss=binary_crossentropy, random_state=42, metrics=['binary_accuracy'])\r\nmodel.fit(x_train, y_train)\r\n```\r\n\r\nThe problem is that model.n_epochs is not passed to self.model_.fit in MCDCNNClassifier._fit (mcdcnn.py):\r\n\r\n```python\r\n def _fit(self, X, y):\r\n ...\r\n\r\n self.history = self.model_.fit(\r\n X,\r\n y_onehot,\r\n epochs=self.n_epochs, **#<<<-----------------THIS LINS IS MISSING #**\r\n batch_size=self.batch_size,\r\n verbose=self.verbose,\r\n callbacks=self.callbacks_,\r\n )\r\n\r\n return self\r\n```\n", "before_files": [{"content": "\"\"\"Multi Channel Deep Convolutional Neural Classifier (MCDCNN).\"\"\"\r\n\r\n__author__ = [\r\n \"JamesLarge\",\r\n]\r\n\r\nfrom copy import deepcopy\r\n\r\nimport numpy as np\r\nfrom sklearn.utils import check_random_state\r\n\r\nfrom sktime.classification.deep_learning.base import BaseDeepClassifier\r\nfrom sktime.networks.mcdcnn import MCDCNNNetwork\r\nfrom sktime.utils.validation._dependencies import _check_dl_dependencies\r\n\r\n\r\nclass MCDCNNClassifier(BaseDeepClassifier):\r\n \"\"\"Multi Channel Deep Convolutional Neural Classifier, as described in [1]_.\r\n\r\n Parameters\r\n ----------\r\n n_epochs : int, optional (default=120)\r\n The number of epochs to train the model.\r\n batch_size : int, optional (default=16)\r\n The number of samples per gradient update.\r\n kernel_size : int, optional (default=5)\r\n The size of kernel in Conv1D layer.\r\n pool_size : int, optional (default=2)\r\n The size of kernel in (Max) Pool layer.\r\n filter_sizes : tuple, optional (default=(8, 8))\r\n The sizes of filter for Conv1D layer corresponding\r\n to each Conv1D in the block.\r\n dense_units : int, optional (default=732)\r\n The number of output units of the final Dense\r\n layer of this Network. This is NOT the final layer\r\n but the penultimate layer.\r\n conv_padding : str or None, optional (default=\"same\")\r\n The type of padding to be applied to convolutional\r\n layers.\r\n pool_padding : str or None, optional (default=\"same\")\r\n The type of padding to be applied to pooling layers.\r\n loss : str, optional (default=\"categorical_crossentropy\")\r\n The name of the loss function to be used during training,\r\n should be supported by keras.\r\n activation : str, optional (default=\"sigmoid\")\r\n The activation function to apply at the output. It should be\r\n \"software\" if response variable has more than two types.\r\n use_bias : bool, optional (default=True)\r\n Whether bias should be included in the output layer.\r\n metrics : None or string, optional (default=None)\r\n The string which will be used during model compilation. 
If left as None,\r\n then \"accuracy\" is passed to `model.compile()`.\r\n optimizer: None or keras.optimizers.Optimizer instance, optional (default=None)\r\n The optimizer that is used for model compiltation. If left as None,\r\n then `keras.optimizers.SGD` is used with the following parameters -\r\n `learning_rate=0.01, momentum=0.9, weight_decay=0.0005`.\r\n callbacks : None or list of keras.callbacks.Callback, optinal (default=None)\r\n The callback(s) to use during training.\r\n random_state : int, optional (default=0)\r\n The seed to any random action.\r\n\r\n Notes\r\n -----\r\n Adapted from the implementation of Fawaz et. al\r\n https://github.com/hfawaz/dl-4-tsc/blob/master/classifiers/mcdcnn.py\r\n\r\n References\r\n ----------\r\n .. [1] Zheng et. al, Time series classification using multi-channels deep\r\n convolutional neural networks, International Conference on\r\n Web-Age Information Management, Pages 298-310, year 2014, organization: Springer.\r\n\r\n Examples\r\n --------\r\n >>> from sktime.classification.deep_learning.mcdcnn import MCDCNNClassifier\r\n >>> from sktime.datasets import load_unit_test\r\n >>> X_train, y_tain = load_unit_test(split=\"train\")\r\n >>> mcdcnn = MCDCNNClassifier() # doctest: +SKIP\r\n >>> mcdcnn.fit(X_train, y_train) # doctest: +SKIP\r\n MCDCNNClassifier(...)\r\n \"\"\"\r\n\r\n _tags = {\"python_dependencies\": \"tensorflow\"}\r\n\r\n def __init__(\r\n self,\r\n n_epochs=120,\r\n batch_size=16,\r\n kernel_size=5,\r\n pool_size=2,\r\n filter_sizes=(8, 8),\r\n dense_units=732,\r\n conv_padding=\"same\",\r\n pool_padding=\"same\",\r\n loss=\"categorical_crossentropy\",\r\n activation=\"sigmoid\",\r\n use_bias=True,\r\n callbacks=None,\r\n metrics=None,\r\n optimizer=None,\r\n verbose=False,\r\n random_state=0,\r\n ):\r\n _check_dl_dependencies(severity=\"error\")\r\n super().__init__()\r\n\r\n self.n_epochs = n_epochs\r\n self.batch_size = batch_size\r\n self.kernel_size = kernel_size\r\n self.pool_size = pool_size\r\n self.filter_sizes = filter_sizes\r\n self.dense_units = dense_units\r\n self.conv_padding = conv_padding\r\n self.pool_padding = pool_padding\r\n self.loss = loss\r\n self.activation = activation\r\n self.use_bias = use_bias\r\n self.callbacks = callbacks\r\n self.metrics = metrics\r\n self.optimizer = optimizer\r\n self.verbose = verbose\r\n self.random_state = random_state\r\n self.history = None\r\n self._network = MCDCNNNetwork(\r\n kernel_size=self.kernel_size,\r\n pool_size=self.pool_size,\r\n filter_sizes=self.filter_sizes,\r\n dense_units=self.dense_units,\r\n conv_padding=self.conv_padding,\r\n pool_padding=self.pool_padding,\r\n random_state=self.random_state,\r\n )\r\n\r\n def build_model(self, input_shape, n_classes, **kwargs):\r\n \"\"\"Construct a compiled, un-trained, keras model that is ready for training.\r\n\r\n In sktime, time series are stored in numpy arrays of shape (d,m), where d\r\n is the number of dimensions, m is the series length. Keras/tensorflow assume\r\n data is in shape (m,d). This method also assumes (m,d). 
Transpose should\r\n happen in fit.\r\n\r\n Parameters\r\n ----------\r\n input_shape : tuple\r\n The shape of the data fed into the input layer, should be (m,d)\r\n n_classes: int\r\n The number of classes, which becomes the size of the output layer\r\n\r\n Returns\r\n -------\r\n output : a compiled Keras Model\r\n \"\"\"\r\n import tensorflow as tf\r\n from tensorflow import keras\r\n\r\n tf.random.set_seed(self.random_state)\r\n\r\n metrics = [\"accuracy\"] if self.metrics is None else self.metrics\r\n\r\n input_layers, output_layer = self._network.build_network(input_shape, **kwargs)\r\n\r\n output_layer = keras.layers.Dense(\r\n units=n_classes,\r\n activation=self.activation,\r\n use_bias=self.use_bias,\r\n )(output_layer)\r\n\r\n self.optimizer_ = (\r\n keras.optimizers.SGD(\r\n learning_rate=0.01,\r\n momentum=0.9,\r\n weight_decay=0.0005,\r\n )\r\n if self.optimizer is None\r\n else self.optimizer\r\n )\r\n\r\n model = keras.models.Model(inputs=input_layers, outputs=output_layer)\r\n\r\n model.compile(\r\n loss=self.loss,\r\n optimizer=self.optimizer_,\r\n metrics=metrics,\r\n )\r\n\r\n return model\r\n\r\n def _fit(self, X, y):\r\n \"\"\"Fit the classifier on the training set (X, y).\r\n\r\n Parameters\r\n ----------\r\n X : np.ndarray of shape = (n_instances (n), n_dimensions (d), series_length (m))\r\n The training input samples.\r\n y : np.ndarray of shape n\r\n The training data class labels.\r\n\r\n Returns\r\n -------\r\n self : object\r\n \"\"\"\r\n y_onehot = self.convert_y_to_keras(y)\r\n X = X.transpose(0, 2, 1)\r\n self.input_shape = X.shape[1:]\r\n X = self._network._prepare_input(X)\r\n\r\n check_random_state(self.random_state)\r\n\r\n self.model_ = self.build_model(self.input_shape, self.n_classes_)\r\n self.callbacks_ = deepcopy(self.callbacks)\r\n\r\n if self.verbose:\r\n self.model_.summary()\r\n\r\n self.history = self.model_.fit(\r\n X,\r\n y_onehot,\r\n batch_size=self.batch_size,\r\n verbose=self.verbose,\r\n callbacks=self.callbacks_,\r\n )\r\n\r\n return self\r\n\r\n def _predict_proba(self, X, **kwargs):\r\n \"\"\"Find probability estimates for each class for all cases in X.\r\n\r\n Parameters\r\n ----------\r\n X : an np.ndarray of shape = (n_instances, n_dimensions, series_length)\r\n The training input samples.\r\n\r\n Returns\r\n -------\r\n output : array of shape = [n_instances, n_classes] of probabilities\r\n \"\"\"\r\n self.check_is_fitted()\r\n X = X.transpose([0, 2, 1])\r\n X = self._network._prepare_input(X)\r\n\r\n probs = self.model_.predict(X, self.batch_size, **kwargs)\r\n\r\n if probs.shape[1] == 1:\r\n probs = np.hstack([1 - probs, probs])\r\n probs = probs / probs.sum(axis=1, keepdims=1)\r\n return probs\r\n", "path": "sktime/classification/deep_learning/mcdcnn.py"}], "after_files": [{"content": "\"\"\"Multi Channel Deep Convolutional Neural Classifier (MCDCNN).\"\"\"\r\n\r\n__author__ = [\r\n \"JamesLarge\",\r\n]\r\n\r\nfrom copy import deepcopy\r\n\r\nimport numpy as np\r\nfrom sklearn.utils import check_random_state\r\n\r\nfrom sktime.classification.deep_learning.base import BaseDeepClassifier\r\nfrom sktime.networks.mcdcnn import MCDCNNNetwork\r\nfrom sktime.utils.validation._dependencies import _check_dl_dependencies\r\n\r\n\r\nclass MCDCNNClassifier(BaseDeepClassifier):\r\n \"\"\"Multi Channel Deep Convolutional Neural Classifier, as described in [1]_.\r\n\r\n Parameters\r\n ----------\r\n n_epochs : int, optional (default=120)\r\n The number of epochs to train the model.\r\n batch_size : int, optional (default=16)\r\n The number 
of samples per gradient update.\r\n kernel_size : int, optional (default=5)\r\n The size of kernel in Conv1D layer.\r\n pool_size : int, optional (default=2)\r\n The size of kernel in (Max) Pool layer.\r\n filter_sizes : tuple, optional (default=(8, 8))\r\n The sizes of filter for Conv1D layer corresponding\r\n to each Conv1D in the block.\r\n dense_units : int, optional (default=732)\r\n The number of output units of the final Dense\r\n layer of this Network. This is NOT the final layer\r\n but the penultimate layer.\r\n conv_padding : str or None, optional (default=\"same\")\r\n The type of padding to be applied to convolutional\r\n layers.\r\n pool_padding : str or None, optional (default=\"same\")\r\n The type of padding to be applied to pooling layers.\r\n loss : str, optional (default=\"categorical_crossentropy\")\r\n The name of the loss function to be used during training,\r\n should be supported by keras.\r\n activation : str, optional (default=\"sigmoid\")\r\n The activation function to apply at the output. It should be\r\n \"software\" if response variable has more than two types.\r\n use_bias : bool, optional (default=True)\r\n Whether bias should be included in the output layer.\r\n metrics : None or string, optional (default=None)\r\n The string which will be used during model compilation. If left as None,\r\n then \"accuracy\" is passed to `model.compile()`.\r\n optimizer: None or keras.optimizers.Optimizer instance, optional (default=None)\r\n The optimizer that is used for model compiltation. If left as None,\r\n then `keras.optimizers.SGD` is used with the following parameters -\r\n `learning_rate=0.01, momentum=0.9, weight_decay=0.0005`.\r\n callbacks : None or list of keras.callbacks.Callback, optinal (default=None)\r\n The callback(s) to use during training.\r\n random_state : int, optional (default=0)\r\n The seed to any random action.\r\n\r\n Notes\r\n -----\r\n Adapted from the implementation of Fawaz et. al\r\n https://github.com/hfawaz/dl-4-tsc/blob/master/classifiers/mcdcnn.py\r\n\r\n References\r\n ----------\r\n .. [1] Zheng et. 
al, Time series classification using multi-channels deep\r\n convolutional neural networks, International Conference on\r\n Web-Age Information Management, Pages 298-310, year 2014, organization: Springer.\r\n\r\n Examples\r\n --------\r\n >>> from sktime.classification.deep_learning.mcdcnn import MCDCNNClassifier\r\n >>> from sktime.datasets import load_unit_test\r\n >>> X_train, y_tain = load_unit_test(split=\"train\")\r\n >>> mcdcnn = MCDCNNClassifier() # doctest: +SKIP\r\n >>> mcdcnn.fit(X_train, y_train) # doctest: +SKIP\r\n MCDCNNClassifier(...)\r\n \"\"\"\r\n\r\n _tags = {\"python_dependencies\": \"tensorflow\"}\r\n\r\n def __init__(\r\n self,\r\n n_epochs=120,\r\n batch_size=16,\r\n kernel_size=5,\r\n pool_size=2,\r\n filter_sizes=(8, 8),\r\n dense_units=732,\r\n conv_padding=\"same\",\r\n pool_padding=\"same\",\r\n loss=\"categorical_crossentropy\",\r\n activation=\"sigmoid\",\r\n use_bias=True,\r\n callbacks=None,\r\n metrics=None,\r\n optimizer=None,\r\n verbose=False,\r\n random_state=0,\r\n ):\r\n _check_dl_dependencies(severity=\"error\")\r\n super().__init__()\r\n\r\n self.n_epochs = n_epochs\r\n self.batch_size = batch_size\r\n self.kernel_size = kernel_size\r\n self.pool_size = pool_size\r\n self.filter_sizes = filter_sizes\r\n self.dense_units = dense_units\r\n self.conv_padding = conv_padding\r\n self.pool_padding = pool_padding\r\n self.loss = loss\r\n self.activation = activation\r\n self.use_bias = use_bias\r\n self.callbacks = callbacks\r\n self.metrics = metrics\r\n self.optimizer = optimizer\r\n self.verbose = verbose\r\n self.random_state = random_state\r\n self.history = None\r\n self._network = MCDCNNNetwork(\r\n kernel_size=self.kernel_size,\r\n pool_size=self.pool_size,\r\n filter_sizes=self.filter_sizes,\r\n dense_units=self.dense_units,\r\n conv_padding=self.conv_padding,\r\n pool_padding=self.pool_padding,\r\n random_state=self.random_state,\r\n )\r\n\r\n def build_model(self, input_shape, n_classes, **kwargs):\r\n \"\"\"Construct a compiled, un-trained, keras model that is ready for training.\r\n\r\n In sktime, time series are stored in numpy arrays of shape (d,m), where d\r\n is the number of dimensions, m is the series length. Keras/tensorflow assume\r\n data is in shape (m,d). This method also assumes (m,d). 
Transpose should\r\n happen in fit.\r\n\r\n Parameters\r\n ----------\r\n input_shape : tuple\r\n The shape of the data fed into the input layer, should be (m,d)\r\n n_classes: int\r\n The number of classes, which becomes the size of the output layer\r\n\r\n Returns\r\n -------\r\n output : a compiled Keras Model\r\n \"\"\"\r\n import tensorflow as tf\r\n from tensorflow import keras\r\n\r\n tf.random.set_seed(self.random_state)\r\n\r\n metrics = [\"accuracy\"] if self.metrics is None else self.metrics\r\n\r\n input_layers, output_layer = self._network.build_network(input_shape, **kwargs)\r\n\r\n output_layer = keras.layers.Dense(\r\n units=n_classes,\r\n activation=self.activation,\r\n use_bias=self.use_bias,\r\n )(output_layer)\r\n\r\n self.optimizer_ = (\r\n keras.optimizers.SGD(\r\n learning_rate=0.01,\r\n momentum=0.9,\r\n weight_decay=0.0005,\r\n )\r\n if self.optimizer is None\r\n else self.optimizer\r\n )\r\n\r\n model = keras.models.Model(inputs=input_layers, outputs=output_layer)\r\n\r\n model.compile(\r\n loss=self.loss,\r\n optimizer=self.optimizer_,\r\n metrics=metrics,\r\n )\r\n\r\n return model\r\n\r\n def _fit(self, X, y):\r\n \"\"\"Fit the classifier on the training set (X, y).\r\n\r\n Parameters\r\n ----------\r\n X : np.ndarray of shape = (n_instances (n), n_dimensions (d), series_length (m))\r\n The training input samples.\r\n y : np.ndarray of shape n\r\n The training data class labels.\r\n\r\n Returns\r\n -------\r\n self : object\r\n \"\"\"\r\n y_onehot = self.convert_y_to_keras(y)\r\n X = X.transpose(0, 2, 1)\r\n self.input_shape = X.shape[1:]\r\n X = self._network._prepare_input(X)\r\n\r\n check_random_state(self.random_state)\r\n\r\n self.model_ = self.build_model(self.input_shape, self.n_classes_)\r\n self.callbacks_ = deepcopy(self.callbacks)\r\n\r\n if self.verbose:\r\n self.model_.summary()\r\n\r\n self.history = self.model_.fit(\r\n X,\r\n y_onehot,\r\n epochs=self.n_epochs,\r\n batch_size=self.batch_size,\r\n verbose=self.verbose,\r\n callbacks=self.callbacks_,\r\n )\r\n\r\n return self\r\n\r\n def _predict_proba(self, X, **kwargs):\r\n \"\"\"Find probability estimates for each class for all cases in X.\r\n\r\n Parameters\r\n ----------\r\n X : an np.ndarray of shape = (n_instances, n_dimensions, series_length)\r\n The training input samples.\r\n\r\n Returns\r\n -------\r\n output : array of shape = [n_instances, n_classes] of probabilities\r\n \"\"\"\r\n self.check_is_fitted()\r\n X = X.transpose([0, 2, 1])\r\n X = self._network._prepare_input(X)\r\n\r\n probs = self.model_.predict(X, self.batch_size, **kwargs)\r\n\r\n if probs.shape[1] == 1:\r\n probs = np.hstack([1 - probs, probs])\r\n probs = probs / probs.sum(axis=1, keepdims=1)\r\n return probs\r\n", "path": "sktime/classification/deep_learning/mcdcnn.py"}]}
3,024
115
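The record above ends the MCDCNN classifier sources, whose `_fit` and `_predict_proba` methods hinge on transposing sktime's (n_instances, n_dimensions, series_length) panels into the (n_instances, series_length, n_dimensions) layout Keras expects. A minimal numpy-only sketch of that layout change follows; the toy shapes below are illustrative, not taken from the dataset:

```python
import numpy as np

# Toy batch in sktime's (n_instances, n_dimensions, series_length) layout.
n, d, m = 4, 3, 50
X = np.random.default_rng(0).normal(size=(n, d, m))

# Keras layers expect (n_instances, series_length, n_dimensions), which is
# what the quoted _fit method produces via X.transpose(0, 2, 1).
X_keras = X.transpose(0, 2, 1)
assert X_keras.shape == (n, m, d)
```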
gh_patches_debug_19873
rasdani/github-patches
git_diff
CTFd__CTFd-2067
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Size limits on logo, favicon, image uploads Sometimes people upload really big images for small things like the logo or the favicon. We should impose some kind of size limit or automatic resizing. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `CTFd/forms/setup.py` Content: ``` 1 from wtforms import ( 2 FileField, 3 HiddenField, 4 PasswordField, 5 RadioField, 6 SelectField, 7 StringField, 8 TextAreaField, 9 ) 10 from wtforms.fields.html5 import EmailField 11 from wtforms.validators import InputRequired 12 13 from CTFd.constants.themes import DEFAULT_THEME 14 from CTFd.forms import BaseForm 15 from CTFd.forms.fields import SubmitField 16 from CTFd.utils.config import get_themes 17 18 19 class SetupForm(BaseForm): 20 ctf_name = StringField( 21 "Event Name", description="The name of your CTF event/workshop" 22 ) 23 ctf_description = TextAreaField( 24 "Event Description", description="Description for the CTF" 25 ) 26 user_mode = RadioField( 27 "User Mode", 28 choices=[("teams", "Team Mode"), ("users", "User Mode")], 29 default="teams", 30 description="Controls whether users join together in teams to play (Team Mode) or play as themselves (User Mode)", 31 validators=[InputRequired()], 32 ) 33 34 name = StringField( 35 "Admin Username", 36 description="Your username for the administration account", 37 validators=[InputRequired()], 38 ) 39 email = EmailField( 40 "Admin Email", 41 description="Your email address for the administration account", 42 validators=[InputRequired()], 43 ) 44 password = PasswordField( 45 "Admin Password", 46 description="Your password for the administration account", 47 validators=[InputRequired()], 48 ) 49 50 ctf_logo = FileField( 51 "Logo", 52 description="Logo to use for the website instead of a CTF name. Used as the home page button.", 53 ) 54 ctf_banner = FileField("Banner", description="Banner to use for the homepage.") 55 ctf_small_icon = FileField( 56 "Small Icon", 57 description="favicon used in user's browsers. Only PNGs accepted. Must be 32x32px.", 58 ) 59 ctf_theme = SelectField( 60 "Theme", 61 description="CTFd Theme to use", 62 choices=list(zip(get_themes(), get_themes())), 63 default=DEFAULT_THEME, 64 validators=[InputRequired()], 65 ) 66 theme_color = HiddenField( 67 "Theme Color", 68 description="Color used by theme to control aesthetics. Requires theme support. Optional.", 69 ) 70 71 start = StringField( 72 "Start Time", description="Time when your CTF is scheduled to start. Optional." 73 ) 74 end = StringField( 75 "End Time", description="Time when your CTF is scheduled to end. Optional." 76 ) 77 submit = SubmitField("Finish") 78 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/CTFd/forms/setup.py b/CTFd/forms/setup.py --- a/CTFd/forms/setup.py +++ b/CTFd/forms/setup.py @@ -49,16 +49,18 @@ ctf_logo = FileField( "Logo", - description="Logo to use for the website instead of a CTF name. Used as the home page button.", + description="Logo to use for the website instead of a CTF name. Used as the home page button. Optional.", + ) + ctf_banner = FileField( + "Banner", description="Banner to use for the homepage. Optional." ) - ctf_banner = FileField("Banner", description="Banner to use for the homepage.") ctf_small_icon = FileField( "Small Icon", - description="favicon used in user's browsers. Only PNGs accepted. Must be 32x32px.", + description="favicon used in user's browsers. Only PNGs accepted. Must be 32x32px. Optional.", ) ctf_theme = SelectField( "Theme", - description="CTFd Theme to use", + description="CTFd Theme to use. Can be changed later.", choices=list(zip(get_themes(), get_themes())), default=DEFAULT_THEME, validators=[InputRequired()],
{"golden_diff": "diff --git a/CTFd/forms/setup.py b/CTFd/forms/setup.py\n--- a/CTFd/forms/setup.py\n+++ b/CTFd/forms/setup.py\n@@ -49,16 +49,18 @@\n \n ctf_logo = FileField(\n \"Logo\",\n- description=\"Logo to use for the website instead of a CTF name. Used as the home page button.\",\n+ description=\"Logo to use for the website instead of a CTF name. Used as the home page button. Optional.\",\n+ )\n+ ctf_banner = FileField(\n+ \"Banner\", description=\"Banner to use for the homepage. Optional.\"\n )\n- ctf_banner = FileField(\"Banner\", description=\"Banner to use for the homepage.\")\n ctf_small_icon = FileField(\n \"Small Icon\",\n- description=\"favicon used in user's browsers. Only PNGs accepted. Must be 32x32px.\",\n+ description=\"favicon used in user's browsers. Only PNGs accepted. Must be 32x32px. Optional.\",\n )\n ctf_theme = SelectField(\n \"Theme\",\n- description=\"CTFd Theme to use\",\n+ description=\"CTFd Theme to use. Can be changed later.\",\n choices=list(zip(get_themes(), get_themes())),\n default=DEFAULT_THEME,\n validators=[InputRequired()],\n", "issue": "Size limits on logo, favicon, image uploads\nSometimes people upload really big images for small things like the logo or the favicon. We should impose some kind of size limit or automatic resizing. \n", "before_files": [{"content": "from wtforms import (\n FileField,\n HiddenField,\n PasswordField,\n RadioField,\n SelectField,\n StringField,\n TextAreaField,\n)\nfrom wtforms.fields.html5 import EmailField\nfrom wtforms.validators import InputRequired\n\nfrom CTFd.constants.themes import DEFAULT_THEME\nfrom CTFd.forms import BaseForm\nfrom CTFd.forms.fields import SubmitField\nfrom CTFd.utils.config import get_themes\n\n\nclass SetupForm(BaseForm):\n ctf_name = StringField(\n \"Event Name\", description=\"The name of your CTF event/workshop\"\n )\n ctf_description = TextAreaField(\n \"Event Description\", description=\"Description for the CTF\"\n )\n user_mode = RadioField(\n \"User Mode\",\n choices=[(\"teams\", \"Team Mode\"), (\"users\", \"User Mode\")],\n default=\"teams\",\n description=\"Controls whether users join together in teams to play (Team Mode) or play as themselves (User Mode)\",\n validators=[InputRequired()],\n )\n\n name = StringField(\n \"Admin Username\",\n description=\"Your username for the administration account\",\n validators=[InputRequired()],\n )\n email = EmailField(\n \"Admin Email\",\n description=\"Your email address for the administration account\",\n validators=[InputRequired()],\n )\n password = PasswordField(\n \"Admin Password\",\n description=\"Your password for the administration account\",\n validators=[InputRequired()],\n )\n\n ctf_logo = FileField(\n \"Logo\",\n description=\"Logo to use for the website instead of a CTF name. Used as the home page button.\",\n )\n ctf_banner = FileField(\"Banner\", description=\"Banner to use for the homepage.\")\n ctf_small_icon = FileField(\n \"Small Icon\",\n description=\"favicon used in user's browsers. Only PNGs accepted. Must be 32x32px.\",\n )\n ctf_theme = SelectField(\n \"Theme\",\n description=\"CTFd Theme to use\",\n choices=list(zip(get_themes(), get_themes())),\n default=DEFAULT_THEME,\n validators=[InputRequired()],\n )\n theme_color = HiddenField(\n \"Theme Color\",\n description=\"Color used by theme to control aesthetics. Requires theme support. Optional.\",\n )\n\n start = StringField(\n \"Start Time\", description=\"Time when your CTF is scheduled to start. 
Optional.\"\n )\n end = StringField(\n \"End Time\", description=\"Time when your CTF is scheduled to end. Optional.\"\n )\n submit = SubmitField(\"Finish\")\n", "path": "CTFd/forms/setup.py"}], "after_files": [{"content": "from wtforms import (\n FileField,\n HiddenField,\n PasswordField,\n RadioField,\n SelectField,\n StringField,\n TextAreaField,\n)\nfrom wtforms.fields.html5 import EmailField\nfrom wtforms.validators import InputRequired\n\nfrom CTFd.constants.themes import DEFAULT_THEME\nfrom CTFd.forms import BaseForm\nfrom CTFd.forms.fields import SubmitField\nfrom CTFd.utils.config import get_themes\n\n\nclass SetupForm(BaseForm):\n ctf_name = StringField(\n \"Event Name\", description=\"The name of your CTF event/workshop\"\n )\n ctf_description = TextAreaField(\n \"Event Description\", description=\"Description for the CTF\"\n )\n user_mode = RadioField(\n \"User Mode\",\n choices=[(\"teams\", \"Team Mode\"), (\"users\", \"User Mode\")],\n default=\"teams\",\n description=\"Controls whether users join together in teams to play (Team Mode) or play as themselves (User Mode)\",\n validators=[InputRequired()],\n )\n\n name = StringField(\n \"Admin Username\",\n description=\"Your username for the administration account\",\n validators=[InputRequired()],\n )\n email = EmailField(\n \"Admin Email\",\n description=\"Your email address for the administration account\",\n validators=[InputRequired()],\n )\n password = PasswordField(\n \"Admin Password\",\n description=\"Your password for the administration account\",\n validators=[InputRequired()],\n )\n\n ctf_logo = FileField(\n \"Logo\",\n description=\"Logo to use for the website instead of a CTF name. Used as the home page button. Optional.\",\n )\n ctf_banner = FileField(\n \"Banner\", description=\"Banner to use for the homepage. Optional.\"\n )\n ctf_small_icon = FileField(\n \"Small Icon\",\n description=\"favicon used in user's browsers. Only PNGs accepted. Must be 32x32px. Optional.\",\n )\n ctf_theme = SelectField(\n \"Theme\",\n description=\"CTFd Theme to use. Can be changed later.\",\n choices=list(zip(get_themes(), get_themes())),\n default=DEFAULT_THEME,\n validators=[InputRequired()],\n )\n theme_color = HiddenField(\n \"Theme Color\",\n description=\"Color used by theme to control aesthetics. Requires theme support. Optional.\",\n )\n\n start = StringField(\n \"Start Time\", description=\"Time when your CTF is scheduled to start. Optional.\"\n )\n end = StringField(\n \"End Time\", description=\"Time when your CTF is scheduled to end. Optional.\"\n )\n submit = SubmitField(\"Finish\")\n", "path": "CTFd/forms/setup.py"}]}
987
294
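The CTFd record's merged diff only rewords the setup-form field descriptions; the issue itself asks for a size limit or automatic resizing on logo and favicon uploads. Purely as an illustration of the guard the issue describes (the function name and byte limit below are assumptions, not part of the patch), a stdlib-only check could look like:

```python
import os

MAX_LOGO_BYTES = 256 * 1024  # illustrative cap, not taken from the patch


def check_upload_size(path: str, limit: int = MAX_LOGO_BYTES) -> None:
    """Reject an uploaded file that exceeds the size cap before storing it."""
    size = os.path.getsize(path)
    if size > limit:
        raise ValueError(f"upload is {size} bytes, limit is {limit} bytes")
```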
gh_patches_debug_28186
rasdani/github-patches
git_diff
bridgecrewio__checkov-93
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Dynamic blocks handling is partial **Describe the bug** An S3 bucket with a dynamic `logging` block is considered a violation, even if a value was set for the variable externally. **To Reproduce** Steps to reproduce the behavior: S3 configuration: ``` resource "aws_s3_bucket" "bridgecrew_cws_bucket" { count = var.existing_bucket_name == null ? 1 : 0 bucket = local.bucket_name acl = "private" versioning { enabled = true } lifecycle_rule { id = "Delete old log files" enabled = true noncurrent_version_expiration { days = var.log_file_expiration } expiration { days = var.log_file_expiration } } dynamic "logging" { for_each = var.logs_bucket_id != null ? [var.logs_bucket_id] : [] content { target_bucket = logging.value target_prefix = "/${local.bucket_name}" } } server_side_encryption_configuration { rule { apply_server_side_encryption_by_default { kms_master_key_id = local.kms_key sse_algorithm = "aws:kms" } } } tags = { Name = "BridgecrewCWSBucket" } } ``` **Expected behavior** The check should not fail **Desktop (please complete the following information):** - OS: mac OSX Catalina - Checkov Version 1.0.167 Docker command in README.md is wrong **Describe the bug** The docker run command in the readme is incorrect and does not work. It should be: docker run -v /user/tf:/tf bridgecrew/checkov -d /tf --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `checkov/terraform/parser.py` Content: ``` 1 import logging 2 import os 3 from os import path 4 5 import hcl2 6 7 8 class Parser: 9 logger = logging.getLogger(__name__) 10 11 def hcl2(self, directory, tf_definitions={}, parsing_errors={}): 12 modules_scan = [] 13 14 for file in os.listdir(directory): 15 if file.endswith(".tf"): 16 tf_file = os.path.join(directory, file) 17 if tf_file not in tf_definitions.keys(): 18 try: 19 with(open(tf_file, 'r')) as file: 20 file.seek(0) 21 dict = hcl2.load(file) 22 tf_defenition = dict 23 tf_definitions[tf_file] = tf_defenition 24 # TODO move from here 25 # tf_defenitions = context_registry.enrich_context(tf_file,tf_defenitions) 26 27 for modules in dict.get("module", []): 28 for module in modules.values(): 29 relative_path = module['source'][0] 30 abs_path = os.path.join(directory, relative_path) 31 modules_scan.append(abs_path) 32 except Exception as e: 33 self.logger.debug('failed while parsing file %s' % tf_file, exc_info=e) 34 parsing_errors[tf_file] = e 35 for m in modules_scan: 36 if path.exists(m): 37 self.hcl2(directory=m, tf_definitions=tf_definitions) 38 39 def parse_file(self, file, tf_definitions={}, parsing_errors={}): 40 if file.endswith(".tf"): 41 try: 42 with(open(file, 'r')) as tf_file: 43 tf_file.seek(0) 44 dict = hcl2.load(tf_file) 45 tf_defenition = dict 46 tf_definitions[file] = tf_defenition 47 except Exception as e: 48 self.logger.debug('failed while parsing file %s' % file, exc_info=e) 49 parsing_errors[file] = e 50 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/checkov/terraform/parser.py b/checkov/terraform/parser.py --- a/checkov/terraform/parser.py +++ b/checkov/terraform/parser.py @@ -18,13 +18,18 @@ try: with(open(tf_file, 'r')) as file: file.seek(0) - dict = hcl2.load(file) - tf_defenition = dict - tf_definitions[tf_file] = tf_defenition + tf_definition = hcl2.load(file) + for resource_type in tf_definition.get('resource', []): + for resource in resource_type.values(): + for named_resource in resource.values(): + for dynamic_block in named_resource.get('dynamic', []): + for dynamic_field_name, dynamic_field_value in dynamic_block.items(): + named_resource[dynamic_field_name] = dynamic_field_value['for_each'] + tf_definitions[tf_file] = tf_definition # TODO move from here # tf_defenitions = context_registry.enrich_context(tf_file,tf_defenitions) - for modules in dict.get("module", []): + for modules in tf_definition.get("module", []): for module in modules.values(): relative_path = module['source'][0] abs_path = os.path.join(directory, relative_path)
{"golden_diff": "diff --git a/checkov/terraform/parser.py b/checkov/terraform/parser.py\n--- a/checkov/terraform/parser.py\n+++ b/checkov/terraform/parser.py\n@@ -18,13 +18,18 @@\n try:\n with(open(tf_file, 'r')) as file:\n file.seek(0)\n- dict = hcl2.load(file)\n- tf_defenition = dict\n- tf_definitions[tf_file] = tf_defenition\n+ tf_definition = hcl2.load(file)\n+ for resource_type in tf_definition.get('resource', []):\n+ for resource in resource_type.values():\n+ for named_resource in resource.values():\n+ for dynamic_block in named_resource.get('dynamic', []):\n+ for dynamic_field_name, dynamic_field_value in dynamic_block.items():\n+ named_resource[dynamic_field_name] = dynamic_field_value['for_each']\n+ tf_definitions[tf_file] = tf_definition\n # TODO move from here\n # tf_defenitions = context_registry.enrich_context(tf_file,tf_defenitions)\n \n- for modules in dict.get(\"module\", []):\n+ for modules in tf_definition.get(\"module\", []):\n for module in modules.values():\n relative_path = module['source'][0]\n abs_path = os.path.join(directory, relative_path)\n", "issue": "Dynamic blocks handling is partial\n**Describe the bug**\r\nAn S3 bucket with a dynamic `logging` block is considered a violation, even if a value was set for the variable externally.\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\nS3 configuration:\r\n```\r\nresource \"aws_s3_bucket\" \"bridgecrew_cws_bucket\" {\r\n count = var.existing_bucket_name == null ? 1 : 0\r\n\r\n bucket = local.bucket_name\r\n acl = \"private\"\r\n\r\n versioning {\r\n enabled = true\r\n }\r\n\r\n lifecycle_rule {\r\n id = \"Delete old log files\"\r\n enabled = true\r\n\r\n noncurrent_version_expiration {\r\n days = var.log_file_expiration\r\n }\r\n\r\n expiration {\r\n days = var.log_file_expiration\r\n }\r\n }\r\n\r\n dynamic \"logging\" {\r\n for_each = var.logs_bucket_id != null ? [var.logs_bucket_id] : []\r\n\r\n content {\r\n target_bucket = logging.value\r\n target_prefix = \"/${local.bucket_name}\"\r\n }\r\n }\r\n\r\n server_side_encryption_configuration {\r\n rule {\r\n apply_server_side_encryption_by_default {\r\n kms_master_key_id = local.kms_key\r\n sse_algorithm = \"aws:kms\"\r\n }\r\n }\r\n }\r\n\r\n tags = {\r\n Name = \"BridgecrewCWSBucket\"\r\n }\r\n}\r\n```\r\n\r\n**Expected behavior**\r\nThe check should not fail\r\n\r\n**Desktop (please complete the following information):**\r\n - OS: mac OSX Catalina\r\n - Checkov Version 1.0.167\r\n\r\n\nDocker command in README.md is wrong\n**Describe the bug**\r\nThe docker run command in the readme is incorrect and does not work. 
It should be: \r\ndocker run -v /user/tf:/tf bridgecrew/checkov -d /tf\r\n\r\n\n", "before_files": [{"content": "import logging\nimport os\nfrom os import path\n\nimport hcl2\n\n\nclass Parser:\n logger = logging.getLogger(__name__)\n\n def hcl2(self, directory, tf_definitions={}, parsing_errors={}):\n modules_scan = []\n\n for file in os.listdir(directory):\n if file.endswith(\".tf\"):\n tf_file = os.path.join(directory, file)\n if tf_file not in tf_definitions.keys():\n try:\n with(open(tf_file, 'r')) as file:\n file.seek(0)\n dict = hcl2.load(file)\n tf_defenition = dict\n tf_definitions[tf_file] = tf_defenition\n # TODO move from here\n # tf_defenitions = context_registry.enrich_context(tf_file,tf_defenitions)\n\n for modules in dict.get(\"module\", []):\n for module in modules.values():\n relative_path = module['source'][0]\n abs_path = os.path.join(directory, relative_path)\n modules_scan.append(abs_path)\n except Exception as e:\n self.logger.debug('failed while parsing file %s' % tf_file, exc_info=e)\n parsing_errors[tf_file] = e\n for m in modules_scan:\n if path.exists(m):\n self.hcl2(directory=m, tf_definitions=tf_definitions)\n\n def parse_file(self, file, tf_definitions={}, parsing_errors={}):\n if file.endswith(\".tf\"):\n try:\n with(open(file, 'r')) as tf_file:\n tf_file.seek(0)\n dict = hcl2.load(tf_file)\n tf_defenition = dict\n tf_definitions[file] = tf_defenition\n except Exception as e:\n self.logger.debug('failed while parsing file %s' % file, exc_info=e)\n parsing_errors[file] = e\n", "path": "checkov/terraform/parser.py"}], "after_files": [{"content": "import logging\nimport os\nfrom os import path\n\nimport hcl2\n\n\nclass Parser:\n logger = logging.getLogger(__name__)\n\n def hcl2(self, directory, tf_definitions={}, parsing_errors={}):\n modules_scan = []\n\n for file in os.listdir(directory):\n if file.endswith(\".tf\"):\n tf_file = os.path.join(directory, file)\n if tf_file not in tf_definitions.keys():\n try:\n with(open(tf_file, 'r')) as file:\n file.seek(0)\n tf_definition = hcl2.load(file)\n for resource_type in tf_definition.get('resource', []):\n for resource in resource_type.values():\n for named_resource in resource.values():\n for dynamic_block in named_resource.get('dynamic', []):\n for dynamic_field_name, dynamic_field_value in dynamic_block.items():\n named_resource[dynamic_field_name] = dynamic_field_value['for_each']\n tf_definitions[tf_file] = tf_definition\n # TODO move from here\n # tf_defenitions = context_registry.enrich_context(tf_file,tf_defenitions)\n\n for modules in tf_definition.get(\"module\", []):\n for module in modules.values():\n relative_path = module['source'][0]\n abs_path = os.path.join(directory, relative_path)\n modules_scan.append(abs_path)\n except Exception as e:\n self.logger.debug('failed while parsing file %s' % tf_file, exc_info=e)\n parsing_errors[tf_file] = e\n for m in modules_scan:\n if path.exists(m):\n self.hcl2(directory=m, tf_definitions=tf_definitions)\n\n def parse_file(self, file, tf_definitions={}, parsing_errors={}):\n if file.endswith(\".tf\"):\n try:\n with(open(file, 'r')) as tf_file:\n tf_file.seek(0)\n dict = hcl2.load(tf_file)\n tf_defenition = dict\n tf_definitions[file] = tf_defenition\n except Exception as e:\n self.logger.debug('failed while parsing file %s' % file, exc_info=e)\n parsing_errors[file] = e\n", "path": "checkov/terraform/parser.py"}]}
1,114
287
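The checkov golden diff handles Terraform `dynamic` blocks by promoting each block's `for_each` value to a top-level resource attribute. A small sketch of that same flattening on a plain dict; the structure below is a toy stand-in for hcl2's output, shaped as the diff assumes:

```python
# Toy hcl2-style resource: attributes plus a "dynamic" list of blocks.
named_resource = {
    "bucket": ["my-bucket"],
    "dynamic": [{"logging": {"for_each": ["arn:aws:s3:::logs"]}}],
}

# Same idea as the patch: copy each dynamic block's for_each value onto the
# resource under the block's own name, so later checks can see it.
for dynamic_block in named_resource.get("dynamic", []):
    for field_name, field_value in dynamic_block.items():
        named_resource[field_name] = field_value["for_each"]

assert named_resource["logging"] == ["arn:aws:s3:::logs"]
```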
gh_patches_debug_15817
rasdani/github-patches
git_diff
OpenMined__PySyft-3588
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Disable manual register() ids in syft.grid.register() **Is your feature request related to a problem? Please describe.** It is a security risk for people to specify their own IDs given that GridNetwork will let you connect to anyone whose id you already know. Thus, we should disable the ability for people to specify their own ID and replace it with a randomly generated hash. This hash should be printed with clear instructions ("Send this to whomever you'd like to connect with") when register() is called. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `syft/grid/__init__.py` Content: ``` 1 from .network import Network 2 3 DEFAULT_NETWORK_URL = "ws://ec2-13-59-45-128.us-east-2.compute.amazonaws.com" 4 5 6 def register(node_id: str, **kwargs): 7 """ Add this process as a new peer registering it in the grid network. 8 9 Args: 10 node_id: Id used to identify this node. 11 Returns: 12 peer: Peer Network instance. 13 """ 14 if not kwargs: 15 args = args = {"max_size": None, "timeout": 444, "url": DEFAULT_NETWORK_URL} 16 else: 17 args = kwargs 18 19 peer = Network(node_id, **args) 20 peer.start() 21 return peer 22 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/syft/grid/__init__.py b/syft/grid/__init__.py --- a/syft/grid/__init__.py +++ b/syft/grid/__init__.py @@ -1,13 +1,12 @@ from .network import Network +import uuid DEFAULT_NETWORK_URL = "ws://ec2-13-59-45-128.us-east-2.compute.amazonaws.com" -def register(node_id: str, **kwargs): +def register(**kwargs): """ Add this process as a new peer registering it in the grid network. - Args: - node_id: Id used to identify this node. Returns: peer: Peer Network instance. """ @@ -16,6 +15,8 @@ else: args = kwargs - peer = Network(node_id, **args) + peer_id = str(uuid.uuid4()) + peer = Network(peer_id, **args) peer.start() + return peer
{"golden_diff": "diff --git a/syft/grid/__init__.py b/syft/grid/__init__.py\n--- a/syft/grid/__init__.py\n+++ b/syft/grid/__init__.py\n@@ -1,13 +1,12 @@\n from .network import Network\n+import uuid\n \n DEFAULT_NETWORK_URL = \"ws://ec2-13-59-45-128.us-east-2.compute.amazonaws.com\"\n \n \n-def register(node_id: str, **kwargs):\n+def register(**kwargs):\n \"\"\" Add this process as a new peer registering it in the grid network.\n \n- Args:\n- node_id: Id used to identify this node.\n Returns:\n peer: Peer Network instance.\n \"\"\"\n@@ -16,6 +15,8 @@\n else:\n args = kwargs\n \n- peer = Network(node_id, **args)\n+ peer_id = str(uuid.uuid4())\n+ peer = Network(peer_id, **args)\n peer.start()\n+\n return peer\n", "issue": "Disable manual register() ids in syft.grid.register()\n**Is your feature request related to a problem? Please describe.**\r\nIt is a security risk for people to specify their own IDs given that GridNetwork will let you connect to anyone whose id you already know. Thus, we should disable the ability for people to specify their own ID and replace it with a randomly generated hash.\r\n\r\nThis hash should be printed with clear instructions (\"Send this to whomever you'd like to connect with\") when register() is called.\n", "before_files": [{"content": "from .network import Network\n\nDEFAULT_NETWORK_URL = \"ws://ec2-13-59-45-128.us-east-2.compute.amazonaws.com\"\n\n\ndef register(node_id: str, **kwargs):\n \"\"\" Add this process as a new peer registering it in the grid network.\n \n Args:\n node_id: Id used to identify this node.\n Returns:\n peer: Peer Network instance.\n \"\"\"\n if not kwargs:\n args = args = {\"max_size\": None, \"timeout\": 444, \"url\": DEFAULT_NETWORK_URL}\n else:\n args = kwargs\n\n peer = Network(node_id, **args)\n peer.start()\n return peer\n", "path": "syft/grid/__init__.py"}], "after_files": [{"content": "from .network import Network\nimport uuid\n\nDEFAULT_NETWORK_URL = \"ws://ec2-13-59-45-128.us-east-2.compute.amazonaws.com\"\n\n\ndef register(**kwargs):\n \"\"\" Add this process as a new peer registering it in the grid network.\n \n Returns:\n peer: Peer Network instance.\n \"\"\"\n if not kwargs:\n args = args = {\"max_size\": None, \"timeout\": 444, \"url\": DEFAULT_NETWORK_URL}\n else:\n args = kwargs\n\n peer_id = str(uuid.uuid4())\n peer = Network(peer_id, **args)\n peer.start()\n\n return peer\n", "path": "syft/grid/__init__.py"}]}
550
219
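The PySyft diff drops the caller-chosen `node_id` and generates a random identifier instead. A short sketch of that generation step; the helper name and the printed instruction paraphrase the issue text and are not part of the patch itself:

```python
import uuid


def make_peer_id() -> str:
    """Random identifier of the kind the patched register() creates."""
    return str(uuid.uuid4())


peer_id = make_peer_id()
print(f"Send this id to whomever you'd like to connect with: {peer_id}")
```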
gh_patches_debug_42215
rasdani/github-patches
git_diff
sktime__sktime-1442
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- ARIMA and AutoARIMA should return pandas.NA when value cannot be predicted, e.g., first index in-sample for start=0, d=1 Update 2021-09-15 - re-opened since a recurring issue. Not a bug, but an interface convention - currently ARIMA, AutoARIMA and other learners fail when asked for a prediction that is undefined, e.g., difference parameter d=1 and first in-sample index (start=0). It would be more sensible if `pandas.NA` is returned instead for such indices, perhaps a warning should be raised too that `pandas.NA` are produced. In first instance, we should update ARIMA and AutoARIMA with this convention. --- **Describe the bug** When using `Imputer` with `forecaster = AutoARIMA()` the error "ValueError: In-sample predictions undefined for start=0 when d=1" is raised. ``` --------------------------------------------------------------------------- ValueError Traceback (most recent call last) <ipython-input-11-2d05f4822fd6> in <module>() 4 5 transformer = Imputer(method="forecaster", forecaster=AutoARIMA()).fit(Z=df.value) ----> 6 y_hat = transformer.transform(Z=df.value) /usr/local/lib/python3.7/dist-packages/sktime/transformations/series/impute.py in transform(self, Z, X) 102 z_pred = forecaster.fit( 103 z.fillna(method="ffill").fillna(method="backfill") --> 104 ).predict(fh=fh_ins) 105 # fill with trend values 106 z = z.fillna(value=z_pred) /usr/local/lib/python3.7/dist-packages/sktime/forecasting/base/_sktime.py in predict(self, fh, X, return_pred_int, alpha) 226 self.check_is_fitted() 227 self._set_fh(fh) --> 228 return self._predict(self.fh, X, return_pred_int=return_pred_int, alpha=alpha) 229 230 def compute_pred_int(self, y_pred, alpha=DEFAULT_ALPHA): /usr/local/lib/python3.7/dist-packages/sktime/forecasting/base/adapters/_pmdarima.py in _predict(self, fh, X, return_pred_int, alpha) 58 # all values are in-sample 59 elif fh.is_all_in_sample(self.cutoff): ---> 60 return self._predict_in_sample(fh_ins, **kwargs) 61 62 # both in-sample and out-of-sample values /usr/local/lib/python3.7/dist-packages/sktime/forecasting/base/adapters/_pmdarima.py in _predict_in_sample(self, fh, X, return_pred_int, alpha) 80 X=X, 81 return_conf_int=return_pred_int, ---> 82 alpha=alpha, 83 ) 84 /usr/local/lib/python3.7/dist-packages/pmdarima/utils/metaestimators.py in <lambda>(*args, **kwargs) 51 52 # lambda, but not partial, allows help() to work with update_wrapper ---> 53 out = (lambda *args, **kwargs: self.fn(obj, *args, **kwargs)) 54 # update the docstring of the returned function 55 update_wrapper(out, self.fn) /usr/local/lib/python3.7/dist-packages/pmdarima/arima/auto.py in predict_in_sample(self, X, start, end, dynamic, return_conf_int, alpha, typ, **kwargs) 242 return_conf_int=return_conf_int, 243 alpha=alpha, --> 244 typ=typ, 245 ) 246 /usr/local/lib/python3.7/dist-packages/pmdarima/arima/arima.py in predict_in_sample(self, X, start, end, dynamic, return_conf_int, alpha, **kwargs) 584 if start is not None and start < d: 585 raise ValueError("In-sample predictions undefined for start={0} " --> 586 "when d={1}".format(start, d)) 587 588 # if we fit with exog, make sure one was passed: ValueError: In-sample predictions undefined for start=0 when d=1 ``` **To Reproduce** Data: [df.csv](https://github.com/alan-turing-institute/sktime/files/6426660/df.csv) ```python import pandas as pd from sktime.transformations.series.impute import Imputer from sktime.forecasting.arima import AutoARIMA df = 
pd.read_csv('df.csv') df['timestamp'] = pd.DatetimeIndex(df['timestamp']).to_period('D') df = df.set_index('timestamp') transformer = Imputer(method="forecaster", forecaster=AutoARIMA()).fit(Z=df.value) y_hat = transformer.transform(Z=df.value) ``` **Expected behavior** `transformer.transform(Z)` should return `Z` with NAn values replaced by AutoArima predictions. **Additional context** I think the problem is in `/usr/local/lib/python3.7/dist-packages/sktime/transformations/series/impute.py` in `transform(self, Z, X)`, line 100, because all the points are being passed to `fh_ins` instead of just the points with NaN. ```python 100 fh_ins = -np.arange(len(z)) 101 # fill NaN before fitting with ffill and backfill (heuristic) 102 z_pred = forecaster.fit( 103 z.fillna(method="ffill").fillna(method="backfill") 104 ).predict(fh=fh_ins) 105 # fill with trend values 106 z = z.fillna(value=z_pred) ``` **Versions** <details> ``` System: python: 3.7.10 (default, Feb 20 2021, 21:17:23) [GCC 7.5.0] executable: /usr/bin/python3 machine: Linux-4.19.112+-x86_64-with-Ubuntu-18.04-bionic Python dependencies: pip: 19.3.1 setuptools: 56.0.0 sklearn: 0.24.2 sktime: 0.6.0 statsmodels: 0.12.2 numpy: 1.19.5 scipy: 1.4.1 Cython: 0.29.22 pandas: 1.1.5 matplotlib: 3.2.2 joblib: 1.0.1 numba: 0.51.2 pmdarima: 1.8.2 tsfresh: None ``` </details> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `sktime/forecasting/base/adapters/_pmdarima.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 # !/usr/bin/env python3 -u 3 # copyright: sktime developers, BSD-3-Clause License (see LICENSE file) 4 """Implements adapter for pmdarima forecasters to be used in sktime framework.""" 5 6 __author__ = ["Markus Löning", "Hongyi Yang"] 7 __all__ = ["_PmdArimaAdapter"] 8 9 import pandas as pd 10 11 from sktime.forecasting.base._base import DEFAULT_ALPHA 12 from sktime.forecasting.base import BaseForecaster 13 14 15 class _PmdArimaAdapter(BaseForecaster): 16 """Base class for interfacing pmdarima.""" 17 18 _tags = { 19 "ignores-exogeneous-X": True, 20 "capability:pred_int": True, 21 "requires-fh-in-fit": False, 22 "handles-missing-data": False, 23 } 24 25 def __init__(self): 26 self._forecaster = None 27 super(_PmdArimaAdapter, self).__init__() 28 29 def _instantiate_model(self): 30 raise NotImplementedError("abstract method") 31 32 def _fit(self, y, X=None, fh=None, **fit_params): 33 """Fit to training data. 34 35 Parameters 36 ---------- 37 y : pd.Series 38 Target time series to which to fit the forecaster. 39 fh : int, list, np.array or ForecastingHorizon, optional (default=None) 40 The forecasters horizon with the steps ahead to to predict. 41 X : pd.DataFrame, optional (default=None) 42 Exogenous variables are ignored 43 44 Returns 45 ------- 46 self : returns an instance of self. 
47 """ 48 self._forecaster = self._instantiate_model() 49 self._forecaster.fit(y, X=X, **fit_params) 50 return self 51 52 def _predict(self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA): 53 # distinguish between in-sample and out-of-sample prediction 54 fh_oos = fh.to_out_of_sample(self.cutoff) 55 fh_ins = fh.to_in_sample(self.cutoff) 56 57 kwargs = {"X": X, "return_pred_int": return_pred_int, "alpha": alpha} 58 59 # all values are out-of-sample 60 if fh.is_all_out_of_sample(self.cutoff): 61 return self._predict_fixed_cutoff(fh_oos, **kwargs) 62 63 # all values are in-sample 64 elif fh.is_all_in_sample(self.cutoff): 65 return self._predict_in_sample(fh_ins, **kwargs) 66 67 # both in-sample and out-of-sample values 68 else: 69 y_ins = self._predict_in_sample(fh_ins, **kwargs) 70 y_oos = self._predict_fixed_cutoff(fh_oos, **kwargs) 71 return y_ins.append(y_oos) 72 73 def _predict_in_sample( 74 self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA 75 ): 76 if isinstance(alpha, (list, tuple)): 77 raise NotImplementedError("multiple `alpha` values are not yet supported") 78 79 # for in-sample predictions, pmdarima requires zero-based 80 # integer indicies 81 start, end = fh.to_absolute_int(self._y.index[0], self.cutoff)[[0, -1]] 82 result = self._forecaster.predict_in_sample( 83 start=start, 84 end=end, 85 X=X, 86 return_conf_int=return_pred_int, 87 alpha=alpha, 88 ) 89 90 fh_abs = fh.to_absolute(self.cutoff) 91 fh_idx = fh.to_indexer(self.cutoff, from_cutoff=False) 92 if return_pred_int: 93 # unpack and format results 94 y_pred, pred_int = result 95 y_pred = pd.Series(y_pred[fh_idx], index=fh_abs) 96 pred_int = pd.DataFrame( 97 pred_int[fh_idx, :], index=fh_abs, columns=["lower", "upper"] 98 ) 99 return y_pred, pred_int 100 101 else: 102 return pd.Series(result[fh_idx], index=fh_abs) 103 104 def _predict_fixed_cutoff( 105 self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA 106 ): 107 # make prediction 108 n_periods = int(fh.to_relative(self.cutoff)[-1]) 109 result = self._forecaster.predict( 110 n_periods=n_periods, 111 X=X, 112 return_conf_int=return_pred_int, 113 alpha=alpha, 114 ) 115 116 fh_abs = fh.to_absolute(self.cutoff) 117 fh_idx = fh.to_indexer(self.cutoff) 118 if return_pred_int: 119 y_pred, pred_int = result 120 y_pred = pd.Series(y_pred[fh_idx], index=fh_abs) 121 pred_int = pd.DataFrame( 122 pred_int[fh_idx, :], index=fh_abs, columns=["lower", "upper"] 123 ) 124 return y_pred, pred_int 125 else: 126 return pd.Series(result[fh_idx], index=fh_abs) 127 128 def get_fitted_params(self): 129 """Get fitted parameters. 
130 131 Returns 132 ------- 133 fitted_params : dict 134 """ 135 self.check_is_fitted() 136 names = self._get_fitted_param_names() 137 params = self._get_fitted_params() 138 fitted_params = {name: param for name, param in zip(names, params)} 139 140 if hasattr(self._forecaster, "model_"): # AutoARIMA 141 res = self._forecaster.model_.arima_res_ 142 elif hasattr(self._forecaster, "arima_res_"): # ARIMA 143 res = self._forecaster.arima_res_ 144 else: 145 res = None 146 147 for name in ["aic", "aicc", "bic", "hqic"]: 148 fitted_params[name] = getattr(res, name, None) 149 150 return fitted_params 151 152 def _get_fitted_params(self): 153 # Return parameter values under `arima_res_` 154 if hasattr(self._forecaster, "model_"): # AutoARIMA 155 return self._forecaster.model_.arima_res_._results.params 156 elif hasattr(self._forecaster, "arima_res_"): # ARIMA 157 return self._forecaster.arima_res_._results.params 158 else: 159 raise NotImplementedError() 160 161 def _get_fitted_param_names(self): 162 # Return parameter names under `arima_res_` 163 if hasattr(self._forecaster, "model_"): # AutoARIMA 164 return self._forecaster.model_.arima_res_._results.param_names 165 elif hasattr(self._forecaster, "arima_res_"): # ARIMA 166 return self._forecaster.arima_res_._results.param_names 167 else: 168 raise NotImplementedError() 169 170 def summary(self): 171 """Summary of the fitted model.""" 172 return self._forecaster.summary() 173 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sktime/forecasting/base/adapters/_pmdarima.py b/sktime/forecasting/base/adapters/_pmdarima.py --- a/sktime/forecasting/base/adapters/_pmdarima.py +++ b/sktime/forecasting/base/adapters/_pmdarima.py @@ -7,7 +7,6 @@ __all__ = ["_PmdArimaAdapter"] import pandas as pd - from sktime.forecasting.base._base import DEFAULT_ALPHA from sktime.forecasting.base import BaseForecaster @@ -66,9 +65,18 @@ # both in-sample and out-of-sample values else: - y_ins = self._predict_in_sample(fh_ins, **kwargs) - y_oos = self._predict_fixed_cutoff(fh_oos, **kwargs) - return y_ins.append(y_oos) + if return_pred_int: + y_ins_pred, y_ins_pred_int = self._predict_in_sample(fh_ins, **kwargs) + y_oos_pred, y_oos_pred_int = self._predict_fixed_cutoff( + fh_oos, **kwargs + ) + return y_ins_pred.append(y_oos_pred), y_ins_pred_int.append( + y_oos_pred_int + ) + else: + y_ins = self._predict_in_sample(fh_ins, **kwargs) + y_oos = self._predict_fixed_cutoff(fh_oos, **kwargs) + return y_ins.append(y_oos) def _predict_in_sample( self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA @@ -76,9 +84,34 @@ if isinstance(alpha, (list, tuple)): raise NotImplementedError("multiple `alpha` values are not yet supported") - # for in-sample predictions, pmdarima requires zero-based - # integer indicies + if hasattr(self, "order"): + diff_order = self.order[1] + else: + diff_order = self._forecaster.model_.order[1] + + # Initialize return objects + fh_abs = fh.to_absolute(self.cutoff).to_numpy() + fh_idx = fh.to_indexer(self.cutoff, from_cutoff=False) + y_pred = pd.Series(index=fh_abs) + pred_int = pd.DataFrame(index=fh_abs, columns=["lower", "upper"]) + + # for in-sample predictions, pmdarima requires zero-based integer indicies start, end = fh.to_absolute_int(self._y.index[0], self.cutoff)[[0, -1]] + if start < 0: + # Can't forecasts earlier to train starting point + raise ValueError("Can't make predictions earlier to train starting point") + elif start < diff_order: + # Can't forecasts earlier to arima's differencing order + # But we return NaN for these supposedly forecastable points + start = diff_order + if end < start: + # since we might have forced `start` to surpass `end` + end = diff_order + # get rid of unforcastable points + fh_abs = fh_abs[fh_idx >= diff_order] + # reindex accordingly + fh_idx = fh_idx[fh_idx >= diff_order] - diff_order + result = self._forecaster.predict_in_sample( start=start, end=end, @@ -87,19 +120,16 @@ alpha=alpha, ) - fh_abs = fh.to_absolute(self.cutoff) - fh_idx = fh.to_indexer(self.cutoff, from_cutoff=False) if return_pred_int: - # unpack and format results - y_pred, pred_int = result - y_pred = pd.Series(y_pred[fh_idx], index=fh_abs) - pred_int = pd.DataFrame( - pred_int[fh_idx, :], index=fh_abs, columns=["lower", "upper"] + # unpack results + y_pred.loc[fh_abs], pred_int.loc[fh_abs] = ( + result[0][fh_idx], + result[1][fh_idx, :], ) return y_pred, pred_int - else: - return pd.Series(result[fh_idx], index=fh_abs) + y_pred.loc[fh_abs] = result[fh_idx] + return y_pred def _predict_fixed_cutoff( self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA
{"golden_diff": "diff --git a/sktime/forecasting/base/adapters/_pmdarima.py b/sktime/forecasting/base/adapters/_pmdarima.py\n--- a/sktime/forecasting/base/adapters/_pmdarima.py\n+++ b/sktime/forecasting/base/adapters/_pmdarima.py\n@@ -7,7 +7,6 @@\n __all__ = [\"_PmdArimaAdapter\"]\n \n import pandas as pd\n-\n from sktime.forecasting.base._base import DEFAULT_ALPHA\n from sktime.forecasting.base import BaseForecaster\n \n@@ -66,9 +65,18 @@\n \n # both in-sample and out-of-sample values\n else:\n- y_ins = self._predict_in_sample(fh_ins, **kwargs)\n- y_oos = self._predict_fixed_cutoff(fh_oos, **kwargs)\n- return y_ins.append(y_oos)\n+ if return_pred_int:\n+ y_ins_pred, y_ins_pred_int = self._predict_in_sample(fh_ins, **kwargs)\n+ y_oos_pred, y_oos_pred_int = self._predict_fixed_cutoff(\n+ fh_oos, **kwargs\n+ )\n+ return y_ins_pred.append(y_oos_pred), y_ins_pred_int.append(\n+ y_oos_pred_int\n+ )\n+ else:\n+ y_ins = self._predict_in_sample(fh_ins, **kwargs)\n+ y_oos = self._predict_fixed_cutoff(fh_oos, **kwargs)\n+ return y_ins.append(y_oos)\n \n def _predict_in_sample(\n self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA\n@@ -76,9 +84,34 @@\n if isinstance(alpha, (list, tuple)):\n raise NotImplementedError(\"multiple `alpha` values are not yet supported\")\n \n- # for in-sample predictions, pmdarima requires zero-based\n- # integer indicies\n+ if hasattr(self, \"order\"):\n+ diff_order = self.order[1]\n+ else:\n+ diff_order = self._forecaster.model_.order[1]\n+\n+ # Initialize return objects\n+ fh_abs = fh.to_absolute(self.cutoff).to_numpy()\n+ fh_idx = fh.to_indexer(self.cutoff, from_cutoff=False)\n+ y_pred = pd.Series(index=fh_abs)\n+ pred_int = pd.DataFrame(index=fh_abs, columns=[\"lower\", \"upper\"])\n+\n+ # for in-sample predictions, pmdarima requires zero-based integer indicies\n start, end = fh.to_absolute_int(self._y.index[0], self.cutoff)[[0, -1]]\n+ if start < 0:\n+ # Can't forecasts earlier to train starting point\n+ raise ValueError(\"Can't make predictions earlier to train starting point\")\n+ elif start < diff_order:\n+ # Can't forecasts earlier to arima's differencing order\n+ # But we return NaN for these supposedly forecastable points\n+ start = diff_order\n+ if end < start:\n+ # since we might have forced `start` to surpass `end`\n+ end = diff_order\n+ # get rid of unforcastable points\n+ fh_abs = fh_abs[fh_idx >= diff_order]\n+ # reindex accordingly\n+ fh_idx = fh_idx[fh_idx >= diff_order] - diff_order\n+\n result = self._forecaster.predict_in_sample(\n start=start,\n end=end,\n@@ -87,19 +120,16 @@\n alpha=alpha,\n )\n \n- fh_abs = fh.to_absolute(self.cutoff)\n- fh_idx = fh.to_indexer(self.cutoff, from_cutoff=False)\n if return_pred_int:\n- # unpack and format results\n- y_pred, pred_int = result\n- y_pred = pd.Series(y_pred[fh_idx], index=fh_abs)\n- pred_int = pd.DataFrame(\n- pred_int[fh_idx, :], index=fh_abs, columns=[\"lower\", \"upper\"]\n+ # unpack results\n+ y_pred.loc[fh_abs], pred_int.loc[fh_abs] = (\n+ result[0][fh_idx],\n+ result[1][fh_idx, :],\n )\n return y_pred, pred_int\n-\n else:\n- return pd.Series(result[fh_idx], index=fh_abs)\n+ y_pred.loc[fh_abs] = result[fh_idx]\n+ return y_pred\n \n def _predict_fixed_cutoff(\n self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA\n", "issue": "ARIMA and AutoARIMA should return pandas.NA when value cannot be predicted, e.g., first index in-sample for start=0, d=1\nUpdate 2021-09-15 - re-opened since a recurring issue.\r\n\r\nNot a bug, but an interface convention - currently ARIMA, AutoARIMA and 
other learners fail when asked for a prediction that is undefined, e.g., difference parameter d=1 and first in-sample index (start=0).\r\n\r\nIt would be more sensible if `pandas.NA` is returned instead for such indices, perhaps a warning should be raised too that `pandas.NA` are produced.\r\n\r\nIn first instance, we should update ARIMA and AutoARIMA with this convention.\r\n\r\n---\r\n\r\n**Describe the bug**\r\n\r\nWhen using `Imputer` with `forecaster = AutoARIMA()` the error \"ValueError: In-sample predictions undefined for start=0 when d=1\" is raised.\r\n\r\n```\r\n---------------------------------------------------------------------------\r\nValueError Traceback (most recent call last)\r\n<ipython-input-11-2d05f4822fd6> in <module>()\r\n 4 \r\n 5 transformer = Imputer(method=\"forecaster\", forecaster=AutoARIMA()).fit(Z=df.value)\r\n----> 6 y_hat = transformer.transform(Z=df.value)\r\n\r\n/usr/local/lib/python3.7/dist-packages/sktime/transformations/series/impute.py in transform(self, Z, X)\r\n 102 z_pred = forecaster.fit(\r\n 103 z.fillna(method=\"ffill\").fillna(method=\"backfill\")\r\n--> 104 ).predict(fh=fh_ins)\r\n 105 # fill with trend values\r\n 106 z = z.fillna(value=z_pred)\r\n\r\n/usr/local/lib/python3.7/dist-packages/sktime/forecasting/base/_sktime.py in predict(self, fh, X, return_pred_int, alpha)\r\n 226 self.check_is_fitted()\r\n 227 self._set_fh(fh)\r\n--> 228 return self._predict(self.fh, X, return_pred_int=return_pred_int, alpha=alpha)\r\n 229 \r\n 230 def compute_pred_int(self, y_pred, alpha=DEFAULT_ALPHA):\r\n\r\n/usr/local/lib/python3.7/dist-packages/sktime/forecasting/base/adapters/_pmdarima.py in _predict(self, fh, X, return_pred_int, alpha)\r\n 58 # all values are in-sample\r\n 59 elif fh.is_all_in_sample(self.cutoff):\r\n---> 60 return self._predict_in_sample(fh_ins, **kwargs)\r\n 61 \r\n 62 # both in-sample and out-of-sample values\r\n\r\n/usr/local/lib/python3.7/dist-packages/sktime/forecasting/base/adapters/_pmdarima.py in _predict_in_sample(self, fh, X, return_pred_int, alpha)\r\n 80 X=X,\r\n 81 return_conf_int=return_pred_int,\r\n---> 82 alpha=alpha,\r\n 83 )\r\n 84 \r\n\r\n/usr/local/lib/python3.7/dist-packages/pmdarima/utils/metaestimators.py in <lambda>(*args, **kwargs)\r\n 51 \r\n 52 # lambda, but not partial, allows help() to work with update_wrapper\r\n---> 53 out = (lambda *args, **kwargs: self.fn(obj, *args, **kwargs))\r\n 54 # update the docstring of the returned function\r\n 55 update_wrapper(out, self.fn)\r\n\r\n/usr/local/lib/python3.7/dist-packages/pmdarima/arima/auto.py in predict_in_sample(self, X, start, end, dynamic, return_conf_int, alpha, typ, **kwargs)\r\n 242 return_conf_int=return_conf_int,\r\n 243 alpha=alpha,\r\n--> 244 typ=typ,\r\n 245 )\r\n 246 \r\n\r\n/usr/local/lib/python3.7/dist-packages/pmdarima/arima/arima.py in predict_in_sample(self, X, start, end, dynamic, return_conf_int, alpha, **kwargs)\r\n 584 if start is not None and start < d:\r\n 585 raise ValueError(\"In-sample predictions undefined for start={0} \"\r\n--> 586 \"when d={1}\".format(start, d))\r\n 587 \r\n 588 # if we fit with exog, make sure one was passed:\r\n\r\nValueError: In-sample predictions undefined for start=0 when d=1\r\n```\r\n\r\n**To Reproduce**\r\n\r\nData:\r\n[df.csv](https://github.com/alan-turing-institute/sktime/files/6426660/df.csv) \r\n\r\n```python\r\nimport pandas as pd\r\nfrom sktime.transformations.series.impute import Imputer\r\nfrom sktime.forecasting.arima import AutoARIMA\r\n\r\ndf = pd.read_csv('df.csv') \r\ndf['timestamp'] = 
pd.DatetimeIndex(df['timestamp']).to_period('D')\r\ndf = df.set_index('timestamp')\r\n\r\ntransformer = Imputer(method=\"forecaster\", forecaster=AutoARIMA()).fit(Z=df.value)\r\ny_hat = transformer.transform(Z=df.value)\r\n```\r\n\r\n**Expected behavior**\r\n\r\n`transformer.transform(Z)` should return `Z` with NAn values replaced by AutoArima predictions.\r\n\r\n**Additional context**\r\n\r\nI think the problem is in `/usr/local/lib/python3.7/dist-packages/sktime/transformations/series/impute.py` in `transform(self, Z, X)`, line 100, because all the points are being passed to `fh_ins` instead of just the points with NaN.\r\n\r\n```python\r\n100 fh_ins = -np.arange(len(z))\r\n101 # fill NaN before fitting with ffill and backfill (heuristic)\r\n102 z_pred = forecaster.fit(\r\n103 z.fillna(method=\"ffill\").fillna(method=\"backfill\")\r\n104 ).predict(fh=fh_ins)\r\n105 # fill with trend values\r\n106 z = z.fillna(value=z_pred)\r\n\r\n```\r\n\r\n**Versions**\r\n<details>\r\n\r\n```\r\nSystem:\r\n python: 3.7.10 (default, Feb 20 2021, 21:17:23) [GCC 7.5.0]\r\nexecutable: /usr/bin/python3\r\n machine: Linux-4.19.112+-x86_64-with-Ubuntu-18.04-bionic\r\n\r\nPython dependencies:\r\n pip: 19.3.1\r\n setuptools: 56.0.0\r\n sklearn: 0.24.2\r\n sktime: 0.6.0\r\n statsmodels: 0.12.2\r\n numpy: 1.19.5\r\n scipy: 1.4.1\r\n Cython: 0.29.22\r\n pandas: 1.1.5\r\n matplotlib: 3.2.2\r\n joblib: 1.0.1\r\n numba: 0.51.2\r\n pmdarima: 1.8.2\r\n tsfresh: None\r\n```\r\n\r\n</details>\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# !/usr/bin/env python3 -u\n# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)\n\"\"\"Implements adapter for pmdarima forecasters to be used in sktime framework.\"\"\"\n\n__author__ = [\"Markus L\u00f6ning\", \"Hongyi Yang\"]\n__all__ = [\"_PmdArimaAdapter\"]\n\nimport pandas as pd\n\nfrom sktime.forecasting.base._base import DEFAULT_ALPHA\nfrom sktime.forecasting.base import BaseForecaster\n\n\nclass _PmdArimaAdapter(BaseForecaster):\n \"\"\"Base class for interfacing pmdarima.\"\"\"\n\n _tags = {\n \"ignores-exogeneous-X\": True,\n \"capability:pred_int\": True,\n \"requires-fh-in-fit\": False,\n \"handles-missing-data\": False,\n }\n\n def __init__(self):\n self._forecaster = None\n super(_PmdArimaAdapter, self).__init__()\n\n def _instantiate_model(self):\n raise NotImplementedError(\"abstract method\")\n\n def _fit(self, y, X=None, fh=None, **fit_params):\n \"\"\"Fit to training data.\n\n Parameters\n ----------\n y : pd.Series\n Target time series to which to fit the forecaster.\n fh : int, list, np.array or ForecastingHorizon, optional (default=None)\n The forecasters horizon with the steps ahead to to predict.\n X : pd.DataFrame, optional (default=None)\n Exogenous variables are ignored\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n self._forecaster = self._instantiate_model()\n self._forecaster.fit(y, X=X, **fit_params)\n return self\n\n def _predict(self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA):\n # distinguish between in-sample and out-of-sample prediction\n fh_oos = fh.to_out_of_sample(self.cutoff)\n fh_ins = fh.to_in_sample(self.cutoff)\n\n kwargs = {\"X\": X, \"return_pred_int\": return_pred_int, \"alpha\": alpha}\n\n # all values are out-of-sample\n if fh.is_all_out_of_sample(self.cutoff):\n return self._predict_fixed_cutoff(fh_oos, **kwargs)\n\n # all values are in-sample\n elif fh.is_all_in_sample(self.cutoff):\n return self._predict_in_sample(fh_ins, **kwargs)\n\n # both in-sample and 
out-of-sample values\n else:\n y_ins = self._predict_in_sample(fh_ins, **kwargs)\n y_oos = self._predict_fixed_cutoff(fh_oos, **kwargs)\n return y_ins.append(y_oos)\n\n def _predict_in_sample(\n self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA\n ):\n if isinstance(alpha, (list, tuple)):\n raise NotImplementedError(\"multiple `alpha` values are not yet supported\")\n\n # for in-sample predictions, pmdarima requires zero-based\n # integer indicies\n start, end = fh.to_absolute_int(self._y.index[0], self.cutoff)[[0, -1]]\n result = self._forecaster.predict_in_sample(\n start=start,\n end=end,\n X=X,\n return_conf_int=return_pred_int,\n alpha=alpha,\n )\n\n fh_abs = fh.to_absolute(self.cutoff)\n fh_idx = fh.to_indexer(self.cutoff, from_cutoff=False)\n if return_pred_int:\n # unpack and format results\n y_pred, pred_int = result\n y_pred = pd.Series(y_pred[fh_idx], index=fh_abs)\n pred_int = pd.DataFrame(\n pred_int[fh_idx, :], index=fh_abs, columns=[\"lower\", \"upper\"]\n )\n return y_pred, pred_int\n\n else:\n return pd.Series(result[fh_idx], index=fh_abs)\n\n def _predict_fixed_cutoff(\n self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA\n ):\n # make prediction\n n_periods = int(fh.to_relative(self.cutoff)[-1])\n result = self._forecaster.predict(\n n_periods=n_periods,\n X=X,\n return_conf_int=return_pred_int,\n alpha=alpha,\n )\n\n fh_abs = fh.to_absolute(self.cutoff)\n fh_idx = fh.to_indexer(self.cutoff)\n if return_pred_int:\n y_pred, pred_int = result\n y_pred = pd.Series(y_pred[fh_idx], index=fh_abs)\n pred_int = pd.DataFrame(\n pred_int[fh_idx, :], index=fh_abs, columns=[\"lower\", \"upper\"]\n )\n return y_pred, pred_int\n else:\n return pd.Series(result[fh_idx], index=fh_abs)\n\n def get_fitted_params(self):\n \"\"\"Get fitted parameters.\n\n Returns\n -------\n fitted_params : dict\n \"\"\"\n self.check_is_fitted()\n names = self._get_fitted_param_names()\n params = self._get_fitted_params()\n fitted_params = {name: param for name, param in zip(names, params)}\n\n if hasattr(self._forecaster, \"model_\"): # AutoARIMA\n res = self._forecaster.model_.arima_res_\n elif hasattr(self._forecaster, \"arima_res_\"): # ARIMA\n res = self._forecaster.arima_res_\n else:\n res = None\n\n for name in [\"aic\", \"aicc\", \"bic\", \"hqic\"]:\n fitted_params[name] = getattr(res, name, None)\n\n return fitted_params\n\n def _get_fitted_params(self):\n # Return parameter values under `arima_res_`\n if hasattr(self._forecaster, \"model_\"): # AutoARIMA\n return self._forecaster.model_.arima_res_._results.params\n elif hasattr(self._forecaster, \"arima_res_\"): # ARIMA\n return self._forecaster.arima_res_._results.params\n else:\n raise NotImplementedError()\n\n def _get_fitted_param_names(self):\n # Return parameter names under `arima_res_`\n if hasattr(self._forecaster, \"model_\"): # AutoARIMA\n return self._forecaster.model_.arima_res_._results.param_names\n elif hasattr(self._forecaster, \"arima_res_\"): # ARIMA\n return self._forecaster.arima_res_._results.param_names\n else:\n raise NotImplementedError()\n\n def summary(self):\n \"\"\"Summary of the fitted model.\"\"\"\n return self._forecaster.summary()\n", "path": "sktime/forecasting/base/adapters/_pmdarima.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# !/usr/bin/env python3 -u\n# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)\n\"\"\"Implements adapter for pmdarima forecasters to be used in sktime framework.\"\"\"\n\n__author__ = [\"Markus L\u00f6ning\", \"Hongyi Yang\"]\n__all__ 
= [\"_PmdArimaAdapter\"]\n\nimport pandas as pd\nfrom sktime.forecasting.base._base import DEFAULT_ALPHA\nfrom sktime.forecasting.base import BaseForecaster\n\n\nclass _PmdArimaAdapter(BaseForecaster):\n \"\"\"Base class for interfacing pmdarima.\"\"\"\n\n _tags = {\n \"ignores-exogeneous-X\": True,\n \"capability:pred_int\": True,\n \"requires-fh-in-fit\": False,\n \"handles-missing-data\": False,\n }\n\n def __init__(self):\n self._forecaster = None\n super(_PmdArimaAdapter, self).__init__()\n\n def _instantiate_model(self):\n raise NotImplementedError(\"abstract method\")\n\n def _fit(self, y, X=None, fh=None, **fit_params):\n \"\"\"Fit to training data.\n\n Parameters\n ----------\n y : pd.Series\n Target time series to which to fit the forecaster.\n fh : int, list, np.array or ForecastingHorizon, optional (default=None)\n The forecasters horizon with the steps ahead to to predict.\n X : pd.DataFrame, optional (default=None)\n Exogenous variables are ignored\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n self._forecaster = self._instantiate_model()\n self._forecaster.fit(y, X=X, **fit_params)\n return self\n\n def _predict(self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA):\n # distinguish between in-sample and out-of-sample prediction\n fh_oos = fh.to_out_of_sample(self.cutoff)\n fh_ins = fh.to_in_sample(self.cutoff)\n\n kwargs = {\"X\": X, \"return_pred_int\": return_pred_int, \"alpha\": alpha}\n\n # all values are out-of-sample\n if fh.is_all_out_of_sample(self.cutoff):\n return self._predict_fixed_cutoff(fh_oos, **kwargs)\n\n # all values are in-sample\n elif fh.is_all_in_sample(self.cutoff):\n return self._predict_in_sample(fh_ins, **kwargs)\n\n # both in-sample and out-of-sample values\n else:\n if return_pred_int:\n y_ins_pred, y_ins_pred_int = self._predict_in_sample(fh_ins, **kwargs)\n y_oos_pred, y_oos_pred_int = self._predict_fixed_cutoff(\n fh_oos, **kwargs\n )\n return y_ins_pred.append(y_oos_pred), y_ins_pred_int.append(\n y_oos_pred_int\n )\n else:\n y_ins = self._predict_in_sample(fh_ins, **kwargs)\n y_oos = self._predict_fixed_cutoff(fh_oos, **kwargs)\n return y_ins.append(y_oos)\n\n def _predict_in_sample(\n self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA\n ):\n if isinstance(alpha, (list, tuple)):\n raise NotImplementedError(\"multiple `alpha` values are not yet supported\")\n\n if hasattr(self, \"order\"):\n diff_order = self.order[1]\n else:\n diff_order = self._forecaster.model_.order[1]\n\n # Initialize return objects\n fh_abs = fh.to_absolute(self.cutoff).to_numpy()\n fh_idx = fh.to_indexer(self.cutoff, from_cutoff=False)\n y_pred = pd.Series(index=fh_abs)\n pred_int = pd.DataFrame(index=fh_abs, columns=[\"lower\", \"upper\"])\n\n # for in-sample predictions, pmdarima requires zero-based integer indicies\n start, end = fh.to_absolute_int(self._y.index[0], self.cutoff)[[0, -1]]\n if start < 0:\n # Can't forecasts earlier to train starting point\n raise ValueError(\"Can't make predictions earlier to train starting point\")\n elif start < diff_order:\n # Can't forecasts earlier to arima's differencing order\n # But we return NaN for these supposedly forecastable points\n start = diff_order\n if end < start:\n # since we might have forced `start` to surpass `end`\n end = diff_order\n # get rid of unforcastable points\n fh_abs = fh_abs[fh_idx >= diff_order]\n # reindex accordingly\n fh_idx = fh_idx[fh_idx >= diff_order] - diff_order\n\n result = self._forecaster.predict_in_sample(\n start=start,\n end=end,\n X=X,\n 
return_conf_int=return_pred_int,\n alpha=alpha,\n )\n\n if return_pred_int:\n # unpack results\n y_pred.loc[fh_abs], pred_int.loc[fh_abs] = (\n result[0][fh_idx],\n result[1][fh_idx, :],\n )\n return y_pred, pred_int\n else:\n y_pred.loc[fh_abs] = result[fh_idx]\n return y_pred\n\n def _predict_fixed_cutoff(\n self, fh, X=None, return_pred_int=False, alpha=DEFAULT_ALPHA\n ):\n # make prediction\n n_periods = int(fh.to_relative(self.cutoff)[-1])\n result = self._forecaster.predict(\n n_periods=n_periods,\n X=X,\n return_conf_int=return_pred_int,\n alpha=alpha,\n )\n\n fh_abs = fh.to_absolute(self.cutoff)\n fh_idx = fh.to_indexer(self.cutoff)\n if return_pred_int:\n y_pred, pred_int = result\n y_pred = pd.Series(y_pred[fh_idx], index=fh_abs)\n pred_int = pd.DataFrame(\n pred_int[fh_idx, :], index=fh_abs, columns=[\"lower\", \"upper\"]\n )\n return y_pred, pred_int\n else:\n return pd.Series(result[fh_idx], index=fh_abs)\n\n def get_fitted_params(self):\n \"\"\"Get fitted parameters.\n\n Returns\n -------\n fitted_params : dict\n \"\"\"\n self.check_is_fitted()\n names = self._get_fitted_param_names()\n params = self._get_fitted_params()\n fitted_params = {name: param for name, param in zip(names, params)}\n\n if hasattr(self._forecaster, \"model_\"): # AutoARIMA\n res = self._forecaster.model_.arima_res_\n elif hasattr(self._forecaster, \"arima_res_\"): # ARIMA\n res = self._forecaster.arima_res_\n else:\n res = None\n\n for name in [\"aic\", \"aicc\", \"bic\", \"hqic\"]:\n fitted_params[name] = getattr(res, name, None)\n\n return fitted_params\n\n def _get_fitted_params(self):\n # Return parameter values under `arima_res_`\n if hasattr(self._forecaster, \"model_\"): # AutoARIMA\n return self._forecaster.model_.arima_res_._results.params\n elif hasattr(self._forecaster, \"arima_res_\"): # ARIMA\n return self._forecaster.arima_res_._results.params\n else:\n raise NotImplementedError()\n\n def _get_fitted_param_names(self):\n # Return parameter names under `arima_res_`\n if hasattr(self._forecaster, \"model_\"): # AutoARIMA\n return self._forecaster.model_.arima_res_._results.param_names\n elif hasattr(self._forecaster, \"arima_res_\"): # ARIMA\n return self._forecaster.arima_res_._results.param_names\n else:\n raise NotImplementedError()\n\n def summary(self):\n \"\"\"Summary of the fitted model.\"\"\"\n return self._forecaster.summary()\n", "path": "sktime/forecasting/base/adapters/_pmdarima.py"}]}
3,727
998
gh_patches_debug_22494
rasdani/github-patches
git_diff
tobymao__sqlglot-3129
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Athena Iceberg Tables parsing issue Hi, I want to parse a SQL Statement that creates an Iceberg table on Athena: ```sql create table if not exists tmp.mytable ( name string ) location 's3://bucket/tmp/mytable/' tblproperties ( 'table_type'='iceberg', 'format'='parquet' ); ``` running ```python stmts = sqlglot.parse(sql, read=sqlglot.Dialects.ATHENA) stmts[0].sql() ``` returns: ```sql CREATE TABLE IF NOT EXISTS tmp.mytable (name TEXT) LOCATION 's3://bucket/tmp/mytable/' WITH ( table_type='iceberg', FORMAT='parquet' ) ``` Unfortunately, the syntax in Athena is different for Iceberg Tables and Hive-style tables. The parsed statement should look like this: ```sql CREATE TABLE IF NOT EXISTS tmp.mytable (name STRING) LOCATION 's3://bucket/tmp/mytable/' TBLPROPERTIES ( 'table_type'='iceberg', 'FORMAT'='parquet' ) ``` Instead of WITH -> TBLPROPERTIES The keys in the this block are wrapped in upper quotes and iceberg has slightly different data types. In this case STRING instead of TEXT https://docs.aws.amazon.com/athena/latest/ug/querying-iceberg-supported-data-types.html https://docs.aws.amazon.com/athena/latest/ug/querying-iceberg-creating-tables.html --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `sqlglot/dialects/athena.py` Content: ``` 1 from __future__ import annotations 2 3 from sqlglot.dialects.trino import Trino 4 from sqlglot.tokens import TokenType 5 6 7 class Athena(Trino): 8 class Parser(Trino.Parser): 9 STATEMENT_PARSERS = { 10 **Trino.Parser.STATEMENT_PARSERS, 11 TokenType.USING: lambda self: self._parse_as_command(self._prev), 12 } 13 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sqlglot/dialects/athena.py b/sqlglot/dialects/athena.py --- a/sqlglot/dialects/athena.py +++ b/sqlglot/dialects/athena.py @@ -1,5 +1,6 @@ from __future__ import annotations +from sqlglot import exp from sqlglot.dialects.trino import Trino from sqlglot.tokens import TokenType @@ -10,3 +11,27 @@ **Trino.Parser.STATEMENT_PARSERS, TokenType.USING: lambda self: self._parse_as_command(self._prev), } + + class Generator(Trino.Generator): + PROPERTIES_LOCATION = { + **Trino.Generator.PROPERTIES_LOCATION, + exp.LocationProperty: exp.Properties.Location.POST_SCHEMA, + } + + TYPE_MAPPING = { + **Trino.Generator.TYPE_MAPPING, + exp.DataType.Type.TEXT: "STRING", + } + + TRANSFORMS = { + **Trino.Generator.TRANSFORMS, + exp.FileFormatProperty: lambda self, e: f"'FORMAT'={self.sql(e, 'this')}", + } + + def property_sql(self, expression: exp.Property) -> str: + return ( + f"{self.property_name(expression, string_key=True)}={self.sql(expression, 'value')}" + ) + + def with_properties(self, properties: exp.Properties) -> str: + return self.properties(properties, prefix=self.seg("TBLPROPERTIES"))
{"golden_diff": "diff --git a/sqlglot/dialects/athena.py b/sqlglot/dialects/athena.py\n--- a/sqlglot/dialects/athena.py\n+++ b/sqlglot/dialects/athena.py\n@@ -1,5 +1,6 @@\n from __future__ import annotations\n \n+from sqlglot import exp\n from sqlglot.dialects.trino import Trino\n from sqlglot.tokens import TokenType\n \n@@ -10,3 +11,27 @@\n **Trino.Parser.STATEMENT_PARSERS,\n TokenType.USING: lambda self: self._parse_as_command(self._prev),\n }\n+\n+ class Generator(Trino.Generator):\n+ PROPERTIES_LOCATION = {\n+ **Trino.Generator.PROPERTIES_LOCATION,\n+ exp.LocationProperty: exp.Properties.Location.POST_SCHEMA,\n+ }\n+\n+ TYPE_MAPPING = {\n+ **Trino.Generator.TYPE_MAPPING,\n+ exp.DataType.Type.TEXT: \"STRING\",\n+ }\n+\n+ TRANSFORMS = {\n+ **Trino.Generator.TRANSFORMS,\n+ exp.FileFormatProperty: lambda self, e: f\"'FORMAT'={self.sql(e, 'this')}\",\n+ }\n+\n+ def property_sql(self, expression: exp.Property) -> str:\n+ return (\n+ f\"{self.property_name(expression, string_key=True)}={self.sql(expression, 'value')}\"\n+ )\n+\n+ def with_properties(self, properties: exp.Properties) -> str:\n+ return self.properties(properties, prefix=self.seg(\"TBLPROPERTIES\"))\n", "issue": "Athena Iceberg Tables parsing issue\nHi,\r\nI want to parse a SQL Statement that creates an Iceberg table on Athena:\r\n\r\n```sql\r\ncreate table if not exists tmp.mytable (\r\n name string\r\n)\r\nlocation 's3://bucket/tmp/mytable/'\r\ntblproperties (\r\n 'table_type'='iceberg',\r\n 'format'='parquet'\r\n);\r\n```\r\nrunning \r\n```python\r\nstmts = sqlglot.parse(sql, read=sqlglot.Dialects.ATHENA)\r\nstmts[0].sql()\r\n```\r\nreturns:\r\n```sql\r\nCREATE TABLE IF NOT EXISTS tmp.mytable \r\n (name TEXT) \r\nLOCATION 's3://bucket/tmp/mytable/' \r\nWITH (\r\n table_type='iceberg', \r\n FORMAT='parquet'\r\n)\r\n```\r\n\r\nUnfortunately, the syntax in Athena is different for Iceberg Tables and Hive-style tables.\r\n\r\nThe parsed statement should look like this:\r\n\r\n```sql\r\nCREATE TABLE IF NOT EXISTS tmp.mytable \r\n (name STRING) \r\nLOCATION 's3://bucket/tmp/mytable/' \r\nTBLPROPERTIES (\r\n 'table_type'='iceberg', \r\n 'FORMAT'='parquet'\r\n)\r\n```\r\n\r\nInstead of WITH -> TBLPROPERTIES\r\nThe keys in the this block are wrapped in upper quotes and iceberg has slightly different data types. 
In this case STRING instead of TEXT\r\n\r\nhttps://docs.aws.amazon.com/athena/latest/ug/querying-iceberg-supported-data-types.html\r\nhttps://docs.aws.amazon.com/athena/latest/ug/querying-iceberg-creating-tables.html\r\n\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom sqlglot.dialects.trino import Trino\nfrom sqlglot.tokens import TokenType\n\n\nclass Athena(Trino):\n class Parser(Trino.Parser):\n STATEMENT_PARSERS = {\n **Trino.Parser.STATEMENT_PARSERS,\n TokenType.USING: lambda self: self._parse_as_command(self._prev),\n }\n", "path": "sqlglot/dialects/athena.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom sqlglot import exp\nfrom sqlglot.dialects.trino import Trino\nfrom sqlglot.tokens import TokenType\n\n\nclass Athena(Trino):\n class Parser(Trino.Parser):\n STATEMENT_PARSERS = {\n **Trino.Parser.STATEMENT_PARSERS,\n TokenType.USING: lambda self: self._parse_as_command(self._prev),\n }\n\n class Generator(Trino.Generator):\n PROPERTIES_LOCATION = {\n **Trino.Generator.PROPERTIES_LOCATION,\n exp.LocationProperty: exp.Properties.Location.POST_SCHEMA,\n }\n\n TYPE_MAPPING = {\n **Trino.Generator.TYPE_MAPPING,\n exp.DataType.Type.TEXT: \"STRING\",\n }\n\n TRANSFORMS = {\n **Trino.Generator.TRANSFORMS,\n exp.FileFormatProperty: lambda self, e: f\"'FORMAT'={self.sql(e, 'this')}\",\n }\n\n def property_sql(self, expression: exp.Property) -> str:\n return (\n f\"{self.property_name(expression, string_key=True)}={self.sql(expression, 'value')}\"\n )\n\n def with_properties(self, properties: exp.Properties) -> str:\n return self.properties(properties, prefix=self.seg(\"TBLPROPERTIES\"))\n", "path": "sqlglot/dialects/athena.py"}]}
690
341
gh_patches_debug_25411
rasdani/github-patches
git_diff
scikit-hep__pyhf-338
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add README to PyPI # Description At the moment we have no README for the [PyPI page](https://pypi.org/project/pyhf/0.0.15/). The addition of one would be a nice touch (even though I assume that most users will discover the project through GitHub). --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 #!/usr/bin/env python 2 3 from setuptools import setup, find_packages 4 5 extras_require = { 6 'tensorflow': [ 7 'tensorflow>=1.10.0', 8 'tensorflow-probability==0.3.0', 9 'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass 10 'setuptools<=39.1.0', 11 ], 12 'torch': ['torch>=0.4.0'], 13 'mxnet': [ 14 'mxnet>=1.0.0', 15 'requests<2.19.0,>=2.18.4', 16 'numpy<1.15.0,>=1.8.2', 17 'requests<2.19.0,>=2.18.4', 18 ], 19 # 'dask': [ 20 # 'dask[array]' 21 # ], 22 'xmlimport': ['uproot'], 23 'minuit': ['iminuit'], 24 'develop': [ 25 'pyflakes', 26 'pytest>=3.5.1', 27 'pytest-cov>=2.5.1', 28 'pytest-benchmark[histogram]', 29 'pytest-console-scripts', 30 'python-coveralls', 31 'coverage>=4.0', # coveralls 32 'matplotlib', 33 'jupyter', 34 'nbdime', 35 'uproot>=3.0.0', 36 'papermill', 37 'graphviz', 38 'bumpversion', 39 'sphinx', 40 'sphinxcontrib-bibtex', 41 'sphinxcontrib-napoleon', 42 'sphinx_rtd_theme', 43 'nbsphinx', 44 'm2r', 45 'jsonpatch', 46 'ipython<7', # jupyter_console and ipython clash in dependency requirement -- downgrade ipython for now 47 'pre-commit', 48 'black;python_version>="3.6"', # Black is Python3 only 49 ], 50 } 51 extras_require['complete'] = sorted(set(sum(extras_require.values(), []))) 52 53 setup( 54 name='pyhf', 55 version='0.0.15', 56 description='(partial) pure python histfactory implementation', 57 url='https://github.com/diana-hep/pyhf', 58 author='Lukas Heinrich', 59 author_email='[email protected]', 60 license='Apache', 61 keywords='physics fitting numpy scipy tensorflow pytorch mxnet dask', 62 classifiers=[ 63 "Programming Language :: Python :: 2", 64 "Programming Language :: Python :: 2.7", 65 "Programming Language :: Python :: 3", 66 "Programming Language :: Python :: 3.6", 67 ], 68 packages=find_packages(), 69 include_package_data=True, 70 python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*", 71 install_requires=[ 72 'scipy', # requires numpy, which is required by pyhf, tensorflow, and mxnet 73 'click>=6.0', # for console scripts, 74 'tqdm', # for readxml 75 'six', # for modifiers 76 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6 77 'jsonpatch', 78 ], 79 extras_require=extras_require, 80 entry_points={'console_scripts': ['pyhf=pyhf.commandline:pyhf']}, 81 dependency_links=[], 82 ) 83 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py --- a/setup.py +++ b/setup.py @@ -1,6 +1,11 @@ #!/usr/bin/env python from setuptools import setup, find_packages +from os import path + +this_directory = path.abspath(path.dirname(__file__)) +with open(path.join(this_directory, 'README.md'), encoding='utf-8') as readme_md: + long_description = readme_md.read() extras_require = { 'tensorflow': [ @@ -46,6 +51,7 @@ 'ipython<7', # jupyter_console and ipython clash in dependency requirement -- downgrade ipython for now 'pre-commit', 'black;python_version>="3.6"', # Black is Python3 only + 'twine', ], } extras_require['complete'] = sorted(set(sum(extras_require.values(), []))) @@ -54,6 +60,8 @@ name='pyhf', version='0.0.15', description='(partial) pure python histfactory implementation', + long_description=long_description, + long_description_content_type='text/markdown', url='https://github.com/diana-hep/pyhf', author='Lukas Heinrich', author_email='[email protected]',
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -1,6 +1,11 @@\n #!/usr/bin/env python\n \n from setuptools import setup, find_packages\n+from os import path\n+\n+this_directory = path.abspath(path.dirname(__file__))\n+with open(path.join(this_directory, 'README.md'), encoding='utf-8') as readme_md:\n+ long_description = readme_md.read()\n \n extras_require = {\n 'tensorflow': [\n@@ -46,6 +51,7 @@\n 'ipython<7', # jupyter_console and ipython clash in dependency requirement -- downgrade ipython for now\n 'pre-commit',\n 'black;python_version>=\"3.6\"', # Black is Python3 only\n+ 'twine',\n ],\n }\n extras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n@@ -54,6 +60,8 @@\n name='pyhf',\n version='0.0.15',\n description='(partial) pure python histfactory implementation',\n+ long_description=long_description,\n+ long_description_content_type='text/markdown',\n url='https://github.com/diana-hep/pyhf',\n author='Lukas Heinrich',\n author_email='[email protected]',\n", "issue": "Add README to PyPI\n# Description\r\n\r\nAt the moment we have no README for the [PyPI page](https://pypi.org/project/pyhf/0.0.15/). The addition of one would be a nice touch (even though I assume that most users will discover the project through GitHub).\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\n\nextras_require = {\n 'tensorflow': [\n 'tensorflow>=1.10.0',\n 'tensorflow-probability==0.3.0',\n 'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass\n 'setuptools<=39.1.0',\n ],\n 'torch': ['torch>=0.4.0'],\n 'mxnet': [\n 'mxnet>=1.0.0',\n 'requests<2.19.0,>=2.18.4',\n 'numpy<1.15.0,>=1.8.2',\n 'requests<2.19.0,>=2.18.4',\n ],\n # 'dask': [\n # 'dask[array]'\n # ],\n 'xmlimport': ['uproot'],\n 'minuit': ['iminuit'],\n 'develop': [\n 'pyflakes',\n 'pytest>=3.5.1',\n 'pytest-cov>=2.5.1',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'python-coveralls',\n 'coverage>=4.0', # coveralls\n 'matplotlib',\n 'jupyter',\n 'nbdime',\n 'uproot>=3.0.0',\n 'papermill',\n 'graphviz',\n 'bumpversion',\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinxcontrib-napoleon',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'm2r',\n 'jsonpatch',\n 'ipython<7', # jupyter_console and ipython clash in dependency requirement -- downgrade ipython for now\n 'pre-commit',\n 'black;python_version>=\"3.6\"', # Black is Python3 only\n ],\n}\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\nsetup(\n name='pyhf',\n version='0.0.15',\n description='(partial) pure python histfactory implementation',\n url='https://github.com/diana-hep/pyhf',\n author='Lukas Heinrich',\n author_email='[email protected]',\n license='Apache',\n keywords='physics fitting numpy scipy tensorflow pytorch mxnet dask',\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n ],\n packages=find_packages(),\n include_package_data=True,\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*\",\n install_requires=[\n 'scipy', # requires numpy, which is required by pyhf, tensorflow, and mxnet\n 'click>=6.0', # for console scripts,\n 'tqdm', # for readxml\n 'six', # for modifiers\n 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6\n 'jsonpatch',\n ],\n extras_require=extras_require,\n entry_points={'console_scripts': 
['pyhf=pyhf.commandline:pyhf']},\n dependency_links=[],\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nfrom setuptools import setup, find_packages\nfrom os import path\n\nthis_directory = path.abspath(path.dirname(__file__))\nwith open(path.join(this_directory, 'README.md'), encoding='utf-8') as readme_md:\n long_description = readme_md.read()\n\nextras_require = {\n 'tensorflow': [\n 'tensorflow>=1.10.0',\n 'tensorflow-probability==0.3.0',\n 'numpy<=1.14.5,>=1.14.0', # Lower of 1.14.0 instead of 1.13.3 to ensure doctest pass\n 'setuptools<=39.1.0',\n ],\n 'torch': ['torch>=0.4.0'],\n 'mxnet': [\n 'mxnet>=1.0.0',\n 'requests<2.19.0,>=2.18.4',\n 'numpy<1.15.0,>=1.8.2',\n 'requests<2.19.0,>=2.18.4',\n ],\n # 'dask': [\n # 'dask[array]'\n # ],\n 'xmlimport': ['uproot'],\n 'minuit': ['iminuit'],\n 'develop': [\n 'pyflakes',\n 'pytest>=3.5.1',\n 'pytest-cov>=2.5.1',\n 'pytest-benchmark[histogram]',\n 'pytest-console-scripts',\n 'python-coveralls',\n 'coverage>=4.0', # coveralls\n 'matplotlib',\n 'jupyter',\n 'nbdime',\n 'uproot>=3.0.0',\n 'papermill',\n 'graphviz',\n 'bumpversion',\n 'sphinx',\n 'sphinxcontrib-bibtex',\n 'sphinxcontrib-napoleon',\n 'sphinx_rtd_theme',\n 'nbsphinx',\n 'm2r',\n 'jsonpatch',\n 'ipython<7', # jupyter_console and ipython clash in dependency requirement -- downgrade ipython for now\n 'pre-commit',\n 'black;python_version>=\"3.6\"', # Black is Python3 only\n 'twine',\n ],\n}\nextras_require['complete'] = sorted(set(sum(extras_require.values(), [])))\n\nsetup(\n name='pyhf',\n version='0.0.15',\n description='(partial) pure python histfactory implementation',\n long_description=long_description,\n long_description_content_type='text/markdown',\n url='https://github.com/diana-hep/pyhf',\n author='Lukas Heinrich',\n author_email='[email protected]',\n license='Apache',\n keywords='physics fitting numpy scipy tensorflow pytorch mxnet dask',\n classifiers=[\n \"Programming Language :: Python :: 2\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n ],\n packages=find_packages(),\n include_package_data=True,\n python_requires=\">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*\",\n install_requires=[\n 'scipy', # requires numpy, which is required by pyhf, tensorflow, and mxnet\n 'click>=6.0', # for console scripts,\n 'tqdm', # for readxml\n 'six', # for modifiers\n 'jsonschema>=v3.0.0a2', # for utils, alpha-release for draft 6\n 'jsonpatch',\n ],\n extras_require=extras_require,\n entry_points={'console_scripts': ['pyhf=pyhf.commandline:pyhf']},\n dependency_links=[],\n)\n", "path": "setup.py"}]}
1,252
289
gh_patches_debug_27906
rasdani/github-patches
git_diff
Mailu__Mailu-2735
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Brainstorming for 1.10 / 2.0 roadmap # Description Now 1.9 has been released for some time, it is time to discuss what we want to work on for the next release. - Release date? - Scope 1.10 (normal release) or 2.0 (release with breaking changes)? - What do we want to work on? ## Ideas Ideas from the last meeting: - introduce snappymail. - rootless containers - probably requires init container - replacement of Socrates. ghostwheel42 will work on this (if he finds time). - A security/keys page for Mailu admin interface. - On this page you can configure/change/generate keys for all your Mail domains. - DNS overview - Have some UI that tells you if DNS is correctly configured. - Have a look at BuildX - means we can build for multiple platforms including ARM - means we remove the build arm script - means CI will be changed massively. - For ARM, we could maybe build once per week to make sure the build time for normal builts is not too long. - autoconfiguration for email clients? - automx: https://rseichter.github.io/automx2/? - integrate whatever we choose with mailu (part of mailu)? - also good time to drop starttls for imap/pop/smtp(sending mail). - So only support direct SSL/TLS - Could be done via environment_variable. When not configured, then starttls supported. If configured (default for new deployments) then it is disabled. - Another idea is to disable starttls, and report a custom error (when you use explicit starttls) that you must switch to implicit SSL/TLS port 465. - Full 2 factor authentication with xoath2 - Too large in scope for this release. But preparations could be made. - Means we need autoconfiguration. Otherwise the email client will not use xoath2. - means using single sign on via identity provider (which mailu could be as well). This opens the door to use other identity providers in the future. Feel free to suggest your own ideas ## Misc For small non-critical issues/features I suggest we do not put it on the roadmap, but simply offer anyone the chance to pick these up and submit a PR if they want it included. What are your thoughts? Please share your feedback. Regardless the above wall of text, feel free to mention any feature/issue you would like included in the next release. ## My 2 cents For release date we could maybe aim around August/September? We are all very busy with our personal life now. This should give enough time to implement new features. Of course a decided date is not set in stone. It could still be moved forward/backwards. https://github.com/Mailu/Mailu/blob/master/design/mailu-directory-structure.md This is what I was thinking about for the scope. Changing the directory structure must be part of a major release with breaking changes. Do we want to make this change for this release or postpone this? Personally I'd like to check - switching to SnappyMail - rootless containers - BuildX. At least investigate what changes are required. Feel free to suggest your own ideas. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `docs/conf.py` Content: ``` 1 #!/usr/bin/env python3 2 # -*- coding: utf-8 -*- 3 # 4 5 import os 6 7 extensions = ['sphinx.ext.imgmath', 'sphinx.ext.viewcode', 'sphinx_rtd_theme'] 8 templates_path = ['_templates'] 9 source_suffix = '.rst' 10 master_doc = 'index' 11 project = 'Mailu' 12 copyright = '2018, Mailu authors' 13 author = 'Mailu authors' 14 version = release = os.environ.get('VERSION', 'master') 15 language = 'en' 16 exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'Dockerfile', 'docker-compose.yml'] 17 pygments_style = 'sphinx' 18 todo_include_todos = False 19 html_theme = 'sphinx_rtd_theme' 20 html_title = 'Mailu, Docker based mail server' 21 html_static_path = [] 22 htmlhelp_basename = 'Mailudoc' 23 24 # Custom sidebar templates, must be a dictionary that maps document names 25 # to template names. 26 html_sidebars = { 27 '**': [ 28 'relations.html', 29 'searchbox.html', 30 ] 31 } 32 33 # Theme options 34 html_context = { 35 'display_github': True, 36 'github_user': 'mailu', 37 'github_repo': 'mailu', 38 'github_version': version, 39 'stable_version': '1.9', 40 'versions': [ 41 ('1.8', '/1.8/'), 42 ('1.9', '/1.9/'), 43 ('master', '/master/') 44 ], 45 'conf_py_path': '/docs/' 46 } 47 ``` Path: `core/admin/mailu/configuration.py` Content: ``` 1 import os 2 3 from datetime import timedelta 4 import ipaddress 5 6 DEFAULT_CONFIG = { 7 # Specific to the admin UI 8 'DOCKER_SOCKET': 'unix:///var/run/docker.sock', 9 'BABEL_DEFAULT_LOCALE': 'en', 10 'BABEL_DEFAULT_TIMEZONE': 'UTC', 11 'BOOTSTRAP_SERVE_LOCAL': True, 12 'RATELIMIT_STORAGE_URL': '', 13 'DEBUG': False, 14 'DEBUG_PROFILER': False, 15 'DEBUG_TB_INTERCEPT_REDIRECTS': False, 16 'DEBUG_ASSETS': '', 17 'DOMAIN_REGISTRATION': False, 18 'TEMPLATES_AUTO_RELOAD': True, 19 'MEMORY_SESSIONS': False, 20 'FETCHMAIL_ENABLED': True, 21 'MAILU_VERSION': 'unknown', 22 # Database settings 23 'DB_FLAVOR': None, 24 'DB_USER': 'mailu', 25 'DB_PW': None, 26 'DB_HOST': 'database', 27 'DB_NAME': 'mailu', 28 'SQLITE_DATABASE_FILE': 'data/main.db', 29 'SQLALCHEMY_DATABASE_URI': 'sqlite:////data/main.db', 30 'SQLALCHEMY_DATABASE_URI_ROUNDCUBE': 'sqlite:////data/roundcube.db', 31 'SQLALCHEMY_TRACK_MODIFICATIONS': False, 32 # Statistics management 33 'INSTANCE_ID_PATH': '/data/instance', 34 'STATS_ENDPOINT': '19.{}.stats.mailu.io', 35 # Common configuration variables 36 'SECRET_KEY': 'changeMe', 37 'DOMAIN': 'mailu.io', 38 'HOSTNAMES': 'mail.mailu.io,alternative.mailu.io,yetanother.mailu.io', 39 'POSTMASTER': 'postmaster', 40 'WILDCARD_SENDERS': '', 41 'TLS_FLAVOR': 'cert', 42 'INBOUND_TLS_ENFORCE': False, 43 'DEFER_ON_TLS_ERROR': True, 44 'AUTH_RATELIMIT_IP': '5/hour', 45 'AUTH_RATELIMIT_IP_V4_MASK': 24, 46 'AUTH_RATELIMIT_IP_V6_MASK': 48, 47 'AUTH_RATELIMIT_USER': '50/day', 48 'AUTH_RATELIMIT_EXEMPTION': '', 49 'AUTH_RATELIMIT_EXEMPTION_LENGTH': 86400, 50 'DISABLE_STATISTICS': False, 51 # Mail settings 52 'DMARC_RUA': None, 53 'DMARC_RUF': None, 54 'WELCOME': False, 55 'WELCOME_SUBJECT': 'Dummy welcome topic', 56 'WELCOME_BODY': 'Dummy welcome body', 57 'DKIM_SELECTOR': 'dkim', 58 'DKIM_PATH': '/dkim/{domain}.{selector}.key', 59 'DEFAULT_QUOTA': 1000000000, 60 'MESSAGE_RATELIMIT': '200/day', 61 'MESSAGE_RATELIMIT_EXEMPTION': '', 62 'RECIPIENT_DELIMITER': '', 63 # Web settings 64 'SITENAME': 'Mailu', 65 'WEBSITE': 'https://mailu.io', 66 'ADMIN': 'none', 67 'WEB_ADMIN': '/admin', 68 'WEB_WEBMAIL': '/webmail', 69 'WEBMAIL': 'none', 70 'RECAPTCHA_PUBLIC_KEY': '', 71 'RECAPTCHA_PRIVATE_KEY': '', 72 'LOGO_URL': None, 73 
'LOGO_BACKGROUND': None, 74 # Advanced settings 75 'API': False, 76 'WEB_API': '/api', 77 'API_TOKEN': None, 78 'LOG_LEVEL': 'WARNING', 79 'SESSION_KEY_BITS': 128, 80 'SESSION_TIMEOUT': 3600, 81 'PERMANENT_SESSION_LIFETIME': 30*24*3600, 82 'SESSION_COOKIE_SECURE': None, 83 'CREDENTIAL_ROUNDS': 12, 84 'TLS_PERMISSIVE': True, 85 'TZ': 'Etc/UTC', 86 'DEFAULT_SPAM_THRESHOLD': 80, 87 'PROXY_AUTH_WHITELIST': '', 88 'PROXY_AUTH_HEADER': 'X-Auth-Email', 89 'PROXY_AUTH_CREATE': False, 90 'PROXY_AUTH_LOGOUT_URL': None, 91 'SUBNET': '192.168.203.0/24', 92 'SUBNET6': None, 93 } 94 95 class ConfigManager: 96 """ Naive configuration manager that uses environment only 97 """ 98 99 DB_TEMPLATES = { 100 'sqlite': 'sqlite:////{SQLITE_DATABASE_FILE}', 101 'postgresql': 'postgresql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}', 102 'mysql': 'mysql+mysqlconnector://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}', 103 } 104 105 def __init__(self): 106 self.config = dict() 107 108 def __get_env(self, key, value): 109 key_file = key + "_FILE" 110 if key_file in os.environ: 111 with open(os.environ.get(key_file)) as file: 112 value_from_file = file.read() 113 return value_from_file.strip() 114 else: 115 return os.environ.get(key, value) 116 117 def __coerce_value(self, value): 118 if isinstance(value, str) and value.lower() in ('true','yes'): 119 return True 120 elif isinstance(value, str) and value.lower() in ('false', 'no'): 121 return False 122 return value 123 124 def init_app(self, app): 125 # get current app config 126 self.config.update(app.config) 127 # get environment variables 128 for key in os.environ: 129 if key.endswith('_ADDRESS'): 130 self.config[key] = os.environ[key] 131 132 self.config.update({ 133 key: self.__coerce_value(self.__get_env(key, value)) 134 for key, value in DEFAULT_CONFIG.items() 135 }) 136 137 # automatically set the sqlalchemy string 138 if self.config['DB_FLAVOR']: 139 template = self.DB_TEMPLATES[self.config['DB_FLAVOR']] 140 self.config['SQLALCHEMY_DATABASE_URI'] = template.format(**self.config) 141 142 if not self.config.get('RATELIMIT_STORAGE_URL'): 143 self.config['RATELIMIT_STORAGE_URL'] = f'redis://{self.config["REDIS_ADDRESS"]}/2' 144 145 self.config['SESSION_STORAGE_URL'] = f'redis://{self.config["REDIS_ADDRESS"]}/3' 146 self.config['SESSION_COOKIE_SAMESITE'] = 'Strict' 147 self.config['SESSION_COOKIE_HTTPONLY'] = True 148 if self.config['SESSION_COOKIE_SECURE'] is None: 149 self.config['SESSION_COOKIE_SECURE'] = self.config['TLS_FLAVOR'] != 'notls' 150 self.config['SESSION_PERMANENT'] = True 151 self.config['SESSION_TIMEOUT'] = int(self.config['SESSION_TIMEOUT']) 152 self.config['SESSION_KEY_BITS'] = int(self.config['SESSION_KEY_BITS']) 153 self.config['PERMANENT_SESSION_LIFETIME'] = int(self.config['PERMANENT_SESSION_LIFETIME']) 154 self.config['AUTH_RATELIMIT_IP_V4_MASK'] = int(self.config['AUTH_RATELIMIT_IP_V4_MASK']) 155 self.config['AUTH_RATELIMIT_IP_V6_MASK'] = int(self.config['AUTH_RATELIMIT_IP_V6_MASK']) 156 self.config['AUTH_RATELIMIT_EXEMPTION'] = set(ipaddress.ip_network(cidr, False) for cidr in (cidr.strip() for cidr in self.config['AUTH_RATELIMIT_EXEMPTION'].split(',')) if cidr) 157 self.config['MESSAGE_RATELIMIT_EXEMPTION'] = set([s for s in self.config['MESSAGE_RATELIMIT_EXEMPTION'].lower().replace(' ', '').split(',') if s]) 158 hostnames = [host.strip() for host in self.config['HOSTNAMES'].split(',')] 159 self.config['HOSTNAMES'] = ','.join(hostnames) 160 self.config['HOSTNAME'] = hostnames[0] 161 self.config['DEFAULT_SPAM_THRESHOLD'] = 
int(self.config['DEFAULT_SPAM_THRESHOLD']) 162 self.config['PROXY_AUTH_WHITELIST'] = set(ipaddress.ip_network(cidr, False) for cidr in (cidr.strip() for cidr in self.config['PROXY_AUTH_WHITELIST'].split(',')) if cidr) 163 try: 164 self.config['MAILU_VERSION'] = open('/version', 'r').read() 165 except FileNotFoundError: 166 pass 167 168 # update the app config 169 app.config.update(self.config) 170 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/core/admin/mailu/configuration.py b/core/admin/mailu/configuration.py --- a/core/admin/mailu/configuration.py +++ b/core/admin/mailu/configuration.py @@ -31,7 +31,7 @@ 'SQLALCHEMY_TRACK_MODIFICATIONS': False, # Statistics management 'INSTANCE_ID_PATH': '/data/instance', - 'STATS_ENDPOINT': '19.{}.stats.mailu.io', + 'STATS_ENDPOINT': '20.{}.stats.mailu.io', # Common configuration variables 'SECRET_KEY': 'changeMe', 'DOMAIN': 'mailu.io', diff --git a/docs/conf.py b/docs/conf.py --- a/docs/conf.py +++ b/docs/conf.py @@ -9,7 +9,7 @@ source_suffix = '.rst' master_doc = 'index' project = 'Mailu' -copyright = '2018, Mailu authors' +copyright = '2016, Mailu authors' author = 'Mailu authors' version = release = os.environ.get('VERSION', 'master') language = 'en' @@ -25,7 +25,7 @@ # to template names. html_sidebars = { '**': [ - 'relations.html', + 'relations.html', 'searchbox.html', ] } @@ -36,10 +36,10 @@ 'github_user': 'mailu', 'github_repo': 'mailu', 'github_version': version, - 'stable_version': '1.9', + 'stable_version': '2.0', 'versions': [ - ('1.8', '/1.8/'), ('1.9', '/1.9/'), + ('2.0', '/2.0/'), ('master', '/master/') ], 'conf_py_path': '/docs/'
{"golden_diff": "diff --git a/core/admin/mailu/configuration.py b/core/admin/mailu/configuration.py\n--- a/core/admin/mailu/configuration.py\n+++ b/core/admin/mailu/configuration.py\n@@ -31,7 +31,7 @@\n 'SQLALCHEMY_TRACK_MODIFICATIONS': False,\n # Statistics management\n 'INSTANCE_ID_PATH': '/data/instance',\n- 'STATS_ENDPOINT': '19.{}.stats.mailu.io',\n+ 'STATS_ENDPOINT': '20.{}.stats.mailu.io',\n # Common configuration variables\n 'SECRET_KEY': 'changeMe',\n 'DOMAIN': 'mailu.io',\ndiff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -9,7 +9,7 @@\n source_suffix = '.rst'\n master_doc = 'index'\n project = 'Mailu'\n-copyright = '2018, Mailu authors'\n+copyright = '2016, Mailu authors'\n author = 'Mailu authors'\n version = release = os.environ.get('VERSION', 'master')\n language = 'en'\n@@ -25,7 +25,7 @@\n # to template names.\n html_sidebars = {\n '**': [\n- 'relations.html', \n+ 'relations.html',\n 'searchbox.html',\n ]\n }\n@@ -36,10 +36,10 @@\n 'github_user': 'mailu',\n 'github_repo': 'mailu',\n 'github_version': version,\n- 'stable_version': '1.9',\n+ 'stable_version': '2.0',\n 'versions': [\n- ('1.8', '/1.8/'),\n ('1.9', '/1.9/'),\n+ ('2.0', '/2.0/'),\n ('master', '/master/')\n ],\n 'conf_py_path': '/docs/'\n", "issue": "Brainstorming for 1.10 / 2.0 roadmap\n# Description\r\nNow 1.9 has been released for some time, it is time to discuss what we want to work on for the next release.\r\n\r\n- Release date?\r\n- Scope 1.10 (normal release) or 2.0 (release with breaking changes)? \r\n- What do we want to work on?\r\n\r\n## Ideas\r\n\r\nIdeas from the last meeting:\r\n- introduce snappymail.\r\n- rootless containers\r\n - probably requires init container\r\n- replacement of Socrates. ghostwheel42 will work on this (if he finds time).\r\n- A security/keys page for Mailu admin interface. \r\n - On this page you can configure/change/generate keys for all your Mail domains.\r\n - DNS overview \r\n - Have some UI that tells you if DNS is correctly configured.\r\n- Have a look at BuildX\r\n - means we can build for multiple platforms including ARM\r\n - means we remove the build arm script\r\n - means CI will be changed massively.\r\n - For ARM, we could maybe build once per week to make sure the build time for normal builts is not too long.\r\n- autoconfiguration for email clients?\r\n - automx: https://rseichter.github.io/automx2/?\r\n - integrate whatever we choose with mailu (part of mailu)?\r\n - also good time to drop starttls for imap/pop/smtp(sending mail).\r\n - So only support direct SSL/TLS\r\n - Could be done via environment_variable. When not configured, then starttls supported. If configured (default for new deployments) then it is disabled.\r\n - Another idea is to disable starttls, and report a custom error (when you use explicit starttls) that you must switch to implicit SSL/TLS port 465.\r\n- Full 2 factor authentication with xoath2 \r\n - Too large in scope for this release. But preparations could be made.\r\n - Means we need autoconfiguration. Otherwise the email client will not use xoath2.\r\n - means using single sign on via identity provider (which mailu could be as well). This opens the door to use other identity providers in the future.\r\n\r\nFeel free to suggest your own ideas\r\n\r\n## Misc\r\nFor small non-critical issues/features I suggest we do not put it on the roadmap, but simply offer anyone the chance to pick these up and submit a PR if they want it included.\r\n\r\nWhat are your thoughts? 
Please share your feedback.\r\nRegardless the above wall of text, feel free to mention any feature/issue you would like included in the next release.\r\n\r\n## My 2 cents\r\nFor release date we could maybe aim around August/September?\r\nWe are all very busy with our personal life now. This should give enough time to implement new features. Of course a decided date is not set in stone. It could still be moved forward/backwards. \r\n\r\nhttps://github.com/Mailu/Mailu/blob/master/design/mailu-directory-structure.md\r\nThis is what I was thinking about for the scope. Changing the directory structure must be part of a major release with breaking changes. Do we want to make this change for this release or postpone this?\r\n\r\nPersonally I'd like to check \r\n - switching to SnappyMail \r\n - rootless containers\r\n - BuildX. At least investigate what changes are required.\r\n\r\nFeel free to suggest your own ideas. \r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n\nimport os\n\nextensions = ['sphinx.ext.imgmath', 'sphinx.ext.viewcode', 'sphinx_rtd_theme']\ntemplates_path = ['_templates']\nsource_suffix = '.rst'\nmaster_doc = 'index'\nproject = 'Mailu'\ncopyright = '2018, Mailu authors'\nauthor = 'Mailu authors'\nversion = release = os.environ.get('VERSION', 'master')\nlanguage = 'en'\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'Dockerfile', 'docker-compose.yml']\npygments_style = 'sphinx'\ntodo_include_todos = False\nhtml_theme = 'sphinx_rtd_theme'\nhtml_title = 'Mailu, Docker based mail server'\nhtml_static_path = []\nhtmlhelp_basename = 'Mailudoc'\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\nhtml_sidebars = {\n '**': [\n 'relations.html', \n 'searchbox.html',\n ]\n}\n\n# Theme options\nhtml_context = {\n 'display_github': True,\n 'github_user': 'mailu',\n 'github_repo': 'mailu',\n 'github_version': version,\n 'stable_version': '1.9',\n 'versions': [\n ('1.8', '/1.8/'),\n ('1.9', '/1.9/'),\n ('master', '/master/')\n ],\n 'conf_py_path': '/docs/'\n}\n", "path": "docs/conf.py"}, {"content": "import os\n\nfrom datetime import timedelta\nimport ipaddress\n\nDEFAULT_CONFIG = {\n # Specific to the admin UI\n 'DOCKER_SOCKET': 'unix:///var/run/docker.sock',\n 'BABEL_DEFAULT_LOCALE': 'en',\n 'BABEL_DEFAULT_TIMEZONE': 'UTC',\n 'BOOTSTRAP_SERVE_LOCAL': True,\n 'RATELIMIT_STORAGE_URL': '',\n 'DEBUG': False,\n 'DEBUG_PROFILER': False,\n 'DEBUG_TB_INTERCEPT_REDIRECTS': False,\n 'DEBUG_ASSETS': '',\n 'DOMAIN_REGISTRATION': False,\n 'TEMPLATES_AUTO_RELOAD': True,\n 'MEMORY_SESSIONS': False,\n 'FETCHMAIL_ENABLED': True,\n 'MAILU_VERSION': 'unknown',\n # Database settings\n 'DB_FLAVOR': None,\n 'DB_USER': 'mailu',\n 'DB_PW': None,\n 'DB_HOST': 'database',\n 'DB_NAME': 'mailu',\n 'SQLITE_DATABASE_FILE': 'data/main.db',\n 'SQLALCHEMY_DATABASE_URI': 'sqlite:////data/main.db',\n 'SQLALCHEMY_DATABASE_URI_ROUNDCUBE': 'sqlite:////data/roundcube.db',\n 'SQLALCHEMY_TRACK_MODIFICATIONS': False,\n # Statistics management\n 'INSTANCE_ID_PATH': '/data/instance',\n 'STATS_ENDPOINT': '19.{}.stats.mailu.io',\n # Common configuration variables\n 'SECRET_KEY': 'changeMe',\n 'DOMAIN': 'mailu.io',\n 'HOSTNAMES': 'mail.mailu.io,alternative.mailu.io,yetanother.mailu.io',\n 'POSTMASTER': 'postmaster',\n 'WILDCARD_SENDERS': '',\n 'TLS_FLAVOR': 'cert',\n 'INBOUND_TLS_ENFORCE': False,\n 'DEFER_ON_TLS_ERROR': True,\n 'AUTH_RATELIMIT_IP': '5/hour',\n 'AUTH_RATELIMIT_IP_V4_MASK': 24,\n 'AUTH_RATELIMIT_IP_V6_MASK': 48,\n 
'AUTH_RATELIMIT_USER': '50/day',\n 'AUTH_RATELIMIT_EXEMPTION': '',\n 'AUTH_RATELIMIT_EXEMPTION_LENGTH': 86400,\n 'DISABLE_STATISTICS': False,\n # Mail settings\n 'DMARC_RUA': None,\n 'DMARC_RUF': None,\n 'WELCOME': False,\n 'WELCOME_SUBJECT': 'Dummy welcome topic',\n 'WELCOME_BODY': 'Dummy welcome body',\n 'DKIM_SELECTOR': 'dkim',\n 'DKIM_PATH': '/dkim/{domain}.{selector}.key',\n 'DEFAULT_QUOTA': 1000000000,\n 'MESSAGE_RATELIMIT': '200/day',\n 'MESSAGE_RATELIMIT_EXEMPTION': '',\n 'RECIPIENT_DELIMITER': '',\n # Web settings\n 'SITENAME': 'Mailu',\n 'WEBSITE': 'https://mailu.io',\n 'ADMIN': 'none',\n 'WEB_ADMIN': '/admin',\n 'WEB_WEBMAIL': '/webmail',\n 'WEBMAIL': 'none',\n 'RECAPTCHA_PUBLIC_KEY': '',\n 'RECAPTCHA_PRIVATE_KEY': '',\n 'LOGO_URL': None,\n 'LOGO_BACKGROUND': None,\n # Advanced settings\n 'API': False,\n 'WEB_API': '/api',\n 'API_TOKEN': None,\n 'LOG_LEVEL': 'WARNING',\n 'SESSION_KEY_BITS': 128,\n 'SESSION_TIMEOUT': 3600,\n 'PERMANENT_SESSION_LIFETIME': 30*24*3600,\n 'SESSION_COOKIE_SECURE': None,\n 'CREDENTIAL_ROUNDS': 12,\n 'TLS_PERMISSIVE': True,\n 'TZ': 'Etc/UTC',\n 'DEFAULT_SPAM_THRESHOLD': 80,\n 'PROXY_AUTH_WHITELIST': '',\n 'PROXY_AUTH_HEADER': 'X-Auth-Email',\n 'PROXY_AUTH_CREATE': False,\n 'PROXY_AUTH_LOGOUT_URL': None,\n 'SUBNET': '192.168.203.0/24',\n 'SUBNET6': None,\n}\n\nclass ConfigManager:\n \"\"\" Naive configuration manager that uses environment only\n \"\"\"\n\n DB_TEMPLATES = {\n 'sqlite': 'sqlite:////{SQLITE_DATABASE_FILE}',\n 'postgresql': 'postgresql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}',\n 'mysql': 'mysql+mysqlconnector://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}',\n }\n\n def __init__(self):\n self.config = dict()\n\n def __get_env(self, key, value):\n key_file = key + \"_FILE\"\n if key_file in os.environ:\n with open(os.environ.get(key_file)) as file:\n value_from_file = file.read()\n return value_from_file.strip()\n else:\n return os.environ.get(key, value)\n\n def __coerce_value(self, value):\n if isinstance(value, str) and value.lower() in ('true','yes'):\n return True\n elif isinstance(value, str) and value.lower() in ('false', 'no'):\n return False\n return value\n\n def init_app(self, app):\n # get current app config\n self.config.update(app.config)\n # get environment variables\n for key in os.environ:\n if key.endswith('_ADDRESS'):\n self.config[key] = os.environ[key]\n\n self.config.update({\n key: self.__coerce_value(self.__get_env(key, value))\n for key, value in DEFAULT_CONFIG.items()\n })\n\n # automatically set the sqlalchemy string\n if self.config['DB_FLAVOR']:\n template = self.DB_TEMPLATES[self.config['DB_FLAVOR']]\n self.config['SQLALCHEMY_DATABASE_URI'] = template.format(**self.config)\n\n if not self.config.get('RATELIMIT_STORAGE_URL'):\n self.config['RATELIMIT_STORAGE_URL'] = f'redis://{self.config[\"REDIS_ADDRESS\"]}/2'\n\n self.config['SESSION_STORAGE_URL'] = f'redis://{self.config[\"REDIS_ADDRESS\"]}/3'\n self.config['SESSION_COOKIE_SAMESITE'] = 'Strict'\n self.config['SESSION_COOKIE_HTTPONLY'] = True\n if self.config['SESSION_COOKIE_SECURE'] is None:\n self.config['SESSION_COOKIE_SECURE'] = self.config['TLS_FLAVOR'] != 'notls'\n self.config['SESSION_PERMANENT'] = True\n self.config['SESSION_TIMEOUT'] = int(self.config['SESSION_TIMEOUT'])\n self.config['SESSION_KEY_BITS'] = int(self.config['SESSION_KEY_BITS'])\n self.config['PERMANENT_SESSION_LIFETIME'] = int(self.config['PERMANENT_SESSION_LIFETIME'])\n self.config['AUTH_RATELIMIT_IP_V4_MASK'] = int(self.config['AUTH_RATELIMIT_IP_V4_MASK'])\n 
self.config['AUTH_RATELIMIT_IP_V6_MASK'] = int(self.config['AUTH_RATELIMIT_IP_V6_MASK'])\n self.config['AUTH_RATELIMIT_EXEMPTION'] = set(ipaddress.ip_network(cidr, False) for cidr in (cidr.strip() for cidr in self.config['AUTH_RATELIMIT_EXEMPTION'].split(',')) if cidr)\n self.config['MESSAGE_RATELIMIT_EXEMPTION'] = set([s for s in self.config['MESSAGE_RATELIMIT_EXEMPTION'].lower().replace(' ', '').split(',') if s])\n hostnames = [host.strip() for host in self.config['HOSTNAMES'].split(',')]\n self.config['HOSTNAMES'] = ','.join(hostnames)\n self.config['HOSTNAME'] = hostnames[0]\n self.config['DEFAULT_SPAM_THRESHOLD'] = int(self.config['DEFAULT_SPAM_THRESHOLD'])\n self.config['PROXY_AUTH_WHITELIST'] = set(ipaddress.ip_network(cidr, False) for cidr in (cidr.strip() for cidr in self.config['PROXY_AUTH_WHITELIST'].split(',')) if cidr)\n try:\n self.config['MAILU_VERSION'] = open('/version', 'r').read()\n except FileNotFoundError:\n pass\n\n # update the app config\n app.config.update(self.config)\n", "path": "core/admin/mailu/configuration.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n\nimport os\n\nextensions = ['sphinx.ext.imgmath', 'sphinx.ext.viewcode', 'sphinx_rtd_theme']\ntemplates_path = ['_templates']\nsource_suffix = '.rst'\nmaster_doc = 'index'\nproject = 'Mailu'\ncopyright = '2016, Mailu authors'\nauthor = 'Mailu authors'\nversion = release = os.environ.get('VERSION', 'master')\nlanguage = 'en'\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'Dockerfile', 'docker-compose.yml']\npygments_style = 'sphinx'\ntodo_include_todos = False\nhtml_theme = 'sphinx_rtd_theme'\nhtml_title = 'Mailu, Docker based mail server'\nhtml_static_path = []\nhtmlhelp_basename = 'Mailudoc'\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\nhtml_sidebars = {\n '**': [\n 'relations.html',\n 'searchbox.html',\n ]\n}\n\n# Theme options\nhtml_context = {\n 'display_github': True,\n 'github_user': 'mailu',\n 'github_repo': 'mailu',\n 'github_version': version,\n 'stable_version': '2.0',\n 'versions': [\n ('1.9', '/1.9/'),\n ('2.0', '/2.0/'),\n ('master', '/master/')\n ],\n 'conf_py_path': '/docs/'\n}\n", "path": "docs/conf.py"}, {"content": "import os\n\nfrom datetime import timedelta\nimport ipaddress\n\nDEFAULT_CONFIG = {\n # Specific to the admin UI\n 'DOCKER_SOCKET': 'unix:///var/run/docker.sock',\n 'BABEL_DEFAULT_LOCALE': 'en',\n 'BABEL_DEFAULT_TIMEZONE': 'UTC',\n 'BOOTSTRAP_SERVE_LOCAL': True,\n 'RATELIMIT_STORAGE_URL': '',\n 'DEBUG': False,\n 'DEBUG_PROFILER': False,\n 'DEBUG_TB_INTERCEPT_REDIRECTS': False,\n 'DEBUG_ASSETS': '',\n 'DOMAIN_REGISTRATION': False,\n 'TEMPLATES_AUTO_RELOAD': True,\n 'MEMORY_SESSIONS': False,\n 'FETCHMAIL_ENABLED': True,\n 'MAILU_VERSION': 'unknown',\n # Database settings\n 'DB_FLAVOR': None,\n 'DB_USER': 'mailu',\n 'DB_PW': None,\n 'DB_HOST': 'database',\n 'DB_NAME': 'mailu',\n 'SQLITE_DATABASE_FILE': 'data/main.db',\n 'SQLALCHEMY_DATABASE_URI': 'sqlite:////data/main.db',\n 'SQLALCHEMY_DATABASE_URI_ROUNDCUBE': 'sqlite:////data/roundcube.db',\n 'SQLALCHEMY_TRACK_MODIFICATIONS': False,\n # Statistics management\n 'INSTANCE_ID_PATH': '/data/instance',\n 'STATS_ENDPOINT': '20.{}.stats.mailu.io',\n # Common configuration variables\n 'SECRET_KEY': 'changeMe',\n 'DOMAIN': 'mailu.io',\n 'HOSTNAMES': 'mail.mailu.io,alternative.mailu.io,yetanother.mailu.io',\n 'POSTMASTER': 'postmaster',\n 'WILDCARD_SENDERS': '',\n 'TLS_FLAVOR': 'cert',\n 'INBOUND_TLS_ENFORCE': False,\n 
'DEFER_ON_TLS_ERROR': True,\n 'AUTH_RATELIMIT_IP': '5/hour',\n 'AUTH_RATELIMIT_IP_V4_MASK': 24,\n 'AUTH_RATELIMIT_IP_V6_MASK': 48,\n 'AUTH_RATELIMIT_USER': '50/day',\n 'AUTH_RATELIMIT_EXEMPTION': '',\n 'AUTH_RATELIMIT_EXEMPTION_LENGTH': 86400,\n 'DISABLE_STATISTICS': False,\n # Mail settings\n 'DMARC_RUA': None,\n 'DMARC_RUF': None,\n 'WELCOME': False,\n 'WELCOME_SUBJECT': 'Dummy welcome topic',\n 'WELCOME_BODY': 'Dummy welcome body',\n 'DKIM_SELECTOR': 'dkim',\n 'DKIM_PATH': '/dkim/{domain}.{selector}.key',\n 'DEFAULT_QUOTA': 1000000000,\n 'MESSAGE_RATELIMIT': '200/day',\n 'MESSAGE_RATELIMIT_EXEMPTION': '',\n 'RECIPIENT_DELIMITER': '',\n # Web settings\n 'SITENAME': 'Mailu',\n 'WEBSITE': 'https://mailu.io',\n 'ADMIN': 'none',\n 'WEB_ADMIN': '/admin',\n 'WEB_WEBMAIL': '/webmail',\n 'WEBMAIL': 'none',\n 'RECAPTCHA_PUBLIC_KEY': '',\n 'RECAPTCHA_PRIVATE_KEY': '',\n 'LOGO_URL': None,\n 'LOGO_BACKGROUND': None,\n # Advanced settings\n 'API': False,\n 'WEB_API': '/api',\n 'API_TOKEN': None,\n 'LOG_LEVEL': 'WARNING',\n 'SESSION_KEY_BITS': 128,\n 'SESSION_TIMEOUT': 3600,\n 'PERMANENT_SESSION_LIFETIME': 30*24*3600,\n 'SESSION_COOKIE_SECURE': None,\n 'CREDENTIAL_ROUNDS': 12,\n 'TLS_PERMISSIVE': True,\n 'TZ': 'Etc/UTC',\n 'DEFAULT_SPAM_THRESHOLD': 80,\n 'PROXY_AUTH_WHITELIST': '',\n 'PROXY_AUTH_HEADER': 'X-Auth-Email',\n 'PROXY_AUTH_CREATE': False,\n 'PROXY_AUTH_LOGOUT_URL': None,\n 'SUBNET': '192.168.203.0/24',\n 'SUBNET6': None,\n}\n\nclass ConfigManager:\n \"\"\" Naive configuration manager that uses environment only\n \"\"\"\n\n DB_TEMPLATES = {\n 'sqlite': 'sqlite:////{SQLITE_DATABASE_FILE}',\n 'postgresql': 'postgresql://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}',\n 'mysql': 'mysql+mysqlconnector://{DB_USER}:{DB_PW}@{DB_HOST}/{DB_NAME}',\n }\n\n def __init__(self):\n self.config = dict()\n\n def __get_env(self, key, value):\n key_file = key + \"_FILE\"\n if key_file in os.environ:\n with open(os.environ.get(key_file)) as file:\n value_from_file = file.read()\n return value_from_file.strip()\n else:\n return os.environ.get(key, value)\n\n def __coerce_value(self, value):\n if isinstance(value, str) and value.lower() in ('true','yes'):\n return True\n elif isinstance(value, str) and value.lower() in ('false', 'no'):\n return False\n return value\n\n def init_app(self, app):\n # get current app config\n self.config.update(app.config)\n # get environment variables\n for key in os.environ:\n if key.endswith('_ADDRESS'):\n self.config[key] = os.environ[key]\n\n self.config.update({\n key: self.__coerce_value(self.__get_env(key, value))\n for key, value in DEFAULT_CONFIG.items()\n })\n\n # automatically set the sqlalchemy string\n if self.config['DB_FLAVOR']:\n template = self.DB_TEMPLATES[self.config['DB_FLAVOR']]\n self.config['SQLALCHEMY_DATABASE_URI'] = template.format(**self.config)\n\n if not self.config.get('RATELIMIT_STORAGE_URL'):\n self.config['RATELIMIT_STORAGE_URL'] = f'redis://{self.config[\"REDIS_ADDRESS\"]}/2'\n\n self.config['SESSION_STORAGE_URL'] = f'redis://{self.config[\"REDIS_ADDRESS\"]}/3'\n self.config['SESSION_COOKIE_SAMESITE'] = 'Strict'\n self.config['SESSION_COOKIE_HTTPONLY'] = True\n if self.config['SESSION_COOKIE_SECURE'] is None:\n self.config['SESSION_COOKIE_SECURE'] = self.config['TLS_FLAVOR'] != 'notls'\n self.config['SESSION_PERMANENT'] = True\n self.config['SESSION_TIMEOUT'] = int(self.config['SESSION_TIMEOUT'])\n self.config['SESSION_KEY_BITS'] = int(self.config['SESSION_KEY_BITS'])\n self.config['PERMANENT_SESSION_LIFETIME'] = 
int(self.config['PERMANENT_SESSION_LIFETIME'])\n self.config['AUTH_RATELIMIT_IP_V4_MASK'] = int(self.config['AUTH_RATELIMIT_IP_V4_MASK'])\n self.config['AUTH_RATELIMIT_IP_V6_MASK'] = int(self.config['AUTH_RATELIMIT_IP_V6_MASK'])\n self.config['AUTH_RATELIMIT_EXEMPTION'] = set(ipaddress.ip_network(cidr, False) for cidr in (cidr.strip() for cidr in self.config['AUTH_RATELIMIT_EXEMPTION'].split(',')) if cidr)\n self.config['MESSAGE_RATELIMIT_EXEMPTION'] = set([s for s in self.config['MESSAGE_RATELIMIT_EXEMPTION'].lower().replace(' ', '').split(',') if s])\n hostnames = [host.strip() for host in self.config['HOSTNAMES'].split(',')]\n self.config['HOSTNAMES'] = ','.join(hostnames)\n self.config['HOSTNAME'] = hostnames[0]\n self.config['DEFAULT_SPAM_THRESHOLD'] = int(self.config['DEFAULT_SPAM_THRESHOLD'])\n self.config['PROXY_AUTH_WHITELIST'] = set(ipaddress.ip_network(cidr, False) for cidr in (cidr.strip() for cidr in self.config['PROXY_AUTH_WHITELIST'].split(',')) if cidr)\n try:\n self.config['MAILU_VERSION'] = open('/version', 'r').read()\n except FileNotFoundError:\n pass\n\n # update the app config\n app.config.update(self.config)\n", "path": "core/admin/mailu/configuration.py"}]}
3,556
405
gh_patches_debug_42306
rasdani/github-patches
git_diff
joke2k__faker-1600
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Incorrect language set on Provider during locale fallback * Faker version: 11.3.0 When faker is initialised with a locale not implemented by some of the built-in providers, it falls back to the DEFAULT_LOCALE for these providers. However, the language set on the provider’s instance in `__lang__` is still the requested locale and not the locale in effect. This is due to `Factory._find_provider_class` not returning the locale it chose, in: https://github.com/joke2k/faker/blob/001ddee39c33b6b82196fe6a5ecc131bca3b964c/faker/factory.py#L102-L104 to `Factory._get_provider_class` which then proceeds to return the locale value as it was passed in at the first place. Thus, `provider.__lang__` does not contain the found locale (as the variable name `lang_found` would suggest): https://github.com/joke2k/faker/blob/001ddee39c33b6b82196fe6a5ecc131bca3b964c/faker/factory.py#L61 ### Expected behavior `provider.__lang__` should be set to the actual language / locale being used by the provider. ### Actual behavior `provider.__lang__` is set to the locale that was requested but is not offered by this provider. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `faker/documentor.py` Content: ``` 1 import inspect 2 import warnings 3 4 from typing import Any, Dict, List, Optional, Tuple, Union 5 6 from .generator import Generator 7 from .providers import BaseProvider 8 from .proxy import Faker 9 10 11 class Documentor: 12 def __init__(self, generator: Union[Generator, Faker]) -> None: 13 """ 14 :param generator: a localized Generator with providers filled, 15 for which to write the documentation 16 :type generator: faker.Generator() 17 """ 18 self.generator = generator 19 self.max_name_len: int = 0 20 self.already_generated: List[str] = [] 21 22 def get_formatters( 23 self, 24 locale: Optional[str] = None, 25 excludes: Optional[List[str]] = None, 26 **kwargs: Any, 27 ) -> List[Tuple[BaseProvider, Dict[str, str]]]: 28 self.max_name_len = 0 29 self.already_generated = [] if excludes is None else excludes[:] 30 formatters = [] 31 providers: List[BaseProvider] = self.generator.get_providers() 32 for provider in providers[::-1]: # reverse 33 if locale and provider.__lang__ != locale: 34 continue 35 formatters.append( 36 (provider, self.get_provider_formatters(provider, **kwargs)), 37 ) 38 return formatters 39 40 def get_provider_formatters( 41 self, 42 provider: BaseProvider, 43 prefix: str = "fake.", 44 with_args: bool = True, 45 with_defaults: bool = True, 46 ) -> Dict[str, str]: 47 formatters = {} 48 49 for name, method in inspect.getmembers(provider, inspect.ismethod): 50 # skip 'private' method and inherited methods 51 if name.startswith("_") or name in self.already_generated: 52 continue 53 54 arguments = [] 55 faker_args: List[str] = [] 56 faker_kwargs = {} 57 58 if name == "binary": 59 faker_kwargs["length"] = 1024 60 elif name in ["zip", "tar"]: 61 faker_kwargs.update( 62 { 63 "uncompressed_size": 1024, 64 "min_file_size": 512, 65 } 66 ) 67 68 if with_args: 69 # retrieve all parameter 70 argspec = inspect.getfullargspec(method) 71 72 lst = [x for x in argspec.args if x not in ["self", "cls"]] 73 for i, arg in enumerate(lst): 74 75 if argspec.defaults and with_defaults: 76 77 try: 78 default = argspec.defaults[i] 79 if isinstance(default, str): 80 default = repr(default) 81 else: 82 # TODO check default 
type 83 default = f"{default}" 84 85 arg = f"{arg}={default}" 86 87 except IndexError: 88 pass 89 90 arguments.append(arg) 91 if with_args == "first": 92 break 93 94 if with_args != "first": 95 if argspec.varargs: 96 arguments.append("*" + argspec.varargs) 97 if argspec.varkw: 98 arguments.append("**" + argspec.varkw) 99 100 # build fake method signature 101 signature = f"{prefix}{name}({', '.join(arguments)})" 102 103 try: 104 # make a fake example 105 example = self.generator.format(name, *faker_args, **faker_kwargs) 106 except (AttributeError, ValueError) as e: 107 warnings.warn(str(e)) 108 continue 109 formatters[signature] = example 110 111 self.max_name_len = max(self.max_name_len, len(signature)) 112 self.already_generated.append(name) 113 114 return formatters 115 116 @staticmethod 117 def get_provider_name(provider_class: BaseProvider) -> str: 118 return provider_class.__provider__ 119 ``` Path: `faker/factory.py` Content: ``` 1 import locale as pylocale 2 import logging 3 import sys 4 5 from importlib import import_module 6 from typing import Any, List, Optional, Tuple 7 8 from .config import AVAILABLE_LOCALES, DEFAULT_LOCALE, PROVIDERS 9 from .generator import Generator 10 from .utils.loading import list_module 11 12 logger = logging.getLogger(__name__) 13 14 # identify if python is being run in interactive mode. If so, disable logging. 15 inREPL = bool(getattr(sys, "ps1", False)) 16 if inREPL: 17 logger.setLevel(logging.CRITICAL) 18 else: 19 logger.debug("Not in REPL -> leaving logger event level as is.") 20 21 22 class Factory: 23 @classmethod 24 def create( 25 cls, 26 locale: Optional[str] = None, 27 providers: Optional[List[str]] = None, 28 generator: Generator = None, 29 includes: Optional[List[str]] = None, 30 # Should we use weightings (more realistic) or weight every element equally (faster)? 
31 # By default, use weightings for backwards compatibility & realism 32 use_weighting: bool = True, 33 **config: Any, 34 ) -> Generator: 35 if includes is None: 36 includes = [] 37 38 # fix locale to package name 39 locale = locale.replace("-", "_") if locale else DEFAULT_LOCALE 40 locale = pylocale.normalize(locale).split(".")[0] 41 if locale not in AVAILABLE_LOCALES: 42 msg = f"Invalid configuration for faker locale `{locale}`" 43 raise AttributeError(msg) 44 45 config["locale"] = locale 46 config["use_weighting"] = use_weighting 47 providers = providers or PROVIDERS 48 49 providers += includes 50 51 faker = generator or Generator(**config) 52 53 for prov_name in providers: 54 if prov_name == "faker.providers": 55 continue 56 57 prov_cls, lang_found = cls._get_provider_class(prov_name, locale) 58 provider = prov_cls(faker) 59 provider.__use_weighting__ = use_weighting 60 provider.__provider__ = prov_name 61 provider.__lang__ = lang_found 62 faker.add_provider(provider) 63 64 return faker 65 66 @classmethod 67 def _get_provider_class(cls, provider: str, locale: Optional[str] = "") -> Tuple[Any, Optional[str]]: 68 69 provider_class = cls._find_provider_class(provider, locale) 70 71 if provider_class: 72 return provider_class, locale 73 74 if locale and locale != DEFAULT_LOCALE: 75 # fallback to default locale 76 provider_class = cls._find_provider_class(provider, DEFAULT_LOCALE) 77 if provider_class: 78 return provider_class, DEFAULT_LOCALE 79 80 # fallback to no locale 81 provider_class = cls._find_provider_class(provider) 82 if provider_class: 83 return provider_class, None 84 85 msg = f"Unable to find provider `{provider}` with locale `{locale}`" 86 raise ValueError(msg) 87 88 @classmethod 89 def _find_provider_class(cls, provider_path: str, locale: Optional[str] = None) -> Any: 90 91 provider_module = import_module(provider_path) 92 93 if getattr(provider_module, "localized", False): 94 95 logger.debug( 96 "Looking for locale `%s` in provider `%s`.", 97 locale, 98 provider_module.__name__, 99 ) 100 101 available_locales = list_module(provider_module) 102 if not locale or locale not in available_locales: 103 unavailable_locale = locale 104 locale = getattr(provider_module, "default_locale", DEFAULT_LOCALE) 105 logger.debug( 106 "Specified locale `%s` is not available for " 107 "provider `%s`. Locale reset to `%s` for this " 108 "provider.", 109 unavailable_locale, 110 provider_module.__name__, 111 locale, 112 ) 113 else: 114 logger.debug( 115 "Provider `%s` has been localized to `%s`.", 116 provider_module.__name__, 117 locale, 118 ) 119 120 path = f"{provider_path}.{locale}" 121 provider_module = import_module(path) 122 123 else: 124 125 logger.debug( 126 "Provider `%s` does not feature localization. " 127 "Specified locale `%s` is not utilized for this " 128 "provider.", 129 provider_module.__name__, 130 locale, 131 ) 132 133 if locale is not None: 134 provider_module = import_module(provider_path) 135 136 return provider_module.Provider # type: ignore 137 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/faker/documentor.py b/faker/documentor.py --- a/faker/documentor.py +++ b/faker/documentor.py @@ -30,7 +30,7 @@ formatters = [] providers: List[BaseProvider] = self.generator.get_providers() for provider in providers[::-1]: # reverse - if locale and provider.__lang__ != locale: + if locale and provider.__lang__ and provider.__lang__ != locale: continue formatters.append( (provider, self.get_provider_formatters(provider, **kwargs)), diff --git a/faker/factory.py b/faker/factory.py --- a/faker/factory.py +++ b/faker/factory.py @@ -54,7 +54,7 @@ if prov_name == "faker.providers": continue - prov_cls, lang_found = cls._get_provider_class(prov_name, locale) + prov_cls, lang_found, _ = cls._find_provider_class(prov_name, locale) provider = prov_cls(faker) provider.__use_weighting__ = use_weighting provider.__provider__ = prov_name @@ -64,31 +64,14 @@ return faker @classmethod - def _get_provider_class(cls, provider: str, locale: Optional[str] = "") -> Tuple[Any, Optional[str]]: - - provider_class = cls._find_provider_class(provider, locale) - - if provider_class: - return provider_class, locale - - if locale and locale != DEFAULT_LOCALE: - # fallback to default locale - provider_class = cls._find_provider_class(provider, DEFAULT_LOCALE) - if provider_class: - return provider_class, DEFAULT_LOCALE - - # fallback to no locale - provider_class = cls._find_provider_class(provider) - if provider_class: - return provider_class, None - - msg = f"Unable to find provider `{provider}` with locale `{locale}`" - raise ValueError(msg) - - @classmethod - def _find_provider_class(cls, provider_path: str, locale: Optional[str] = None) -> Any: + def _find_provider_class( + cls, + provider_path: str, + locale: Optional[str] = None, + ) -> Tuple[Any, Optional[str], Optional[str]]: provider_module = import_module(provider_path) + default_locale = getattr(provider_module, "default_locale", "") if getattr(provider_module, "localized", False): @@ -101,7 +84,7 @@ available_locales = list_module(provider_module) if not locale or locale not in available_locales: unavailable_locale = locale - locale = getattr(provider_module, "default_locale", DEFAULT_LOCALE) + locale = default_locale or DEFAULT_LOCALE logger.debug( "Specified locale `%s` is not available for " "provider `%s`. Locale reset to `%s` for this " @@ -122,15 +105,14 @@ else: - logger.debug( - "Provider `%s` does not feature localization. " - "Specified locale `%s` is not utilized for this " - "provider.", - provider_module.__name__, - locale, - ) - - if locale is not None: - provider_module = import_module(provider_path) + if locale: + logger.debug( + "Provider `%s` does not feature localization. " + "Specified locale `%s` is not utilized for this " + "provider.", + provider_module.__name__, + locale, + ) + locale = default_locale = None - return provider_module.Provider # type: ignore + return provider_module.Provider, locale, default_locale # type: ignore
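As a quick, hedged illustration of what this diff changes: the sketch below only relies on `Factory.create`, `get_providers()`, `__provider__`, and `__lang__` as they appear in the listings above; the locale `it_IT` is an arbitrary example and the snippet assumes `faker` is importable — it is not part of the dataset row.

```python
# Sketch only: inspect what __lang__ ends up holding for each provider.
from faker.factory import Factory

gen = Factory.create("it_IT")  # "it_IT" is just an example requested locale
for provider in gen.get_providers():
    # Pre-patch: __lang__ always echoed the requested locale ("it_IT"), even when
    # the provider silently fell back to its own default locale.
    # Post-patch: __lang__ holds the locale module that was actually imported,
    # or None for providers that are not localized at all.
    print(provider.__provider__, provider.__lang__)
```

The companion change in `documentor.py` is needed because `None` becomes a legitimate `__lang__` value for non-localized providers; without the extra truthiness check those providers would be filtered out whenever documentation is generated for a specific locale.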
{"golden_diff": "diff --git a/faker/documentor.py b/faker/documentor.py\n--- a/faker/documentor.py\n+++ b/faker/documentor.py\n@@ -30,7 +30,7 @@\n formatters = []\n providers: List[BaseProvider] = self.generator.get_providers()\n for provider in providers[::-1]: # reverse\n- if locale and provider.__lang__ != locale:\n+ if locale and provider.__lang__ and provider.__lang__ != locale:\n continue\n formatters.append(\n (provider, self.get_provider_formatters(provider, **kwargs)),\ndiff --git a/faker/factory.py b/faker/factory.py\n--- a/faker/factory.py\n+++ b/faker/factory.py\n@@ -54,7 +54,7 @@\n if prov_name == \"faker.providers\":\n continue\n \n- prov_cls, lang_found = cls._get_provider_class(prov_name, locale)\n+ prov_cls, lang_found, _ = cls._find_provider_class(prov_name, locale)\n provider = prov_cls(faker)\n provider.__use_weighting__ = use_weighting\n provider.__provider__ = prov_name\n@@ -64,31 +64,14 @@\n return faker\n \n @classmethod\n- def _get_provider_class(cls, provider: str, locale: Optional[str] = \"\") -> Tuple[Any, Optional[str]]:\n-\n- provider_class = cls._find_provider_class(provider, locale)\n-\n- if provider_class:\n- return provider_class, locale\n-\n- if locale and locale != DEFAULT_LOCALE:\n- # fallback to default locale\n- provider_class = cls._find_provider_class(provider, DEFAULT_LOCALE)\n- if provider_class:\n- return provider_class, DEFAULT_LOCALE\n-\n- # fallback to no locale\n- provider_class = cls._find_provider_class(provider)\n- if provider_class:\n- return provider_class, None\n-\n- msg = f\"Unable to find provider `{provider}` with locale `{locale}`\"\n- raise ValueError(msg)\n-\n- @classmethod\n- def _find_provider_class(cls, provider_path: str, locale: Optional[str] = None) -> Any:\n+ def _find_provider_class(\n+ cls,\n+ provider_path: str,\n+ locale: Optional[str] = None,\n+ ) -> Tuple[Any, Optional[str], Optional[str]]:\n \n provider_module = import_module(provider_path)\n+ default_locale = getattr(provider_module, \"default_locale\", \"\")\n \n if getattr(provider_module, \"localized\", False):\n \n@@ -101,7 +84,7 @@\n available_locales = list_module(provider_module)\n if not locale or locale not in available_locales:\n unavailable_locale = locale\n- locale = getattr(provider_module, \"default_locale\", DEFAULT_LOCALE)\n+ locale = default_locale or DEFAULT_LOCALE\n logger.debug(\n \"Specified locale `%s` is not available for \"\n \"provider `%s`. Locale reset to `%s` for this \"\n@@ -122,15 +105,14 @@\n \n else:\n \n- logger.debug(\n- \"Provider `%s` does not feature localization. \"\n- \"Specified locale `%s` is not utilized for this \"\n- \"provider.\",\n- provider_module.__name__,\n- locale,\n- )\n-\n- if locale is not None:\n- provider_module = import_module(provider_path)\n+ if locale:\n+ logger.debug(\n+ \"Provider `%s` does not feature localization. \"\n+ \"Specified locale `%s` is not utilized for this \"\n+ \"provider.\",\n+ provider_module.__name__,\n+ locale,\n+ )\n+ locale = default_locale = None\n \n- return provider_module.Provider # type: ignore\n+ return provider_module.Provider, locale, default_locale # type: ignore\n", "issue": "Incorrect language set on Provider during locale fallback\n* Faker version: 11.3.0\r\n\r\nWhen faker is initialised with a locale not implemented by some of the built-in providers, it falls back to the DEFAULT_LOCALE for these providers. 
However, the language set on the provider\u2019s instance in `__lang__` is still the requested locale and not the locale in effect.\r\n\r\nThis is due to `Factory._find_provider_class` not returning the locale it chose, in: https://github.com/joke2k/faker/blob/001ddee39c33b6b82196fe6a5ecc131bca3b964c/faker/factory.py#L102-L104 to `Factory._get_provider_class` which then proceeds to return the locale value as it was passed in at the first place. Thus, `provider.__lang__` does not contain the found locale (as the variable name `lang_found` would suggest): https://github.com/joke2k/faker/blob/001ddee39c33b6b82196fe6a5ecc131bca3b964c/faker/factory.py#L61\r\n\r\n### Expected behavior\r\n\r\n`provider.__lang__` should be set to the actual language / locale being used by the provider.\r\n\r\n### Actual behavior\r\n\r\n`provider.__lang__` is set to the locale that was requested but is not offered by this provider.\n", "before_files": [{"content": "import inspect\nimport warnings\n\nfrom typing import Any, Dict, List, Optional, Tuple, Union\n\nfrom .generator import Generator\nfrom .providers import BaseProvider\nfrom .proxy import Faker\n\n\nclass Documentor:\n def __init__(self, generator: Union[Generator, Faker]) -> None:\n \"\"\"\n :param generator: a localized Generator with providers filled,\n for which to write the documentation\n :type generator: faker.Generator()\n \"\"\"\n self.generator = generator\n self.max_name_len: int = 0\n self.already_generated: List[str] = []\n\n def get_formatters(\n self,\n locale: Optional[str] = None,\n excludes: Optional[List[str]] = None,\n **kwargs: Any,\n ) -> List[Tuple[BaseProvider, Dict[str, str]]]:\n self.max_name_len = 0\n self.already_generated = [] if excludes is None else excludes[:]\n formatters = []\n providers: List[BaseProvider] = self.generator.get_providers()\n for provider in providers[::-1]: # reverse\n if locale and provider.__lang__ != locale:\n continue\n formatters.append(\n (provider, self.get_provider_formatters(provider, **kwargs)),\n )\n return formatters\n\n def get_provider_formatters(\n self,\n provider: BaseProvider,\n prefix: str = \"fake.\",\n with_args: bool = True,\n with_defaults: bool = True,\n ) -> Dict[str, str]:\n formatters = {}\n\n for name, method in inspect.getmembers(provider, inspect.ismethod):\n # skip 'private' method and inherited methods\n if name.startswith(\"_\") or name in self.already_generated:\n continue\n\n arguments = []\n faker_args: List[str] = []\n faker_kwargs = {}\n\n if name == \"binary\":\n faker_kwargs[\"length\"] = 1024\n elif name in [\"zip\", \"tar\"]:\n faker_kwargs.update(\n {\n \"uncompressed_size\": 1024,\n \"min_file_size\": 512,\n }\n )\n\n if with_args:\n # retrieve all parameter\n argspec = inspect.getfullargspec(method)\n\n lst = [x for x in argspec.args if x not in [\"self\", \"cls\"]]\n for i, arg in enumerate(lst):\n\n if argspec.defaults and with_defaults:\n\n try:\n default = argspec.defaults[i]\n if isinstance(default, str):\n default = repr(default)\n else:\n # TODO check default type\n default = f\"{default}\"\n\n arg = f\"{arg}={default}\"\n\n except IndexError:\n pass\n\n arguments.append(arg)\n if with_args == \"first\":\n break\n\n if with_args != \"first\":\n if argspec.varargs:\n arguments.append(\"*\" + argspec.varargs)\n if argspec.varkw:\n arguments.append(\"**\" + argspec.varkw)\n\n # build fake method signature\n signature = f\"{prefix}{name}({', '.join(arguments)})\"\n\n try:\n # make a fake example\n example = self.generator.format(name, *faker_args, 
**faker_kwargs)\n except (AttributeError, ValueError) as e:\n warnings.warn(str(e))\n continue\n formatters[signature] = example\n\n self.max_name_len = max(self.max_name_len, len(signature))\n self.already_generated.append(name)\n\n return formatters\n\n @staticmethod\n def get_provider_name(provider_class: BaseProvider) -> str:\n return provider_class.__provider__\n", "path": "faker/documentor.py"}, {"content": "import locale as pylocale\nimport logging\nimport sys\n\nfrom importlib import import_module\nfrom typing import Any, List, Optional, Tuple\n\nfrom .config import AVAILABLE_LOCALES, DEFAULT_LOCALE, PROVIDERS\nfrom .generator import Generator\nfrom .utils.loading import list_module\n\nlogger = logging.getLogger(__name__)\n\n# identify if python is being run in interactive mode. If so, disable logging.\ninREPL = bool(getattr(sys, \"ps1\", False))\nif inREPL:\n logger.setLevel(logging.CRITICAL)\nelse:\n logger.debug(\"Not in REPL -> leaving logger event level as is.\")\n\n\nclass Factory:\n @classmethod\n def create(\n cls,\n locale: Optional[str] = None,\n providers: Optional[List[str]] = None,\n generator: Generator = None,\n includes: Optional[List[str]] = None,\n # Should we use weightings (more realistic) or weight every element equally (faster)?\n # By default, use weightings for backwards compatibility & realism\n use_weighting: bool = True,\n **config: Any,\n ) -> Generator:\n if includes is None:\n includes = []\n\n # fix locale to package name\n locale = locale.replace(\"-\", \"_\") if locale else DEFAULT_LOCALE\n locale = pylocale.normalize(locale).split(\".\")[0]\n if locale not in AVAILABLE_LOCALES:\n msg = f\"Invalid configuration for faker locale `{locale}`\"\n raise AttributeError(msg)\n\n config[\"locale\"] = locale\n config[\"use_weighting\"] = use_weighting\n providers = providers or PROVIDERS\n\n providers += includes\n\n faker = generator or Generator(**config)\n\n for prov_name in providers:\n if prov_name == \"faker.providers\":\n continue\n\n prov_cls, lang_found = cls._get_provider_class(prov_name, locale)\n provider = prov_cls(faker)\n provider.__use_weighting__ = use_weighting\n provider.__provider__ = prov_name\n provider.__lang__ = lang_found\n faker.add_provider(provider)\n\n return faker\n\n @classmethod\n def _get_provider_class(cls, provider: str, locale: Optional[str] = \"\") -> Tuple[Any, Optional[str]]:\n\n provider_class = cls._find_provider_class(provider, locale)\n\n if provider_class:\n return provider_class, locale\n\n if locale and locale != DEFAULT_LOCALE:\n # fallback to default locale\n provider_class = cls._find_provider_class(provider, DEFAULT_LOCALE)\n if provider_class:\n return provider_class, DEFAULT_LOCALE\n\n # fallback to no locale\n provider_class = cls._find_provider_class(provider)\n if provider_class:\n return provider_class, None\n\n msg = f\"Unable to find provider `{provider}` with locale `{locale}`\"\n raise ValueError(msg)\n\n @classmethod\n def _find_provider_class(cls, provider_path: str, locale: Optional[str] = None) -> Any:\n\n provider_module = import_module(provider_path)\n\n if getattr(provider_module, \"localized\", False):\n\n logger.debug(\n \"Looking for locale `%s` in provider `%s`.\",\n locale,\n provider_module.__name__,\n )\n\n available_locales = list_module(provider_module)\n if not locale or locale not in available_locales:\n unavailable_locale = locale\n locale = getattr(provider_module, \"default_locale\", DEFAULT_LOCALE)\n logger.debug(\n \"Specified locale `%s` is not available for \"\n \"provider 
`%s`. Locale reset to `%s` for this \"\n \"provider.\",\n unavailable_locale,\n provider_module.__name__,\n locale,\n )\n else:\n logger.debug(\n \"Provider `%s` has been localized to `%s`.\",\n provider_module.__name__,\n locale,\n )\n\n path = f\"{provider_path}.{locale}\"\n provider_module = import_module(path)\n\n else:\n\n logger.debug(\n \"Provider `%s` does not feature localization. \"\n \"Specified locale `%s` is not utilized for this \"\n \"provider.\",\n provider_module.__name__,\n locale,\n )\n\n if locale is not None:\n provider_module = import_module(provider_path)\n\n return provider_module.Provider # type: ignore\n", "path": "faker/factory.py"}], "after_files": [{"content": "import inspect\nimport warnings\n\nfrom typing import Any, Dict, List, Optional, Tuple, Union\n\nfrom .generator import Generator\nfrom .providers import BaseProvider\nfrom .proxy import Faker\n\n\nclass Documentor:\n def __init__(self, generator: Union[Generator, Faker]) -> None:\n \"\"\"\n :param generator: a localized Generator with providers filled,\n for which to write the documentation\n :type generator: faker.Generator()\n \"\"\"\n self.generator = generator\n self.max_name_len: int = 0\n self.already_generated: List[str] = []\n\n def get_formatters(\n self,\n locale: Optional[str] = None,\n excludes: Optional[List[str]] = None,\n **kwargs: Any,\n ) -> List[Tuple[BaseProvider, Dict[str, str]]]:\n self.max_name_len = 0\n self.already_generated = [] if excludes is None else excludes[:]\n formatters = []\n providers: List[BaseProvider] = self.generator.get_providers()\n for provider in providers[::-1]: # reverse\n if locale and provider.__lang__ and provider.__lang__ != locale:\n continue\n formatters.append(\n (provider, self.get_provider_formatters(provider, **kwargs)),\n )\n return formatters\n\n def get_provider_formatters(\n self,\n provider: BaseProvider,\n prefix: str = \"fake.\",\n with_args: bool = True,\n with_defaults: bool = True,\n ) -> Dict[str, str]:\n formatters = {}\n\n for name, method in inspect.getmembers(provider, inspect.ismethod):\n # skip 'private' method and inherited methods\n if name.startswith(\"_\") or name in self.already_generated:\n continue\n\n arguments = []\n faker_args: List[str] = []\n faker_kwargs = {}\n\n if name == \"binary\":\n faker_kwargs[\"length\"] = 1024\n elif name in [\"zip\", \"tar\"]:\n faker_kwargs.update(\n {\n \"uncompressed_size\": 1024,\n \"min_file_size\": 512,\n }\n )\n\n if with_args:\n # retrieve all parameter\n argspec = inspect.getfullargspec(method)\n\n lst = [x for x in argspec.args if x not in [\"self\", \"cls\"]]\n for i, arg in enumerate(lst):\n\n if argspec.defaults and with_defaults:\n\n try:\n default = argspec.defaults[i]\n if isinstance(default, str):\n default = repr(default)\n else:\n # TODO check default type\n default = f\"{default}\"\n\n arg = f\"{arg}={default}\"\n\n except IndexError:\n pass\n\n arguments.append(arg)\n if with_args == \"first\":\n break\n\n if with_args != \"first\":\n if argspec.varargs:\n arguments.append(\"*\" + argspec.varargs)\n if argspec.varkw:\n arguments.append(\"**\" + argspec.varkw)\n\n # build fake method signature\n signature = f\"{prefix}{name}({', '.join(arguments)})\"\n\n try:\n # make a fake example\n example = self.generator.format(name, *faker_args, **faker_kwargs)\n except (AttributeError, ValueError) as e:\n warnings.warn(str(e))\n continue\n formatters[signature] = example\n\n self.max_name_len = max(self.max_name_len, len(signature))\n self.already_generated.append(name)\n\n return 
formatters\n\n @staticmethod\n def get_provider_name(provider_class: BaseProvider) -> str:\n return provider_class.__provider__\n", "path": "faker/documentor.py"}, {"content": "import locale as pylocale\nimport logging\nimport sys\n\nfrom importlib import import_module\nfrom typing import Any, List, Optional, Tuple\n\nfrom .config import AVAILABLE_LOCALES, DEFAULT_LOCALE, PROVIDERS\nfrom .generator import Generator\nfrom .utils.loading import list_module\n\nlogger = logging.getLogger(__name__)\n\n# identify if python is being run in interactive mode. If so, disable logging.\ninREPL = bool(getattr(sys, \"ps1\", False))\nif inREPL:\n logger.setLevel(logging.CRITICAL)\nelse:\n logger.debug(\"Not in REPL -> leaving logger event level as is.\")\n\n\nclass Factory:\n @classmethod\n def create(\n cls,\n locale: Optional[str] = None,\n providers: Optional[List[str]] = None,\n generator: Generator = None,\n includes: Optional[List[str]] = None,\n # Should we use weightings (more realistic) or weight every element equally (faster)?\n # By default, use weightings for backwards compatibility & realism\n use_weighting: bool = True,\n **config: Any,\n ) -> Generator:\n if includes is None:\n includes = []\n\n # fix locale to package name\n locale = locale.replace(\"-\", \"_\") if locale else DEFAULT_LOCALE\n locale = pylocale.normalize(locale).split(\".\")[0]\n if locale not in AVAILABLE_LOCALES:\n msg = f\"Invalid configuration for faker locale `{locale}`\"\n raise AttributeError(msg)\n\n config[\"locale\"] = locale\n config[\"use_weighting\"] = use_weighting\n providers = providers or PROVIDERS\n\n providers += includes\n\n faker = generator or Generator(**config)\n\n for prov_name in providers:\n if prov_name == \"faker.providers\":\n continue\n\n prov_cls, lang_found, _ = cls._find_provider_class(prov_name, locale)\n provider = prov_cls(faker)\n provider.__use_weighting__ = use_weighting\n provider.__provider__ = prov_name\n provider.__lang__ = lang_found\n faker.add_provider(provider)\n\n return faker\n\n @classmethod\n def _find_provider_class(\n cls,\n provider_path: str,\n locale: Optional[str] = None,\n ) -> Tuple[Any, Optional[str], Optional[str]]:\n\n provider_module = import_module(provider_path)\n default_locale = getattr(provider_module, \"default_locale\", \"\")\n\n if getattr(provider_module, \"localized\", False):\n\n logger.debug(\n \"Looking for locale `%s` in provider `%s`.\",\n locale,\n provider_module.__name__,\n )\n\n available_locales = list_module(provider_module)\n if not locale or locale not in available_locales:\n unavailable_locale = locale\n locale = default_locale or DEFAULT_LOCALE\n logger.debug(\n \"Specified locale `%s` is not available for \"\n \"provider `%s`. Locale reset to `%s` for this \"\n \"provider.\",\n unavailable_locale,\n provider_module.__name__,\n locale,\n )\n else:\n logger.debug(\n \"Provider `%s` has been localized to `%s`.\",\n provider_module.__name__,\n locale,\n )\n\n path = f\"{provider_path}.{locale}\"\n provider_module = import_module(path)\n\n else:\n\n if locale:\n logger.debug(\n \"Provider `%s` does not feature localization. \"\n \"Specified locale `%s` is not utilized for this \"\n \"provider.\",\n provider_module.__name__,\n locale,\n )\n locale = default_locale = None\n\n return provider_module.Provider, locale, default_locale # type: ignore\n", "path": "faker/factory.py"}]}
2835
841
gh_patches_debug_11225
rasdani/github-patches
git_diff
Parsl__parsl-1865
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Support memoization of tuples We already support lists, so implementing an `id_for_memo` for tuple types is straightforward. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `parsl/dataflow/memoization.py` Content: ``` 1 import hashlib 2 from functools import singledispatch 3 import logging 4 from parsl.serialize import serialize 5 import types 6 7 logger = logging.getLogger(__name__) 8 9 10 @singledispatch 11 def id_for_memo(obj, output_ref=False): 12 """This should return a byte sequence which identifies the supplied 13 value for memoization purposes: for any two calls of id_for_memo, 14 the byte sequence should be the same when the "same" value is supplied, 15 and different otherwise. 16 17 "same" is in quotes about because sameness is not as straightforward as 18 serialising out the content. 19 20 For example, for two dicts x, y: 21 22 x = {"a":3, "b":4} 23 y = {"b":4, "a":3} 24 25 then: x == y, but their serialization is not equal, and some other 26 functions on x and y are not equal: list(x.keys()) != list(y.keys()) 27 28 29 id_for_memo is invoked with output_ref=True when the parameter is an 30 output reference (a value in the outputs=[] parameter of an app 31 invocation). 32 33 Memo hashing might be different for such parameters: for example, a 34 user might choose to hash input File content so that changing the 35 content of an input file invalidates memoization. This does not make 36 sense to do for output files: there is no meaningful content stored 37 where an output filename points at memoization time. 38 """ 39 logger.error("id_for_memo attempted on unknown type {}".format(type(obj))) 40 raise ValueError("unknown type for memoization: {}".format(type(obj))) 41 42 43 @id_for_memo.register(str) 44 @id_for_memo.register(int) 45 @id_for_memo.register(float) 46 @id_for_memo.register(types.FunctionType) 47 @id_for_memo.register(type(None)) 48 def id_for_memo_serialize(obj, output_ref=False): 49 return serialize(obj) 50 51 52 @id_for_memo.register(list) 53 def id_for_memo_list(denormalized_list, output_ref=False): 54 if type(denormalized_list) != list: 55 raise ValueError("id_for_memo_list cannot work on subclasses of list") 56 57 normalized_list = [] 58 59 for e in denormalized_list: 60 normalized_list.append(id_for_memo(e, output_ref=output_ref)) 61 62 return serialize(normalized_list) 63 64 65 @id_for_memo.register(dict) 66 def id_for_memo_dict(denormalized_dict, output_ref=False): 67 """This normalises the keys and values of the supplied dictionary. 68 69 When output_ref=True, the values are normalised as output refs, but 70 the keys are not. 71 """ 72 if type(denormalized_dict) != dict: 73 raise ValueError("id_for_memo_dict cannot work on subclasses of dict") 74 75 keys = sorted(denormalized_dict) 76 77 normalized_list = [] 78 for k in keys: 79 normalized_list.append(id_for_memo(k)) 80 normalized_list.append(id_for_memo(denormalized_dict[k], output_ref=output_ref)) 81 return serialize(normalized_list) 82 83 84 class Memoizer(object): 85 """Memoizer is responsible for ensuring that identical work is not repeated. 86 87 When a task is repeated, i.e., the same function is called with the same exact arguments, the 88 result from a previous execution is reused. 
`wiki <https://en.wikipedia.org/wiki/Memoization>`_ 89 90 The memoizer implementation here does not collapse duplicate calls 91 at call time, but works **only** when the result of a previous 92 call is available at the time the duplicate call is made. 93 94 For instance:: 95 96 No advantage from Memoization helps 97 memoization here: here: 98 99 TaskA TaskB 100 | TaskA | 101 | | TaskA done (TaskB) 102 | | | (TaskB) 103 done | | 104 done | 105 done 106 107 The memoizer creates a lookup table by hashing the function name 108 and its inputs, and storing the results of the function. 109 110 When a task is ready for launch, i.e., all of its arguments 111 have resolved, we add its hash to the task datastructure. 112 """ 113 114 def __init__(self, dfk, memoize=True, checkpoint={}): 115 """Initialize the memoizer. 116 117 Args: 118 - dfk (DFK obj): The DFK object 119 120 KWargs: 121 - memoize (Bool): enable memoization or not. 122 - checkpoint (Dict): A checkpoint loaded as a dict. 123 """ 124 self.dfk = dfk 125 self.memoize = memoize 126 127 if self.memoize: 128 logger.info("App caching initialized") 129 self.memo_lookup_table = checkpoint 130 else: 131 logger.info("App caching disabled for all apps") 132 self.memo_lookup_table = {} 133 134 def make_hash(self, task): 135 """Create a hash of the task inputs. 136 137 If this fails here, then all ipp calls are also likely to fail due to failure 138 at serialization. 139 140 Args: 141 - task (dict) : Task dictionary from dfk.tasks 142 143 Returns: 144 - hash (str) : A unique hash string 145 """ 146 # Function name TODO: Add fn body later 147 148 t = [] 149 150 # if kwargs contains an outputs parameter, that parameter is removed 151 # and normalised differently - with output_ref set to True. 152 # kwargs listed in ignore_for_cache will also be removed 153 154 filtered_kw = task['kwargs'].copy() 155 156 ignore_list = task['ignore_for_cache'] 157 158 logger.debug("Ignoring these kwargs for checkpointing: {}".format(ignore_list)) 159 for k in ignore_list: 160 logger.debug("Ignoring kwarg {}".format(k)) 161 del filtered_kw[k] 162 163 if 'outputs' in task['kwargs']: 164 outputs = task['kwargs']['outputs'] 165 del filtered_kw['outputs'] 166 t = t + [id_for_memo(outputs, output_ref=True)] # TODO: use append? 167 168 t = t + [id_for_memo(filtered_kw)] 169 t = t + [id_for_memo(task['func_name']), 170 id_for_memo(task['fn_hash']), 171 id_for_memo(task['args'])] 172 173 x = b''.join(t) 174 hashedsum = hashlib.md5(x).hexdigest() 175 return hashedsum 176 177 def check_memo(self, task_id, task): 178 """Create a hash of the task and its inputs and check the lookup table for this hash. 179 180 If present, the results are returned. The result is a tuple indicating whether a memo 181 exists and the result, since a None result is possible and could be confusing. 182 This seems like a reasonable option without relying on a cache_miss exception. 183 184 Args: 185 - task(task) : task from the dfk.tasks table 186 187 Returns: 188 - Result (Future): A completed future containing the memoized result 189 190 This call will also set task['hashsum'] to the unique hashsum for the func+inputs. 
191 """ 192 if not self.memoize or not task['memoize']: 193 task['hashsum'] = None 194 logger.debug("Task {} will not be memoized".format(task_id)) 195 return None 196 197 hashsum = self.make_hash(task) 198 logger.debug("Task {} has memoization hash {}".format(task_id, hashsum)) 199 result = None 200 if hashsum in self.memo_lookup_table: 201 result = self.memo_lookup_table[hashsum] 202 logger.info("Task %s using result from cache", task_id) 203 else: 204 logger.info("Task %s had no result in cache", task_id) 205 206 task['hashsum'] = hashsum 207 208 return result 209 210 def hash_lookup(self, hashsum): 211 """Lookup a hash in the memoization table. 212 213 Args: 214 - hashsum (str): The same hashes used to uniquely identify apps+inputs 215 216 Returns: 217 - Lookup result 218 219 Raises: 220 - KeyError: if hash not in table 221 """ 222 return self.memo_lookup_table[hashsum] 223 224 def update_memo(self, task_id, task, r): 225 """Updates the memoization lookup table with the result from a task. 226 227 Args: 228 - task_id (int): Integer task id 229 - task (dict) : A task dict from dfk.tasks 230 - r (Result future): Result future 231 232 A warning is issued when a hash collision occurs during the update. 233 This is not likely. 234 """ 235 if not self.memoize or not task['memoize'] or 'hashsum' not in task: 236 return 237 238 if task['hashsum'] in self.memo_lookup_table: 239 logger.info('Updating app cache entry with latest %s:%s call' % 240 (task['func_name'], task_id)) 241 self.memo_lookup_table[task['hashsum']] = r 242 else: 243 self.memo_lookup_table[task['hashsum']] = r 244 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/parsl/dataflow/memoization.py b/parsl/dataflow/memoization.py --- a/parsl/dataflow/memoization.py +++ b/parsl/dataflow/memoization.py @@ -62,6 +62,19 @@ return serialize(normalized_list) +@id_for_memo.register(tuple) +def id_for_memo_tuple(denormalized_tuple, output_ref=False): + if type(denormalized_tuple) != tuple: + raise ValueError("id_for_memo_tuple cannot work on subclasses of tuple") + + normalized_list = [] + + for e in denormalized_tuple: + normalized_list.append(id_for_memo(e, output_ref=output_ref)) + + return serialize(normalized_list) + + @id_for_memo.register(dict) def id_for_memo_dict(denormalized_dict, output_ref=False): """This normalises the keys and values of the supplied dictionary.
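A short check of the behaviour this patch adds, assuming `parsl` is importable; `id_for_memo` is the function shown in the listing above and the sample values are arbitrary.

```python
from parsl.dataflow.memoization import id_for_memo

a = id_for_memo((1, "x", [2, 3]))  # raised ValueError ("unknown type ...") before this patch
b = id_for_memo((1, "x", [2, 3]))
c = id_for_memo(("x", 1, [2, 3]))

assert a == b   # equal tuples normalise to the same byte sequence
assert a != c   # element order is significant, matching the existing list behaviour
```

Like the existing list handler, the new code explicitly rejects subclasses of `tuple`, so a namedtuple, for instance, still fails with a `ValueError` rather than being silently hashed as a plain tuple.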
{"golden_diff": "diff --git a/parsl/dataflow/memoization.py b/parsl/dataflow/memoization.py\n--- a/parsl/dataflow/memoization.py\n+++ b/parsl/dataflow/memoization.py\n@@ -62,6 +62,19 @@\n return serialize(normalized_list)\n \n \n+@id_for_memo.register(tuple)\n+def id_for_memo_tuple(denormalized_tuple, output_ref=False):\n+ if type(denormalized_tuple) != tuple:\n+ raise ValueError(\"id_for_memo_tuple cannot work on subclasses of tuple\")\n+\n+ normalized_list = []\n+\n+ for e in denormalized_tuple:\n+ normalized_list.append(id_for_memo(e, output_ref=output_ref))\n+\n+ return serialize(normalized_list)\n+\n+\n @id_for_memo.register(dict)\n def id_for_memo_dict(denormalized_dict, output_ref=False):\n \"\"\"This normalises the keys and values of the supplied dictionary.\n", "issue": "Support memoization of tuples\nWe already support lists, so implementing an `id_for_memo` for tuple types is straightforward. \r\n\n", "before_files": [{"content": "import hashlib\nfrom functools import singledispatch\nimport logging\nfrom parsl.serialize import serialize\nimport types\n\nlogger = logging.getLogger(__name__)\n\n\n@singledispatch\ndef id_for_memo(obj, output_ref=False):\n \"\"\"This should return a byte sequence which identifies the supplied\n value for memoization purposes: for any two calls of id_for_memo,\n the byte sequence should be the same when the \"same\" value is supplied,\n and different otherwise.\n\n \"same\" is in quotes about because sameness is not as straightforward as\n serialising out the content.\n\n For example, for two dicts x, y:\n\n x = {\"a\":3, \"b\":4}\n y = {\"b\":4, \"a\":3}\n\n then: x == y, but their serialization is not equal, and some other\n functions on x and y are not equal: list(x.keys()) != list(y.keys())\n\n\n id_for_memo is invoked with output_ref=True when the parameter is an\n output reference (a value in the outputs=[] parameter of an app\n invocation).\n\n Memo hashing might be different for such parameters: for example, a\n user might choose to hash input File content so that changing the\n content of an input file invalidates memoization. 
This does not make\n sense to do for output files: there is no meaningful content stored\n where an output filename points at memoization time.\n \"\"\"\n logger.error(\"id_for_memo attempted on unknown type {}\".format(type(obj)))\n raise ValueError(\"unknown type for memoization: {}\".format(type(obj)))\n\n\n@id_for_memo.register(str)\n@id_for_memo.register(int)\n@id_for_memo.register(float)\n@id_for_memo.register(types.FunctionType)\n@id_for_memo.register(type(None))\ndef id_for_memo_serialize(obj, output_ref=False):\n return serialize(obj)\n\n\n@id_for_memo.register(list)\ndef id_for_memo_list(denormalized_list, output_ref=False):\n if type(denormalized_list) != list:\n raise ValueError(\"id_for_memo_list cannot work on subclasses of list\")\n\n normalized_list = []\n\n for e in denormalized_list:\n normalized_list.append(id_for_memo(e, output_ref=output_ref))\n\n return serialize(normalized_list)\n\n\n@id_for_memo.register(dict)\ndef id_for_memo_dict(denormalized_dict, output_ref=False):\n \"\"\"This normalises the keys and values of the supplied dictionary.\n\n When output_ref=True, the values are normalised as output refs, but\n the keys are not.\n \"\"\"\n if type(denormalized_dict) != dict:\n raise ValueError(\"id_for_memo_dict cannot work on subclasses of dict\")\n\n keys = sorted(denormalized_dict)\n\n normalized_list = []\n for k in keys:\n normalized_list.append(id_for_memo(k))\n normalized_list.append(id_for_memo(denormalized_dict[k], output_ref=output_ref))\n return serialize(normalized_list)\n\n\nclass Memoizer(object):\n \"\"\"Memoizer is responsible for ensuring that identical work is not repeated.\n\n When a task is repeated, i.e., the same function is called with the same exact arguments, the\n result from a previous execution is reused. 
`wiki <https://en.wikipedia.org/wiki/Memoization>`_\n\n The memoizer implementation here does not collapse duplicate calls\n at call time, but works **only** when the result of a previous\n call is available at the time the duplicate call is made.\n\n For instance::\n\n No advantage from Memoization helps\n memoization here: here:\n\n TaskA TaskB\n | TaskA |\n | | TaskA done (TaskB)\n | | | (TaskB)\n done | |\n done |\n done\n\n The memoizer creates a lookup table by hashing the function name\n and its inputs, and storing the results of the function.\n\n When a task is ready for launch, i.e., all of its arguments\n have resolved, we add its hash to the task datastructure.\n \"\"\"\n\n def __init__(self, dfk, memoize=True, checkpoint={}):\n \"\"\"Initialize the memoizer.\n\n Args:\n - dfk (DFK obj): The DFK object\n\n KWargs:\n - memoize (Bool): enable memoization or not.\n - checkpoint (Dict): A checkpoint loaded as a dict.\n \"\"\"\n self.dfk = dfk\n self.memoize = memoize\n\n if self.memoize:\n logger.info(\"App caching initialized\")\n self.memo_lookup_table = checkpoint\n else:\n logger.info(\"App caching disabled for all apps\")\n self.memo_lookup_table = {}\n\n def make_hash(self, task):\n \"\"\"Create a hash of the task inputs.\n\n If this fails here, then all ipp calls are also likely to fail due to failure\n at serialization.\n\n Args:\n - task (dict) : Task dictionary from dfk.tasks\n\n Returns:\n - hash (str) : A unique hash string\n \"\"\"\n # Function name TODO: Add fn body later\n\n t = []\n\n # if kwargs contains an outputs parameter, that parameter is removed\n # and normalised differently - with output_ref set to True.\n # kwargs listed in ignore_for_cache will also be removed\n\n filtered_kw = task['kwargs'].copy()\n\n ignore_list = task['ignore_for_cache']\n\n logger.debug(\"Ignoring these kwargs for checkpointing: {}\".format(ignore_list))\n for k in ignore_list:\n logger.debug(\"Ignoring kwarg {}\".format(k))\n del filtered_kw[k]\n\n if 'outputs' in task['kwargs']:\n outputs = task['kwargs']['outputs']\n del filtered_kw['outputs']\n t = t + [id_for_memo(outputs, output_ref=True)] # TODO: use append?\n\n t = t + [id_for_memo(filtered_kw)]\n t = t + [id_for_memo(task['func_name']),\n id_for_memo(task['fn_hash']),\n id_for_memo(task['args'])]\n\n x = b''.join(t)\n hashedsum = hashlib.md5(x).hexdigest()\n return hashedsum\n\n def check_memo(self, task_id, task):\n \"\"\"Create a hash of the task and its inputs and check the lookup table for this hash.\n\n If present, the results are returned. 
The result is a tuple indicating whether a memo\n exists and the result, since a None result is possible and could be confusing.\n This seems like a reasonable option without relying on a cache_miss exception.\n\n Args:\n - task(task) : task from the dfk.tasks table\n\n Returns:\n - Result (Future): A completed future containing the memoized result\n\n This call will also set task['hashsum'] to the unique hashsum for the func+inputs.\n \"\"\"\n if not self.memoize or not task['memoize']:\n task['hashsum'] = None\n logger.debug(\"Task {} will not be memoized\".format(task_id))\n return None\n\n hashsum = self.make_hash(task)\n logger.debug(\"Task {} has memoization hash {}\".format(task_id, hashsum))\n result = None\n if hashsum in self.memo_lookup_table:\n result = self.memo_lookup_table[hashsum]\n logger.info(\"Task %s using result from cache\", task_id)\n else:\n logger.info(\"Task %s had no result in cache\", task_id)\n\n task['hashsum'] = hashsum\n\n return result\n\n def hash_lookup(self, hashsum):\n \"\"\"Lookup a hash in the memoization table.\n\n Args:\n - hashsum (str): The same hashes used to uniquely identify apps+inputs\n\n Returns:\n - Lookup result\n\n Raises:\n - KeyError: if hash not in table\n \"\"\"\n return self.memo_lookup_table[hashsum]\n\n def update_memo(self, task_id, task, r):\n \"\"\"Updates the memoization lookup table with the result from a task.\n\n Args:\n - task_id (int): Integer task id\n - task (dict) : A task dict from dfk.tasks\n - r (Result future): Result future\n\n A warning is issued when a hash collision occurs during the update.\n This is not likely.\n \"\"\"\n if not self.memoize or not task['memoize'] or 'hashsum' not in task:\n return\n\n if task['hashsum'] in self.memo_lookup_table:\n logger.info('Updating app cache entry with latest %s:%s call' %\n (task['func_name'], task_id))\n self.memo_lookup_table[task['hashsum']] = r\n else:\n self.memo_lookup_table[task['hashsum']] = r\n", "path": "parsl/dataflow/memoization.py"}], "after_files": [{"content": "import hashlib\nfrom functools import singledispatch\nimport logging\nfrom parsl.serialize import serialize\nimport types\n\nlogger = logging.getLogger(__name__)\n\n\n@singledispatch\ndef id_for_memo(obj, output_ref=False):\n \"\"\"This should return a byte sequence which identifies the supplied\n value for memoization purposes: for any two calls of id_for_memo,\n the byte sequence should be the same when the \"same\" value is supplied,\n and different otherwise.\n\n \"same\" is in quotes about because sameness is not as straightforward as\n serialising out the content.\n\n For example, for two dicts x, y:\n\n x = {\"a\":3, \"b\":4}\n y = {\"b\":4, \"a\":3}\n\n then: x == y, but their serialization is not equal, and some other\n functions on x and y are not equal: list(x.keys()) != list(y.keys())\n\n\n id_for_memo is invoked with output_ref=True when the parameter is an\n output reference (a value in the outputs=[] parameter of an app\n invocation).\n\n Memo hashing might be different for such parameters: for example, a\n user might choose to hash input File content so that changing the\n content of an input file invalidates memoization. 
This does not make\n sense to do for output files: there is no meaningful content stored\n where an output filename points at memoization time.\n \"\"\"\n logger.error(\"id_for_memo attempted on unknown type {}\".format(type(obj)))\n raise ValueError(\"unknown type for memoization: {}\".format(type(obj)))\n\n\n@id_for_memo.register(str)\n@id_for_memo.register(int)\n@id_for_memo.register(float)\n@id_for_memo.register(types.FunctionType)\n@id_for_memo.register(type(None))\ndef id_for_memo_serialize(obj, output_ref=False):\n return serialize(obj)\n\n\n@id_for_memo.register(list)\ndef id_for_memo_list(denormalized_list, output_ref=False):\n if type(denormalized_list) != list:\n raise ValueError(\"id_for_memo_list cannot work on subclasses of list\")\n\n normalized_list = []\n\n for e in denormalized_list:\n normalized_list.append(id_for_memo(e, output_ref=output_ref))\n\n return serialize(normalized_list)\n\n\n@id_for_memo.register(tuple)\ndef id_for_memo_tuple(denormalized_tuple, output_ref=False):\n if type(denormalized_tuple) != tuple:\n raise ValueError(\"id_for_memo_tuple cannot work on subclasses of tuple\")\n\n normalized_list = []\n\n for e in denormalized_tuple:\n normalized_list.append(id_for_memo(e, output_ref=output_ref))\n\n return serialize(normalized_list)\n\n\n@id_for_memo.register(dict)\ndef id_for_memo_dict(denormalized_dict, output_ref=False):\n \"\"\"This normalises the keys and values of the supplied dictionary.\n\n When output_ref=True, the values are normalised as output refs, but\n the keys are not.\n \"\"\"\n if type(denormalized_dict) != dict:\n raise ValueError(\"id_for_memo_dict cannot work on subclasses of dict\")\n\n keys = sorted(denormalized_dict)\n\n normalized_list = []\n for k in keys:\n normalized_list.append(id_for_memo(k))\n normalized_list.append(id_for_memo(denormalized_dict[k], output_ref=output_ref))\n return serialize(normalized_list)\n\n\nclass Memoizer(object):\n \"\"\"Memoizer is responsible for ensuring that identical work is not repeated.\n\n When a task is repeated, i.e., the same function is called with the same exact arguments, the\n result from a previous execution is reused. 
`wiki <https://en.wikipedia.org/wiki/Memoization>`_\n\n The memoizer implementation here does not collapse duplicate calls\n at call time, but works **only** when the result of a previous\n call is available at the time the duplicate call is made.\n\n For instance::\n\n No advantage from Memoization helps\n memoization here: here:\n\n TaskA TaskB\n | TaskA |\n | | TaskA done (TaskB)\n | | | (TaskB)\n done | |\n done |\n done\n\n The memoizer creates a lookup table by hashing the function name\n and its inputs, and storing the results of the function.\n\n When a task is ready for launch, i.e., all of its arguments\n have resolved, we add its hash to the task datastructure.\n \"\"\"\n\n def __init__(self, dfk, memoize=True, checkpoint={}):\n \"\"\"Initialize the memoizer.\n\n Args:\n - dfk (DFK obj): The DFK object\n\n KWargs:\n - memoize (Bool): enable memoization or not.\n - checkpoint (Dict): A checkpoint loaded as a dict.\n \"\"\"\n self.dfk = dfk\n self.memoize = memoize\n\n if self.memoize:\n logger.info(\"App caching initialized\")\n self.memo_lookup_table = checkpoint\n else:\n logger.info(\"App caching disabled for all apps\")\n self.memo_lookup_table = {}\n\n def make_hash(self, task):\n \"\"\"Create a hash of the task inputs.\n\n If this fails here, then all ipp calls are also likely to fail due to failure\n at serialization.\n\n Args:\n - task (dict) : Task dictionary from dfk.tasks\n\n Returns:\n - hash (str) : A unique hash string\n \"\"\"\n # Function name TODO: Add fn body later\n\n t = []\n\n # if kwargs contains an outputs parameter, that parameter is removed\n # and normalised differently - with output_ref set to True.\n # kwargs listed in ignore_for_cache will also be removed\n\n filtered_kw = task['kwargs'].copy()\n\n ignore_list = task['ignore_for_cache']\n\n logger.debug(\"Ignoring these kwargs for checkpointing: {}\".format(ignore_list))\n for k in ignore_list:\n logger.debug(\"Ignoring kwarg {}\".format(k))\n del filtered_kw[k]\n\n if 'outputs' in task['kwargs']:\n outputs = task['kwargs']['outputs']\n del filtered_kw['outputs']\n t = t + [id_for_memo(outputs, output_ref=True)] # TODO: use append?\n\n t = t + [id_for_memo(filtered_kw)]\n t = t + [id_for_memo(task['func_name']),\n id_for_memo(task['fn_hash']),\n id_for_memo(task['args'])]\n\n x = b''.join(t)\n hashedsum = hashlib.md5(x).hexdigest()\n return hashedsum\n\n def check_memo(self, task_id, task):\n \"\"\"Create a hash of the task and its inputs and check the lookup table for this hash.\n\n If present, the results are returned. 
The result is a tuple indicating whether a memo\n exists and the result, since a None result is possible and could be confusing.\n This seems like a reasonable option without relying on a cache_miss exception.\n\n Args:\n - task(task) : task from the dfk.tasks table\n\n Returns:\n - Result (Future): A completed future containing the memoized result\n\n This call will also set task['hashsum'] to the unique hashsum for the func+inputs.\n \"\"\"\n if not self.memoize or not task['memoize']:\n task['hashsum'] = None\n logger.debug(\"Task {} will not be memoized\".format(task_id))\n return None\n\n hashsum = self.make_hash(task)\n logger.debug(\"Task {} has memoization hash {}\".format(task_id, hashsum))\n result = None\n if hashsum in self.memo_lookup_table:\n result = self.memo_lookup_table[hashsum]\n logger.info(\"Task %s using result from cache\", task_id)\n else:\n logger.info(\"Task %s had no result in cache\", task_id)\n\n task['hashsum'] = hashsum\n\n return result\n\n def hash_lookup(self, hashsum):\n \"\"\"Lookup a hash in the memoization table.\n\n Args:\n - hashsum (str): The same hashes used to uniquely identify apps+inputs\n\n Returns:\n - Lookup result\n\n Raises:\n - KeyError: if hash not in table\n \"\"\"\n return self.memo_lookup_table[hashsum]\n\n def update_memo(self, task_id, task, r):\n \"\"\"Updates the memoization lookup table with the result from a task.\n\n Args:\n - task_id (int): Integer task id\n - task (dict) : A task dict from dfk.tasks\n - r (Result future): Result future\n\n A warning is issued when a hash collision occurs during the update.\n This is not likely.\n \"\"\"\n if not self.memoize or not task['memoize'] or 'hashsum' not in task:\n return\n\n if task['hashsum'] in self.memo_lookup_table:\n logger.info('Updating app cache entry with latest %s:%s call' %\n (task['func_name'], task_id))\n self.memo_lookup_table[task['hashsum']] = r\n else:\n self.memo_lookup_table[task['hashsum']] = r\n", "path": "parsl/dataflow/memoization.py"}]}
2841
208
gh_patches_debug_7584
rasdani/github-patches
git_diff
pwndbg__pwndbg-774
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- QEMU uses binfmt root instead of pwndbg.qemu.root() This bit here should probably use pwndbg.qemu.root() instead of using the module variable directly: https://github.com/pwndbg/pwndbg/blob/609284cee279de345dcb0706e11a0b56abe349f4/pwndbg/file.py#L35 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pwndbg/file.py` Content: ``` 1 #!/usr/bin/env python 2 # -*- coding: utf-8 -*- 3 """ 4 Retrieve files from the debuggee's filesystem. Useful when 5 debugging a remote process over SSH or similar, where e.g. 6 /proc/FOO/maps is needed from the remote system. 7 """ 8 from __future__ import absolute_import 9 from __future__ import division 10 from __future__ import print_function 11 from __future__ import unicode_literals 12 13 import binascii 14 import os 15 import tempfile 16 17 import gdb 18 19 import pwndbg.qemu 20 import pwndbg.remote 21 import pwndbg.symbol 22 23 24 def get_file(path): 25 """ 26 Downloads the specified file from the system where the current process is 27 being debugged. 28 29 Returns: 30 The local path to the file 31 """ 32 local_path = path 33 34 if pwndbg.qemu.root(): 35 return os.path.join(pwndbg.qemu.binfmt_root, path) 36 elif pwndbg.remote.is_remote() and not pwndbg.qemu.is_qemu(): 37 local_path = tempfile.mktemp(dir=pwndbg.symbol.remote_files_dir) 38 error = None 39 try: 40 error = gdb.execute('remote get "%s" "%s"' % (path, local_path), 41 to_string=True) 42 except gdb.error as e: 43 error = e 44 45 if error: 46 raise OSError("Could not download remote file %r:\n" \ 47 "Error: %s" % (path, error)) 48 49 return local_path 50 51 def get(path): 52 """ 53 Retrieves the contents of the specified file on the system 54 where the current process is being debugged. 55 56 Returns: 57 A byte array, or None. 58 """ 59 local_path = get_file(path) 60 61 try: 62 with open(local_path,'rb') as f: 63 return f.read() 64 except: 65 return b'' 66 67 def readlink(path): 68 """readlink(path) -> str 69 70 Read the link specified by 'path' on the system being debugged. 71 72 Handles local, qemu-usermode, and remote debugging cases. 73 """ 74 is_qemu = pwndbg.qemu.is_qemu_usermode() 75 76 if is_qemu: 77 if not os.path.exists(path): 78 path = os.path.join(pwndbg.qemu.root(), path) 79 80 if is_qemu or not pwndbg.remote.is_remote(): 81 try: 82 return os.readlink(path) 83 except Exception: 84 return '' 85 86 # 87 # Hurray unexposed packets! 88 # 89 # The 'vFile:readlink:' packet does exactly what it sounds like, 90 # but there is no API exposed to do this and there is also no 91 # command exposed... so we have to send the packet manually. 92 # 93 cmd = 'maintenance packet vFile:readlink:%s' 94 95 # The path must be uppercase hex-encoded and NULL-terminated. 
96 path += '\x00' 97 path = binascii.hexlify(path.encode()) 98 path = path.upper() 99 path = path.decode() 100 101 result = gdb.execute(cmd % path, from_tty=False, to_string=True) 102 103 """ 104 sending: "vFile:readlink:2F70726F632F3130303839302F66642F3000" 105 received: "Fc;pipe:[98420]" 106 107 sending: "vFile:readlink:2F70726F632F3130303839302F66642F333300" 108 received: "F-1,2" 109 """ 110 111 _, data = result.split('\n', 1) 112 113 # Sanity check 114 expected = 'received: "F' 115 if not data.startswith(expected): 116 return '' 117 118 # Negative values are errors 119 data = data[len(expected):] 120 if data[0] == '-': 121 return '' 122 123 # If non-negative, there will be a hex-encoded length followed 124 # by a semicolon. 125 n, data = data.split(';', 1) 126 127 n = int(n, 16) 128 if n < 0: 129 return '' 130 131 # The result is quoted by GDB, strip the quote and newline. 132 # I have no idea how well it handles other crazy stuff. 133 ending = '"\n' 134 data = data[:-len(ending)] 135 136 return data 137 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pwndbg/file.py b/pwndbg/file.py --- a/pwndbg/file.py +++ b/pwndbg/file.py @@ -30,9 +30,9 @@ The local path to the file """ local_path = path - - if pwndbg.qemu.root(): - return os.path.join(pwndbg.qemu.binfmt_root, path) + qemu_root = pwndbg.qemu.root() + if qemu_root: + return os.path.join(qemu_root, path) elif pwndbg.remote.is_remote() and not pwndbg.qemu.is_qemu(): local_path = tempfile.mktemp(dir=pwndbg.symbol.remote_files_dir) error = None
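Because pwndbg only runs inside GDB, the fix is easiest to illustrate with a stand-alone mock rather than the real modules. The stub below is entirely hypothetical and only mirrors the shape of `pwndbg.qemu` seen in the listing above (a `root()` accessor plus a bare `binfmt_root` module attribute).

```python
import os
from types import SimpleNamespace

# Hypothetical stand-in for pwndbg.qemu: root() computes/returns the sysroot,
# while the bare module attribute may be unset or stale.
qemu = SimpleNamespace(binfmt_root=None, root=lambda: "/tmp/qemu-sysroot")

def get_file(path):
    qemu_root = qemu.root()                    # patched pattern: call the accessor once ...
    if qemu_root:
        return os.path.join(qemu_root, path)   # ... and join against its return value
    return path                                # (remote/local handling elided)

print(get_file("proc/self/maps"))              # -> /tmp/qemu-sysroot/proc/self/maps
```

The pre-patch code made the opposite choice: it used `root()` only for the truthiness check and then joined against `pwndbg.qemu.binfmt_root`, so the two could disagree.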
{"golden_diff": "diff --git a/pwndbg/file.py b/pwndbg/file.py\n--- a/pwndbg/file.py\n+++ b/pwndbg/file.py\n@@ -30,9 +30,9 @@\n The local path to the file\n \"\"\"\n local_path = path\n-\n- if pwndbg.qemu.root():\n- return os.path.join(pwndbg.qemu.binfmt_root, path)\n+ qemu_root = pwndbg.qemu.root()\n+ if qemu_root:\n+ return os.path.join(qemu_root, path)\n elif pwndbg.remote.is_remote() and not pwndbg.qemu.is_qemu():\n local_path = tempfile.mktemp(dir=pwndbg.symbol.remote_files_dir)\n error = None\n", "issue": "QEMU uses binfmt root instead of pwndbg.qemu.root()\nThis bit here should probably use pwndbg.qemu.root() instead of using the module variable directly: \r\n\r\nhttps://github.com/pwndbg/pwndbg/blob/609284cee279de345dcb0706e11a0b56abe349f4/pwndbg/file.py#L35\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nRetrieve files from the debuggee's filesystem. Useful when\ndebugging a remote process over SSH or similar, where e.g.\n/proc/FOO/maps is needed from the remote system.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport binascii\nimport os\nimport tempfile\n\nimport gdb\n\nimport pwndbg.qemu\nimport pwndbg.remote\nimport pwndbg.symbol\n\n\ndef get_file(path):\n \"\"\"\n Downloads the specified file from the system where the current process is\n being debugged.\n\n Returns:\n The local path to the file\n \"\"\"\n local_path = path\n\n if pwndbg.qemu.root():\n return os.path.join(pwndbg.qemu.binfmt_root, path)\n elif pwndbg.remote.is_remote() and not pwndbg.qemu.is_qemu():\n local_path = tempfile.mktemp(dir=pwndbg.symbol.remote_files_dir)\n error = None\n try:\n error = gdb.execute('remote get \"%s\" \"%s\"' % (path, local_path),\n to_string=True)\n except gdb.error as e:\n error = e\n\n if error:\n raise OSError(\"Could not download remote file %r:\\n\" \\\n \"Error: %s\" % (path, error))\n\n return local_path\n\ndef get(path):\n \"\"\"\n Retrieves the contents of the specified file on the system\n where the current process is being debugged.\n\n Returns:\n A byte array, or None.\n \"\"\"\n local_path = get_file(path)\n\n try:\n with open(local_path,'rb') as f:\n return f.read()\n except:\n return b''\n\ndef readlink(path):\n \"\"\"readlink(path) -> str\n\n Read the link specified by 'path' on the system being debugged.\n\n Handles local, qemu-usermode, and remote debugging cases.\n \"\"\"\n is_qemu = pwndbg.qemu.is_qemu_usermode()\n\n if is_qemu:\n if not os.path.exists(path):\n path = os.path.join(pwndbg.qemu.root(), path)\n\n if is_qemu or not pwndbg.remote.is_remote():\n try:\n return os.readlink(path)\n except Exception:\n return ''\n\n #\n # Hurray unexposed packets!\n #\n # The 'vFile:readlink:' packet does exactly what it sounds like,\n # but there is no API exposed to do this and there is also no\n # command exposed... 
so we have to send the packet manually.\n #\n cmd = 'maintenance packet vFile:readlink:%s'\n\n # The path must be uppercase hex-encoded and NULL-terminated.\n path += '\\x00'\n path = binascii.hexlify(path.encode())\n path = path.upper()\n path = path.decode()\n\n result = gdb.execute(cmd % path, from_tty=False, to_string=True)\n\n \"\"\"\n sending: \"vFile:readlink:2F70726F632F3130303839302F66642F3000\"\n received: \"Fc;pipe:[98420]\"\n\n sending: \"vFile:readlink:2F70726F632F3130303839302F66642F333300\"\n received: \"F-1,2\"\n \"\"\"\n\n _, data = result.split('\\n', 1)\n\n # Sanity check\n expected = 'received: \"F'\n if not data.startswith(expected):\n return ''\n\n # Negative values are errors\n data = data[len(expected):]\n if data[0] == '-':\n return ''\n\n # If non-negative, there will be a hex-encoded length followed\n # by a semicolon.\n n, data = data.split(';', 1)\n\n n = int(n, 16)\n if n < 0:\n return ''\n\n # The result is quoted by GDB, strip the quote and newline.\n # I have no idea how well it handles other crazy stuff.\n ending = '\"\\n'\n data = data[:-len(ending)]\n\n return data\n", "path": "pwndbg/file.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nRetrieve files from the debuggee's filesystem. Useful when\ndebugging a remote process over SSH or similar, where e.g.\n/proc/FOO/maps is needed from the remote system.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport binascii\nimport os\nimport tempfile\n\nimport gdb\n\nimport pwndbg.qemu\nimport pwndbg.remote\nimport pwndbg.symbol\n\n\ndef get_file(path):\n \"\"\"\n Downloads the specified file from the system where the current process is\n being debugged.\n\n Returns:\n The local path to the file\n \"\"\"\n local_path = path\n qemu_root = pwndbg.qemu.root()\n if qemu_root:\n return os.path.join(qemu_root, path)\n elif pwndbg.remote.is_remote() and not pwndbg.qemu.is_qemu():\n local_path = tempfile.mktemp(dir=pwndbg.symbol.remote_files_dir)\n error = None\n try:\n error = gdb.execute('remote get \"%s\" \"%s\"' % (path, local_path),\n to_string=True)\n except gdb.error as e:\n error = e\n\n if error:\n raise OSError(\"Could not download remote file %r:\\n\" \\\n \"Error: %s\" % (path, error))\n\n return local_path\n\ndef get(path):\n \"\"\"\n Retrieves the contents of the specified file on the system\n where the current process is being debugged.\n\n Returns:\n A byte array, or None.\n \"\"\"\n local_path = get_file(path)\n\n try:\n with open(local_path,'rb') as f:\n return f.read()\n except:\n return b''\n\ndef readlink(path):\n \"\"\"readlink(path) -> str\n\n Read the link specified by 'path' on the system being debugged.\n\n Handles local, qemu-usermode, and remote debugging cases.\n \"\"\"\n is_qemu = pwndbg.qemu.is_qemu_usermode()\n\n if is_qemu:\n if not os.path.exists(path):\n path = os.path.join(pwndbg.qemu.root(), path)\n\n if is_qemu or not pwndbg.remote.is_remote():\n try:\n return os.readlink(path)\n except Exception:\n return ''\n\n #\n # Hurray unexposed packets!\n #\n # The 'vFile:readlink:' packet does exactly what it sounds like,\n # but there is no API exposed to do this and there is also no\n # command exposed... 
so we have to send the packet manually.\n #\n cmd = 'maintenance packet vFile:readlink:%s'\n\n # The path must be uppercase hex-encoded and NULL-terminated.\n path += '\\x00'\n path = binascii.hexlify(path.encode())\n path = path.upper()\n path = path.decode()\n\n result = gdb.execute(cmd % path, from_tty=False, to_string=True)\n\n \"\"\"\n sending: \"vFile:readlink:2F70726F632F3130303839302F66642F3000\"\n received: \"Fc;pipe:[98420]\"\n\n sending: \"vFile:readlink:2F70726F632F3130303839302F66642F333300\"\n received: \"F-1,2\"\n \"\"\"\n\n _, data = result.split('\\n', 1)\n\n # Sanity check\n expected = 'received: \"F'\n if not data.startswith(expected):\n return ''\n\n # Negative values are errors\n data = data[len(expected):]\n if data[0] == '-':\n return ''\n\n # If non-negative, there will be a hex-encoded length followed\n # by a semicolon.\n n, data = data.split(';', 1)\n\n n = int(n, 16)\n if n < 0:\n return ''\n\n # The result is quoted by GDB, strip the quote and newline.\n # I have no idea how well it handles other crazy stuff.\n ending = '\"\\n'\n data = data[:-len(ending)]\n\n return data\n", "path": "pwndbg/file.py"}]}
1,621
160
gh_patches_debug_32529
rasdani/github-patches
git_diff
OpenMined__PySyft-2254
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Syft Keras bug on Windows Relevant slack discussion: https://openmined.slack.com/archives/C6DEWA4FR/p1559899875021800 Bug: ![image](https://user-images.githubusercontent.com/7891333/59118559-79fa9400-891e-11e9-990e-1bba1fe9ec93.png) It looks like the problem here is that the `tfe.config` is being saved in a location that is not a valid filepath in Windows. As a result, there is likely a file with the name `/tmp/tfe.config` being saved in some folder on the machine, as opposed to a file with the name `tfe.config` being saved in the root subdirectory called `tmp`. The fix for this should use `os.path` to figure out which filepath the tfe.config should be saved to, and then the logging messages should print the OS-specific CLI command for launching each `TFEWorker` process. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `syft/workers/tfe.py` Content: ``` 1 """To be extended in the near future.""" 2 from collections import OrderedDict 3 import logging 4 import subprocess 5 6 import tf_encrypted as tfe 7 8 9 logger = logging.getLogger("tf_encrypted") 10 11 12 class TFEWorker: 13 # TODO(Morten) this should be turned into a proxy, with existing code 14 # extracted into a new component that's launched via a script 15 16 def __init__(self, host=None, auto_managed=True): 17 self.host = host 18 self._server_process = None 19 self._auto_managed = auto_managed 20 21 def start(self, player_name, *workers): 22 if self.host is None: 23 # we're running using a tfe.LocalConfig which doesn't require us to do anything 24 return 25 26 config_filename = "/tmp/tfe.config" 27 28 config, _ = self.config_from_workers(workers) 29 config.save(config_filename) 30 31 if self._auto_managed: 32 cmd = "python -m tf_encrypted.player --config {} {}".format( 33 config_filename, player_name 34 ) 35 self._server_process = subprocess.Popen(cmd.split(" ")) 36 else: 37 logger.info( 38 "If not done already, please launch the following " 39 "command in a terminal on host '%s':\n" 40 "'python -m tf_encrypted.player --config %s %s'\n" 41 "This can be done automatically in a local subprocess by " 42 "setting `auto_managed=True` when instantiating a TFEWorker.", 43 self.host, 44 config_filename, 45 player_name, 46 ) 47 48 def stop(self): 49 if self.host is None: 50 # we're running using a tfe.LocalConfig which doesn't require us to do anything 51 return 52 53 if self._auto_managed: 54 if self._server_process is None: 55 return 56 self._server_process.kill() 57 self._server_process.communicate() 58 self._server_process = None 59 else: 60 logger.info("Please terminate the process on host '%s'.", self.host) 61 62 def connect_to_model(self, input_shape, output_shape, *workers): 63 config, _ = self.config_from_workers(workers) 64 tfe.set_config(config) 65 66 prot = tfe.protocol.SecureNN( 67 config.get_player("server0"), config.get_player("server1"), config.get_player("server2") 68 ) 69 tfe.set_protocol(prot) 70 71 self._tf_client = tfe.serving.QueueClient( 72 input_shape=input_shape, output_shape=output_shape 73 ) 74 75 sess = tfe.Session(config=config) 76 self._tf_session = sess 77 78 def query_model(self, data): 79 self.query_model_async(data) 80 return self.query_model_join() 81 82 def query_model_async(self, data): 83 self._tf_client.send_input(self._tf_session, data) 84 85 def query_model_join(self): 86 return 
self._tf_client.receive_output(self._tf_session) 87 88 @classmethod 89 def config_from_workers(cls, workers): 90 if len(workers) != 3: 91 raise ValueError("Expected three workers but {} were given".format(len(workers))) 92 93 player_to_worker_mapping = OrderedDict() 94 player_to_worker_mapping["server0"] = workers[0] 95 player_to_worker_mapping["server1"] = workers[1] 96 player_to_worker_mapping["server2"] = workers[2] 97 98 use_local_config = all(worker.host is None for worker in workers) 99 if use_local_config: 100 config = tfe.LocalConfig( 101 player_names=player_to_worker_mapping.keys(), auto_add_unknown_players=False 102 ) 103 return config, player_to_worker_mapping 104 105 # use tfe.RemoteConfig 106 hostmap = OrderedDict( 107 [(player_name, worker.host) for player_name, worker in player_to_worker_mapping.items()] 108 ) 109 config = tfe.RemoteConfig(hostmap) 110 return config, player_to_worker_mapping 111 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/syft/workers/tfe.py b/syft/workers/tfe.py --- a/syft/workers/tfe.py +++ b/syft/workers/tfe.py @@ -1,12 +1,15 @@ """To be extended in the near future.""" from collections import OrderedDict import logging +import os import subprocess +import tempfile import tf_encrypted as tfe logger = logging.getLogger("tf_encrypted") +_TMP_DIR = tempfile.gettempdir() class TFEWorker: @@ -23,26 +26,24 @@ # we're running using a tfe.LocalConfig which doesn't require us to do anything return - config_filename = "/tmp/tfe.config" + config_filename = os.path.join(_TMP_DIR, "tfe.config") config, _ = self.config_from_workers(workers) config.save(config_filename) + launch_cmd = "python -m tf_encrypted.player --config {} {}".format( + config_filename, player_name + ) if self._auto_managed: - cmd = "python -m tf_encrypted.player --config {} {}".format( - config_filename, player_name - ) - self._server_process = subprocess.Popen(cmd.split(" ")) + self._server_process = subprocess.Popen(launch_cmd.split(" ")) else: logger.info( "If not done already, please launch the following " - "command in a terminal on host '%s':\n" - "'python -m tf_encrypted.player --config %s %s'\n" + "command in a terminal on host %s: '%s'\n" "This can be done automatically in a local subprocess by " - "setting `auto_managed=True` when instantiating a TFEWorker.", + "setting `auto_managed=True` when instantiating a TFEWorker.\n", self.host, - config_filename, - player_name, + launch_cmd, ) def stop(self):
{"golden_diff": "diff --git a/syft/workers/tfe.py b/syft/workers/tfe.py\n--- a/syft/workers/tfe.py\n+++ b/syft/workers/tfe.py\n@@ -1,12 +1,15 @@\n \"\"\"To be extended in the near future.\"\"\"\n from collections import OrderedDict\n import logging\n+import os\n import subprocess\n+import tempfile\n \n import tf_encrypted as tfe\n \n \n logger = logging.getLogger(\"tf_encrypted\")\n+_TMP_DIR = tempfile.gettempdir()\n \n \n class TFEWorker:\n@@ -23,26 +26,24 @@\n # we're running using a tfe.LocalConfig which doesn't require us to do anything\n return\n \n- config_filename = \"/tmp/tfe.config\"\n+ config_filename = os.path.join(_TMP_DIR, \"tfe.config\")\n \n config, _ = self.config_from_workers(workers)\n config.save(config_filename)\n \n+ launch_cmd = \"python -m tf_encrypted.player --config {} {}\".format(\n+ config_filename, player_name\n+ )\n if self._auto_managed:\n- cmd = \"python -m tf_encrypted.player --config {} {}\".format(\n- config_filename, player_name\n- )\n- self._server_process = subprocess.Popen(cmd.split(\" \"))\n+ self._server_process = subprocess.Popen(launch_cmd.split(\" \"))\n else:\n logger.info(\n \"If not done already, please launch the following \"\n- \"command in a terminal on host '%s':\\n\"\n- \"'python -m tf_encrypted.player --config %s %s'\\n\"\n+ \"command in a terminal on host %s: '%s'\\n\"\n \"This can be done automatically in a local subprocess by \"\n- \"setting `auto_managed=True` when instantiating a TFEWorker.\",\n+ \"setting `auto_managed=True` when instantiating a TFEWorker.\\n\",\n self.host,\n- config_filename,\n- player_name,\n+ launch_cmd,\n )\n \n def stop(self):\n", "issue": "Syft Keras bug on Windows\nRelevant slack discussion: https://openmined.slack.com/archives/C6DEWA4FR/p1559899875021800\r\n\r\nBug:\r\n![image](https://user-images.githubusercontent.com/7891333/59118559-79fa9400-891e-11e9-990e-1bba1fe9ec93.png)\r\n\r\nIt looks like the problem here is that the `tfe.config` is being saved in a location that is not a valid filepath in Windows. 
As a result, there is likely a file with the name `/tmp/tfe.config` being saved in some folder on the machine, as opposed to a file with the name `tfe.config` being saved in the root subdirectory called `tmp`.\r\n\r\nThe fix for this should use `os.path` to figure out which filepath the tfe.config should be saved to, and then the logging messages should print the OS-specific CLI command for launching each `TFEWorker` process.\n", "before_files": [{"content": "\"\"\"To be extended in the near future.\"\"\"\nfrom collections import OrderedDict\nimport logging\nimport subprocess\n\nimport tf_encrypted as tfe\n\n\nlogger = logging.getLogger(\"tf_encrypted\")\n\n\nclass TFEWorker:\n # TODO(Morten) this should be turned into a proxy, with existing code\n # extracted into a new component that's launched via a script\n\n def __init__(self, host=None, auto_managed=True):\n self.host = host\n self._server_process = None\n self._auto_managed = auto_managed\n\n def start(self, player_name, *workers):\n if self.host is None:\n # we're running using a tfe.LocalConfig which doesn't require us to do anything\n return\n\n config_filename = \"/tmp/tfe.config\"\n\n config, _ = self.config_from_workers(workers)\n config.save(config_filename)\n\n if self._auto_managed:\n cmd = \"python -m tf_encrypted.player --config {} {}\".format(\n config_filename, player_name\n )\n self._server_process = subprocess.Popen(cmd.split(\" \"))\n else:\n logger.info(\n \"If not done already, please launch the following \"\n \"command in a terminal on host '%s':\\n\"\n \"'python -m tf_encrypted.player --config %s %s'\\n\"\n \"This can be done automatically in a local subprocess by \"\n \"setting `auto_managed=True` when instantiating a TFEWorker.\",\n self.host,\n config_filename,\n player_name,\n )\n\n def stop(self):\n if self.host is None:\n # we're running using a tfe.LocalConfig which doesn't require us to do anything\n return\n\n if self._auto_managed:\n if self._server_process is None:\n return\n self._server_process.kill()\n self._server_process.communicate()\n self._server_process = None\n else:\n logger.info(\"Please terminate the process on host '%s'.\", self.host)\n\n def connect_to_model(self, input_shape, output_shape, *workers):\n config, _ = self.config_from_workers(workers)\n tfe.set_config(config)\n\n prot = tfe.protocol.SecureNN(\n config.get_player(\"server0\"), config.get_player(\"server1\"), config.get_player(\"server2\")\n )\n tfe.set_protocol(prot)\n\n self._tf_client = tfe.serving.QueueClient(\n input_shape=input_shape, output_shape=output_shape\n )\n\n sess = tfe.Session(config=config)\n self._tf_session = sess\n\n def query_model(self, data):\n self.query_model_async(data)\n return self.query_model_join()\n\n def query_model_async(self, data):\n self._tf_client.send_input(self._tf_session, data)\n\n def query_model_join(self):\n return self._tf_client.receive_output(self._tf_session)\n\n @classmethod\n def config_from_workers(cls, workers):\n if len(workers) != 3:\n raise ValueError(\"Expected three workers but {} were given\".format(len(workers)))\n\n player_to_worker_mapping = OrderedDict()\n player_to_worker_mapping[\"server0\"] = workers[0]\n player_to_worker_mapping[\"server1\"] = workers[1]\n player_to_worker_mapping[\"server2\"] = workers[2]\n\n use_local_config = all(worker.host is None for worker in workers)\n if use_local_config:\n config = tfe.LocalConfig(\n player_names=player_to_worker_mapping.keys(), auto_add_unknown_players=False\n )\n return config, player_to_worker_mapping\n\n # use 
tfe.RemoteConfig\n hostmap = OrderedDict(\n [(player_name, worker.host) for player_name, worker in player_to_worker_mapping.items()]\n )\n config = tfe.RemoteConfig(hostmap)\n return config, player_to_worker_mapping\n", "path": "syft/workers/tfe.py"}], "after_files": [{"content": "\"\"\"To be extended in the near future.\"\"\"\nfrom collections import OrderedDict\nimport logging\nimport os\nimport subprocess\nimport tempfile\n\nimport tf_encrypted as tfe\n\n\nlogger = logging.getLogger(\"tf_encrypted\")\n_TMP_DIR = tempfile.gettempdir()\n\n\nclass TFEWorker:\n # TODO(Morten) this should be turned into a proxy, with existing code\n # extracted into a new component that's launched via a script\n\n def __init__(self, host=None, auto_managed=True):\n self.host = host\n self._server_process = None\n self._auto_managed = auto_managed\n\n def start(self, player_name, *workers):\n if self.host is None:\n # we're running using a tfe.LocalConfig which doesn't require us to do anything\n return\n\n config_filename = os.path.join(_TMP_DIR, \"tfe.config\")\n\n config, _ = self.config_from_workers(workers)\n config.save(config_filename)\n\n launch_cmd = \"python -m tf_encrypted.player --config {} {}\".format(\n config_filename, player_name\n )\n if self._auto_managed:\n self._server_process = subprocess.Popen(launch_cmd.split(\" \"))\n else:\n logger.info(\n \"If not done already, please launch the following \"\n \"command in a terminal on host %s: '%s'\\n\"\n \"This can be done automatically in a local subprocess by \"\n \"setting `auto_managed=True` when instantiating a TFEWorker.\\n\",\n self.host,\n launch_cmd,\n )\n\n def stop(self):\n if self.host is None:\n # we're running using a tfe.LocalConfig which doesn't require us to do anything\n return\n\n if self._auto_managed:\n if self._server_process is None:\n return\n self._server_process.kill()\n self._server_process.communicate()\n self._server_process = None\n else:\n logger.info(\"Please terminate the process on host '%s'.\", self.host)\n\n def connect_to_model(self, input_shape, output_shape, *workers):\n config, _ = self.config_from_workers(workers)\n tfe.set_config(config)\n\n prot = tfe.protocol.SecureNN(\n config.get_player(\"server0\"), config.get_player(\"server1\"), config.get_player(\"server2\")\n )\n tfe.set_protocol(prot)\n\n self._tf_client = tfe.serving.QueueClient(\n input_shape=input_shape, output_shape=output_shape\n )\n\n sess = tfe.Session(config=config)\n self._tf_session = sess\n\n def query_model(self, data):\n self.query_model_async(data)\n return self.query_model_join()\n\n def query_model_async(self, data):\n self._tf_client.send_input(self._tf_session, data)\n\n def query_model_join(self):\n return self._tf_client.receive_output(self._tf_session)\n\n @classmethod\n def config_from_workers(cls, workers):\n if len(workers) != 3:\n raise ValueError(\"Expected three workers but {} were given\".format(len(workers)))\n\n player_to_worker_mapping = OrderedDict()\n player_to_worker_mapping[\"server0\"] = workers[0]\n player_to_worker_mapping[\"server1\"] = workers[1]\n player_to_worker_mapping[\"server2\"] = workers[2]\n\n use_local_config = all(worker.host is None for worker in workers)\n if use_local_config:\n config = tfe.LocalConfig(\n player_names=player_to_worker_mapping.keys(), auto_add_unknown_players=False\n )\n return config, player_to_worker_mapping\n\n # use tfe.RemoteConfig\n hostmap = OrderedDict(\n [(player_name, worker.host) for player_name, worker in player_to_worker_mapping.items()]\n )\n config = 
tfe.RemoteConfig(hostmap)\n return config, player_to_worker_mapping\n", "path": "syft/workers/tfe.py"}]}
1,550
442
gh_patches_debug_15049
rasdani/github-patches
git_diff
mne-tools__mne-bids-1077
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- MAINT: New release? It has been ~6 months, time for a release? Also it might be necessary to work with the imminent 1.2 release, not sure. But it's generally better if `mne-*` release before `mne` itself. mne-connectivity and mne-realtime have released recently. WDYT? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mne_bids/__init__.py` Content: ``` 1 """MNE software for easily interacting with BIDS compatible datasets.""" 2 3 __version__ = '0.11.dev0' 4 from mne_bids import commands 5 from mne_bids.report import make_report 6 from mne_bids.path import (BIDSPath, get_datatypes, get_entity_vals, 7 print_dir_tree, get_entities_from_fname, 8 search_folder_for_text, get_bids_path_from_fname) 9 from mne_bids.read import get_head_mri_trans, read_raw_bids 10 from mne_bids.utils import get_anonymization_daysback 11 from mne_bids.write import (make_dataset_description, write_anat, 12 write_raw_bids, mark_channels, 13 write_meg_calibration, write_meg_crosstalk, 14 get_anat_landmarks, anonymize_dataset) 15 from mne_bids.sidecar_updates import update_sidecar_json, update_anat_landmarks 16 from mne_bids.inspect import inspect_dataset 17 from mne_bids.dig import (template_to_head, convert_montage_to_ras, 18 convert_montage_to_mri) 19 ``` Path: `doc/conf.py` Content: ``` 1 """Configure details for documentation with sphinx.""" 2 import os 3 import sys 4 from datetime import date 5 6 import sphinx_gallery # noqa: F401 7 from sphinx_gallery.sorting import ExampleTitleSortKey 8 9 import mne_bids 10 11 12 # If extensions (or modules to document with autodoc) are in another directory, 13 # add these directories to sys.path here. If the directory is relative to the 14 # documentation root, use os.path.abspath to make it absolute, like shown here. 15 curdir = os.path.dirname(__file__) 16 sys.path.append(os.path.abspath(os.path.join(curdir, '..', 'mne_bids'))) 17 sys.path.append(os.path.abspath(os.path.join(curdir, 'sphinxext'))) 18 19 20 # -- General configuration ------------------------------------------------ 21 22 # If your documentation needs a minimal Sphinx version, state it here. 23 # 24 # needs_sphinx = '1.0' 25 26 # Add any Sphinx extension module names here, as strings. They can be 27 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 28 # ones. 29 extensions = [ 30 'sphinx.ext.githubpages', 31 'sphinx.ext.autodoc', 32 'sphinx.ext.mathjax', 33 'sphinx.ext.viewcode', 34 'sphinx.ext.autosummary', 35 'sphinx.ext.doctest', 36 'sphinx.ext.intersphinx', 37 'sphinx_gallery.gen_gallery', 38 'numpydoc', 39 'sphinx_copybutton', 40 'gen_cli', # custom extension, see ./sphinxext/gen_cli.py 41 'gh_substitutions', # custom extension, see ./sphinxext/gh_substitutions.py 42 ] 43 44 # configure sphinx-copybutton 45 copybutton_prompt_text = r">>> |\.\.\. 
|\$ " 46 copybutton_prompt_is_regexp = True 47 48 # configure numpydoc 49 numpydoc_xref_param_type = True 50 numpydoc_class_members_toctree = False 51 numpydoc_attributes_as_param_list = True 52 numpydoc_xref_aliases = { 53 'BIDSPath': ':class:`BIDSPath <mne_bids.BIDSPath>`', 54 'path-like': ':term:`path-like <mne:path-like>`', 55 'array-like': ':term:`array_like <numpy:array_like>`', 56 'int': ':class:`int <python:int>`', 57 'bool': ':class:`bool <python:bool>`', 58 'float': ':class:`float <python:float>`', 59 'list': ':class:`list <python:list>`', 60 'tuple': ':class:`tuple <python:tuple>`', 61 'NibabelImageObject': 'nibabel.spatialimages.SpatialImage', 62 } 63 numpydoc_xref_ignore = { 64 # words 65 'instance', 'instances', 'of' 66 } 67 68 69 # generate autosummary even if no references 70 autosummary_generate = True 71 autodoc_default_options = {'inherited-members': None} 72 default_role = 'autolink' # XXX silently allows bad syntax, someone should fix 73 74 # configure linkcheck 75 # https://sphinx-doc.org/en/master/usage/configuration.html?#options-for-the-linkcheck-builder 76 linkcheck_retries = 2 77 linkcheck_rate_limit_timeout = 15.0 78 linkcheck_ignore = [ 79 r'https://www.researchgate.net/profile/.*', 80 ] 81 82 # The suffix(es) of source filenames. 83 # You can specify multiple suffix as a list of string: 84 # 85 # source_suffix = ['.rst', '.md'] 86 source_suffix = '.rst' 87 88 # The master toctree document. 89 master_doc = 'index' 90 91 # General information about the project. 92 project = u'MNE-BIDS' 93 td = date.today() 94 copyright = u'2017-%s, MNE Developers. Last updated on %s' % (td.year, 95 td.isoformat()) 96 97 author = u'MNE Developers' 98 99 # The version info for the project you're documenting, acts as replacement for 100 # |version| and |release|, also used in various other places throughout the 101 # built documents. 102 # 103 # The short X.Y version. 104 version = mne_bids.__version__ 105 # The full version, including alpha/beta/rc tags. 106 release = version 107 108 # List of patterns, relative to source directory, that match files and 109 # directories to ignore when looking for source files. 110 # This patterns also effect to html_static_path and html_extra_path 111 exclude_patterns = ['auto_examples/index.rst', '_build', 'Thumbs.db', 112 '.DS_Store'] 113 114 # HTML options (e.g., theme) 115 html_show_sourcelink = False 116 html_copy_source = False 117 118 html_theme = 'pydata_sphinx_theme' 119 120 # Add any paths that contain templates here, relative to this directory. 121 templates_path = ['_templates'] 122 html_static_path = ['_static'] 123 html_css_files = ['style.css'] 124 125 # Theme options are theme-specific and customize the look and feel of a theme 126 # further. For a list of options available for each theme, see the 127 # documentation. 
128 html_theme_options = { 129 'icon_links': [ 130 dict(name='GitHub', 131 url='https://github.com/mne-tools/mne-bids', 132 icon='fab fa-github-square'), 133 dict(name='Discourse', 134 url='https://mne.discourse.group/tags/mne-bids', 135 icon='fab fa-discourse'), 136 ], 137 'icon_links_label': 'Quick Links', # for screen reader 138 'use_edit_page_button': False, 139 'navigation_with_keys': False, 140 'show_toc_level': 1, 141 'navbar_end': ['version-switcher', 'navbar-icon-links'], 142 } 143 144 html_context = { 145 'versions_dropdown': { 146 'dev': 'v0.11 (devel)', 147 'stable': 'v0.10 (stable)', 148 'v0.9': 'v0.9', 149 'v0.8': 'v0.8', 150 'v0.7': 'v0.7', 151 'v0.6': 'v0.6', 152 'v0.5': 'v0.5', 153 'v0.4': 'v0.4', 154 'v0.3': 'v0.3', 155 'v0.2': 'v0.2', 156 'v0.1': 'v0.1', 157 }, 158 } 159 160 html_sidebars = {} 161 162 # Example configuration for intersphinx: refer to the Python standard library. 163 intersphinx_mapping = { 164 'python': ('https://docs.python.org/3', None), 165 'mne': ('https://mne.tools/dev', None), 166 'numpy': ('https://numpy.org/devdocs', None), 167 'scipy': ('https://scipy.github.io/devdocs', None), 168 'matplotlib': ('https://matplotlib.org', None), 169 'nilearn': ('http://nilearn.github.io/stable', None), 170 'pandas': ('https://pandas.pydata.org/pandas-docs/dev', None), 171 'nibabel': ('https://nipy.org/nibabel', None), 172 } 173 intersphinx_timeout = 5 174 175 # Resolve binder filepath_prefix. From the docs: 176 # "A prefix to append to the filepath in the Binder links. You should use this 177 # if you will store your built documentation in a sub-folder of a repository, 178 # instead of in the root." 179 # we will store dev docs in a `dev` subdirectory and all other docs in a 180 # directory "v" + version_str. E.g., "v0.3" 181 if 'dev' in version: 182 filepath_prefix = 'dev' 183 else: 184 filepath_prefix = 'v{}'.format(version) 185 186 sphinx_gallery_conf = { 187 'doc_module': 'mne_bids', 188 'reference_url': { 189 'mne_bids': None, 190 }, 191 'backreferences_dir': 'generated', 192 'examples_dirs': '../examples', 193 'within_subsection_order': ExampleTitleSortKey, 194 'gallery_dirs': 'auto_examples', 195 'filename_pattern': '^((?!sgskip).)*$', 196 'binder': { 197 # Required keys 198 'org': 'mne-tools', 199 'repo': 'mne-bids', 200 'branch': 'gh-pages', # noqa: E501 Can be any branch, tag, or commit hash. Use a branch that hosts your docs. 201 'binderhub_url': 'https://mybinder.org', # noqa: E501 Any URL of a binderhub deployment. Must be full URL (e.g. https://mybinder.org). 202 'filepath_prefix': filepath_prefix, # noqa: E501 A prefix to prepend to any filepaths in Binder links. 203 'dependencies': [ 204 '../test_requirements.txt', 205 './requirements.txt', 206 ], 207 } 208 } 209 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/doc/conf.py b/doc/conf.py --- a/doc/conf.py +++ b/doc/conf.py @@ -143,8 +143,9 @@ html_context = { 'versions_dropdown': { - 'dev': 'v0.11 (devel)', - 'stable': 'v0.10 (stable)', + 'dev': 'v0.12 (devel)', + 'stable': 'v0.11 (stable)', + 'v0.10': 'v0.10', 'v0.9': 'v0.9', 'v0.8': 'v0.8', 'v0.7': 'v0.7', diff --git a/mne_bids/__init__.py b/mne_bids/__init__.py --- a/mne_bids/__init__.py +++ b/mne_bids/__init__.py @@ -1,6 +1,6 @@ """MNE software for easily interacting with BIDS compatible datasets.""" -__version__ = '0.11.dev0' +__version__ = '0.11' from mne_bids import commands from mne_bids.report import make_report from mne_bids.path import (BIDSPath, get_datatypes, get_entity_vals,
{"golden_diff": "diff --git a/doc/conf.py b/doc/conf.py\n--- a/doc/conf.py\n+++ b/doc/conf.py\n@@ -143,8 +143,9 @@\n \n html_context = {\n 'versions_dropdown': {\n- 'dev': 'v0.11 (devel)',\n- 'stable': 'v0.10 (stable)',\n+ 'dev': 'v0.12 (devel)',\n+ 'stable': 'v0.11 (stable)',\n+ 'v0.10': 'v0.10',\n 'v0.9': 'v0.9',\n 'v0.8': 'v0.8',\n 'v0.7': 'v0.7',\ndiff --git a/mne_bids/__init__.py b/mne_bids/__init__.py\n--- a/mne_bids/__init__.py\n+++ b/mne_bids/__init__.py\n@@ -1,6 +1,6 @@\n \"\"\"MNE software for easily interacting with BIDS compatible datasets.\"\"\"\n \n-__version__ = '0.11.dev0'\n+__version__ = '0.11'\n from mne_bids import commands\n from mne_bids.report import make_report\n from mne_bids.path import (BIDSPath, get_datatypes, get_entity_vals,\n", "issue": "MAINT: New release?\nIt has been ~6 months, time for a release? Also it might be necessary to work with the imminent 1.2 release, not sure. But it's generally better if `mne-*` release before `mne` itself. mne-connectivity and mne-realtime have released recently. WDYT?\n", "before_files": [{"content": "\"\"\"MNE software for easily interacting with BIDS compatible datasets.\"\"\"\n\n__version__ = '0.11.dev0'\nfrom mne_bids import commands\nfrom mne_bids.report import make_report\nfrom mne_bids.path import (BIDSPath, get_datatypes, get_entity_vals,\n print_dir_tree, get_entities_from_fname,\n search_folder_for_text, get_bids_path_from_fname)\nfrom mne_bids.read import get_head_mri_trans, read_raw_bids\nfrom mne_bids.utils import get_anonymization_daysback\nfrom mne_bids.write import (make_dataset_description, write_anat,\n write_raw_bids, mark_channels,\n write_meg_calibration, write_meg_crosstalk,\n get_anat_landmarks, anonymize_dataset)\nfrom mne_bids.sidecar_updates import update_sidecar_json, update_anat_landmarks\nfrom mne_bids.inspect import inspect_dataset\nfrom mne_bids.dig import (template_to_head, convert_montage_to_ras,\n convert_montage_to_mri)\n", "path": "mne_bids/__init__.py"}, {"content": "\"\"\"Configure details for documentation with sphinx.\"\"\"\nimport os\nimport sys\nfrom datetime import date\n\nimport sphinx_gallery # noqa: F401\nfrom sphinx_gallery.sorting import ExampleTitleSortKey\n\nimport mne_bids\n\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\ncurdir = os.path.dirname(__file__)\nsys.path.append(os.path.abspath(os.path.join(curdir, '..', 'mne_bids')))\nsys.path.append(os.path.abspath(os.path.join(curdir, 'sphinxext')))\n\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.githubpages',\n 'sphinx.ext.autodoc',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.doctest',\n 'sphinx.ext.intersphinx',\n 'sphinx_gallery.gen_gallery',\n 'numpydoc',\n 'sphinx_copybutton',\n 'gen_cli', # custom extension, see ./sphinxext/gen_cli.py\n 'gh_substitutions', # custom extension, see ./sphinxext/gh_substitutions.py\n]\n\n# configure sphinx-copybutton\ncopybutton_prompt_text = r\">>> |\\.\\.\\. 
|\\$ \"\ncopybutton_prompt_is_regexp = True\n\n# configure numpydoc\nnumpydoc_xref_param_type = True\nnumpydoc_class_members_toctree = False\nnumpydoc_attributes_as_param_list = True\nnumpydoc_xref_aliases = {\n 'BIDSPath': ':class:`BIDSPath <mne_bids.BIDSPath>`',\n 'path-like': ':term:`path-like <mne:path-like>`',\n 'array-like': ':term:`array_like <numpy:array_like>`',\n 'int': ':class:`int <python:int>`',\n 'bool': ':class:`bool <python:bool>`',\n 'float': ':class:`float <python:float>`',\n 'list': ':class:`list <python:list>`',\n 'tuple': ':class:`tuple <python:tuple>`',\n 'NibabelImageObject': 'nibabel.spatialimages.SpatialImage',\n}\nnumpydoc_xref_ignore = {\n # words\n 'instance', 'instances', 'of'\n}\n\n\n# generate autosummary even if no references\nautosummary_generate = True\nautodoc_default_options = {'inherited-members': None}\ndefault_role = 'autolink' # XXX silently allows bad syntax, someone should fix\n\n# configure linkcheck\n# https://sphinx-doc.org/en/master/usage/configuration.html?#options-for-the-linkcheck-builder\nlinkcheck_retries = 2\nlinkcheck_rate_limit_timeout = 15.0\nlinkcheck_ignore = [\n r'https://www.researchgate.net/profile/.*',\n]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'MNE-BIDS'\ntd = date.today()\ncopyright = u'2017-%s, MNE Developers. Last updated on %s' % (td.year,\n td.isoformat())\n\nauthor = u'MNE Developers'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = mne_bids.__version__\n# The full version, including alpha/beta/rc tags.\nrelease = version\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = ['auto_examples/index.rst', '_build', 'Thumbs.db',\n '.DS_Store']\n\n# HTML options (e.g., theme)\nhtml_show_sourcelink = False\nhtml_copy_source = False\n\nhtml_theme = 'pydata_sphinx_theme'\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\nhtml_static_path = ['_static']\nhtml_css_files = ['style.css']\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. 
For a list of options available for each theme, see the\n# documentation.\nhtml_theme_options = {\n 'icon_links': [\n dict(name='GitHub',\n url='https://github.com/mne-tools/mne-bids',\n icon='fab fa-github-square'),\n dict(name='Discourse',\n url='https://mne.discourse.group/tags/mne-bids',\n icon='fab fa-discourse'),\n ],\n 'icon_links_label': 'Quick Links', # for screen reader\n 'use_edit_page_button': False,\n 'navigation_with_keys': False,\n 'show_toc_level': 1,\n 'navbar_end': ['version-switcher', 'navbar-icon-links'],\n}\n\nhtml_context = {\n 'versions_dropdown': {\n 'dev': 'v0.11 (devel)',\n 'stable': 'v0.10 (stable)',\n 'v0.9': 'v0.9',\n 'v0.8': 'v0.8',\n 'v0.7': 'v0.7',\n 'v0.6': 'v0.6',\n 'v0.5': 'v0.5',\n 'v0.4': 'v0.4',\n 'v0.3': 'v0.3',\n 'v0.2': 'v0.2',\n 'v0.1': 'v0.1',\n },\n}\n\nhtml_sidebars = {}\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/3', None),\n 'mne': ('https://mne.tools/dev', None),\n 'numpy': ('https://numpy.org/devdocs', None),\n 'scipy': ('https://scipy.github.io/devdocs', None),\n 'matplotlib': ('https://matplotlib.org', None),\n 'nilearn': ('http://nilearn.github.io/stable', None),\n 'pandas': ('https://pandas.pydata.org/pandas-docs/dev', None),\n 'nibabel': ('https://nipy.org/nibabel', None),\n}\nintersphinx_timeout = 5\n\n# Resolve binder filepath_prefix. From the docs:\n# \"A prefix to append to the filepath in the Binder links. You should use this\n# if you will store your built documentation in a sub-folder of a repository,\n# instead of in the root.\"\n# we will store dev docs in a `dev` subdirectory and all other docs in a\n# directory \"v\" + version_str. E.g., \"v0.3\"\nif 'dev' in version:\n filepath_prefix = 'dev'\nelse:\n filepath_prefix = 'v{}'.format(version)\n\nsphinx_gallery_conf = {\n 'doc_module': 'mne_bids',\n 'reference_url': {\n 'mne_bids': None,\n },\n 'backreferences_dir': 'generated',\n 'examples_dirs': '../examples',\n 'within_subsection_order': ExampleTitleSortKey,\n 'gallery_dirs': 'auto_examples',\n 'filename_pattern': '^((?!sgskip).)*$',\n 'binder': {\n # Required keys\n 'org': 'mne-tools',\n 'repo': 'mne-bids',\n 'branch': 'gh-pages', # noqa: E501 Can be any branch, tag, or commit hash. Use a branch that hosts your docs.\n 'binderhub_url': 'https://mybinder.org', # noqa: E501 Any URL of a binderhub deployment. Must be full URL (e.g. 
https://mybinder.org).\n 'filepath_prefix': filepath_prefix, # noqa: E501 A prefix to prepend to any filepaths in Binder links.\n 'dependencies': [\n '../test_requirements.txt',\n './requirements.txt',\n ],\n }\n}\n", "path": "doc/conf.py"}], "after_files": [{"content": "\"\"\"MNE software for easily interacting with BIDS compatible datasets.\"\"\"\n\n__version__ = '0.11'\nfrom mne_bids import commands\nfrom mne_bids.report import make_report\nfrom mne_bids.path import (BIDSPath, get_datatypes, get_entity_vals,\n print_dir_tree, get_entities_from_fname,\n search_folder_for_text, get_bids_path_from_fname)\nfrom mne_bids.read import get_head_mri_trans, read_raw_bids\nfrom mne_bids.utils import get_anonymization_daysback\nfrom mne_bids.write import (make_dataset_description, write_anat,\n write_raw_bids, mark_channels,\n write_meg_calibration, write_meg_crosstalk,\n get_anat_landmarks, anonymize_dataset)\nfrom mne_bids.sidecar_updates import update_sidecar_json, update_anat_landmarks\nfrom mne_bids.inspect import inspect_dataset\nfrom mne_bids.dig import (template_to_head, convert_montage_to_ras,\n convert_montage_to_mri)\n", "path": "mne_bids/__init__.py"}, {"content": "\"\"\"Configure details for documentation with sphinx.\"\"\"\nimport os\nimport sys\nfrom datetime import date\n\nimport sphinx_gallery # noqa: F401\nfrom sphinx_gallery.sorting import ExampleTitleSortKey\n\nimport mne_bids\n\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\ncurdir = os.path.dirname(__file__)\nsys.path.append(os.path.abspath(os.path.join(curdir, '..', 'mne_bids')))\nsys.path.append(os.path.abspath(os.path.join(curdir, 'sphinxext')))\n\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.githubpages',\n 'sphinx.ext.autodoc',\n 'sphinx.ext.mathjax',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.doctest',\n 'sphinx.ext.intersphinx',\n 'sphinx_gallery.gen_gallery',\n 'numpydoc',\n 'sphinx_copybutton',\n 'gen_cli', # custom extension, see ./sphinxext/gen_cli.py\n 'gh_substitutions', # custom extension, see ./sphinxext/gh_substitutions.py\n]\n\n# configure sphinx-copybutton\ncopybutton_prompt_text = r\">>> |\\.\\.\\. 
|\\$ \"\ncopybutton_prompt_is_regexp = True\n\n# configure numpydoc\nnumpydoc_xref_param_type = True\nnumpydoc_class_members_toctree = False\nnumpydoc_attributes_as_param_list = True\nnumpydoc_xref_aliases = {\n 'BIDSPath': ':class:`BIDSPath <mne_bids.BIDSPath>`',\n 'path-like': ':term:`path-like <mne:path-like>`',\n 'array-like': ':term:`array_like <numpy:array_like>`',\n 'int': ':class:`int <python:int>`',\n 'bool': ':class:`bool <python:bool>`',\n 'float': ':class:`float <python:float>`',\n 'list': ':class:`list <python:list>`',\n 'tuple': ':class:`tuple <python:tuple>`',\n 'NibabelImageObject': 'nibabel.spatialimages.SpatialImage',\n}\nnumpydoc_xref_ignore = {\n # words\n 'instance', 'instances', 'of'\n}\n\n\n# generate autosummary even if no references\nautosummary_generate = True\nautodoc_default_options = {'inherited-members': None}\ndefault_role = 'autolink' # XXX silently allows bad syntax, someone should fix\n\n# configure linkcheck\n# https://sphinx-doc.org/en/master/usage/configuration.html?#options-for-the-linkcheck-builder\nlinkcheck_retries = 2\nlinkcheck_rate_limit_timeout = 15.0\nlinkcheck_ignore = [\n r'https://www.researchgate.net/profile/.*',\n]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = u'MNE-BIDS'\ntd = date.today()\ncopyright = u'2017-%s, MNE Developers. Last updated on %s' % (td.year,\n td.isoformat())\n\nauthor = u'MNE Developers'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = mne_bids.__version__\n# The full version, including alpha/beta/rc tags.\nrelease = version\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = ['auto_examples/index.rst', '_build', 'Thumbs.db',\n '.DS_Store']\n\n# HTML options (e.g., theme)\nhtml_show_sourcelink = False\nhtml_copy_source = False\n\nhtml_theme = 'pydata_sphinx_theme'\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\nhtml_static_path = ['_static']\nhtml_css_files = ['style.css']\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. 
For a list of options available for each theme, see the\n# documentation.\nhtml_theme_options = {\n 'icon_links': [\n dict(name='GitHub',\n url='https://github.com/mne-tools/mne-bids',\n icon='fab fa-github-square'),\n dict(name='Discourse',\n url='https://mne.discourse.group/tags/mne-bids',\n icon='fab fa-discourse'),\n ],\n 'icon_links_label': 'Quick Links', # for screen reader\n 'use_edit_page_button': False,\n 'navigation_with_keys': False,\n 'show_toc_level': 1,\n 'navbar_end': ['version-switcher', 'navbar-icon-links'],\n}\n\nhtml_context = {\n 'versions_dropdown': {\n 'dev': 'v0.12 (devel)',\n 'stable': 'v0.11 (stable)',\n 'v0.10': 'v0.10',\n 'v0.9': 'v0.9',\n 'v0.8': 'v0.8',\n 'v0.7': 'v0.7',\n 'v0.6': 'v0.6',\n 'v0.5': 'v0.5',\n 'v0.4': 'v0.4',\n 'v0.3': 'v0.3',\n 'v0.2': 'v0.2',\n 'v0.1': 'v0.1',\n },\n}\n\nhtml_sidebars = {}\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\n 'python': ('https://docs.python.org/3', None),\n 'mne': ('https://mne.tools/dev', None),\n 'numpy': ('https://numpy.org/devdocs', None),\n 'scipy': ('https://scipy.github.io/devdocs', None),\n 'matplotlib': ('https://matplotlib.org', None),\n 'nilearn': ('http://nilearn.github.io/stable', None),\n 'pandas': ('https://pandas.pydata.org/pandas-docs/dev', None),\n 'nibabel': ('https://nipy.org/nibabel', None),\n}\nintersphinx_timeout = 5\n\n# Resolve binder filepath_prefix. From the docs:\n# \"A prefix to append to the filepath in the Binder links. You should use this\n# if you will store your built documentation in a sub-folder of a repository,\n# instead of in the root.\"\n# we will store dev docs in a `dev` subdirectory and all other docs in a\n# directory \"v\" + version_str. E.g., \"v0.3\"\nif 'dev' in version:\n filepath_prefix = 'dev'\nelse:\n filepath_prefix = 'v{}'.format(version)\n\nsphinx_gallery_conf = {\n 'doc_module': 'mne_bids',\n 'reference_url': {\n 'mne_bids': None,\n },\n 'backreferences_dir': 'generated',\n 'examples_dirs': '../examples',\n 'within_subsection_order': ExampleTitleSortKey,\n 'gallery_dirs': 'auto_examples',\n 'filename_pattern': '^((?!sgskip).)*$',\n 'binder': {\n # Required keys\n 'org': 'mne-tools',\n 'repo': 'mne-bids',\n 'branch': 'gh-pages', # noqa: E501 Can be any branch, tag, or commit hash. Use a branch that hosts your docs.\n 'binderhub_url': 'https://mybinder.org', # noqa: E501 Any URL of a binderhub deployment. Must be full URL (e.g. https://mybinder.org).\n 'filepath_prefix': filepath_prefix, # noqa: E501 A prefix to prepend to any filepaths in Binder links.\n 'dependencies': [\n '../test_requirements.txt',\n './requirements.txt',\n ],\n }\n}\n", "path": "doc/conf.py"}]}
3,008
292
gh_patches_debug_41321
rasdani/github-patches
git_diff
xorbitsai__inference-118
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- BUG: chatglm hang --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `xinference/model/llm/chatglm.py` Content: ``` 1 # Copyright 2022-2023 XProbe Inc. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import logging 16 import time 17 import uuid 18 from pathlib import Path 19 from typing import TYPE_CHECKING, Iterator, List, Optional, TypedDict, Union 20 21 from ...types import ChatCompletion, ChatCompletionChunk, ChatCompletionMessage 22 from .core import Model 23 24 if TYPE_CHECKING: 25 from .. import ModelSpec 26 27 logger = logging.getLogger(__name__) 28 29 30 class ChatglmCppModelConfig(TypedDict, total=False): 31 pass 32 33 34 class ChatglmCppGenerateConfig(TypedDict, total=False): 35 max_tokens: int 36 top_p: float 37 temperature: float 38 stream: bool 39 40 41 class ChatglmCppChatModel(Model): 42 def __init__( 43 self, 44 model_uid: str, 45 model_spec: "ModelSpec", 46 model_path: str, 47 model_config: Optional[ChatglmCppModelConfig] = None, 48 ): 49 super().__init__(model_uid, model_spec) 50 self._llm = None 51 self._model_path = model_path 52 if model_spec.model_name == "chatglm": 53 self.max_context_length = 2048 54 elif model_spec.model_name == "chatglm2": 55 self.max_context_length = 8192 56 else: 57 raise ValueError( 58 f"Invalid model name '{model_spec.model_name}'. Expected chatglm or chatglm2." 59 ) 60 61 # just a placeholder for now as the chatglm_cpp repo doesn't support model config. 62 self._model_config = model_config 63 64 @classmethod 65 def _sanitize_generate_config( 66 cls, 67 chatglmcpp_generate_config: Optional[ChatglmCppGenerateConfig], 68 ) -> ChatglmCppGenerateConfig: 69 if chatglmcpp_generate_config is None: 70 chatglmcpp_generate_config = ChatglmCppGenerateConfig() 71 chatglmcpp_generate_config.setdefault("max_tokens", 256) 72 chatglmcpp_generate_config.setdefault("temperature", 0.95) 73 chatglmcpp_generate_config.setdefault("top_p", 0.8) 74 chatglmcpp_generate_config.setdefault("stream", False) 75 return chatglmcpp_generate_config 76 77 def load(self): 78 try: 79 import chatglm_cpp 80 except ImportError: 81 error_message = "Failed to import module 'chatglm_cpp'" 82 installation_guide = [ 83 "Please make sure 'chatglm_cpp' is installed. 
", 84 "You can install it by running the following command in the terminal:\n", 85 "pip install git+https://github.com/li-plus/chatglm.cpp.git@main\n\n", 86 "Or visit the original git repo if the above command fails:\n", 87 "https://github.com/li-plus/chatglm.cpp", 88 ] 89 90 raise ImportError(f"{error_message}\n\n{''.join(installation_guide)}") 91 92 self._llm = chatglm_cpp.Pipeline(Path(self._model_path)) 93 94 @staticmethod 95 def _convert_raw_text_chunks_to_chat( 96 tokens: Iterator[str], model_name: str 97 ) -> Iterator[ChatCompletionChunk]: 98 yield { 99 "id": "chat" + f"cmpl-{str(uuid.uuid4())}", 100 "model": model_name, 101 "object": "chat.completion.chunk", 102 "created": int(time.time()), 103 "choices": [ 104 { 105 "index": 0, 106 "delta": { 107 "role": "assistant", 108 }, 109 "finish_reason": None, 110 } 111 ], 112 } 113 for token in enumerate(tokens): 114 yield { 115 "id": "chat" + f"cmpl-{str(uuid.uuid4())}", 116 "model": model_name, 117 "object": "chat.completion.chunk", 118 "created": int(time.time()), 119 "choices": [ 120 { 121 "index": 0, 122 "delta": { 123 "content": token[1], 124 }, 125 "finish_reason": None, 126 } 127 ], 128 } 129 130 @staticmethod 131 def _convert_raw_text_completion_to_chat( 132 text: str, model_name: str 133 ) -> ChatCompletion: 134 return { 135 "id": "chat" + f"cmpl-{str(uuid.uuid4())}", 136 "model": model_name, 137 "object": "chat.completion", 138 "created": int(time.time()), 139 "choices": [ 140 { 141 "index": 0, 142 "message": { 143 "role": "assistant", 144 "content": text, 145 }, 146 "finish_reason": None, 147 } 148 ], 149 "usage": { 150 "prompt_tokens": -1, 151 "completion_tokens": -1, 152 "total_tokens": -1, 153 }, 154 } 155 156 def chat( 157 self, 158 prompt: str, 159 chat_history: Optional[List[ChatCompletionMessage]] = None, 160 generate_config: Optional[ChatglmCppGenerateConfig] = None, 161 ) -> Union[ChatCompletion, Iterator[ChatCompletionChunk]]: 162 if chat_history is not None: 163 chat_history_list = [message["content"] for message in chat_history] 164 else: 165 chat_history_list = [] 166 167 chat_history_list.append(prompt) 168 logger.debug("Full conversation history:\n%s", str(chat_history_list)) 169 170 generate_config = self._sanitize_generate_config(generate_config) 171 172 assert self._llm is not None 173 174 if generate_config.get("stream", False): 175 it = self._llm.stream_chat( 176 chat_history_list, 177 max_context_length=self.max_context_length, 178 max_length=generate_config["max_tokens"], 179 temperature=generate_config["temperature"], 180 top_p=generate_config["top_p"], 181 ) 182 assert not isinstance(it, str) 183 return self._convert_raw_text_chunks_to_chat(it, self.model_uid) 184 else: 185 c = self._llm.chat( 186 chat_history_list, 187 max_context_length=self.max_context_length, 188 max_length=generate_config["max_tokens"], 189 temperature=generate_config["temperature"], 190 top_p=generate_config["top_p"], 191 ) 192 assert not isinstance(c, Iterator) 193 return self._convert_raw_text_completion_to_chat(c, self.model_uid) 194 ``` Path: `examples/chat.py` Content: ``` 1 from typing import List 2 3 from xinference.client import Client 4 from xinference.types import ChatCompletionMessage 5 6 if __name__ == '__main__': 7 import argparse 8 9 parser = argparse.ArgumentParser() 10 11 parser.add_argument("--endpoint", type=str, help="Xinference endpoint, required") 12 parser.add_argument("--model_name", type=str, help="Name of the model, required") 13 parser.add_argument( 14 "--model_size_in_billions", type=int, required=False, 
help="Size of the model in billions", ) 15 parser.add_argument("--model_format", type=str, required=False, help="Format of the model", ) 16 parser.add_argument("--quantization", type=str, required=False, help="Quantization") 17 18 args = parser.parse_args() 19 20 endpoint = args.endpoint 21 model_name = args.model_name 22 model_size_in_billions = args.model_size_in_billions 23 model_format = args.model_format 24 quantization = args.quantization 25 26 print(f"Xinference endpoint: {endpoint}") 27 print(f"Model Name: {model_name}") 28 print(f"Model Size (in billions): {model_size_in_billions}") 29 print(f"Model Format: {model_format}") 30 print(f"Quantization: {quantization}") 31 32 client = Client(endpoint) 33 model_uid = client.launch_model( 34 model_name=model_name, 35 model_size_in_billions=model_size_in_billions, 36 model_format=model_format, 37 quantization=quantization, 38 n_ctx=2048 39 ) 40 model = client.get_model(model_uid) 41 42 chat_history: List["ChatCompletionMessage"] = [] 43 while True: 44 prompt = input("you: ") 45 completion = model.chat( 46 prompt, 47 chat_history, 48 generate_config={"max_tokens": 1024} 49 ) 50 content = completion["choices"][0]["message"]["content"] 51 print(f"{model_name}: {content}") 52 chat_history.append( 53 ChatCompletionMessage(role="user", content=prompt) 54 ) 55 chat_history.append( 56 ChatCompletionMessage(role="assistant", content=content) 57 ) 58 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/examples/chat.py b/examples/chat.py --- a/examples/chat.py +++ b/examples/chat.py @@ -30,13 +30,7 @@ print(f"Quantization: {quantization}") client = Client(endpoint) - model_uid = client.launch_model( - model_name=model_name, - model_size_in_billions=model_size_in_billions, - model_format=model_format, - quantization=quantization, - n_ctx=2048 - ) + model_uid = client.launch_model(model_name, n_ctx=2048) model = client.get_model(model_uid) chat_history: List["ChatCompletionMessage"] = [] diff --git a/xinference/model/llm/chatglm.py b/xinference/model/llm/chatglm.py --- a/xinference/model/llm/chatglm.py +++ b/xinference/model/llm/chatglm.py @@ -22,6 +22,8 @@ from .core import Model if TYPE_CHECKING: + from chatglm_cpp import Pipeline + from .. import ModelSpec logger = logging.getLogger(__name__) @@ -47,16 +49,8 @@ model_config: Optional[ChatglmCppModelConfig] = None, ): super().__init__(model_uid, model_spec) - self._llm = None + self._llm: Optional["Pipeline"] = None self._model_path = model_path - if model_spec.model_name == "chatglm": - self.max_context_length = 2048 - elif model_spec.model_name == "chatglm2": - self.max_context_length = 8192 - else: - raise ValueError( - f"Invalid model name '{model_spec.model_name}'. Expected chatglm or chatglm2." - ) # just a placeholder for now as the chatglm_cpp repo doesn't support model config. self._model_config = model_config @@ -68,9 +62,6 @@ ) -> ChatglmCppGenerateConfig: if chatglmcpp_generate_config is None: chatglmcpp_generate_config = ChatglmCppGenerateConfig() - chatglmcpp_generate_config.setdefault("max_tokens", 256) - chatglmcpp_generate_config.setdefault("temperature", 0.95) - chatglmcpp_generate_config.setdefault("top_p", 0.8) chatglmcpp_generate_config.setdefault("stream", False) return chatglmcpp_generate_config @@ -174,20 +165,12 @@ if generate_config.get("stream", False): it = self._llm.stream_chat( chat_history_list, - max_context_length=self.max_context_length, - max_length=generate_config["max_tokens"], - temperature=generate_config["temperature"], - top_p=generate_config["top_p"], ) assert not isinstance(it, str) return self._convert_raw_text_chunks_to_chat(it, self.model_uid) else: c = self._llm.chat( chat_history_list, - max_context_length=self.max_context_length, - max_length=generate_config["max_tokens"], - temperature=generate_config["temperature"], - top_p=generate_config["top_p"], ) assert not isinstance(c, Iterator) return self._convert_raw_text_completion_to_chat(c, self.model_uid)
{"golden_diff": "diff --git a/examples/chat.py b/examples/chat.py\n--- a/examples/chat.py\n+++ b/examples/chat.py\n@@ -30,13 +30,7 @@\n print(f\"Quantization: {quantization}\")\n \n client = Client(endpoint)\n- model_uid = client.launch_model(\n- model_name=model_name,\n- model_size_in_billions=model_size_in_billions,\n- model_format=model_format,\n- quantization=quantization,\n- n_ctx=2048\n- )\n+ model_uid = client.launch_model(model_name, n_ctx=2048)\n model = client.get_model(model_uid)\n \n chat_history: List[\"ChatCompletionMessage\"] = []\ndiff --git a/xinference/model/llm/chatglm.py b/xinference/model/llm/chatglm.py\n--- a/xinference/model/llm/chatglm.py\n+++ b/xinference/model/llm/chatglm.py\n@@ -22,6 +22,8 @@\n from .core import Model\n \n if TYPE_CHECKING:\n+ from chatglm_cpp import Pipeline\n+\n from .. import ModelSpec\n \n logger = logging.getLogger(__name__)\n@@ -47,16 +49,8 @@\n model_config: Optional[ChatglmCppModelConfig] = None,\n ):\n super().__init__(model_uid, model_spec)\n- self._llm = None\n+ self._llm: Optional[\"Pipeline\"] = None\n self._model_path = model_path\n- if model_spec.model_name == \"chatglm\":\n- self.max_context_length = 2048\n- elif model_spec.model_name == \"chatglm2\":\n- self.max_context_length = 8192\n- else:\n- raise ValueError(\n- f\"Invalid model name '{model_spec.model_name}'. Expected chatglm or chatglm2.\"\n- )\n \n # just a placeholder for now as the chatglm_cpp repo doesn't support model config.\n self._model_config = model_config\n@@ -68,9 +62,6 @@\n ) -> ChatglmCppGenerateConfig:\n if chatglmcpp_generate_config is None:\n chatglmcpp_generate_config = ChatglmCppGenerateConfig()\n- chatglmcpp_generate_config.setdefault(\"max_tokens\", 256)\n- chatglmcpp_generate_config.setdefault(\"temperature\", 0.95)\n- chatglmcpp_generate_config.setdefault(\"top_p\", 0.8)\n chatglmcpp_generate_config.setdefault(\"stream\", False)\n return chatglmcpp_generate_config\n \n@@ -174,20 +165,12 @@\n if generate_config.get(\"stream\", False):\n it = self._llm.stream_chat(\n chat_history_list,\n- max_context_length=self.max_context_length,\n- max_length=generate_config[\"max_tokens\"],\n- temperature=generate_config[\"temperature\"],\n- top_p=generate_config[\"top_p\"],\n )\n assert not isinstance(it, str)\n return self._convert_raw_text_chunks_to_chat(it, self.model_uid)\n else:\n c = self._llm.chat(\n chat_history_list,\n- max_context_length=self.max_context_length,\n- max_length=generate_config[\"max_tokens\"],\n- temperature=generate_config[\"temperature\"],\n- top_p=generate_config[\"top_p\"],\n )\n assert not isinstance(c, Iterator)\n return self._convert_raw_text_completion_to_chat(c, self.model_uid)\n", "issue": "BUG: chatglm hang\n\n", "before_files": [{"content": "# Copyright 2022-2023 XProbe Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport time\nimport uuid\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Iterator, List, Optional, TypedDict, Union\n\nfrom ...types import ChatCompletion, ChatCompletionChunk, 
ChatCompletionMessage\nfrom .core import Model\n\nif TYPE_CHECKING:\n from .. import ModelSpec\n\nlogger = logging.getLogger(__name__)\n\n\nclass ChatglmCppModelConfig(TypedDict, total=False):\n pass\n\n\nclass ChatglmCppGenerateConfig(TypedDict, total=False):\n max_tokens: int\n top_p: float\n temperature: float\n stream: bool\n\n\nclass ChatglmCppChatModel(Model):\n def __init__(\n self,\n model_uid: str,\n model_spec: \"ModelSpec\",\n model_path: str,\n model_config: Optional[ChatglmCppModelConfig] = None,\n ):\n super().__init__(model_uid, model_spec)\n self._llm = None\n self._model_path = model_path\n if model_spec.model_name == \"chatglm\":\n self.max_context_length = 2048\n elif model_spec.model_name == \"chatglm2\":\n self.max_context_length = 8192\n else:\n raise ValueError(\n f\"Invalid model name '{model_spec.model_name}'. Expected chatglm or chatglm2.\"\n )\n\n # just a placeholder for now as the chatglm_cpp repo doesn't support model config.\n self._model_config = model_config\n\n @classmethod\n def _sanitize_generate_config(\n cls,\n chatglmcpp_generate_config: Optional[ChatglmCppGenerateConfig],\n ) -> ChatglmCppGenerateConfig:\n if chatglmcpp_generate_config is None:\n chatglmcpp_generate_config = ChatglmCppGenerateConfig()\n chatglmcpp_generate_config.setdefault(\"max_tokens\", 256)\n chatglmcpp_generate_config.setdefault(\"temperature\", 0.95)\n chatglmcpp_generate_config.setdefault(\"top_p\", 0.8)\n chatglmcpp_generate_config.setdefault(\"stream\", False)\n return chatglmcpp_generate_config\n\n def load(self):\n try:\n import chatglm_cpp\n except ImportError:\n error_message = \"Failed to import module 'chatglm_cpp'\"\n installation_guide = [\n \"Please make sure 'chatglm_cpp' is installed. \",\n \"You can install it by running the following command in the terminal:\\n\",\n \"pip install git+https://github.com/li-plus/chatglm.cpp.git@main\\n\\n\",\n \"Or visit the original git repo if the above command fails:\\n\",\n \"https://github.com/li-plus/chatglm.cpp\",\n ]\n\n raise ImportError(f\"{error_message}\\n\\n{''.join(installation_guide)}\")\n\n self._llm = chatglm_cpp.Pipeline(Path(self._model_path))\n\n @staticmethod\n def _convert_raw_text_chunks_to_chat(\n tokens: Iterator[str], model_name: str\n ) -> Iterator[ChatCompletionChunk]:\n yield {\n \"id\": \"chat\" + f\"cmpl-{str(uuid.uuid4())}\",\n \"model\": model_name,\n \"object\": \"chat.completion.chunk\",\n \"created\": int(time.time()),\n \"choices\": [\n {\n \"index\": 0,\n \"delta\": {\n \"role\": \"assistant\",\n },\n \"finish_reason\": None,\n }\n ],\n }\n for token in enumerate(tokens):\n yield {\n \"id\": \"chat\" + f\"cmpl-{str(uuid.uuid4())}\",\n \"model\": model_name,\n \"object\": \"chat.completion.chunk\",\n \"created\": int(time.time()),\n \"choices\": [\n {\n \"index\": 0,\n \"delta\": {\n \"content\": token[1],\n },\n \"finish_reason\": None,\n }\n ],\n }\n\n @staticmethod\n def _convert_raw_text_completion_to_chat(\n text: str, model_name: str\n ) -> ChatCompletion:\n return {\n \"id\": \"chat\" + f\"cmpl-{str(uuid.uuid4())}\",\n \"model\": model_name,\n \"object\": \"chat.completion\",\n \"created\": int(time.time()),\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": text,\n },\n \"finish_reason\": None,\n }\n ],\n \"usage\": {\n \"prompt_tokens\": -1,\n \"completion_tokens\": -1,\n \"total_tokens\": -1,\n },\n }\n\n def chat(\n self,\n prompt: str,\n chat_history: Optional[List[ChatCompletionMessage]] = None,\n generate_config: 
Optional[ChatglmCppGenerateConfig] = None,\n ) -> Union[ChatCompletion, Iterator[ChatCompletionChunk]]:\n if chat_history is not None:\n chat_history_list = [message[\"content\"] for message in chat_history]\n else:\n chat_history_list = []\n\n chat_history_list.append(prompt)\n logger.debug(\"Full conversation history:\\n%s\", str(chat_history_list))\n\n generate_config = self._sanitize_generate_config(generate_config)\n\n assert self._llm is not None\n\n if generate_config.get(\"stream\", False):\n it = self._llm.stream_chat(\n chat_history_list,\n max_context_length=self.max_context_length,\n max_length=generate_config[\"max_tokens\"],\n temperature=generate_config[\"temperature\"],\n top_p=generate_config[\"top_p\"],\n )\n assert not isinstance(it, str)\n return self._convert_raw_text_chunks_to_chat(it, self.model_uid)\n else:\n c = self._llm.chat(\n chat_history_list,\n max_context_length=self.max_context_length,\n max_length=generate_config[\"max_tokens\"],\n temperature=generate_config[\"temperature\"],\n top_p=generate_config[\"top_p\"],\n )\n assert not isinstance(c, Iterator)\n return self._convert_raw_text_completion_to_chat(c, self.model_uid)\n", "path": "xinference/model/llm/chatglm.py"}, {"content": "from typing import List\n\nfrom xinference.client import Client\nfrom xinference.types import ChatCompletionMessage\n\nif __name__ == '__main__':\n import argparse\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--endpoint\", type=str, help=\"Xinference endpoint, required\")\n parser.add_argument(\"--model_name\", type=str, help=\"Name of the model, required\")\n parser.add_argument(\n \"--model_size_in_billions\", type=int, required=False, help=\"Size of the model in billions\", )\n parser.add_argument(\"--model_format\", type=str, required=False, help=\"Format of the model\", )\n parser.add_argument(\"--quantization\", type=str, required=False, help=\"Quantization\")\n\n args = parser.parse_args()\n\n endpoint = args.endpoint\n model_name = args.model_name\n model_size_in_billions = args.model_size_in_billions\n model_format = args.model_format\n quantization = args.quantization\n\n print(f\"Xinference endpoint: {endpoint}\")\n print(f\"Model Name: {model_name}\")\n print(f\"Model Size (in billions): {model_size_in_billions}\")\n print(f\"Model Format: {model_format}\")\n print(f\"Quantization: {quantization}\")\n\n client = Client(endpoint)\n model_uid = client.launch_model(\n model_name=model_name,\n model_size_in_billions=model_size_in_billions,\n model_format=model_format,\n quantization=quantization,\n n_ctx=2048\n )\n model = client.get_model(model_uid)\n\n chat_history: List[\"ChatCompletionMessage\"] = []\n while True:\n prompt = input(\"you: \")\n completion = model.chat(\n prompt,\n chat_history,\n generate_config={\"max_tokens\": 1024}\n )\n content = completion[\"choices\"][0][\"message\"][\"content\"]\n print(f\"{model_name}: {content}\")\n chat_history.append(\n ChatCompletionMessage(role=\"user\", content=prompt)\n )\n chat_history.append(\n ChatCompletionMessage(role=\"assistant\", content=content)\n )\n", "path": "examples/chat.py"}], "after_files": [{"content": "# Copyright 2022-2023 XProbe Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed 
on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport time\nimport uuid\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING, Iterator, List, Optional, TypedDict, Union\n\nfrom ...types import ChatCompletion, ChatCompletionChunk, ChatCompletionMessage\nfrom .core import Model\n\nif TYPE_CHECKING:\n from chatglm_cpp import Pipeline\n\n from .. import ModelSpec\n\nlogger = logging.getLogger(__name__)\n\n\nclass ChatglmCppModelConfig(TypedDict, total=False):\n pass\n\n\nclass ChatglmCppGenerateConfig(TypedDict, total=False):\n max_tokens: int\n top_p: float\n temperature: float\n stream: bool\n\n\nclass ChatglmCppChatModel(Model):\n def __init__(\n self,\n model_uid: str,\n model_spec: \"ModelSpec\",\n model_path: str,\n model_config: Optional[ChatglmCppModelConfig] = None,\n ):\n super().__init__(model_uid, model_spec)\n self._llm: Optional[\"Pipeline\"] = None\n self._model_path = model_path\n\n # just a placeholder for now as the chatglm_cpp repo doesn't support model config.\n self._model_config = model_config\n\n @classmethod\n def _sanitize_generate_config(\n cls,\n chatglmcpp_generate_config: Optional[ChatglmCppGenerateConfig],\n ) -> ChatglmCppGenerateConfig:\n if chatglmcpp_generate_config is None:\n chatglmcpp_generate_config = ChatglmCppGenerateConfig()\n chatglmcpp_generate_config.setdefault(\"stream\", False)\n return chatglmcpp_generate_config\n\n def load(self):\n try:\n import chatglm_cpp\n except ImportError:\n error_message = \"Failed to import module 'chatglm_cpp'\"\n installation_guide = [\n \"Please make sure 'chatglm_cpp' is installed. \",\n \"You can install it by running the following command in the terminal:\\n\",\n \"pip install git+https://github.com/li-plus/chatglm.cpp.git@main\\n\\n\",\n \"Or visit the original git repo if the above command fails:\\n\",\n \"https://github.com/li-plus/chatglm.cpp\",\n ]\n\n raise ImportError(f\"{error_message}\\n\\n{''.join(installation_guide)}\")\n\n self._llm = chatglm_cpp.Pipeline(Path(self._model_path))\n\n @staticmethod\n def _convert_raw_text_chunks_to_chat(\n tokens: Iterator[str], model_name: str\n ) -> Iterator[ChatCompletionChunk]:\n yield {\n \"id\": \"chat\" + f\"cmpl-{str(uuid.uuid4())}\",\n \"model\": model_name,\n \"object\": \"chat.completion.chunk\",\n \"created\": int(time.time()),\n \"choices\": [\n {\n \"index\": 0,\n \"delta\": {\n \"role\": \"assistant\",\n },\n \"finish_reason\": None,\n }\n ],\n }\n for token in enumerate(tokens):\n yield {\n \"id\": \"chat\" + f\"cmpl-{str(uuid.uuid4())}\",\n \"model\": model_name,\n \"object\": \"chat.completion.chunk\",\n \"created\": int(time.time()),\n \"choices\": [\n {\n \"index\": 0,\n \"delta\": {\n \"content\": token[1],\n },\n \"finish_reason\": None,\n }\n ],\n }\n\n @staticmethod\n def _convert_raw_text_completion_to_chat(\n text: str, model_name: str\n ) -> ChatCompletion:\n return {\n \"id\": \"chat\" + f\"cmpl-{str(uuid.uuid4())}\",\n \"model\": model_name,\n \"object\": \"chat.completion\",\n \"created\": int(time.time()),\n \"choices\": [\n {\n \"index\": 0,\n \"message\": {\n \"role\": \"assistant\",\n \"content\": text,\n },\n \"finish_reason\": None,\n }\n ],\n \"usage\": {\n \"prompt_tokens\": -1,\n \"completion_tokens\": -1,\n \"total_tokens\": -1,\n },\n }\n\n def chat(\n self,\n prompt: str,\n chat_history: Optional[List[ChatCompletionMessage]] = None,\n generate_config: 
Optional[ChatglmCppGenerateConfig] = None,\n ) -> Union[ChatCompletion, Iterator[ChatCompletionChunk]]:\n if chat_history is not None:\n chat_history_list = [message[\"content\"] for message in chat_history]\n else:\n chat_history_list = []\n\n chat_history_list.append(prompt)\n logger.debug(\"Full conversation history:\\n%s\", str(chat_history_list))\n\n generate_config = self._sanitize_generate_config(generate_config)\n\n assert self._llm is not None\n\n if generate_config.get(\"stream\", False):\n it = self._llm.stream_chat(\n chat_history_list,\n )\n assert not isinstance(it, str)\n return self._convert_raw_text_chunks_to_chat(it, self.model_uid)\n else:\n c = self._llm.chat(\n chat_history_list,\n )\n assert not isinstance(c, Iterator)\n return self._convert_raw_text_completion_to_chat(c, self.model_uid)\n", "path": "xinference/model/llm/chatglm.py"}, {"content": "from typing import List\n\nfrom xinference.client import Client\nfrom xinference.types import ChatCompletionMessage\n\nif __name__ == '__main__':\n import argparse\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--endpoint\", type=str, help=\"Xinference endpoint, required\")\n parser.add_argument(\"--model_name\", type=str, help=\"Name of the model, required\")\n parser.add_argument(\n \"--model_size_in_billions\", type=int, required=False, help=\"Size of the model in billions\", )\n parser.add_argument(\"--model_format\", type=str, required=False, help=\"Format of the model\", )\n parser.add_argument(\"--quantization\", type=str, required=False, help=\"Quantization\")\n\n args = parser.parse_args()\n\n endpoint = args.endpoint\n model_name = args.model_name\n model_size_in_billions = args.model_size_in_billions\n model_format = args.model_format\n quantization = args.quantization\n\n print(f\"Xinference endpoint: {endpoint}\")\n print(f\"Model Name: {model_name}\")\n print(f\"Model Size (in billions): {model_size_in_billions}\")\n print(f\"Model Format: {model_format}\")\n print(f\"Quantization: {quantization}\")\n\n client = Client(endpoint)\n model_uid = client.launch_model(model_name, n_ctx=2048)\n model = client.get_model(model_uid)\n\n chat_history: List[\"ChatCompletionMessage\"] = []\n while True:\n prompt = input(\"you: \")\n completion = model.chat(\n prompt,\n chat_history,\n generate_config={\"max_tokens\": 1024}\n )\n content = completion[\"choices\"][0][\"message\"][\"content\"]\n print(f\"{model_name}: {content}\")\n chat_history.append(\n ChatCompletionMessage(role=\"user\", content=prompt)\n )\n chat_history.append(\n ChatCompletionMessage(role=\"assistant\", content=content)\n )\n", "path": "examples/chat.py"}]}
num_tokens: 2,738
num_tokens_diff: 745
problem_id: gh_patches_debug_20943
source: rasdani/github-patches
task_type: git_diff
in_source_id: PokemonGoF__PokemonGo-Bot-5139
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- telegram_handler : TypeError: expected string or buffer <!-- ===============ISSUE SECTION=============== Before you create an Issue, please check the following: 1. Have you validated that your config.json is valid JSON? Use http://jsonlint.com/ to check. 2. Have you [searched our issue tracker](https://github.com/PokemonGoF/PokemonGo-Bot/issues?q=is%3Aissue+sort%3Aupdated-desc) to see if the issue already exists? If so, comment on that issue instead rather than creating a new issue. 3. Are you running on the `master` branch? We work on the `dev` branch and then add that functionality to `master` when it is stable. Your issue may be fixed on `dev` and there is no need for this issue, just wait and it will eventually be merged to `master`. 4. All Issue sections MUST be completed to help us determine the actual problem and find its cause --> ### Expected Behavior <!-- Tell us what you expect to happen --> Bot to run and telegram to respond to /info request. ### Actual Behavior <!-- Tell us what is happening --> The telegram handler crashes ### Your FULL config.json (remove your username, password, gmapkey and any other private info) <!-- Provide your FULL config file, feel free to use services such as pastebin.com to reduce clutter --> "type": "TelegramTask", "config": { "enabled": true, "master": null, "// old syntax, still supported: alert_catch": ["all"], "// new syntax:": {}, "alert_catch": { "all": {"operator": "and", "cp": 1300, "iv": 0.95}, "Snorlax": {"operator": "or", "cp": 900, "iv": 0.9} ### Output when issue occurred <!-- Provide a reasonable sample from your output log (not just the error message), feel free to use services such as pastebin.com to reduce clutter --> > Traceback (most recent call last): > File "/home/felis/PokemonGo-Bot/local/lib/python2.7/site-packages/eventlet/hubs/poll.py", line 115, in wait > listener.cb(fileno) > File "/home/felis/PokemonGo-Bot/local/lib/python2.7/site-packages/eventlet/green/thread.py", line 41, in __thread_body > func(_args, *_kwargs) > File "/home/felis/PokemonGo-Bot/pokemongo_bot/event_handlers/telegram_handler.py", line 82, in run > if not re.match(r'^[0-9]+$', self.master): > File "/home/felis/PokemonGo-Bot/lib/python2.7/re.py", line 141, in match > return _compile(pattern, flags).match(string) > TypeError: expected string or buffer > Removing descriptor: 16 ### Steps to Reproduce <!-- Tell us the steps you have taken to reproduce the issue --> Unsure how it started. Now the error happens every startup since the bot is receiving a previous /info request. master: null is on purpose. Multiple people are watching the same bot instance, so we don't need push notification. ### Other Information OS: Ubuntu 16.04.1 <!-- Tell us what Operating system you're using --> Branch: dev <!-- dev or master --> Git Commit: c270999a79d8e17f43e846249738b7b1f91936b8 <!-- run 'git log -n 1 --pretty=format:"%H"' --> Python Version: Python 2.7.12 <!-- run 'python -V' and paste it here) --> Any other relevant files/configs (eg: path files) <!-- Anything else which may be of relevance --> <!-- ===============END OF ISSUE SECTION=============== --> <!-- Note: Delete these lines and everything BELOW if creating an Issue --> telegram_handler : TypeError: expected string or buffer <!-- ===============ISSUE SECTION=============== Before you create an Issue, please check the following: 1. Have you validated that your config.json is valid JSON? 
Use http://jsonlint.com/ to check. 2. Have you [searched our issue tracker](https://github.com/PokemonGoF/PokemonGo-Bot/issues?q=is%3Aissue+sort%3Aupdated-desc) to see if the issue already exists? If so, comment on that issue instead rather than creating a new issue. 3. Are you running on the `master` branch? We work on the `dev` branch and then add that functionality to `master` when it is stable. Your issue may be fixed on `dev` and there is no need for this issue, just wait and it will eventually be merged to `master`. 4. All Issue sections MUST be completed to help us determine the actual problem and find its cause --> ### Expected Behavior <!-- Tell us what you expect to happen --> Bot to run and telegram to respond to /info request. ### Actual Behavior <!-- Tell us what is happening --> The telegram handler crashes ### Your FULL config.json (remove your username, password, gmapkey and any other private info) <!-- Provide your FULL config file, feel free to use services such as pastebin.com to reduce clutter --> "type": "TelegramTask", "config": { "enabled": true, "master": null, "// old syntax, still supported: alert_catch": ["all"], "// new syntax:": {}, "alert_catch": { "all": {"operator": "and", "cp": 1300, "iv": 0.95}, "Snorlax": {"operator": "or", "cp": 900, "iv": 0.9} ### Output when issue occurred <!-- Provide a reasonable sample from your output log (not just the error message), feel free to use services such as pastebin.com to reduce clutter --> > Traceback (most recent call last): > File "/home/felis/PokemonGo-Bot/local/lib/python2.7/site-packages/eventlet/hubs/poll.py", line 115, in wait > listener.cb(fileno) > File "/home/felis/PokemonGo-Bot/local/lib/python2.7/site-packages/eventlet/green/thread.py", line 41, in __thread_body > func(_args, *_kwargs) > File "/home/felis/PokemonGo-Bot/pokemongo_bot/event_handlers/telegram_handler.py", line 82, in run > if not re.match(r'^[0-9]+$', self.master): > File "/home/felis/PokemonGo-Bot/lib/python2.7/re.py", line 141, in match > return _compile(pattern, flags).match(string) > TypeError: expected string or buffer > Removing descriptor: 16 ### Steps to Reproduce <!-- Tell us the steps you have taken to reproduce the issue --> Unsure how it started. Now the error happens every startup since the bot is receiving a previous /info request. master: null is on purpose. Multiple people are watching the same bot instance, so we don't need push notification. ### Other Information OS: Ubuntu 16.04.1 <!-- Tell us what Operating system you're using --> Branch: dev <!-- dev or master --> Git Commit: c270999a79d8e17f43e846249738b7b1f91936b8 <!-- run 'git log -n 1 --pretty=format:"%H"' --> Python Version: Python 2.7.12 <!-- run 'python -V' and paste it here) --> Any other relevant files/configs (eg: path files) <!-- Anything else which may be of relevance --> <!-- ===============END OF ISSUE SECTION=============== --> <!-- Note: Delete these lines and everything BELOW if creating an Issue --> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `pokemongo_bot/event_handlers/telegram_handler.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 from pokemongo_bot.event_manager import EventHandler 3 from pokemongo_bot.base_dir import _base_dir 4 import json 5 import os 6 import time 7 import telegram 8 import thread 9 import re 10 11 DEBUG_ON = False 12 13 class FileIOException(Exception): 14 pass 15 16 class TelegramClass: 17 18 update_id = None 19 20 def __init__(self, bot, master, pokemons, config): 21 self.bot = bot 22 self.master = master 23 self.pokemons = pokemons 24 self._tbot = None 25 self.config = config 26 27 def sendMessage(self, chat_id=None, parse_mode='Markdown', text=None): 28 self._tbot.sendMessage(chat_id=chat_id, parse_mode=parse_mode, text=text) 29 30 def connect(self): 31 self._tbot = telegram.Bot(self.bot.config.telegram_token) 32 try: 33 self.update_id = self._tbot.getUpdates()[0].update_id 34 except IndexError: 35 self.update_id = None 36 37 def _get_player_stats(self): 38 web_inventory = os.path.join(_base_dir, "web", "inventory-%s.json" % self.bot.config.username) 39 try: 40 with open(web_inventory, "r") as infile: 41 json_inventory = json.load(infile) 42 except ValueError as exception: 43 self.bot.logger.info('[x] Error while opening inventory file for read: %s' % exception) 44 json_inventory = [] 45 except: 46 raise FileIOException("Unexpected error reading from {}".format(web_inventory)) 47 return next((x["inventory_item_data"]["player_stats"] 48 for x in json_inventory 49 if x.get("inventory_item_data", {}).get("player_stats", {})), 50 None) 51 def send_player_stats_to_chat(self, chat_id): 52 stats = self._get_player_stats() 53 if stats: 54 with self.bot.database as conn: 55 cur = conn.cursor() 56 cur.execute("SELECT DISTINCT COUNT(encounter_id) FROM catch_log WHERE dated >= datetime('now','-1 day')") 57 catch_day = cur.fetchone()[0] 58 cur.execute("SELECT DISTINCT COUNT(pokestop) FROM pokestop_log WHERE dated >= datetime('now','-1 day')") 59 ps_day = cur.fetchone()[0] 60 res = ( 61 "*"+self.bot.config.username+"*", 62 "_Level:_ "+str(stats["level"]), 63 "_XP:_ "+str(stats["experience"])+"/"+str(stats["next_level_xp"]), 64 "_Pokemons Captured:_ "+str(stats["pokemons_captured"])+" ("+str(catch_day)+" _last 24h_)", 65 "_Poke Stop Visits:_ "+str(stats["poke_stop_visits"])+" ("+str(ps_day)+" _last 24h_)", 66 "_KM Walked:_ "+str("%.2f" % stats["km_walked"]) 67 ) 68 self._tbot.sendMessage(chat_id=chat_id, parse_mode='Markdown', text="\n".join(res)) 69 self._tbot.send_location(chat_id=chat_id, latitude=self.bot.api._position_lat, longitude=self.bot.api._position_lng) 70 else: 71 self._tbot.sendMessage(chat_id=chat_id, parse_mode='Markdown', text="Stats not loaded yet\n") 72 def run(self): 73 time.sleep(1) 74 while True: 75 try: 76 for update in self._tbot.getUpdates(offset=self.update_id, timeout=10): 77 self.update_id = update.update_id+1 78 if update.message: 79 self.bot.logger.info("message from {} ({}): {}".format(update.message.from_user.username, update.message.from_user.id, update.message.text)) 80 if self.master and self.master not in [update.message.from_user.id, "@{}".format(update.message.from_user.username)]: 81 continue 82 if not re.match(r'^[0-9]+$', self.master): 83 # the "master" is not numeric, set self.master to update.message.chat_id and re-instantiate the handler 84 newconfig = self.config 85 newconfig['master'] = update.message.chat_id 86 # remove old handler 87 self.bot.event_manager._handlers = filter(lambda x: not isinstance(x, TelegramHandler), 
self.bot.event_manager._handlers) 88 # add new handler (passing newconfig as parameter) 89 self.bot.event_manager.add_handler(TelegramHandler(self.bot, newconfig)) 90 if update.message.text == "/info": 91 self.send_player_stats_to_chat(update.message.chat_id) 92 elif update.message.text == "/start" or update.message.text == "/help": 93 res = ( 94 "Commands: ", 95 "/info - info about bot" 96 ) 97 self._tbot.sendMessage(chat_id=update.message.chat_id, parse_mode='Markdown', text="\n".join(res)) 98 except telegram.error.NetworkError: 99 time.sleep(1) 100 except telegram.error.TelegramError: 101 time.sleep(10) 102 except telegram.error.Unauthorized: 103 self.update_id += 1 104 105 class TelegramHandler(EventHandler): 106 def __init__(self, bot, config): 107 self.bot = bot 108 self.tbot = None 109 self.master = config.get('master', None) 110 self.pokemons = config.get('alert_catch', {}) 111 self.whoami = "TelegramHandler" 112 self.config = config 113 114 def handle_event(self, event, sender, level, formatted_msg, data): 115 if self.tbot is None: 116 try: 117 self.tbot = TelegramClass(self.bot, self.master, self.pokemons, self.config) 118 self.tbot.connect() 119 thread.start_new_thread(self.tbot.run) 120 except Exception as inst: 121 self.tbot = None 122 if self.master: 123 if not re.match(r'^[0-9]+$', str(self.master)): 124 return 125 master = self.master 126 127 if event == 'level_up': 128 msg = "level up ({})".format(data["current_level"]) 129 elif event == 'pokemon_caught': 130 if isinstance(self.pokemons, list): 131 if data["pokemon"] in self.pokemons or "all" in self.pokemons: 132 msg = "Caught {} CP: {}, IV: {}".format(data["pokemon"], data["cp"], data["iv"]) 133 else: 134 return 135 else: 136 if data["pokemon"] in self.pokemons: 137 trigger = self.pokemons[data["pokemon"]] 138 elif "all" in self.pokemons: 139 trigger = self.pokemons["all"] 140 else: 141 return 142 if (not "operator" in trigger or trigger["operator"] == "and") and data["cp"] >= trigger["cp"] and data["iv"] >= trigger["iv"] or ("operator" in trigger and trigger["operator"] == "or" and (data["cp"] >= trigger["cp"] or data["iv"] >= trigger["iv"])): 143 msg = "Caught {} CP: {}, IV: {}".format(data["pokemon"], data["cp"], data["iv"]) 144 else: 145 return 146 elif event == 'egg_hatched': 147 msg = "Egg hatched with a {} CP: {}, IV: {}".format(data["pokemon"], data["cp"], data["iv"]) 148 elif event == 'catch_limit': 149 self.tbot.send_player_stats_to_chat(master) 150 msg = "*You have reached your daily catch limit, quitting.*" 151 else: 152 return 153 self.tbot.sendMessage(chat_id=master, parse_mode='Markdown', text=msg) 154 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pokemongo_bot/event_handlers/telegram_handler.py b/pokemongo_bot/event_handlers/telegram_handler.py --- a/pokemongo_bot/event_handlers/telegram_handler.py +++ b/pokemongo_bot/event_handlers/telegram_handler.py @@ -79,7 +79,7 @@ self.bot.logger.info("message from {} ({}): {}".format(update.message.from_user.username, update.message.from_user.id, update.message.text)) if self.master and self.master not in [update.message.from_user.id, "@{}".format(update.message.from_user.username)]: continue - if not re.match(r'^[0-9]+$', self.master): + if self.master and not re.match(r'^[0-9]+$', str(self.master)): # the "master" is not numeric, set self.master to update.message.chat_id and re-instantiate the handler newconfig = self.config newconfig['master'] = update.message.chat_id
{"golden_diff": "diff --git a/pokemongo_bot/event_handlers/telegram_handler.py b/pokemongo_bot/event_handlers/telegram_handler.py\n--- a/pokemongo_bot/event_handlers/telegram_handler.py\n+++ b/pokemongo_bot/event_handlers/telegram_handler.py\n@@ -79,7 +79,7 @@\n self.bot.logger.info(\"message from {} ({}): {}\".format(update.message.from_user.username, update.message.from_user.id, update.message.text))\n if self.master and self.master not in [update.message.from_user.id, \"@{}\".format(update.message.from_user.username)]:\n continue\n- if not re.match(r'^[0-9]+$', self.master):\n+ if self.master and not re.match(r'^[0-9]+$', str(self.master)):\n # the \"master\" is not numeric, set self.master to update.message.chat_id and re-instantiate the handler\n newconfig = self.config\n newconfig['master'] = update.message.chat_id\n", "issue": "telegram_handler : TypeError: expected string or buffer\n<!--\n\n===============ISSUE SECTION===============\n\nBefore you create an Issue, please check the following:\n\n1. Have you validated that your config.json is valid JSON? Use http://jsonlint.com/ to check.\n2. Have you [searched our issue tracker](https://github.com/PokemonGoF/PokemonGo-Bot/issues?q=is%3Aissue+sort%3Aupdated-desc) to see if the issue already exists? If so, comment on that issue instead rather than creating a new issue.\n3. Are you running on the `master` branch? We work on the `dev` branch and then add that functionality to `master` when it is stable. Your issue may be fixed on `dev` and there is no need for this issue, just wait and it will eventually be merged to `master`.\n4. All Issue sections MUST be completed to help us determine the actual problem and find its cause\n-->\n### Expected Behavior\n\n<!-- Tell us what you expect to happen -->\n\nBot to run and telegram to respond to /info request.\n### Actual Behavior\n\n<!-- Tell us what is happening -->\n\nThe telegram handler crashes\n### Your FULL config.json (remove your username, password, gmapkey and any other private info)\n\n<!-- Provide your FULL config file, feel free to use services such as pastebin.com to reduce clutter -->\n\n \"type\": \"TelegramTask\",\n \"config\": {\n \"enabled\": true,\n \"master\": null,\n \"// old syntax, still supported: alert_catch\": [\"all\"],\n \"// new syntax:\": {},\n \"alert_catch\": {\n \"all\": {\"operator\": \"and\", \"cp\": 1300, \"iv\": 0.95},\n \"Snorlax\": {\"operator\": \"or\", \"cp\": 900, \"iv\": 0.9}\n### Output when issue occurred\n\n<!-- Provide a reasonable sample from your output log (not just the error message), feel free to use services such as pastebin.com to reduce clutter -->\n\n> Traceback (most recent call last):\n> File \"/home/felis/PokemonGo-Bot/local/lib/python2.7/site-packages/eventlet/hubs/poll.py\", line 115, in wait\n> listener.cb(fileno)\n> File \"/home/felis/PokemonGo-Bot/local/lib/python2.7/site-packages/eventlet/green/thread.py\", line 41, in __thread_body\n> func(_args, *_kwargs)\n> File \"/home/felis/PokemonGo-Bot/pokemongo_bot/event_handlers/telegram_handler.py\", line 82, in run\n> if not re.match(r'^[0-9]+$', self.master):\n> File \"/home/felis/PokemonGo-Bot/lib/python2.7/re.py\", line 141, in match\n> return _compile(pattern, flags).match(string)\n> TypeError: expected string or buffer\n> Removing descriptor: 16\n### Steps to Reproduce\n\n<!-- Tell us the steps you have taken to reproduce the issue -->\n\nUnsure how it started. Now the error happens every startup since the bot is receiving a previous /info request.\nmaster: null is on purpose. 
Multiple people are watching the same bot instance, so we don't need push notification.\n### Other Information\n\nOS: Ubuntu 16.04.1\n\n<!-- Tell us what Operating system you're using --> \n\nBranch: dev\n\n<!-- dev or master --> \n\nGit Commit: c270999a79d8e17f43e846249738b7b1f91936b8\n\n<!-- run 'git log -n 1 --pretty=format:\"%H\"' --> \n\nPython Version: Python 2.7.12\n\n<!-- run 'python -V' and paste it here) --> \n\nAny other relevant files/configs (eg: path files) \n\n<!-- Anything else which may be of relevance -->\n\n<!-- ===============END OF ISSUE SECTION=============== -->\n\n<!-- Note: Delete these lines and everything BELOW if creating an Issue -->\n\ntelegram_handler : TypeError: expected string or buffer\n<!--\n\n===============ISSUE SECTION===============\n\nBefore you create an Issue, please check the following:\n\n1. Have you validated that your config.json is valid JSON? Use http://jsonlint.com/ to check.\n2. Have you [searched our issue tracker](https://github.com/PokemonGoF/PokemonGo-Bot/issues?q=is%3Aissue+sort%3Aupdated-desc) to see if the issue already exists? If so, comment on that issue instead rather than creating a new issue.\n3. Are you running on the `master` branch? We work on the `dev` branch and then add that functionality to `master` when it is stable. Your issue may be fixed on `dev` and there is no need for this issue, just wait and it will eventually be merged to `master`.\n4. All Issue sections MUST be completed to help us determine the actual problem and find its cause\n-->\n### Expected Behavior\n\n<!-- Tell us what you expect to happen -->\n\nBot to run and telegram to respond to /info request.\n### Actual Behavior\n\n<!-- Tell us what is happening -->\n\nThe telegram handler crashes\n### Your FULL config.json (remove your username, password, gmapkey and any other private info)\n\n<!-- Provide your FULL config file, feel free to use services such as pastebin.com to reduce clutter -->\n\n \"type\": \"TelegramTask\",\n \"config\": {\n \"enabled\": true,\n \"master\": null,\n \"// old syntax, still supported: alert_catch\": [\"all\"],\n \"// new syntax:\": {},\n \"alert_catch\": {\n \"all\": {\"operator\": \"and\", \"cp\": 1300, \"iv\": 0.95},\n \"Snorlax\": {\"operator\": \"or\", \"cp\": 900, \"iv\": 0.9}\n### Output when issue occurred\n\n<!-- Provide a reasonable sample from your output log (not just the error message), feel free to use services such as pastebin.com to reduce clutter -->\n\n> Traceback (most recent call last):\n> File \"/home/felis/PokemonGo-Bot/local/lib/python2.7/site-packages/eventlet/hubs/poll.py\", line 115, in wait\n> listener.cb(fileno)\n> File \"/home/felis/PokemonGo-Bot/local/lib/python2.7/site-packages/eventlet/green/thread.py\", line 41, in __thread_body\n> func(_args, *_kwargs)\n> File \"/home/felis/PokemonGo-Bot/pokemongo_bot/event_handlers/telegram_handler.py\", line 82, in run\n> if not re.match(r'^[0-9]+$', self.master):\n> File \"/home/felis/PokemonGo-Bot/lib/python2.7/re.py\", line 141, in match\n> return _compile(pattern, flags).match(string)\n> TypeError: expected string or buffer\n> Removing descriptor: 16\n### Steps to Reproduce\n\n<!-- Tell us the steps you have taken to reproduce the issue -->\n\nUnsure how it started. Now the error happens every startup since the bot is receiving a previous /info request.\nmaster: null is on purpose. 
Multiple people are watching the same bot instance, so we don't need push notification.\n### Other Information\n\nOS: Ubuntu 16.04.1\n\n<!-- Tell us what Operating system you're using --> \n\nBranch: dev\n\n<!-- dev or master --> \n\nGit Commit: c270999a79d8e17f43e846249738b7b1f91936b8\n\n<!-- run 'git log -n 1 --pretty=format:\"%H\"' --> \n\nPython Version: Python 2.7.12\n\n<!-- run 'python -V' and paste it here) --> \n\nAny other relevant files/configs (eg: path files) \n\n<!-- Anything else which may be of relevance -->\n\n<!-- ===============END OF ISSUE SECTION=============== -->\n\n<!-- Note: Delete these lines and everything BELOW if creating an Issue -->\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom pokemongo_bot.event_manager import EventHandler\nfrom pokemongo_bot.base_dir import _base_dir\nimport json\nimport os\nimport time\nimport telegram\nimport thread\nimport re\n\nDEBUG_ON = False\n\nclass FileIOException(Exception):\n pass\n\nclass TelegramClass:\n\n update_id = None\n\n def __init__(self, bot, master, pokemons, config):\n self.bot = bot\n self.master = master\n self.pokemons = pokemons\n self._tbot = None\n self.config = config\n\n def sendMessage(self, chat_id=None, parse_mode='Markdown', text=None):\n self._tbot.sendMessage(chat_id=chat_id, parse_mode=parse_mode, text=text)\n\n def connect(self):\n self._tbot = telegram.Bot(self.bot.config.telegram_token)\n try:\n self.update_id = self._tbot.getUpdates()[0].update_id\n except IndexError:\n self.update_id = None\n\n def _get_player_stats(self):\n web_inventory = os.path.join(_base_dir, \"web\", \"inventory-%s.json\" % self.bot.config.username)\n try:\n with open(web_inventory, \"r\") as infile:\n json_inventory = json.load(infile)\n except ValueError as exception:\n self.bot.logger.info('[x] Error while opening inventory file for read: %s' % exception)\n json_inventory = []\n except:\n raise FileIOException(\"Unexpected error reading from {}\".format(web_inventory))\n return next((x[\"inventory_item_data\"][\"player_stats\"]\n for x in json_inventory\n if x.get(\"inventory_item_data\", {}).get(\"player_stats\", {})),\n None)\n def send_player_stats_to_chat(self, chat_id):\n stats = self._get_player_stats()\n if stats:\n with self.bot.database as conn:\n cur = conn.cursor()\n cur.execute(\"SELECT DISTINCT COUNT(encounter_id) FROM catch_log WHERE dated >= datetime('now','-1 day')\")\n catch_day = cur.fetchone()[0]\n cur.execute(\"SELECT DISTINCT COUNT(pokestop) FROM pokestop_log WHERE dated >= datetime('now','-1 day')\")\n ps_day = cur.fetchone()[0]\n res = (\n \"*\"+self.bot.config.username+\"*\",\n \"_Level:_ \"+str(stats[\"level\"]),\n \"_XP:_ \"+str(stats[\"experience\"])+\"/\"+str(stats[\"next_level_xp\"]),\n \"_Pokemons Captured:_ \"+str(stats[\"pokemons_captured\"])+\" (\"+str(catch_day)+\" _last 24h_)\",\n \"_Poke Stop Visits:_ \"+str(stats[\"poke_stop_visits\"])+\" (\"+str(ps_day)+\" _last 24h_)\",\n \"_KM Walked:_ \"+str(\"%.2f\" % stats[\"km_walked\"])\n )\n self._tbot.sendMessage(chat_id=chat_id, parse_mode='Markdown', text=\"\\n\".join(res))\n self._tbot.send_location(chat_id=chat_id, latitude=self.bot.api._position_lat, longitude=self.bot.api._position_lng)\n else:\n self._tbot.sendMessage(chat_id=chat_id, parse_mode='Markdown', text=\"Stats not loaded yet\\n\")\n def run(self):\n time.sleep(1)\n while True:\n try:\n for update in self._tbot.getUpdates(offset=self.update_id, timeout=10):\n self.update_id = update.update_id+1\n if update.message:\n self.bot.logger.info(\"message from {} 
({}): {}\".format(update.message.from_user.username, update.message.from_user.id, update.message.text))\n if self.master and self.master not in [update.message.from_user.id, \"@{}\".format(update.message.from_user.username)]:\n continue\n if not re.match(r'^[0-9]+$', self.master):\n # the \"master\" is not numeric, set self.master to update.message.chat_id and re-instantiate the handler\n newconfig = self.config\n newconfig['master'] = update.message.chat_id\n # remove old handler\n self.bot.event_manager._handlers = filter(lambda x: not isinstance(x, TelegramHandler), self.bot.event_manager._handlers)\n # add new handler (passing newconfig as parameter)\n self.bot.event_manager.add_handler(TelegramHandler(self.bot, newconfig))\n if update.message.text == \"/info\":\n self.send_player_stats_to_chat(update.message.chat_id)\n elif update.message.text == \"/start\" or update.message.text == \"/help\":\n res = (\n \"Commands: \",\n \"/info - info about bot\"\n )\n self._tbot.sendMessage(chat_id=update.message.chat_id, parse_mode='Markdown', text=\"\\n\".join(res))\n except telegram.error.NetworkError:\n time.sleep(1)\n except telegram.error.TelegramError:\n time.sleep(10)\n except telegram.error.Unauthorized:\n self.update_id += 1\n\nclass TelegramHandler(EventHandler):\n def __init__(self, bot, config):\n self.bot = bot\n self.tbot = None\n self.master = config.get('master', None)\n self.pokemons = config.get('alert_catch', {})\n self.whoami = \"TelegramHandler\"\n self.config = config\n\n def handle_event(self, event, sender, level, formatted_msg, data):\n if self.tbot is None:\n try:\n self.tbot = TelegramClass(self.bot, self.master, self.pokemons, self.config)\n self.tbot.connect()\n thread.start_new_thread(self.tbot.run)\n except Exception as inst:\n self.tbot = None\n if self.master:\n if not re.match(r'^[0-9]+$', str(self.master)):\n return\n master = self.master\n\n if event == 'level_up':\n msg = \"level up ({})\".format(data[\"current_level\"])\n elif event == 'pokemon_caught':\n if isinstance(self.pokemons, list):\n if data[\"pokemon\"] in self.pokemons or \"all\" in self.pokemons:\n msg = \"Caught {} CP: {}, IV: {}\".format(data[\"pokemon\"], data[\"cp\"], data[\"iv\"])\n else:\n return\n else:\n if data[\"pokemon\"] in self.pokemons:\n trigger = self.pokemons[data[\"pokemon\"]]\n elif \"all\" in self.pokemons:\n trigger = self.pokemons[\"all\"]\n else:\n return\n if (not \"operator\" in trigger or trigger[\"operator\"] == \"and\") and data[\"cp\"] >= trigger[\"cp\"] and data[\"iv\"] >= trigger[\"iv\"] or (\"operator\" in trigger and trigger[\"operator\"] == \"or\" and (data[\"cp\"] >= trigger[\"cp\"] or data[\"iv\"] >= trigger[\"iv\"])):\n msg = \"Caught {} CP: {}, IV: {}\".format(data[\"pokemon\"], data[\"cp\"], data[\"iv\"])\n else:\n return\n elif event == 'egg_hatched':\n msg = \"Egg hatched with a {} CP: {}, IV: {}\".format(data[\"pokemon\"], data[\"cp\"], data[\"iv\"])\n elif event == 'catch_limit':\n self.tbot.send_player_stats_to_chat(master)\n msg = \"*You have reached your daily catch limit, quitting.*\"\n else:\n return\n self.tbot.sendMessage(chat_id=master, parse_mode='Markdown', text=msg)\n", "path": "pokemongo_bot/event_handlers/telegram_handler.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom pokemongo_bot.event_manager import EventHandler\nfrom pokemongo_bot.base_dir import _base_dir\nimport json\nimport os\nimport time\nimport telegram\nimport thread\nimport re\n\nDEBUG_ON = False\n\nclass FileIOException(Exception):\n pass\n\nclass 
TelegramClass:\n\n update_id = None\n\n def __init__(self, bot, master, pokemons, config):\n self.bot = bot\n self.master = master\n self.pokemons = pokemons\n self._tbot = None\n self.config = config\n\n def sendMessage(self, chat_id=None, parse_mode='Markdown', text=None):\n self._tbot.sendMessage(chat_id=chat_id, parse_mode=parse_mode, text=text)\n\n def connect(self):\n self._tbot = telegram.Bot(self.bot.config.telegram_token)\n try:\n self.update_id = self._tbot.getUpdates()[0].update_id\n except IndexError:\n self.update_id = None\n\n def _get_player_stats(self):\n web_inventory = os.path.join(_base_dir, \"web\", \"inventory-%s.json\" % self.bot.config.username)\n try:\n with open(web_inventory, \"r\") as infile:\n json_inventory = json.load(infile)\n except ValueError as exception:\n self.bot.logger.info('[x] Error while opening inventory file for read: %s' % exception)\n json_inventory = []\n except:\n raise FileIOException(\"Unexpected error reading from {}\".format(web_inventory))\n return next((x[\"inventory_item_data\"][\"player_stats\"]\n for x in json_inventory\n if x.get(\"inventory_item_data\", {}).get(\"player_stats\", {})),\n None)\n def send_player_stats_to_chat(self, chat_id):\n stats = self._get_player_stats()\n if stats:\n with self.bot.database as conn:\n cur = conn.cursor()\n cur.execute(\"SELECT DISTINCT COUNT(encounter_id) FROM catch_log WHERE dated >= datetime('now','-1 day')\")\n catch_day = cur.fetchone()[0]\n cur.execute(\"SELECT DISTINCT COUNT(pokestop) FROM pokestop_log WHERE dated >= datetime('now','-1 day')\")\n ps_day = cur.fetchone()[0]\n res = (\n \"*\"+self.bot.config.username+\"*\",\n \"_Level:_ \"+str(stats[\"level\"]),\n \"_XP:_ \"+str(stats[\"experience\"])+\"/\"+str(stats[\"next_level_xp\"]),\n \"_Pokemons Captured:_ \"+str(stats[\"pokemons_captured\"])+\" (\"+str(catch_day)+\" _last 24h_)\",\n \"_Poke Stop Visits:_ \"+str(stats[\"poke_stop_visits\"])+\" (\"+str(ps_day)+\" _last 24h_)\",\n \"_KM Walked:_ \"+str(\"%.2f\" % stats[\"km_walked\"])\n )\n self._tbot.sendMessage(chat_id=chat_id, parse_mode='Markdown', text=\"\\n\".join(res))\n self._tbot.send_location(chat_id=chat_id, latitude=self.bot.api._position_lat, longitude=self.bot.api._position_lng)\n else:\n self._tbot.sendMessage(chat_id=chat_id, parse_mode='Markdown', text=\"Stats not loaded yet\\n\")\n def run(self):\n time.sleep(1)\n while True:\n try:\n for update in self._tbot.getUpdates(offset=self.update_id, timeout=10):\n self.update_id = update.update_id+1\n if update.message:\n self.bot.logger.info(\"message from {} ({}): {}\".format(update.message.from_user.username, update.message.from_user.id, update.message.text))\n if self.master and self.master not in [update.message.from_user.id, \"@{}\".format(update.message.from_user.username)]:\n continue\n if self.master and not re.match(r'^[0-9]+$', str(self.master)):\n # the \"master\" is not numeric, set self.master to update.message.chat_id and re-instantiate the handler\n newconfig = self.config\n newconfig['master'] = update.message.chat_id\n # remove old handler\n self.bot.event_manager._handlers = filter(lambda x: not isinstance(x, TelegramHandler), self.bot.event_manager._handlers)\n # add new handler (passing newconfig as parameter)\n self.bot.event_manager.add_handler(TelegramHandler(self.bot, newconfig))\n if update.message.text == \"/info\":\n self.send_player_stats_to_chat(update.message.chat_id)\n elif update.message.text == \"/start\" or update.message.text == \"/help\":\n res = (\n \"Commands: \",\n \"/info - info about 
bot\"\n )\n self._tbot.sendMessage(chat_id=update.message.chat_id, parse_mode='Markdown', text=\"\\n\".join(res))\n except telegram.error.NetworkError:\n time.sleep(1)\n except telegram.error.TelegramError:\n time.sleep(10)\n except telegram.error.Unauthorized:\n self.update_id += 1\n\nclass TelegramHandler(EventHandler):\n def __init__(self, bot, config):\n self.bot = bot\n self.tbot = None\n self.master = config.get('master', None)\n self.pokemons = config.get('alert_catch', {})\n self.whoami = \"TelegramHandler\"\n self.config = config\n\n def handle_event(self, event, sender, level, formatted_msg, data):\n if self.tbot is None:\n try:\n self.tbot = TelegramClass(self.bot, self.master, self.pokemons, self.config)\n self.tbot.connect()\n thread.start_new_thread(self.tbot.run)\n except Exception as inst:\n self.tbot = None\n if self.master:\n if not re.match(r'^[0-9]+$', str(self.master)):\n return\n master = self.master\n\n if event == 'level_up':\n msg = \"level up ({})\".format(data[\"current_level\"])\n elif event == 'pokemon_caught':\n if isinstance(self.pokemons, list):\n if data[\"pokemon\"] in self.pokemons or \"all\" in self.pokemons:\n msg = \"Caught {} CP: {}, IV: {}\".format(data[\"pokemon\"], data[\"cp\"], data[\"iv\"])\n else:\n return\n else:\n if data[\"pokemon\"] in self.pokemons:\n trigger = self.pokemons[data[\"pokemon\"]]\n elif \"all\" in self.pokemons:\n trigger = self.pokemons[\"all\"]\n else:\n return\n if (not \"operator\" in trigger or trigger[\"operator\"] == \"and\") and data[\"cp\"] >= trigger[\"cp\"] and data[\"iv\"] >= trigger[\"iv\"] or (\"operator\" in trigger and trigger[\"operator\"] == \"or\" and (data[\"cp\"] >= trigger[\"cp\"] or data[\"iv\"] >= trigger[\"iv\"])):\n msg = \"Caught {} CP: {}, IV: {}\".format(data[\"pokemon\"], data[\"cp\"], data[\"iv\"])\n else:\n return\n elif event == 'egg_hatched':\n msg = \"Egg hatched with a {} CP: {}, IV: {}\".format(data[\"pokemon\"], data[\"cp\"], data[\"iv\"])\n elif event == 'catch_limit':\n self.tbot.send_player_stats_to_chat(master)\n msg = \"*You have reached your daily catch limit, quitting.*\"\n else:\n return\n self.tbot.sendMessage(chat_id=master, parse_mode='Markdown', text=msg)\n", "path": "pokemongo_bot/event_handlers/telegram_handler.py"}]}
num_tokens: 3,944
num_tokens_diff: 209
problem_id: gh_patches_debug_26035
source: rasdani/github-patches
task_type: git_diff
in_source_id: canonical__microk8s-4046
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- update homebrew formula to newest microk8s version (1.27) and ubuntu version (22.04)- otherwise Mac Users can't use it. #### Summary The latest present formula on homebrew as of June 2023 point to ubuntu version 18.04 and microk8s version 1.26. This makes it near to impossible for mac users to use it. #### Why is this important? Lot has changed since that time. The instructions do not work in the present day, leading to newbies like myself wasting precious time, assuming the fault is theirs :) #### Are you interested in contributing to this feature? yep definitely. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `installer/common/definitions.py` Content: ``` 1 MAX_CHARACTERS_WRAP: int = 120 2 command_descriptions = { 3 "add-node": "Adds a node to a cluster", 4 "ambassador": "Ambassador API Gateway and Ingress", 5 "cilium": "The cilium client", 6 "config": "Print the kubeconfig", 7 "ctr": "The containerd client", 8 "dashboard-proxy": "Enable the Kubernetes dashboard and proxy to host", 9 "dbctl": "Backup and restore the Kubernetes datastore", 10 "disable": "Disables running add-ons", 11 "enable": "Enables useful add-ons", 12 "helm": "The helm client", 13 "helm3": "The helm3 client", 14 "inspect": "Checks the cluster and gathers logs", 15 "istioctl": "The istio client", 16 "join": "Joins this instance as a node to a cluster", 17 "kubectl": "The kubernetes client", 18 "leave": "Disconnects this node from any cluster it has joined", 19 "linkerd": "The linkerd client", 20 "refresh-certs": "Refresh the CA certificates in this deployment", 21 "remove-node": "Removes a node from the cluster", 22 "reset": "Cleans the cluster from all workloads", 23 "start": "Starts the kubernetes cluster", 24 "status": "Displays the status of the cluster", 25 "stop": "Stops the kubernetes cluster", 26 } 27 DEFAULT_CORES: int = 2 28 DEFAULT_MEMORY_GB: int = 4 29 DEFAULT_DISK_GB: int = 50 30 DEFAULT_ASSUME: bool = False 31 DEFAULT_CHANNEL: str = "1.26/stable" 32 DEFAULT_IMAGE: str = "18.04" 33 34 MIN_CORES: int = 2 35 MIN_MEMORY_GB: int = 2 36 MIN_DISK_GB: int = 10 37 ``` Path: `installer/vm_providers/_multipass/_windows.py` Content: ``` 1 # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*- 2 # 3 # Copyright (C) 2018 Canonical Ltd 4 # 5 # This program is free software: you can redistribute it and/or modify 6 # it under the terms of the GNU General Public License version 3 as 7 # published by the Free Software Foundation. 8 # 9 # This program is distributed in the hope that it will be useful, 10 # but WITHOUT ANY WARRANTY; without even the implied warranty of 11 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 # GNU General Public License for more details. 13 # 14 # You should have received a copy of the GNU General Public License 15 # along with this program. If not, see <http://www.gnu.org/licenses/>. 
16 17 import logging 18 import os.path 19 import requests 20 import shutil 21 import simplejson 22 import subprocess 23 import sys 24 import tempfile 25 26 from progressbar import AnimatedMarker, Bar, Percentage, ProgressBar, UnknownLength 27 28 from common.file_utils import calculate_sha3_384, is_dumb_terminal 29 from vm_providers.errors import ( 30 ProviderMultipassDownloadFailed, 31 ProviderMultipassInstallationFailed, 32 ) 33 34 if sys.platform == "win32": 35 import winreg 36 37 38 logger = logging.getLogger(__name__) 39 40 41 _MULTIPASS_RELEASES_API_URL = "https://api.github.com/repos/canonical/multipass/releases" 42 _MULTIPASS_DL_VERSION = "1.11.1" 43 _MULTIPASS_DL_NAME = "multipass-{version}+win-win64.exe".format(version=_MULTIPASS_DL_VERSION) 44 45 # Download multipass installer and calculate hash: 46 # python3 -c "from installer.common.file_utils import calculate_sha3_384; print(calculate_sha3_384('$HOME/Downloads/multipass-1.11.1+win-win64.exe'))" # noqa: E501 47 _MULTIPASS_DL_SHA3_384 = "7691383eb0f4def0f9e2b5c77f04424756a63f222b3500bdc8fb25bf4725f1c0ce3bd0cb0b7cff7f79d8f489e199225b" # noqa: E501 48 49 50 def windows_reload_multipass_path_env(): 51 """Update PATH to include installed Multipass, if not already set.""" 52 53 assert sys.platform == "win32" 54 55 key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, "Environment") 56 57 paths = os.environ["PATH"].split(";") 58 59 # Drop empty placeholder for trailing comma, if present. 60 if paths[-1] == "": 61 del paths[-1] 62 63 reg_user_path, _ = winreg.QueryValueEx(key, "Path") 64 for path in reg_user_path.split(";"): 65 if path not in paths and "Multipass" in path: 66 paths.append(path) 67 68 # Restore path with trailing comma. 69 os.environ["PATH"] = ";".join(paths) + ";" 70 71 72 def _run_installer(installer_path: str, echoer): 73 """Execute multipass installer.""" 74 75 echoer.info("Installing Multipass...") 76 77 # Multipass requires administrative privileges to install, which requires 78 # the use of `runas` functionality. Some of the options included: 79 # (1) https://stackoverflow.com/a/34216774 80 # (2) ShellExecuteW and wait on installer by attempting to delete it. 81 # Windows would prevent us from deleting installer with a PermissionError: 82 # PermissionError: [WinError 32] The process cannot access the file because 83 # it is being used by another process: <path> 84 # (3) Use PowerShell's "Start-Process" with RunAs verb as shown below. 85 # None of the options are quite ideal, but #3 will do. 86 cmd = """ 87 & {{ 88 try {{ 89 $Output = Start-Process -FilePath {path!r} -Args /S -Verb RunAs -Wait -PassThru 90 }} catch {{ 91 [Environment]::Exit(1) 92 }} 93 }} 94 """.format( 95 path=installer_path 96 ) 97 98 try: 99 subprocess.check_call(["powershell.exe", "-Command", cmd]) 100 except subprocess.CalledProcessError: 101 raise ProviderMultipassInstallationFailed("error launching installer") 102 103 # Reload path environment to see if we can find multipass now. 104 windows_reload_multipass_path_env() 105 106 if not shutil.which("multipass.exe"): 107 # Installation failed. 108 raise ProviderMultipassInstallationFailed("installation did not complete successfully") 109 110 echoer.info("Multipass installation completed successfully.") 111 112 113 def _requests_exception_hint(e: requests.RequestException) -> str: 114 # Use the __doc__ description to give the user a hint. It seems to be a 115 # a decent option over trying to enumerate all of possible types. 
116 if e.__doc__: 117 split_lines = e.__doc__.splitlines() 118 if split_lines: 119 return e.__doc__.splitlines()[0].decode().strip() 120 121 # Should never get here. 122 return "unknown download error" 123 124 125 def _fetch_installer_url() -> str: 126 """Verify version set is a valid 127 ref in GitHub and return the full 128 URL. 129 """ 130 131 try: 132 resp = requests.get(_MULTIPASS_RELEASES_API_URL) 133 except requests.RequestException as e: 134 raise ProviderMultipassDownloadFailed(_requests_exception_hint(e)) 135 136 try: 137 data = resp.json() 138 except simplejson.JSONDecodeError: 139 raise ProviderMultipassDownloadFailed( 140 "failed to fetch valid release data from {}".format(_MULTIPASS_RELEASES_API_URL) 141 ) 142 143 for assets in data: 144 for asset in assets.get("assets", list()): 145 # Find matching name. 146 if asset.get("name") != _MULTIPASS_DL_NAME: 147 continue 148 149 return asset.get("browser_download_url") 150 151 # Something changed we don't know about - we will simply categorize 152 # all possible events as an updated version we do not yet know about. 153 raise ProviderMultipassDownloadFailed("ref specified is not a valid ref in GitHub") 154 155 156 def _download_multipass(dl_dir: str, echoer) -> str: 157 """Creates temporary Downloads installer to temp directory.""" 158 159 dl_url = _fetch_installer_url() 160 dl_basename = os.path.basename(dl_url) 161 dl_path = os.path.join(dl_dir, dl_basename) 162 163 echoer.info("Downloading Multipass installer...\n{} -> {}".format(dl_url, dl_path)) 164 165 try: 166 request = requests.get(dl_url, stream=True, allow_redirects=True) 167 request.raise_for_status() 168 download_requests_stream(request, dl_path) 169 except requests.RequestException as e: 170 raise ProviderMultipassDownloadFailed(_requests_exception_hint(e)) 171 172 digest = calculate_sha3_384(dl_path) 173 if digest != _MULTIPASS_DL_SHA3_384: 174 raise ProviderMultipassDownloadFailed( 175 "download failed verification (expected={} but found={})".format( 176 _MULTIPASS_DL_SHA3_384, digest 177 ) 178 ) 179 180 echoer.info("Verified installer successfully...") 181 return dl_path 182 183 184 def windows_install_multipass(echoer) -> None: 185 """Download and install multipass.""" 186 187 assert sys.platform == "win32" 188 189 dl_dir = tempfile.mkdtemp() 190 dl_path = _download_multipass(dl_dir, echoer) 191 _run_installer(dl_path, echoer) 192 193 # Cleanup. 
194 shutil.rmtree(dl_dir) 195 196 197 def _init_progress_bar(total_length, destination, message=None): 198 if not message: 199 message = "Downloading {!r}".format(os.path.basename(destination)) 200 201 valid_length = total_length and total_length > 0 202 203 if valid_length and is_dumb_terminal(): 204 widgets = [message, " ", Percentage()] 205 maxval = total_length 206 elif valid_length and not is_dumb_terminal(): 207 widgets = [message, Bar(marker="=", left="[", right="]"), " ", Percentage()] 208 maxval = total_length 209 elif not valid_length and is_dumb_terminal(): 210 widgets = [message] 211 maxval = UnknownLength 212 else: 213 widgets = [message, AnimatedMarker()] 214 maxval = UnknownLength 215 216 return ProgressBar(widgets=widgets, maxval=maxval) 217 218 219 def download_requests_stream(request_stream, destination, message=None, total_read=0): 220 """This is a facility to download a request with nice progress bars.""" 221 222 # Doing len(request_stream.content) may defeat the purpose of a 223 # progress bar 224 total_length = 0 225 if not request_stream.headers.get("Content-Encoding", ""): 226 total_length = int(request_stream.headers.get("Content-Length", "0")) 227 # Content-Length in the case of resuming will be 228 # Content-Length - total_read so we add back up to have the feel of 229 # resuming 230 if os.path.exists(destination): 231 total_length += total_read 232 233 progress_bar = _init_progress_bar(total_length, destination, message) 234 progress_bar.start() 235 236 if os.path.exists(destination): 237 mode = "ab" 238 else: 239 mode = "wb" 240 with open(destination, mode) as destination_file: 241 for buf in request_stream.iter_content(1024): 242 destination_file.write(buf) 243 if not is_dumb_terminal(): 244 total_read += len(buf) 245 progress_bar.update(total_read) 246 progress_bar.finish() 247 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/installer/common/definitions.py b/installer/common/definitions.py --- a/installer/common/definitions.py +++ b/installer/common/definitions.py @@ -28,8 +28,8 @@ DEFAULT_MEMORY_GB: int = 4 DEFAULT_DISK_GB: int = 50 DEFAULT_ASSUME: bool = False -DEFAULT_CHANNEL: str = "1.26/stable" -DEFAULT_IMAGE: str = "18.04" +DEFAULT_CHANNEL: str = "1.27/stable" +DEFAULT_IMAGE: str = "22.04" MIN_CORES: int = 2 MIN_MEMORY_GB: int = 2 diff --git a/installer/vm_providers/_multipass/_windows.py b/installer/vm_providers/_multipass/_windows.py --- a/installer/vm_providers/_multipass/_windows.py +++ b/installer/vm_providers/_multipass/_windows.py @@ -39,12 +39,12 @@ _MULTIPASS_RELEASES_API_URL = "https://api.github.com/repos/canonical/multipass/releases" -_MULTIPASS_DL_VERSION = "1.11.1" +_MULTIPASS_DL_VERSION = "1.12.0" _MULTIPASS_DL_NAME = "multipass-{version}+win-win64.exe".format(version=_MULTIPASS_DL_VERSION) # Download multipass installer and calculate hash: # python3 -c "from installer.common.file_utils import calculate_sha3_384; print(calculate_sha3_384('$HOME/Downloads/multipass-1.11.1+win-win64.exe'))" # noqa: E501 -_MULTIPASS_DL_SHA3_384 = "7691383eb0f4def0f9e2b5c77f04424756a63f222b3500bdc8fb25bf4725f1c0ce3bd0cb0b7cff7f79d8f489e199225b" # noqa: E501 +_MULTIPASS_DL_SHA3_384 = "ddba66059052a67fa6a363729b75aca374591bc5a2531c938dd70d63f683c22108d5c2ab77025b818b31f69103228eee" # noqa: E501 def windows_reload_multipass_path_env():
{"golden_diff": "diff --git a/installer/common/definitions.py b/installer/common/definitions.py\n--- a/installer/common/definitions.py\n+++ b/installer/common/definitions.py\n@@ -28,8 +28,8 @@\n DEFAULT_MEMORY_GB: int = 4\n DEFAULT_DISK_GB: int = 50\n DEFAULT_ASSUME: bool = False\n-DEFAULT_CHANNEL: str = \"1.26/stable\"\n-DEFAULT_IMAGE: str = \"18.04\"\n+DEFAULT_CHANNEL: str = \"1.27/stable\"\n+DEFAULT_IMAGE: str = \"22.04\"\n \n MIN_CORES: int = 2\n MIN_MEMORY_GB: int = 2\ndiff --git a/installer/vm_providers/_multipass/_windows.py b/installer/vm_providers/_multipass/_windows.py\n--- a/installer/vm_providers/_multipass/_windows.py\n+++ b/installer/vm_providers/_multipass/_windows.py\n@@ -39,12 +39,12 @@\n \n \n _MULTIPASS_RELEASES_API_URL = \"https://api.github.com/repos/canonical/multipass/releases\"\n-_MULTIPASS_DL_VERSION = \"1.11.1\"\n+_MULTIPASS_DL_VERSION = \"1.12.0\"\n _MULTIPASS_DL_NAME = \"multipass-{version}+win-win64.exe\".format(version=_MULTIPASS_DL_VERSION)\n \n # Download multipass installer and calculate hash:\n # python3 -c \"from installer.common.file_utils import calculate_sha3_384; print(calculate_sha3_384('$HOME/Downloads/multipass-1.11.1+win-win64.exe'))\" # noqa: E501\n-_MULTIPASS_DL_SHA3_384 = \"7691383eb0f4def0f9e2b5c77f04424756a63f222b3500bdc8fb25bf4725f1c0ce3bd0cb0b7cff7f79d8f489e199225b\" # noqa: E501\n+_MULTIPASS_DL_SHA3_384 = \"ddba66059052a67fa6a363729b75aca374591bc5a2531c938dd70d63f683c22108d5c2ab77025b818b31f69103228eee\" # noqa: E501\n \n \n def windows_reload_multipass_path_env():\n", "issue": "update homebrew formula to newest microk8s version (1.27) and ubuntu version (22.04)- otherwise Mac Users can't use it. \n#### Summary\r\nThe latest present formula on homebrew as of June 2023 point to ubuntu version 18.04 and microk8s version 1.26. This makes it near to impossible for mac users to use it.\r\n\r\n#### Why is this important?\r\nLot has changed since that time. 
The instructions do not work in the present day, leading to newbies like myself wasting precious time, assuming the fault is theirs :)\r\n\r\n#### Are you interested in contributing to this feature?\r\nyep definitely.\r\n\n", "before_files": [{"content": "MAX_CHARACTERS_WRAP: int = 120\ncommand_descriptions = {\n \"add-node\": \"Adds a node to a cluster\",\n \"ambassador\": \"Ambassador API Gateway and Ingress\",\n \"cilium\": \"The cilium client\",\n \"config\": \"Print the kubeconfig\",\n \"ctr\": \"The containerd client\",\n \"dashboard-proxy\": \"Enable the Kubernetes dashboard and proxy to host\",\n \"dbctl\": \"Backup and restore the Kubernetes datastore\",\n \"disable\": \"Disables running add-ons\",\n \"enable\": \"Enables useful add-ons\",\n \"helm\": \"The helm client\",\n \"helm3\": \"The helm3 client\",\n \"inspect\": \"Checks the cluster and gathers logs\",\n \"istioctl\": \"The istio client\",\n \"join\": \"Joins this instance as a node to a cluster\",\n \"kubectl\": \"The kubernetes client\",\n \"leave\": \"Disconnects this node from any cluster it has joined\",\n \"linkerd\": \"The linkerd client\",\n \"refresh-certs\": \"Refresh the CA certificates in this deployment\",\n \"remove-node\": \"Removes a node from the cluster\",\n \"reset\": \"Cleans the cluster from all workloads\",\n \"start\": \"Starts the kubernetes cluster\",\n \"status\": \"Displays the status of the cluster\",\n \"stop\": \"Stops the kubernetes cluster\",\n}\nDEFAULT_CORES: int = 2\nDEFAULT_MEMORY_GB: int = 4\nDEFAULT_DISK_GB: int = 50\nDEFAULT_ASSUME: bool = False\nDEFAULT_CHANNEL: str = \"1.26/stable\"\nDEFAULT_IMAGE: str = \"18.04\"\n\nMIN_CORES: int = 2\nMIN_MEMORY_GB: int = 2\nMIN_DISK_GB: int = 10\n", "path": "installer/common/definitions.py"}, {"content": "# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-\n#\n# Copyright (C) 2018 Canonical Ltd\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License version 3 as\n# published by the Free Software Foundation.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n\nimport logging\nimport os.path\nimport requests\nimport shutil\nimport simplejson\nimport subprocess\nimport sys\nimport tempfile\n\nfrom progressbar import AnimatedMarker, Bar, Percentage, ProgressBar, UnknownLength\n\nfrom common.file_utils import calculate_sha3_384, is_dumb_terminal\nfrom vm_providers.errors import (\n ProviderMultipassDownloadFailed,\n ProviderMultipassInstallationFailed,\n)\n\nif sys.platform == \"win32\":\n import winreg\n\n\nlogger = logging.getLogger(__name__)\n\n\n_MULTIPASS_RELEASES_API_URL = \"https://api.github.com/repos/canonical/multipass/releases\"\n_MULTIPASS_DL_VERSION = \"1.11.1\"\n_MULTIPASS_DL_NAME = \"multipass-{version}+win-win64.exe\".format(version=_MULTIPASS_DL_VERSION)\n\n# Download multipass installer and calculate hash:\n# python3 -c \"from installer.common.file_utils import calculate_sha3_384; print(calculate_sha3_384('$HOME/Downloads/multipass-1.11.1+win-win64.exe'))\" # noqa: E501\n_MULTIPASS_DL_SHA3_384 = \"7691383eb0f4def0f9e2b5c77f04424756a63f222b3500bdc8fb25bf4725f1c0ce3bd0cb0b7cff7f79d8f489e199225b\" # noqa: E501\n\n\ndef windows_reload_multipass_path_env():\n \"\"\"Update PATH to include installed Multipass, if not already set.\"\"\"\n\n assert sys.platform == \"win32\"\n\n key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, \"Environment\")\n\n paths = os.environ[\"PATH\"].split(\";\")\n\n # Drop empty placeholder for trailing comma, if present.\n if paths[-1] == \"\":\n del paths[-1]\n\n reg_user_path, _ = winreg.QueryValueEx(key, \"Path\")\n for path in reg_user_path.split(\";\"):\n if path not in paths and \"Multipass\" in path:\n paths.append(path)\n\n # Restore path with trailing comma.\n os.environ[\"PATH\"] = \";\".join(paths) + \";\"\n\n\ndef _run_installer(installer_path: str, echoer):\n \"\"\"Execute multipass installer.\"\"\"\n\n echoer.info(\"Installing Multipass...\")\n\n # Multipass requires administrative privileges to install, which requires\n # the use of `runas` functionality. Some of the options included:\n # (1) https://stackoverflow.com/a/34216774\n # (2) ShellExecuteW and wait on installer by attempting to delete it.\n # Windows would prevent us from deleting installer with a PermissionError:\n # PermissionError: [WinError 32] The process cannot access the file because\n # it is being used by another process: <path>\n # (3) Use PowerShell's \"Start-Process\" with RunAs verb as shown below.\n # None of the options are quite ideal, but #3 will do.\n cmd = \"\"\"\n & {{\n try {{\n $Output = Start-Process -FilePath {path!r} -Args /S -Verb RunAs -Wait -PassThru\n }} catch {{\n [Environment]::Exit(1)\n }}\n }}\n \"\"\".format(\n path=installer_path\n )\n\n try:\n subprocess.check_call([\"powershell.exe\", \"-Command\", cmd])\n except subprocess.CalledProcessError:\n raise ProviderMultipassInstallationFailed(\"error launching installer\")\n\n # Reload path environment to see if we can find multipass now.\n windows_reload_multipass_path_env()\n\n if not shutil.which(\"multipass.exe\"):\n # Installation failed.\n raise ProviderMultipassInstallationFailed(\"installation did not complete successfully\")\n\n echoer.info(\"Multipass installation completed successfully.\")\n\n\ndef _requests_exception_hint(e: requests.RequestException) -> str:\n # Use the __doc__ description to give the user a hint. 
It seems to be a\n # a decent option over trying to enumerate all of possible types.\n if e.__doc__:\n split_lines = e.__doc__.splitlines()\n if split_lines:\n return e.__doc__.splitlines()[0].decode().strip()\n\n # Should never get here.\n return \"unknown download error\"\n\n\ndef _fetch_installer_url() -> str:\n \"\"\"Verify version set is a valid\n ref in GitHub and return the full\n URL.\n \"\"\"\n\n try:\n resp = requests.get(_MULTIPASS_RELEASES_API_URL)\n except requests.RequestException as e:\n raise ProviderMultipassDownloadFailed(_requests_exception_hint(e))\n\n try:\n data = resp.json()\n except simplejson.JSONDecodeError:\n raise ProviderMultipassDownloadFailed(\n \"failed to fetch valid release data from {}\".format(_MULTIPASS_RELEASES_API_URL)\n )\n\n for assets in data:\n for asset in assets.get(\"assets\", list()):\n # Find matching name.\n if asset.get(\"name\") != _MULTIPASS_DL_NAME:\n continue\n\n return asset.get(\"browser_download_url\")\n\n # Something changed we don't know about - we will simply categorize\n # all possible events as an updated version we do not yet know about.\n raise ProviderMultipassDownloadFailed(\"ref specified is not a valid ref in GitHub\")\n\n\ndef _download_multipass(dl_dir: str, echoer) -> str:\n \"\"\"Creates temporary Downloads installer to temp directory.\"\"\"\n\n dl_url = _fetch_installer_url()\n dl_basename = os.path.basename(dl_url)\n dl_path = os.path.join(dl_dir, dl_basename)\n\n echoer.info(\"Downloading Multipass installer...\\n{} -> {}\".format(dl_url, dl_path))\n\n try:\n request = requests.get(dl_url, stream=True, allow_redirects=True)\n request.raise_for_status()\n download_requests_stream(request, dl_path)\n except requests.RequestException as e:\n raise ProviderMultipassDownloadFailed(_requests_exception_hint(e))\n\n digest = calculate_sha3_384(dl_path)\n if digest != _MULTIPASS_DL_SHA3_384:\n raise ProviderMultipassDownloadFailed(\n \"download failed verification (expected={} but found={})\".format(\n _MULTIPASS_DL_SHA3_384, digest\n )\n )\n\n echoer.info(\"Verified installer successfully...\")\n return dl_path\n\n\ndef windows_install_multipass(echoer) -> None:\n \"\"\"Download and install multipass.\"\"\"\n\n assert sys.platform == \"win32\"\n\n dl_dir = tempfile.mkdtemp()\n dl_path = _download_multipass(dl_dir, echoer)\n _run_installer(dl_path, echoer)\n\n # Cleanup.\n shutil.rmtree(dl_dir)\n\n\ndef _init_progress_bar(total_length, destination, message=None):\n if not message:\n message = \"Downloading {!r}\".format(os.path.basename(destination))\n\n valid_length = total_length and total_length > 0\n\n if valid_length and is_dumb_terminal():\n widgets = [message, \" \", Percentage()]\n maxval = total_length\n elif valid_length and not is_dumb_terminal():\n widgets = [message, Bar(marker=\"=\", left=\"[\", right=\"]\"), \" \", Percentage()]\n maxval = total_length\n elif not valid_length and is_dumb_terminal():\n widgets = [message]\n maxval = UnknownLength\n else:\n widgets = [message, AnimatedMarker()]\n maxval = UnknownLength\n\n return ProgressBar(widgets=widgets, maxval=maxval)\n\n\ndef download_requests_stream(request_stream, destination, message=None, total_read=0):\n \"\"\"This is a facility to download a request with nice progress bars.\"\"\"\n\n # Doing len(request_stream.content) may defeat the purpose of a\n # progress bar\n total_length = 0\n if not request_stream.headers.get(\"Content-Encoding\", \"\"):\n total_length = int(request_stream.headers.get(\"Content-Length\", \"0\"))\n # Content-Length in the case 
of resuming will be\n # Content-Length - total_read so we add back up to have the feel of\n # resuming\n if os.path.exists(destination):\n total_length += total_read\n\n progress_bar = _init_progress_bar(total_length, destination, message)\n progress_bar.start()\n\n if os.path.exists(destination):\n mode = \"ab\"\n else:\n mode = \"wb\"\n with open(destination, mode) as destination_file:\n for buf in request_stream.iter_content(1024):\n destination_file.write(buf)\n if not is_dumb_terminal():\n total_read += len(buf)\n progress_bar.update(total_read)\n progress_bar.finish()\n", "path": "installer/vm_providers/_multipass/_windows.py"}], "after_files": [{"content": "MAX_CHARACTERS_WRAP: int = 120\ncommand_descriptions = {\n \"add-node\": \"Adds a node to a cluster\",\n \"ambassador\": \"Ambassador API Gateway and Ingress\",\n \"cilium\": \"The cilium client\",\n \"config\": \"Print the kubeconfig\",\n \"ctr\": \"The containerd client\",\n \"dashboard-proxy\": \"Enable the Kubernetes dashboard and proxy to host\",\n \"dbctl\": \"Backup and restore the Kubernetes datastore\",\n \"disable\": \"Disables running add-ons\",\n \"enable\": \"Enables useful add-ons\",\n \"helm\": \"The helm client\",\n \"helm3\": \"The helm3 client\",\n \"inspect\": \"Checks the cluster and gathers logs\",\n \"istioctl\": \"The istio client\",\n \"join\": \"Joins this instance as a node to a cluster\",\n \"kubectl\": \"The kubernetes client\",\n \"leave\": \"Disconnects this node from any cluster it has joined\",\n \"linkerd\": \"The linkerd client\",\n \"refresh-certs\": \"Refresh the CA certificates in this deployment\",\n \"remove-node\": \"Removes a node from the cluster\",\n \"reset\": \"Cleans the cluster from all workloads\",\n \"start\": \"Starts the kubernetes cluster\",\n \"status\": \"Displays the status of the cluster\",\n \"stop\": \"Stops the kubernetes cluster\",\n}\nDEFAULT_CORES: int = 2\nDEFAULT_MEMORY_GB: int = 4\nDEFAULT_DISK_GB: int = 50\nDEFAULT_ASSUME: bool = False\nDEFAULT_CHANNEL: str = \"1.27/stable\"\nDEFAULT_IMAGE: str = \"22.04\"\n\nMIN_CORES: int = 2\nMIN_MEMORY_GB: int = 2\nMIN_DISK_GB: int = 10\n", "path": "installer/common/definitions.py"}, {"content": "# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-\n#\n# Copyright (C) 2018 Canonical Ltd\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License version 3 as\n# published by the Free Software Foundation.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n\nimport logging\nimport os.path\nimport requests\nimport shutil\nimport simplejson\nimport subprocess\nimport sys\nimport tempfile\n\nfrom progressbar import AnimatedMarker, Bar, Percentage, ProgressBar, UnknownLength\n\nfrom common.file_utils import calculate_sha3_384, is_dumb_terminal\nfrom vm_providers.errors import (\n ProviderMultipassDownloadFailed,\n ProviderMultipassInstallationFailed,\n)\n\nif sys.platform == \"win32\":\n import winreg\n\n\nlogger = logging.getLogger(__name__)\n\n\n_MULTIPASS_RELEASES_API_URL = \"https://api.github.com/repos/canonical/multipass/releases\"\n_MULTIPASS_DL_VERSION = \"1.12.0\"\n_MULTIPASS_DL_NAME = \"multipass-{version}+win-win64.exe\".format(version=_MULTIPASS_DL_VERSION)\n\n# Download multipass installer and calculate hash:\n# python3 -c \"from installer.common.file_utils import calculate_sha3_384; print(calculate_sha3_384('$HOME/Downloads/multipass-1.11.1+win-win64.exe'))\" # noqa: E501\n_MULTIPASS_DL_SHA3_384 = \"ddba66059052a67fa6a363729b75aca374591bc5a2531c938dd70d63f683c22108d5c2ab77025b818b31f69103228eee\" # noqa: E501\n\n\ndef windows_reload_multipass_path_env():\n \"\"\"Update PATH to include installed Multipass, if not already set.\"\"\"\n\n assert sys.platform == \"win32\"\n\n key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, \"Environment\")\n\n paths = os.environ[\"PATH\"].split(\";\")\n\n # Drop empty placeholder for trailing comma, if present.\n if paths[-1] == \"\":\n del paths[-1]\n\n reg_user_path, _ = winreg.QueryValueEx(key, \"Path\")\n for path in reg_user_path.split(\";\"):\n if path not in paths and \"Multipass\" in path:\n paths.append(path)\n\n # Restore path with trailing comma.\n os.environ[\"PATH\"] = \";\".join(paths) + \";\"\n\n\ndef _run_installer(installer_path: str, echoer):\n \"\"\"Execute multipass installer.\"\"\"\n\n echoer.info(\"Installing Multipass...\")\n\n # Multipass requires administrative privileges to install, which requires\n # the use of `runas` functionality. Some of the options included:\n # (1) https://stackoverflow.com/a/34216774\n # (2) ShellExecuteW and wait on installer by attempting to delete it.\n # Windows would prevent us from deleting installer with a PermissionError:\n # PermissionError: [WinError 32] The process cannot access the file because\n # it is being used by another process: <path>\n # (3) Use PowerShell's \"Start-Process\" with RunAs verb as shown below.\n # None of the options are quite ideal, but #3 will do.\n cmd = \"\"\"\n & {{\n try {{\n $Output = Start-Process -FilePath {path!r} -Args /S -Verb RunAs -Wait -PassThru\n }} catch {{\n [Environment]::Exit(1)\n }}\n }}\n \"\"\".format(\n path=installer_path\n )\n\n try:\n subprocess.check_call([\"powershell.exe\", \"-Command\", cmd])\n except subprocess.CalledProcessError:\n raise ProviderMultipassInstallationFailed(\"error launching installer\")\n\n # Reload path environment to see if we can find multipass now.\n windows_reload_multipass_path_env()\n\n if not shutil.which(\"multipass.exe\"):\n # Installation failed.\n raise ProviderMultipassInstallationFailed(\"installation did not complete successfully\")\n\n echoer.info(\"Multipass installation completed successfully.\")\n\n\ndef _requests_exception_hint(e: requests.RequestException) -> str:\n # Use the __doc__ description to give the user a hint. 
It seems to be a\n # a decent option over trying to enumerate all of possible types.\n if e.__doc__:\n split_lines = e.__doc__.splitlines()\n if split_lines:\n return e.__doc__.splitlines()[0].decode().strip()\n\n # Should never get here.\n return \"unknown download error\"\n\n\ndef _fetch_installer_url() -> str:\n \"\"\"Verify version set is a valid\n ref in GitHub and return the full\n URL.\n \"\"\"\n\n try:\n resp = requests.get(_MULTIPASS_RELEASES_API_URL)\n except requests.RequestException as e:\n raise ProviderMultipassDownloadFailed(_requests_exception_hint(e))\n\n try:\n data = resp.json()\n except simplejson.JSONDecodeError:\n raise ProviderMultipassDownloadFailed(\n \"failed to fetch valid release data from {}\".format(_MULTIPASS_RELEASES_API_URL)\n )\n\n for assets in data:\n for asset in assets.get(\"assets\", list()):\n # Find matching name.\n if asset.get(\"name\") != _MULTIPASS_DL_NAME:\n continue\n\n return asset.get(\"browser_download_url\")\n\n # Something changed we don't know about - we will simply categorize\n # all possible events as an updated version we do not yet know about.\n raise ProviderMultipassDownloadFailed(\"ref specified is not a valid ref in GitHub\")\n\n\ndef _download_multipass(dl_dir: str, echoer) -> str:\n \"\"\"Creates temporary Downloads installer to temp directory.\"\"\"\n\n dl_url = _fetch_installer_url()\n dl_basename = os.path.basename(dl_url)\n dl_path = os.path.join(dl_dir, dl_basename)\n\n echoer.info(\"Downloading Multipass installer...\\n{} -> {}\".format(dl_url, dl_path))\n\n try:\n request = requests.get(dl_url, stream=True, allow_redirects=True)\n request.raise_for_status()\n download_requests_stream(request, dl_path)\n except requests.RequestException as e:\n raise ProviderMultipassDownloadFailed(_requests_exception_hint(e))\n\n digest = calculate_sha3_384(dl_path)\n if digest != _MULTIPASS_DL_SHA3_384:\n raise ProviderMultipassDownloadFailed(\n \"download failed verification (expected={} but found={})\".format(\n _MULTIPASS_DL_SHA3_384, digest\n )\n )\n\n echoer.info(\"Verified installer successfully...\")\n return dl_path\n\n\ndef windows_install_multipass(echoer) -> None:\n \"\"\"Download and install multipass.\"\"\"\n\n assert sys.platform == \"win32\"\n\n dl_dir = tempfile.mkdtemp()\n dl_path = _download_multipass(dl_dir, echoer)\n _run_installer(dl_path, echoer)\n\n # Cleanup.\n shutil.rmtree(dl_dir)\n\n\ndef _init_progress_bar(total_length, destination, message=None):\n if not message:\n message = \"Downloading {!r}\".format(os.path.basename(destination))\n\n valid_length = total_length and total_length > 0\n\n if valid_length and is_dumb_terminal():\n widgets = [message, \" \", Percentage()]\n maxval = total_length\n elif valid_length and not is_dumb_terminal():\n widgets = [message, Bar(marker=\"=\", left=\"[\", right=\"]\"), \" \", Percentage()]\n maxval = total_length\n elif not valid_length and is_dumb_terminal():\n widgets = [message]\n maxval = UnknownLength\n else:\n widgets = [message, AnimatedMarker()]\n maxval = UnknownLength\n\n return ProgressBar(widgets=widgets, maxval=maxval)\n\n\ndef download_requests_stream(request_stream, destination, message=None, total_read=0):\n \"\"\"This is a facility to download a request with nice progress bars.\"\"\"\n\n # Doing len(request_stream.content) may defeat the purpose of a\n # progress bar\n total_length = 0\n if not request_stream.headers.get(\"Content-Encoding\", \"\"):\n total_length = int(request_stream.headers.get(\"Content-Length\", \"0\"))\n # Content-Length in the case 
of resuming will be\n # Content-Length - total_read so we add back up to have the feel of\n # resuming\n if os.path.exists(destination):\n total_length += total_read\n\n progress_bar = _init_progress_bar(total_length, destination, message)\n progress_bar.start()\n\n if os.path.exists(destination):\n mode = \"ab\"\n else:\n mode = \"wb\"\n with open(destination, mode) as destination_file:\n for buf in request_stream.iter_content(1024):\n destination_file.write(buf)\n if not is_dumb_terminal():\n total_read += len(buf)\n progress_bar.update(total_read)\n progress_bar.finish()\n", "path": "installer/vm_providers/_multipass/_windows.py"}]}
3,600
602
gh_patches_debug_16541
rasdani/github-patches
git_diff
microsoft__botbuilder-python-2051
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Advancing msrest version dependency ### Use this [query](https://github.com/Microsoft/botbuilder-python/issues?q=is%3Aissue+is%3Aopen++label%3Afeature-request+) to search for the most popular feature requests. _No open issues are reported for msrest._ **Is your feature request related to a problem? Please describe.** When installing a solution accelerator for OpenAI, the solution requires the bot framework. However, aligning the requirements requires downgrading the msrest package. botbuilder-schema 4.14.4 requires msrest==0.6.*, but you have msrest 0.7.1 which is incompatible. botframework-connector 4.14.4 requires msrest==0.6.*, but you have msrest 0.7.1 which is incompatible. Several key azure packages depend on msrest: azure-mgmt-authorization 3.0.0 requires msrest>=0.7.1, but you have msrest 0.6.21 which is incompatible. azure-mgmt-containerregistry 10.1.0 requires msrest>=0.7.1, but you have msrest 0.6.21 which is incompatible. azure-mgmt-resource 22.0.0 requires msrest>=0.7.1, but you have msrest 0.6.21 which is incompatible. azure-mgmt-search 9.0.0 requires msrest>=0.7.1, but you have msrest 0.6.21 which is incompatible. azure-mgmt-storage 21.0.0 requires msrest>=0.7.1, but you have msrest 0.6.21 which is incompatible. **Describe the solution you'd like** Align the entire botbuilder-python with msrest >= 0.7.1 **Describe alternatives you've considered** Since my part of the group collaboration does not require development on the botbuilder-python, my workaround in installing msrest back to 0.7.1. Though, it would be good for botbuilder-python to be aligned with the azure-mgmt packages. **Additional context** Our team is forking this solution accelerator for customers deploying an accelerator solution which includes resources including Azure OpenAI, Azure Cognitive Search, and Azure CosmosDB (in addition to the bot), so the alignment will be important since the lost functionality in azure-mgmt is unknown. The original source is at https://github.com/MSUSAzureAccelerators/Azure-Cognitive-Search-Azure-OpenAI-Accelerator --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `libraries/botframework-connector/setup.py` Content: ``` 1 # Copyright (c) Microsoft Corporation. All rights reserved. 2 # Licensed under the MIT License. 
3 4 import os 5 from setuptools import setup 6 7 NAME = "botframework-connector" 8 VERSION = os.environ["packageVersion"] if "packageVersion" in os.environ else "4.15.0" 9 REQUIRES = [ 10 "msrest==0.6.*", 11 # "requests>=2.23.0,<2.26", 12 "PyJWT>=2.4.0", 13 "botbuilder-schema==4.15.0", 14 "msal==1.*", 15 ] 16 17 root = os.path.abspath(os.path.dirname(__file__)) 18 19 with open(os.path.join(root, "README.rst"), encoding="utf-8") as f: 20 long_description = f.read() 21 22 setup( 23 name=NAME, 24 version=VERSION, 25 description="Microsoft Bot Framework Bot Builder SDK for Python.", 26 author="Microsoft", 27 url="https://www.github.com/Microsoft/botbuilder-python", 28 keywords=["BotFrameworkConnector", "bots", "ai", "botframework", "botbuilder"], 29 install_requires=REQUIRES, 30 packages=[ 31 "botframework.connector", 32 "botframework.connector.auth", 33 "botframework.connector.async_mixin", 34 "botframework.connector.operations", 35 "botframework.connector.models", 36 "botframework.connector.aio", 37 "botframework.connector.aio.operations_async", 38 "botframework.connector.skills", 39 "botframework.connector.teams", 40 "botframework.connector.teams.operations", 41 "botframework.connector.token_api", 42 "botframework.connector.token_api.aio", 43 "botframework.connector.token_api.aio.operations_async", 44 "botframework.connector.token_api.models", 45 "botframework.connector.token_api.operations", 46 ], 47 include_package_data=True, 48 long_description=long_description, 49 long_description_content_type="text/x-rst", 50 license="MIT", 51 classifiers=[ 52 "Programming Language :: Python :: 3.7", 53 "Intended Audience :: Developers", 54 "License :: OSI Approved :: MIT License", 55 "Operating System :: OS Independent", 56 "Development Status :: 5 - Production/Stable", 57 "Topic :: Scientific/Engineering :: Artificial Intelligence", 58 ], 59 ) 60 ``` Path: `libraries/botbuilder-schema/setup.py` Content: ``` 1 # Copyright (c) Microsoft Corporation. All rights reserved. 2 # Licensed under the MIT License. 3 4 import os 5 from setuptools import setup 6 7 NAME = "botbuilder-schema" 8 VERSION = os.environ["packageVersion"] if "packageVersion" in os.environ else "4.15.0" 9 REQUIRES = ["msrest==0.6.*", "urllib3<2.0.0"] 10 11 root = os.path.abspath(os.path.dirname(__file__)) 12 13 with open(os.path.join(root, "README.rst"), encoding="utf-8") as f: 14 long_description = f.read() 15 16 setup( 17 name=NAME, 18 version=VERSION, 19 description="BotBuilder Schema", 20 author="Microsoft", 21 url="https://github.com/Microsoft/botbuilder-python", 22 keywords=["BotBuilderSchema", "bots", "ai", "botframework", "botbuilder"], 23 long_description=long_description, 24 long_description_content_type="text/x-rst", 25 license="MIT", 26 install_requires=REQUIRES, 27 packages=[ 28 "botbuilder.schema", 29 "botbuilder.schema.teams", 30 ], 31 include_package_data=True, 32 classifiers=[ 33 "Programming Language :: Python :: 3.7", 34 "Intended Audience :: Developers", 35 "License :: OSI Approved :: MIT License", 36 "Operating System :: OS Independent", 37 "Development Status :: 5 - Production/Stable", 38 "Topic :: Scientific/Engineering :: Artificial Intelligence", 39 ], 40 ) 41 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/libraries/botbuilder-schema/setup.py b/libraries/botbuilder-schema/setup.py --- a/libraries/botbuilder-schema/setup.py +++ b/libraries/botbuilder-schema/setup.py @@ -6,7 +6,7 @@ NAME = "botbuilder-schema" VERSION = os.environ["packageVersion"] if "packageVersion" in os.environ else "4.15.0" -REQUIRES = ["msrest==0.6.*", "urllib3<2.0.0"] +REQUIRES = ["msrest== 0.7.*", "urllib3<2.0.0"] root = os.path.abspath(os.path.dirname(__file__)) diff --git a/libraries/botframework-connector/setup.py b/libraries/botframework-connector/setup.py --- a/libraries/botframework-connector/setup.py +++ b/libraries/botframework-connector/setup.py @@ -7,7 +7,7 @@ NAME = "botframework-connector" VERSION = os.environ["packageVersion"] if "packageVersion" in os.environ else "4.15.0" REQUIRES = [ - "msrest==0.6.*", + "msrest==0.7.*", # "requests>=2.23.0,<2.26", "PyJWT>=2.4.0", "botbuilder-schema==4.15.0",
{"golden_diff": "diff --git a/libraries/botbuilder-schema/setup.py b/libraries/botbuilder-schema/setup.py\n--- a/libraries/botbuilder-schema/setup.py\n+++ b/libraries/botbuilder-schema/setup.py\n@@ -6,7 +6,7 @@\n \r\n NAME = \"botbuilder-schema\"\r\n VERSION = os.environ[\"packageVersion\"] if \"packageVersion\" in os.environ else \"4.15.0\"\r\n-REQUIRES = [\"msrest==0.6.*\", \"urllib3<2.0.0\"]\r\n+REQUIRES = [\"msrest== 0.7.*\", \"urllib3<2.0.0\"]\r\n \r\n root = os.path.abspath(os.path.dirname(__file__))\r\n \r\ndiff --git a/libraries/botframework-connector/setup.py b/libraries/botframework-connector/setup.py\n--- a/libraries/botframework-connector/setup.py\n+++ b/libraries/botframework-connector/setup.py\n@@ -7,7 +7,7 @@\n NAME = \"botframework-connector\"\n VERSION = os.environ[\"packageVersion\"] if \"packageVersion\" in os.environ else \"4.15.0\"\n REQUIRES = [\n- \"msrest==0.6.*\",\n+ \"msrest==0.7.*\",\n # \"requests>=2.23.0,<2.26\",\n \"PyJWT>=2.4.0\",\n \"botbuilder-schema==4.15.0\",\n", "issue": "Advancing msrest version dependency\n### Use this [query](https://github.com/Microsoft/botbuilder-python/issues?q=is%3Aissue+is%3Aopen++label%3Afeature-request+) to search for the most popular feature requests.\r\n\r\n_No open issues are reported for msrest._\r\n\r\n**Is your feature request related to a problem? Please describe.**\r\nWhen installing a solution accelerator for OpenAI, the solution requires the bot framework. However, aligning the requirements requires downgrading the msrest package.\r\n\r\nbotbuilder-schema 4.14.4 requires msrest==0.6.*, but you have msrest 0.7.1 which is incompatible.\r\nbotframework-connector 4.14.4 requires msrest==0.6.*, but you have msrest 0.7.1 which is incompatible.\r\n\r\nSeveral key azure packages depend on msrest:\r\nazure-mgmt-authorization 3.0.0 requires msrest>=0.7.1, but you have msrest 0.6.21 which is incompatible.\r\nazure-mgmt-containerregistry 10.1.0 requires msrest>=0.7.1, but you have msrest 0.6.21 which is incompatible.\r\nazure-mgmt-resource 22.0.0 requires msrest>=0.7.1, but you have msrest 0.6.21 which is incompatible.\r\nazure-mgmt-search 9.0.0 requires msrest>=0.7.1, but you have msrest 0.6.21 which is incompatible.\r\nazure-mgmt-storage 21.0.0 requires msrest>=0.7.1, but you have msrest 0.6.21 which is incompatible.\r\n\r\n**Describe the solution you'd like**\r\nAlign the entire botbuilder-python with msrest >= 0.7.1 \r\n\r\n**Describe alternatives you've considered**\r\nSince my part of the group collaboration does not require development on the botbuilder-python, my workaround in installing msrest back to 0.7.1. Though, it would be good for botbuilder-python to be aligned with the azure-mgmt packages.\r\n\r\n**Additional context**\r\nOur team is forking this solution accelerator for customers deploying an accelerator solution which includes resources including Azure OpenAI, Azure Cognitive Search, and Azure CosmosDB (in addition to the bot), so the alignment will be important since the lost functionality in azure-mgmt is unknown. The original source is at https://github.com/MSUSAzureAccelerators/Azure-Cognitive-Search-Azure-OpenAI-Accelerator\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nfrom setuptools import setup\n\nNAME = \"botframework-connector\"\nVERSION = os.environ[\"packageVersion\"] if \"packageVersion\" in os.environ else \"4.15.0\"\nREQUIRES = [\n \"msrest==0.6.*\",\n # \"requests>=2.23.0,<2.26\",\n \"PyJWT>=2.4.0\",\n \"botbuilder-schema==4.15.0\",\n \"msal==1.*\",\n]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=NAME,\n version=VERSION,\n description=\"Microsoft Bot Framework Bot Builder SDK for Python.\",\n author=\"Microsoft\",\n url=\"https://www.github.com/Microsoft/botbuilder-python\",\n keywords=[\"BotFrameworkConnector\", \"bots\", \"ai\", \"botframework\", \"botbuilder\"],\n install_requires=REQUIRES,\n packages=[\n \"botframework.connector\",\n \"botframework.connector.auth\",\n \"botframework.connector.async_mixin\",\n \"botframework.connector.operations\",\n \"botframework.connector.models\",\n \"botframework.connector.aio\",\n \"botframework.connector.aio.operations_async\",\n \"botframework.connector.skills\",\n \"botframework.connector.teams\",\n \"botframework.connector.teams.operations\",\n \"botframework.connector.token_api\",\n \"botframework.connector.token_api.aio\",\n \"botframework.connector.token_api.aio.operations_async\",\n \"botframework.connector.token_api.models\",\n \"botframework.connector.token_api.operations\",\n ],\n include_package_data=True,\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=\"MIT\",\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "libraries/botframework-connector/setup.py"}, {"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\r\n# Licensed under the MIT License.\r\n\r\nimport os\r\nfrom setuptools import setup\r\n\r\nNAME = \"botbuilder-schema\"\r\nVERSION = os.environ[\"packageVersion\"] if \"packageVersion\" in os.environ else \"4.15.0\"\r\nREQUIRES = [\"msrest==0.6.*\", \"urllib3<2.0.0\"]\r\n\r\nroot = os.path.abspath(os.path.dirname(__file__))\r\n\r\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\r\n long_description = f.read()\r\n\r\nsetup(\r\n name=NAME,\r\n version=VERSION,\r\n description=\"BotBuilder Schema\",\r\n author=\"Microsoft\",\r\n url=\"https://github.com/Microsoft/botbuilder-python\",\r\n keywords=[\"BotBuilderSchema\", \"bots\", \"ai\", \"botframework\", \"botbuilder\"],\r\n long_description=long_description,\r\n long_description_content_type=\"text/x-rst\",\r\n license=\"MIT\",\r\n install_requires=REQUIRES,\r\n packages=[\r\n \"botbuilder.schema\",\r\n \"botbuilder.schema.teams\",\r\n ],\r\n include_package_data=True,\r\n classifiers=[\r\n \"Programming Language :: Python :: 3.7\",\r\n \"Intended Audience :: Developers\",\r\n \"License :: OSI Approved :: MIT License\",\r\n \"Operating System :: OS Independent\",\r\n \"Development Status :: 5 - Production/Stable\",\r\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\r\n ],\r\n)\r\n", "path": "libraries/botbuilder-schema/setup.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\nimport os\nfrom setuptools import setup\n\nNAME = \"botframework-connector\"\nVERSION = os.environ[\"packageVersion\"] if \"packageVersion\" in os.environ else \"4.15.0\"\nREQUIRES = [\n \"msrest==0.7.*\",\n # \"requests>=2.23.0,<2.26\",\n \"PyJWT>=2.4.0\",\n \"botbuilder-schema==4.15.0\",\n \"msal==1.*\",\n]\n\nroot = os.path.abspath(os.path.dirname(__file__))\n\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\n long_description = f.read()\n\nsetup(\n name=NAME,\n version=VERSION,\n description=\"Microsoft Bot Framework Bot Builder SDK for Python.\",\n author=\"Microsoft\",\n url=\"https://www.github.com/Microsoft/botbuilder-python\",\n keywords=[\"BotFrameworkConnector\", \"bots\", \"ai\", \"botframework\", \"botbuilder\"],\n install_requires=REQUIRES,\n packages=[\n \"botframework.connector\",\n \"botframework.connector.auth\",\n \"botframework.connector.async_mixin\",\n \"botframework.connector.operations\",\n \"botframework.connector.models\",\n \"botframework.connector.aio\",\n \"botframework.connector.aio.operations_async\",\n \"botframework.connector.skills\",\n \"botframework.connector.teams\",\n \"botframework.connector.teams.operations\",\n \"botframework.connector.token_api\",\n \"botframework.connector.token_api.aio\",\n \"botframework.connector.token_api.aio.operations_async\",\n \"botframework.connector.token_api.models\",\n \"botframework.connector.token_api.operations\",\n ],\n include_package_data=True,\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n license=\"MIT\",\n classifiers=[\n \"Programming Language :: Python :: 3.7\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Operating System :: OS Independent\",\n \"Development Status :: 5 - Production/Stable\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n ],\n)\n", "path": "libraries/botframework-connector/setup.py"}, {"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\r\n# Licensed under the MIT License.\r\n\r\nimport os\r\nfrom setuptools import setup\r\n\r\nNAME = \"botbuilder-schema\"\r\nVERSION = os.environ[\"packageVersion\"] if \"packageVersion\" in os.environ else \"4.15.0\"\r\nREQUIRES = [\"msrest== 0.7.*\", \"urllib3<2.0.0\"]\r\n\r\nroot = os.path.abspath(os.path.dirname(__file__))\r\n\r\nwith open(os.path.join(root, \"README.rst\"), encoding=\"utf-8\") as f:\r\n long_description = f.read()\r\n\r\nsetup(\r\n name=NAME,\r\n version=VERSION,\r\n description=\"BotBuilder Schema\",\r\n author=\"Microsoft\",\r\n url=\"https://github.com/Microsoft/botbuilder-python\",\r\n keywords=[\"BotBuilderSchema\", \"bots\", \"ai\", \"botframework\", \"botbuilder\"],\r\n long_description=long_description,\r\n long_description_content_type=\"text/x-rst\",\r\n license=\"MIT\",\r\n install_requires=REQUIRES,\r\n packages=[\r\n \"botbuilder.schema\",\r\n \"botbuilder.schema.teams\",\r\n ],\r\n include_package_data=True,\r\n classifiers=[\r\n \"Programming Language :: Python :: 3.7\",\r\n \"Intended Audience :: Developers\",\r\n \"License :: OSI Approved :: MIT License\",\r\n \"Operating System :: OS Independent\",\r\n \"Development Status :: 5 - Production/Stable\",\r\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\r\n ],\r\n)\r\n", "path": "libraries/botbuilder-schema/setup.py"}]}
1,785
308
gh_patches_debug_15306
rasdani/github-patches
git_diff
great-expectations__great_expectations-2531
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Use cleaner solution for non-truncating division in python 2 Prefer `from __future__ import division` to `1.*x/y` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `great_expectations/expectations/metrics/column_aggregate_metrics/column_proportion_of_unique_values.py` Content: ``` 1 from typing import Optional 2 3 from great_expectations.core import ExpectationConfiguration 4 from great_expectations.execution_engine import ( 5 ExecutionEngine, 6 PandasExecutionEngine, 7 SparkDFExecutionEngine, 8 ) 9 from great_expectations.execution_engine.sqlalchemy_execution_engine import ( 10 SqlAlchemyExecutionEngine, 11 ) 12 from great_expectations.expectations.metrics.column_aggregate_metric import ( 13 ColumnMetricProvider, 14 column_aggregate_partial, 15 column_aggregate_value, 16 ) 17 from great_expectations.expectations.metrics.column_aggregate_metric import sa as sa 18 from great_expectations.expectations.metrics.metric_provider import metric_value 19 from great_expectations.validator.validation_graph import MetricConfiguration 20 21 22 def unique_proportion(_metrics): 23 total_values = _metrics.get("table.row_count") 24 unique_values = _metrics.get("column.distinct_values.count") 25 null_count = _metrics.get("column_values.nonnull.unexpected_count") 26 27 if total_values > 0: 28 return unique_values / (total_values - null_count) 29 else: 30 return 0 31 32 33 class ColumnUniqueProportion(ColumnMetricProvider): 34 metric_name = "column.unique_proportion" 35 36 @metric_value(engine=PandasExecutionEngine) 37 def _pandas(*args, metrics, **kwargs): 38 return unique_proportion(metrics) 39 40 @metric_value(engine=SqlAlchemyExecutionEngine) 41 def _sqlalchemy(*args, metrics, **kwargs): 42 return unique_proportion(metrics) 43 44 @metric_value(engine=SparkDFExecutionEngine) 45 def _spark(*args, metrics, **kwargs): 46 return unique_proportion(metrics) 47 48 @classmethod 49 def _get_evaluation_dependencies( 50 cls, 51 metric: MetricConfiguration, 52 configuration: Optional[ExpectationConfiguration] = None, 53 execution_engine: Optional[ExecutionEngine] = None, 54 runtime_configuration: Optional[dict] = None, 55 ): 56 table_domain_kwargs = { 57 k: v for k, v in metric.metric_domain_kwargs.items() if k != "column" 58 } 59 return { 60 "column.distinct_values.count": MetricConfiguration( 61 "column.distinct_values.count", metric.metric_domain_kwargs 62 ), 63 "table.row_count": MetricConfiguration( 64 "table.row_count", table_domain_kwargs 65 ), 66 "column_values.nonnull.unexpected_count": MetricConfiguration( 67 "column_values.nonnull.unexpected_count", metric.metric_domain_kwargs 68 ), 69 } 70 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/great_expectations/expectations/metrics/column_aggregate_metrics/column_proportion_of_unique_values.py b/great_expectations/expectations/metrics/column_aggregate_metrics/column_proportion_of_unique_values.py --- a/great_expectations/expectations/metrics/column_aggregate_metrics/column_proportion_of_unique_values.py +++ b/great_expectations/expectations/metrics/column_aggregate_metrics/column_proportion_of_unique_values.py @@ -20,11 +20,13 @@ def unique_proportion(_metrics): + """Computes the proportion of unique non-null values out of all non-null values""" total_values = _metrics.get("table.row_count") unique_values = _metrics.get("column.distinct_values.count") null_count = _metrics.get("column_values.nonnull.unexpected_count") - if total_values > 0: + # Ensuring that we do not divide by 0, returning 0 if all values are nulls (we only consider non-nulls unique values) + if total_values > 0 and total_values != null_count: return unique_values / (total_values - null_count) else: return 0
{"golden_diff": "diff --git a/great_expectations/expectations/metrics/column_aggregate_metrics/column_proportion_of_unique_values.py b/great_expectations/expectations/metrics/column_aggregate_metrics/column_proportion_of_unique_values.py\n--- a/great_expectations/expectations/metrics/column_aggregate_metrics/column_proportion_of_unique_values.py\n+++ b/great_expectations/expectations/metrics/column_aggregate_metrics/column_proportion_of_unique_values.py\n@@ -20,11 +20,13 @@\n \n \n def unique_proportion(_metrics):\n+ \"\"\"Computes the proportion of unique non-null values out of all non-null values\"\"\"\n total_values = _metrics.get(\"table.row_count\")\n unique_values = _metrics.get(\"column.distinct_values.count\")\n null_count = _metrics.get(\"column_values.nonnull.unexpected_count\")\n \n- if total_values > 0:\n+ # Ensuring that we do not divide by 0, returning 0 if all values are nulls (we only consider non-nulls unique values)\n+ if total_values > 0 and total_values != null_count:\n return unique_values / (total_values - null_count)\n else:\n return 0\n", "issue": "Use cleaner solution for non-truncating division in python 2\nPrefer `from __future__ import division` to `1.*x/y`\n", "before_files": [{"content": "from typing import Optional\n\nfrom great_expectations.core import ExpectationConfiguration\nfrom great_expectations.execution_engine import (\n ExecutionEngine,\n PandasExecutionEngine,\n SparkDFExecutionEngine,\n)\nfrom great_expectations.execution_engine.sqlalchemy_execution_engine import (\n SqlAlchemyExecutionEngine,\n)\nfrom great_expectations.expectations.metrics.column_aggregate_metric import (\n ColumnMetricProvider,\n column_aggregate_partial,\n column_aggregate_value,\n)\nfrom great_expectations.expectations.metrics.column_aggregate_metric import sa as sa\nfrom great_expectations.expectations.metrics.metric_provider import metric_value\nfrom great_expectations.validator.validation_graph import MetricConfiguration\n\n\ndef unique_proportion(_metrics):\n total_values = _metrics.get(\"table.row_count\")\n unique_values = _metrics.get(\"column.distinct_values.count\")\n null_count = _metrics.get(\"column_values.nonnull.unexpected_count\")\n\n if total_values > 0:\n return unique_values / (total_values - null_count)\n else:\n return 0\n\n\nclass ColumnUniqueProportion(ColumnMetricProvider):\n metric_name = \"column.unique_proportion\"\n\n @metric_value(engine=PandasExecutionEngine)\n def _pandas(*args, metrics, **kwargs):\n return unique_proportion(metrics)\n\n @metric_value(engine=SqlAlchemyExecutionEngine)\n def _sqlalchemy(*args, metrics, **kwargs):\n return unique_proportion(metrics)\n\n @metric_value(engine=SparkDFExecutionEngine)\n def _spark(*args, metrics, **kwargs):\n return unique_proportion(metrics)\n\n @classmethod\n def _get_evaluation_dependencies(\n cls,\n metric: MetricConfiguration,\n configuration: Optional[ExpectationConfiguration] = None,\n execution_engine: Optional[ExecutionEngine] = None,\n runtime_configuration: Optional[dict] = None,\n ):\n table_domain_kwargs = {\n k: v for k, v in metric.metric_domain_kwargs.items() if k != \"column\"\n }\n return {\n \"column.distinct_values.count\": MetricConfiguration(\n \"column.distinct_values.count\", metric.metric_domain_kwargs\n ),\n \"table.row_count\": MetricConfiguration(\n \"table.row_count\", table_domain_kwargs\n ),\n \"column_values.nonnull.unexpected_count\": MetricConfiguration(\n \"column_values.nonnull.unexpected_count\", metric.metric_domain_kwargs\n ),\n }\n", "path": 
"great_expectations/expectations/metrics/column_aggregate_metrics/column_proportion_of_unique_values.py"}], "after_files": [{"content": "from typing import Optional\n\nfrom great_expectations.core import ExpectationConfiguration\nfrom great_expectations.execution_engine import (\n ExecutionEngine,\n PandasExecutionEngine,\n SparkDFExecutionEngine,\n)\nfrom great_expectations.execution_engine.sqlalchemy_execution_engine import (\n SqlAlchemyExecutionEngine,\n)\nfrom great_expectations.expectations.metrics.column_aggregate_metric import (\n ColumnMetricProvider,\n column_aggregate_partial,\n column_aggregate_value,\n)\nfrom great_expectations.expectations.metrics.column_aggregate_metric import sa as sa\nfrom great_expectations.expectations.metrics.metric_provider import metric_value\nfrom great_expectations.validator.validation_graph import MetricConfiguration\n\n\ndef unique_proportion(_metrics):\n \"\"\"Computes the proportion of unique non-null values out of all non-null values\"\"\"\n total_values = _metrics.get(\"table.row_count\")\n unique_values = _metrics.get(\"column.distinct_values.count\")\n null_count = _metrics.get(\"column_values.nonnull.unexpected_count\")\n\n # Ensuring that we do not divide by 0, returning 0 if all values are nulls (we only consider non-nulls unique values)\n if total_values > 0 and total_values != null_count:\n return unique_values / (total_values - null_count)\n else:\n return 0\n\n\nclass ColumnUniqueProportion(ColumnMetricProvider):\n metric_name = \"column.unique_proportion\"\n\n @metric_value(engine=PandasExecutionEngine)\n def _pandas(*args, metrics, **kwargs):\n return unique_proportion(metrics)\n\n @metric_value(engine=SqlAlchemyExecutionEngine)\n def _sqlalchemy(*args, metrics, **kwargs):\n return unique_proportion(metrics)\n\n @metric_value(engine=SparkDFExecutionEngine)\n def _spark(*args, metrics, **kwargs):\n return unique_proportion(metrics)\n\n @classmethod\n def _get_evaluation_dependencies(\n cls,\n metric: MetricConfiguration,\n configuration: Optional[ExpectationConfiguration] = None,\n execution_engine: Optional[ExecutionEngine] = None,\n runtime_configuration: Optional[dict] = None,\n ):\n table_domain_kwargs = {\n k: v for k, v in metric.metric_domain_kwargs.items() if k != \"column\"\n }\n return {\n \"column.distinct_values.count\": MetricConfiguration(\n \"column.distinct_values.count\", metric.metric_domain_kwargs\n ),\n \"table.row_count\": MetricConfiguration(\n \"table.row_count\", table_domain_kwargs\n ),\n \"column_values.nonnull.unexpected_count\": MetricConfiguration(\n \"column_values.nonnull.unexpected_count\", metric.metric_domain_kwargs\n ),\n }\n", "path": "great_expectations/expectations/metrics/column_aggregate_metrics/column_proportion_of_unique_values.py"}]}
930
253
gh_patches_debug_3587
rasdani/github-patches
git_diff
streamlit__streamlit-1942
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Crazy error message shown when two widgets have the same key # Steps to reproduce 1. Run this code ``` import streamlit as st st.button("OK") st.button("OK") ``` 2. Observe! ## Expected behavior: You should get one button plus an error message explaining you can't have to `st.button` calls with the same key. ## Actual behavior: The error message complains about `st.-3952690150221448179` :scream_cat: See screenshot: ![image](https://user-images.githubusercontent.com/690814/91766363-4f35d480-eb8f-11ea-8648-ce832caf4da3.png) ## Is this a regression? yes # Debug info - Streamlit version: 0.65.0 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `lib/streamlit/elements/utils.py` Content: ``` 1 import textwrap 2 3 from streamlit import type_util 4 from streamlit.report_thread import get_report_ctx 5 from streamlit.errors import DuplicateWidgetID 6 from typing import Optional, Any 7 8 9 class NoValue(object): 10 """Return this from DeltaGenerator.foo_widget() when you want the st.foo_widget() 11 call to return None. This is needed because `DeltaGenerator._enqueue` 12 replaces `None` with a `DeltaGenerator` (for use in non-widget elements). 13 """ 14 15 pass 16 17 18 def _clean_text(text): 19 return textwrap.dedent(str(text)).strip() 20 21 22 def _build_duplicate_widget_message( 23 widget_func_name: str, user_key: Optional[str] = None 24 ) -> str: 25 if user_key is not None: 26 message = textwrap.dedent( 27 """ 28 There are multiple identical `st.{widget_type}` widgets with 29 `key='{user_key}'`. 30 31 To fix this, please make sure that the `key` argument is unique for 32 each `st.{widget_type}` you create. 33 """ 34 ) 35 else: 36 message = textwrap.dedent( 37 """ 38 There are multiple identical `st.{widget_type}` widgets with the 39 same generated key. 40 41 (When a widget is created, it's assigned an internal key based on 42 its structure. Multiple widgets with an identical structure will 43 result in the same internal key, which causes this error.) 44 45 To fix this, please pass a unique `key` argument to 46 `st.{widget_type}`. 47 """ 48 ) 49 50 return message.strip("\n").format(widget_type=widget_func_name, user_key=user_key) 51 52 53 def _set_widget_id( 54 element_type: str, 55 element_proto: Any, 56 user_key: Optional[str] = None, 57 widget_func_name: Optional[str] = None, 58 ) -> None: 59 """Set the widget id. 60 61 Parameters 62 ---------- 63 element_type : str 64 The type of the element as stored in proto. 65 element_proto : proto 66 The proto of the specified type (e.g. Button/Multiselect/Slider proto) 67 user_key : str or None 68 Optional user-specified key to use for the widget ID. 69 If this is None, we'll generate an ID by hashing the element. 70 widget_func_name : str or None 71 The widget's DeltaGenerator function name, if it's different from 72 its element_type. Custom components are a special case: they all have 73 the element_type "component_instance", but are instantiated with 74 dynamically-named functions. 
75 76 """ 77 78 if widget_func_name is None: 79 widget_func_name = element_type 80 81 # Identify the widget with a hash of type + contents 82 element_hash = hash((element_type, element_proto.SerializeToString())) 83 if user_key is not None: 84 widget_id = "%s-%s" % (user_key, element_hash) 85 else: 86 widget_id = "%s" % element_hash 87 88 ctx = get_report_ctx() 89 if ctx is not None: 90 added = ctx.widget_ids_this_run.add(widget_id) 91 if not added: 92 raise DuplicateWidgetID( 93 _build_duplicate_widget_message(widget_id, user_key) 94 ) 95 element_proto.id = widget_id 96 97 98 def _get_widget_ui_value( 99 element_type: str, 100 element_proto: Any, 101 user_key: Optional[str] = None, 102 widget_func_name: Optional[str] = None, 103 ) -> Any: 104 """Get the widget ui_value from the report context. 105 NOTE: This function should be called after the proto has been filled. 106 107 Parameters 108 ---------- 109 element_type : str 110 The type of the element as stored in proto. 111 element : proto 112 The proto of the specified type (e.g. Button/Multiselect/Slider proto) 113 user_key : str 114 Optional user-specified string to use as the widget ID. 115 If this is None, we'll generate an ID by hashing the element. 116 widget_func_name : str or None 117 The widget's DeltaGenerator function name, if it's different from 118 its element_type. Custom components are a special case: they all have 119 the element_type "component_instance", but are instantiated with 120 dynamically-named functions. 121 122 Returns 123 ------- 124 ui_value : any 125 The value of the widget set by the client or 126 the default value passed. If the report context 127 doesn't exist, None will be returned. 128 129 """ 130 _set_widget_id(element_type, element_proto, user_key, widget_func_name) 131 ctx = get_report_ctx() 132 ui_value = ctx.widgets.get_widget_value(element_proto.id) if ctx else None 133 return ui_value 134 135 136 def last_index_for_melted_dataframes(data): 137 if type_util.is_dataframe_compatible(data): 138 data = type_util.convert_anything_to_df(data) 139 140 if data.index.size > 0: 141 return data.index[-1] 142 143 return None 144 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/lib/streamlit/elements/utils.py b/lib/streamlit/elements/utils.py --- a/lib/streamlit/elements/utils.py +++ b/lib/streamlit/elements/utils.py @@ -90,7 +90,7 @@ added = ctx.widget_ids_this_run.add(widget_id) if not added: raise DuplicateWidgetID( - _build_duplicate_widget_message(widget_id, user_key) + _build_duplicate_widget_message(widget_func_name, user_key) ) element_proto.id = widget_id
{"golden_diff": "diff --git a/lib/streamlit/elements/utils.py b/lib/streamlit/elements/utils.py\n--- a/lib/streamlit/elements/utils.py\n+++ b/lib/streamlit/elements/utils.py\n@@ -90,7 +90,7 @@\n added = ctx.widget_ids_this_run.add(widget_id)\n if not added:\n raise DuplicateWidgetID(\n- _build_duplicate_widget_message(widget_id, user_key)\n+ _build_duplicate_widget_message(widget_func_name, user_key)\n )\n element_proto.id = widget_id\n", "issue": "Crazy error message shown when two widgets have the same key\n# Steps to reproduce\r\n\r\n1. Run this code\r\n ```\r\n import streamlit as st\r\n\r\n st.button(\"OK\")\r\n st.button(\"OK\")\r\n ```\r\n2. Observe!\r\n\r\n## Expected behavior:\r\n\r\nYou should get one button plus an error message explaining you can't have to `st.button` calls with the same key.\r\n\r\n## Actual behavior:\r\n\r\nThe error message complains about `st.-3952690150221448179` :scream_cat: \r\n\r\nSee screenshot:\r\n![image](https://user-images.githubusercontent.com/690814/91766363-4f35d480-eb8f-11ea-8648-ce832caf4da3.png)\r\n\r\n## Is this a regression?\r\n\r\nyes\r\n\r\n# Debug info\r\n\r\n- Streamlit version: 0.65.0\r\n\n", "before_files": [{"content": "import textwrap\n\nfrom streamlit import type_util\nfrom streamlit.report_thread import get_report_ctx\nfrom streamlit.errors import DuplicateWidgetID\nfrom typing import Optional, Any\n\n\nclass NoValue(object):\n \"\"\"Return this from DeltaGenerator.foo_widget() when you want the st.foo_widget()\n call to return None. This is needed because `DeltaGenerator._enqueue`\n replaces `None` with a `DeltaGenerator` (for use in non-widget elements).\n \"\"\"\n\n pass\n\n\ndef _clean_text(text):\n return textwrap.dedent(str(text)).strip()\n\n\ndef _build_duplicate_widget_message(\n widget_func_name: str, user_key: Optional[str] = None\n) -> str:\n if user_key is not None:\n message = textwrap.dedent(\n \"\"\"\n There are multiple identical `st.{widget_type}` widgets with\n `key='{user_key}'`.\n\n To fix this, please make sure that the `key` argument is unique for\n each `st.{widget_type}` you create.\n \"\"\"\n )\n else:\n message = textwrap.dedent(\n \"\"\"\n There are multiple identical `st.{widget_type}` widgets with the\n same generated key.\n\n (When a widget is created, it's assigned an internal key based on\n its structure. Multiple widgets with an identical structure will\n result in the same internal key, which causes this error.)\n\n To fix this, please pass a unique `key` argument to\n `st.{widget_type}`.\n \"\"\"\n )\n\n return message.strip(\"\\n\").format(widget_type=widget_func_name, user_key=user_key)\n\n\ndef _set_widget_id(\n element_type: str,\n element_proto: Any,\n user_key: Optional[str] = None,\n widget_func_name: Optional[str] = None,\n) -> None:\n \"\"\"Set the widget id.\n\n Parameters\n ----------\n element_type : str\n The type of the element as stored in proto.\n element_proto : proto\n The proto of the specified type (e.g. Button/Multiselect/Slider proto)\n user_key : str or None\n Optional user-specified key to use for the widget ID.\n If this is None, we'll generate an ID by hashing the element.\n widget_func_name : str or None\n The widget's DeltaGenerator function name, if it's different from\n its element_type. 
Custom components are a special case: they all have\n the element_type \"component_instance\", but are instantiated with\n dynamically-named functions.\n\n \"\"\"\n\n if widget_func_name is None:\n widget_func_name = element_type\n\n # Identify the widget with a hash of type + contents\n element_hash = hash((element_type, element_proto.SerializeToString()))\n if user_key is not None:\n widget_id = \"%s-%s\" % (user_key, element_hash)\n else:\n widget_id = \"%s\" % element_hash\n\n ctx = get_report_ctx()\n if ctx is not None:\n added = ctx.widget_ids_this_run.add(widget_id)\n if not added:\n raise DuplicateWidgetID(\n _build_duplicate_widget_message(widget_id, user_key)\n )\n element_proto.id = widget_id\n\n\ndef _get_widget_ui_value(\n element_type: str,\n element_proto: Any,\n user_key: Optional[str] = None,\n widget_func_name: Optional[str] = None,\n) -> Any:\n \"\"\"Get the widget ui_value from the report context.\n NOTE: This function should be called after the proto has been filled.\n\n Parameters\n ----------\n element_type : str\n The type of the element as stored in proto.\n element : proto\n The proto of the specified type (e.g. Button/Multiselect/Slider proto)\n user_key : str\n Optional user-specified string to use as the widget ID.\n If this is None, we'll generate an ID by hashing the element.\n widget_func_name : str or None\n The widget's DeltaGenerator function name, if it's different from\n its element_type. Custom components are a special case: they all have\n the element_type \"component_instance\", but are instantiated with\n dynamically-named functions.\n\n Returns\n -------\n ui_value : any\n The value of the widget set by the client or\n the default value passed. If the report context\n doesn't exist, None will be returned.\n\n \"\"\"\n _set_widget_id(element_type, element_proto, user_key, widget_func_name)\n ctx = get_report_ctx()\n ui_value = ctx.widgets.get_widget_value(element_proto.id) if ctx else None\n return ui_value\n\n\ndef last_index_for_melted_dataframes(data):\n if type_util.is_dataframe_compatible(data):\n data = type_util.convert_anything_to_df(data)\n\n if data.index.size > 0:\n return data.index[-1]\n\n return None\n", "path": "lib/streamlit/elements/utils.py"}], "after_files": [{"content": "import textwrap\n\nfrom streamlit import type_util\nfrom streamlit.report_thread import get_report_ctx\nfrom streamlit.errors import DuplicateWidgetID\nfrom typing import Optional, Any\n\n\nclass NoValue(object):\n \"\"\"Return this from DeltaGenerator.foo_widget() when you want the st.foo_widget()\n call to return None. This is needed because `DeltaGenerator._enqueue`\n replaces `None` with a `DeltaGenerator` (for use in non-widget elements).\n \"\"\"\n\n pass\n\n\ndef _clean_text(text):\n return textwrap.dedent(str(text)).strip()\n\n\ndef _build_duplicate_widget_message(\n widget_func_name: str, user_key: Optional[str] = None\n) -> str:\n if user_key is not None:\n message = textwrap.dedent(\n \"\"\"\n There are multiple identical `st.{widget_type}` widgets with\n `key='{user_key}'`.\n\n To fix this, please make sure that the `key` argument is unique for\n each `st.{widget_type}` you create.\n \"\"\"\n )\n else:\n message = textwrap.dedent(\n \"\"\"\n There are multiple identical `st.{widget_type}` widgets with the\n same generated key.\n\n (When a widget is created, it's assigned an internal key based on\n its structure. 
Multiple widgets with an identical structure will\n result in the same internal key, which causes this error.)\n\n To fix this, please pass a unique `key` argument to\n `st.{widget_type}`.\n \"\"\"\n )\n\n return message.strip(\"\\n\").format(widget_type=widget_func_name, user_key=user_key)\n\n\ndef _set_widget_id(\n element_type: str,\n element_proto: Any,\n user_key: Optional[str] = None,\n widget_func_name: Optional[str] = None,\n) -> None:\n \"\"\"Set the widget id.\n\n Parameters\n ----------\n element_type : str\n The type of the element as stored in proto.\n element_proto : proto\n The proto of the specified type (e.g. Button/Multiselect/Slider proto)\n user_key : str or None\n Optional user-specified key to use for the widget ID.\n If this is None, we'll generate an ID by hashing the element.\n widget_func_name : str or None\n The widget's DeltaGenerator function name, if it's different from\n its element_type. Custom components are a special case: they all have\n the element_type \"component_instance\", but are instantiated with\n dynamically-named functions.\n\n \"\"\"\n\n if widget_func_name is None:\n widget_func_name = element_type\n\n # Identify the widget with a hash of type + contents\n element_hash = hash((element_type, element_proto.SerializeToString()))\n if user_key is not None:\n widget_id = \"%s-%s\" % (user_key, element_hash)\n else:\n widget_id = \"%s\" % element_hash\n\n ctx = get_report_ctx()\n if ctx is not None:\n added = ctx.widget_ids_this_run.add(widget_id)\n if not added:\n raise DuplicateWidgetID(\n _build_duplicate_widget_message(widget_func_name, user_key)\n )\n element_proto.id = widget_id\n\n\ndef _get_widget_ui_value(\n element_type: str,\n element_proto: Any,\n user_key: Optional[str] = None,\n widget_func_name: Optional[str] = None,\n) -> Any:\n \"\"\"Get the widget ui_value from the report context.\n NOTE: This function should be called after the proto has been filled.\n\n Parameters\n ----------\n element_type : str\n The type of the element as stored in proto.\n element : proto\n The proto of the specified type (e.g. Button/Multiselect/Slider proto)\n user_key : str\n Optional user-specified string to use as the widget ID.\n If this is None, we'll generate an ID by hashing the element.\n widget_func_name : str or None\n The widget's DeltaGenerator function name, if it's different from\n its element_type. Custom components are a special case: they all have\n the element_type \"component_instance\", but are instantiated with\n dynamically-named functions.\n\n Returns\n -------\n ui_value : any\n The value of the widget set by the client or\n the default value passed. If the report context\n doesn't exist, None will be returned.\n\n \"\"\"\n _set_widget_id(element_type, element_proto, user_key, widget_func_name)\n ctx = get_report_ctx()\n ui_value = ctx.widgets.get_widget_value(element_proto.id) if ctx else None\n return ui_value\n\n\ndef last_index_for_melted_dataframes(data):\n if type_util.is_dataframe_compatible(data):\n data = type_util.convert_anything_to_df(data)\n\n if data.index.size > 0:\n return data.index[-1]\n\n return None\n", "path": "lib/streamlit/elements/utils.py"}]}
1,858
112
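The confusing `st.-3952690150221448179` text in the record above comes from formatting the hashed widget id into the `{widget_type}` placeholder. The snippet below is a simplified reconstruction of the message builder (only the no-user-key branch, with shortened wording; not the full Streamlit module) that contrasts the buggy and patched call sites:

```python
import textwrap


# Trimmed reconstruction of Streamlit's duplicate-widget message builder.
def build_duplicate_widget_message(widget_func_name: str) -> str:
    message = textwrap.dedent(
        """
        There are multiple identical `st.{widget_type}` widgets with the
        same generated key. To fix this, please pass a unique `key` argument
        to `st.{widget_type}`.
        """
    )
    return message.strip("\n").format(widget_type=widget_func_name)


element_type = "button"
# Stand-in for hash((element_type, element_proto.SerializeToString())).
widget_id = "-3952690150221448179"

# Buggy call site: the hashed id ends up inside `st.{...}`.
print(build_duplicate_widget_message(widget_id))

# Patched call site: the widget's function name is shown instead.
print(build_duplicate_widget_message(element_type))
```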
gh_patches_debug_23329
rasdani/github-patches
git_diff
fossasia__open-event-server-6647
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Tickets can be created with end date greater than event end date **Describe the bug** Tickets can be created with end date greater than event end date. **Expected behavior** There should be a server-side check to check that this doesn't happen **Additional context** - [x] Taking this up --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `app/api/tickets.py` Content: ``` 1 from flask import request, current_app 2 from flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship 3 from flask_rest_jsonapi.exceptions import ObjectNotFound 4 from flask_jwt_extended import current_user, verify_jwt_in_request 5 from sqlalchemy.orm.exc import NoResultFound 6 7 from app.api.bootstrap import api 8 from app.api.helpers.db import safe_query 9 from app.api.helpers.permission_manager import has_access 10 from app.api.helpers.query import event_query 11 from app.api.helpers.utilities import require_relationship 12 from app.api.schema.tickets import TicketSchema, TicketSchemaPublic 13 from app.models import db 14 from app.models.access_code import AccessCode 15 from app.models.discount_code import DiscountCode 16 from app.models.order import Order 17 from app.models.ticket import Ticket, TicketTag, ticket_tags_table 18 from app.models.event import Event 19 from app.models.ticket_holder import TicketHolder 20 from app.api.helpers.exceptions import ConflictException, MethodNotAllowed, UnprocessableEntity 21 from app.api.helpers.db import get_count 22 23 class TicketListPost(ResourceList): 24 """ 25 Create and List Tickets 26 """ 27 def before_post(self, args, kwargs, data): 28 """ 29 before post method to check for required relationship and proper permission 30 :param args: 31 :param kwargs: 32 :param data: 33 :return: 34 """ 35 require_relationship(['event'], data) 36 if not has_access('is_coorganizer', event_id=data['event']): 37 raise ObjectNotFound({'parameter': 'event_id'}, 38 "Event: {} not found".format(data['event'])) 39 40 if get_count(db.session.query(Ticket.id).filter_by(name=data['name'], event_id=int(data['event']), 41 deleted_at=None)) > 0: 42 raise ConflictException({'pointer': '/data/attributes/name'}, "Ticket already exists") 43 44 def before_create_object(self, data, view_kwargs): 45 """ 46 before create method to check if paid ticket has a paymentMethod enabled 47 :param data: 48 :param view_kwargs: 49 :return: 50 """ 51 if data.get('type') == 'paid' and data.get('event'): 52 try: 53 event = db.session.query(Event).filter_by(id=data['event'], deleted_at=None).one() 54 except NoResultFound: 55 raise UnprocessableEntity({'event_id': data['event']}, "Event does not exist") 56 if not event.is_payment_enabled(): 57 raise UnprocessableEntity( 58 {'event_id': data['event']}, "Event having paid ticket must have a payment method") 59 60 schema = TicketSchema 61 methods = ['POST', ] 62 data_layer = {'session': db.session, 63 'model': Ticket, 64 'methods': { 65 'before_create_object': before_create_object, 66 'before_post': before_post 67 }} 68 69 70 class TicketList(ResourceList): 71 """ 72 List Tickets based on different params 73 """ 74 def before_get(self, args, view_kwargs): 75 """ 76 before get method to get the resource id for assigning schema 77 :param args: 78 :param view_kwargs: 79 :return: 80 """ 81 if view_kwargs.get('ticket_tag_id') or 
view_kwargs.get('access_code_id') or view_kwargs.get('order_identifier'): 82 self.schema = TicketSchemaPublic 83 84 def query(self, view_kwargs): 85 """ 86 query method for resource list 87 :param view_kwargs: 88 :return: 89 """ 90 91 if 'Authorization' in request.headers: 92 verify_jwt_in_request() 93 if current_user.is_super_admin or current_user.is_admin: 94 query_ = self.session.query(Ticket) 95 elif view_kwargs.get('event_id') and has_access('is_organizer', event_id=view_kwargs['event_id']): 96 query_ = self.session.query(Ticket) 97 else: 98 query_ = self.session.query(Ticket).filter_by(is_hidden=False) 99 else: 100 query_ = self.session.query(Ticket).filter_by(is_hidden=False) 101 102 if view_kwargs.get('ticket_tag_id'): 103 ticket_tag = safe_query(self, TicketTag, 'id', view_kwargs['ticket_tag_id'], 'ticket_tag_id') 104 query_ = query_.join(ticket_tags_table).filter_by(ticket_tag_id=ticket_tag.id) 105 query_ = event_query(self, query_, view_kwargs) 106 if view_kwargs.get('access_code_id'): 107 access_code = safe_query(self, AccessCode, 'id', view_kwargs['access_code_id'], 'access_code_id') 108 # access_code - ticket :: many-to-many relationship 109 query_ = Ticket.query.filter(Ticket.access_codes.any(id=access_code.id)) 110 111 if view_kwargs.get('discount_code_id'): 112 discount_code = safe_query(self, DiscountCode, 'id', view_kwargs['discount_code_id'], 'discount_code_id') 113 # discount_code - ticket :: many-to-many relationship 114 query_ = Ticket.query.filter(Ticket.discount_codes.any(id=discount_code.id)) 115 116 if view_kwargs.get('order_identifier'): 117 order = safe_query(self, Order, 'identifier', view_kwargs['order_identifier'], 'order_identifier') 118 ticket_ids = [] 119 for ticket in order.tickets: 120 ticket_ids.append(ticket.id) 121 query_ = query_.filter(Ticket.id.in_(tuple(ticket_ids))) 122 123 return query_ 124 125 view_kwargs = True 126 methods = ['GET', ] 127 decorators = (api.has_permission('is_coorganizer', fetch='event_id', 128 fetch_as="event_id", model=Ticket, methods="POST", 129 check=lambda a: a.get('event_id') or a.get('event_identifier')),) 130 schema = TicketSchema 131 data_layer = {'session': db.session, 132 'model': Ticket, 133 'methods': { 134 'query': query, 135 }} 136 137 138 class TicketDetail(ResourceDetail): 139 """ 140 Ticket Resource 141 """ 142 def before_get(self, args, view_kwargs): 143 """ 144 before get method to get the resource id for assigning schema 145 :param args: 146 :param view_kwargs: 147 :return: 148 """ 149 if view_kwargs.get('attendee_id'): 150 self.schema = TicketSchemaPublic 151 152 def before_get_object(self, view_kwargs): 153 """ 154 before get object method to get the resource id for fetching details 155 :param view_kwargs: 156 :return: 157 """ 158 if view_kwargs.get('attendee_id') is not None: 159 attendee = safe_query(self, TicketHolder, 'id', view_kwargs['attendee_id'], 'attendee_id') 160 if attendee.ticket_id is not None: 161 view_kwargs['id'] = attendee.ticket_id 162 else: 163 view_kwargs['id'] = None 164 165 def before_update_object(self, ticket, data, view_kwargs): 166 """ 167 method to check if paid ticket has payment method before updating ticket object 168 :param ticket: 169 :param data: 170 :param view_kwargs: 171 :return: 172 """ 173 if ticket.type == 'paid': 174 try: 175 event = db.session.query(Event).filter_by(id=ticket.event.id, deleted_at=None).one() 176 except NoResultFound: 177 raise UnprocessableEntity({'event_id': ticket.event.id}, "Event does not exist") 178 if not event.is_payment_enabled(): 179 
raise UnprocessableEntity( 180 {'event_id': ticket.event.id}, "Event having paid ticket must have a payment method") 181 182 decorators = (api.has_permission('is_coorganizer', fetch='event_id', 183 fetch_as="event_id", model=Ticket, methods="PATCH,DELETE"),) 184 schema = TicketSchema 185 data_layer = {'session': db.session, 186 'model': Ticket, 187 'methods': { 188 'before_get_object': before_get_object, 189 'before_update_object': before_update_object 190 }} 191 192 193 class TicketRelationshipRequired(ResourceRelationship): 194 """ 195 Tickets Relationship (Required) 196 """ 197 decorators = (api.has_permission('is_coorganizer', fetch='event_id', 198 fetch_as="event_id", model=Ticket, methods="PATCH"),) 199 methods = ['GET', 'PATCH'] 200 schema = TicketSchema 201 data_layer = {'session': db.session, 202 'model': Ticket} 203 204 205 class TicketRelationshipOptional(ResourceRelationship): 206 """ 207 Tickets Relationship (Optional) 208 """ 209 decorators = (api.has_permission('is_coorganizer', fetch='event_id', 210 fetch_as="event_id", model=Ticket, methods="PATCH,DELETE"),) 211 schema = TicketSchema 212 data_layer = {'session': db.session, 213 'model': Ticket} 214 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/app/api/tickets.py b/app/api/tickets.py --- a/app/api/tickets.py +++ b/app/api/tickets.py @@ -48,14 +48,20 @@ :param view_kwargs: :return: """ - if data.get('type') == 'paid' and data.get('event'): + if data.get('event'): try: event = db.session.query(Event).filter_by(id=data['event'], deleted_at=None).one() except NoResultFound: raise UnprocessableEntity({'event_id': data['event']}, "Event does not exist") - if not event.is_payment_enabled(): - raise UnprocessableEntity( - {'event_id': data['event']}, "Event having paid ticket must have a payment method") + + if data.get('type') == 'paid': + if not event.is_payment_enabled(): + raise UnprocessableEntity( + {'event_id': data['event']}, "Event having paid ticket must have a payment method") + + if data.get('sales_ends_at') > event.ends_at: + raise UnprocessableEntity({'sales_ends_at': '/data/attributes/sales-ends-at'}, + "Ticket end date cannot be greater than event end date") schema = TicketSchema methods = ['POST', ]
{"golden_diff": "diff --git a/app/api/tickets.py b/app/api/tickets.py\n--- a/app/api/tickets.py\n+++ b/app/api/tickets.py\n@@ -48,14 +48,20 @@\n :param view_kwargs:\n :return:\n \"\"\"\n- if data.get('type') == 'paid' and data.get('event'):\n+ if data.get('event'):\n try:\n event = db.session.query(Event).filter_by(id=data['event'], deleted_at=None).one()\n except NoResultFound:\n raise UnprocessableEntity({'event_id': data['event']}, \"Event does not exist\")\n- if not event.is_payment_enabled():\n- raise UnprocessableEntity(\n- {'event_id': data['event']}, \"Event having paid ticket must have a payment method\")\n+\n+ if data.get('type') == 'paid':\n+ if not event.is_payment_enabled():\n+ raise UnprocessableEntity(\n+ {'event_id': data['event']}, \"Event having paid ticket must have a payment method\")\n+\n+ if data.get('sales_ends_at') > event.ends_at:\n+ raise UnprocessableEntity({'sales_ends_at': '/data/attributes/sales-ends-at'},\n+ \"Ticket end date cannot be greater than event end date\")\n \n schema = TicketSchema\n methods = ['POST', ]\n", "issue": "Tickets can be created with end date greater than event end date\n**Describe the bug**\r\nTickets can be created with end date greater than event end date.\r\n\r\n\r\n**Expected behavior**\r\nThere should be a server-side check to check that this doesn't happen\r\n\r\n\r\n**Additional context**\r\n- [x] Taking this up\n", "before_files": [{"content": "from flask import request, current_app\nfrom flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\nfrom flask_rest_jsonapi.exceptions import ObjectNotFound\nfrom flask_jwt_extended import current_user, verify_jwt_in_request\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom app.api.bootstrap import api\nfrom app.api.helpers.db import safe_query\nfrom app.api.helpers.permission_manager import has_access\nfrom app.api.helpers.query import event_query\nfrom app.api.helpers.utilities import require_relationship\nfrom app.api.schema.tickets import TicketSchema, TicketSchemaPublic\nfrom app.models import db\nfrom app.models.access_code import AccessCode\nfrom app.models.discount_code import DiscountCode\nfrom app.models.order import Order\nfrom app.models.ticket import Ticket, TicketTag, ticket_tags_table\nfrom app.models.event import Event\nfrom app.models.ticket_holder import TicketHolder\nfrom app.api.helpers.exceptions import ConflictException, MethodNotAllowed, UnprocessableEntity\nfrom app.api.helpers.db import get_count\n\nclass TicketListPost(ResourceList):\n \"\"\"\n Create and List Tickets\n \"\"\"\n def before_post(self, args, kwargs, data):\n \"\"\"\n before post method to check for required relationship and proper permission\n :param args:\n :param kwargs:\n :param data:\n :return:\n \"\"\"\n require_relationship(['event'], data)\n if not has_access('is_coorganizer', event_id=data['event']):\n raise ObjectNotFound({'parameter': 'event_id'},\n \"Event: {} not found\".format(data['event']))\n\n if get_count(db.session.query(Ticket.id).filter_by(name=data['name'], event_id=int(data['event']),\n deleted_at=None)) > 0:\n raise ConflictException({'pointer': '/data/attributes/name'}, \"Ticket already exists\")\n\n def before_create_object(self, data, view_kwargs):\n \"\"\"\n before create method to check if paid ticket has a paymentMethod enabled\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n if data.get('type') == 'paid' and data.get('event'):\n try:\n event = db.session.query(Event).filter_by(id=data['event'], deleted_at=None).one()\n except 
NoResultFound:\n raise UnprocessableEntity({'event_id': data['event']}, \"Event does not exist\")\n if not event.is_payment_enabled():\n raise UnprocessableEntity(\n {'event_id': data['event']}, \"Event having paid ticket must have a payment method\")\n\n schema = TicketSchema\n methods = ['POST', ]\n data_layer = {'session': db.session,\n 'model': Ticket,\n 'methods': {\n 'before_create_object': before_create_object,\n 'before_post': before_post\n }}\n\n\nclass TicketList(ResourceList):\n \"\"\"\n List Tickets based on different params\n \"\"\"\n def before_get(self, args, view_kwargs):\n \"\"\"\n before get method to get the resource id for assigning schema\n :param args:\n :param view_kwargs:\n :return:\n \"\"\"\n if view_kwargs.get('ticket_tag_id') or view_kwargs.get('access_code_id') or view_kwargs.get('order_identifier'):\n self.schema = TicketSchemaPublic\n\n def query(self, view_kwargs):\n \"\"\"\n query method for resource list\n :param view_kwargs:\n :return:\n \"\"\"\n\n if 'Authorization' in request.headers:\n verify_jwt_in_request()\n if current_user.is_super_admin or current_user.is_admin:\n query_ = self.session.query(Ticket)\n elif view_kwargs.get('event_id') and has_access('is_organizer', event_id=view_kwargs['event_id']):\n query_ = self.session.query(Ticket)\n else:\n query_ = self.session.query(Ticket).filter_by(is_hidden=False)\n else:\n query_ = self.session.query(Ticket).filter_by(is_hidden=False)\n\n if view_kwargs.get('ticket_tag_id'):\n ticket_tag = safe_query(self, TicketTag, 'id', view_kwargs['ticket_tag_id'], 'ticket_tag_id')\n query_ = query_.join(ticket_tags_table).filter_by(ticket_tag_id=ticket_tag.id)\n query_ = event_query(self, query_, view_kwargs)\n if view_kwargs.get('access_code_id'):\n access_code = safe_query(self, AccessCode, 'id', view_kwargs['access_code_id'], 'access_code_id')\n # access_code - ticket :: many-to-many relationship\n query_ = Ticket.query.filter(Ticket.access_codes.any(id=access_code.id))\n\n if view_kwargs.get('discount_code_id'):\n discount_code = safe_query(self, DiscountCode, 'id', view_kwargs['discount_code_id'], 'discount_code_id')\n # discount_code - ticket :: many-to-many relationship\n query_ = Ticket.query.filter(Ticket.discount_codes.any(id=discount_code.id))\n\n if view_kwargs.get('order_identifier'):\n order = safe_query(self, Order, 'identifier', view_kwargs['order_identifier'], 'order_identifier')\n ticket_ids = []\n for ticket in order.tickets:\n ticket_ids.append(ticket.id)\n query_ = query_.filter(Ticket.id.in_(tuple(ticket_ids)))\n\n return query_\n\n view_kwargs = True\n methods = ['GET', ]\n decorators = (api.has_permission('is_coorganizer', fetch='event_id',\n fetch_as=\"event_id\", model=Ticket, methods=\"POST\",\n check=lambda a: a.get('event_id') or a.get('event_identifier')),)\n schema = TicketSchema\n data_layer = {'session': db.session,\n 'model': Ticket,\n 'methods': {\n 'query': query,\n }}\n\n\nclass TicketDetail(ResourceDetail):\n \"\"\"\n Ticket Resource\n \"\"\"\n def before_get(self, args, view_kwargs):\n \"\"\"\n before get method to get the resource id for assigning schema\n :param args:\n :param view_kwargs:\n :return:\n \"\"\"\n if view_kwargs.get('attendee_id'):\n self.schema = TicketSchemaPublic\n\n def before_get_object(self, view_kwargs):\n \"\"\"\n before get object method to get the resource id for fetching details\n :param view_kwargs:\n :return:\n \"\"\"\n if view_kwargs.get('attendee_id') is not None:\n attendee = safe_query(self, TicketHolder, 'id', view_kwargs['attendee_id'], 
'attendee_id')\n if attendee.ticket_id is not None:\n view_kwargs['id'] = attendee.ticket_id\n else:\n view_kwargs['id'] = None\n\n def before_update_object(self, ticket, data, view_kwargs):\n \"\"\"\n method to check if paid ticket has payment method before updating ticket object\n :param ticket:\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n if ticket.type == 'paid':\n try:\n event = db.session.query(Event).filter_by(id=ticket.event.id, deleted_at=None).one()\n except NoResultFound:\n raise UnprocessableEntity({'event_id': ticket.event.id}, \"Event does not exist\")\n if not event.is_payment_enabled():\n raise UnprocessableEntity(\n {'event_id': ticket.event.id}, \"Event having paid ticket must have a payment method\")\n\n decorators = (api.has_permission('is_coorganizer', fetch='event_id',\n fetch_as=\"event_id\", model=Ticket, methods=\"PATCH,DELETE\"),)\n schema = TicketSchema\n data_layer = {'session': db.session,\n 'model': Ticket,\n 'methods': {\n 'before_get_object': before_get_object,\n 'before_update_object': before_update_object\n }}\n\n\nclass TicketRelationshipRequired(ResourceRelationship):\n \"\"\"\n Tickets Relationship (Required)\n \"\"\"\n decorators = (api.has_permission('is_coorganizer', fetch='event_id',\n fetch_as=\"event_id\", model=Ticket, methods=\"PATCH\"),)\n methods = ['GET', 'PATCH']\n schema = TicketSchema\n data_layer = {'session': db.session,\n 'model': Ticket}\n\n\nclass TicketRelationshipOptional(ResourceRelationship):\n \"\"\"\n Tickets Relationship (Optional)\n \"\"\"\n decorators = (api.has_permission('is_coorganizer', fetch='event_id',\n fetch_as=\"event_id\", model=Ticket, methods=\"PATCH,DELETE\"),)\n schema = TicketSchema\n data_layer = {'session': db.session,\n 'model': Ticket}\n", "path": "app/api/tickets.py"}], "after_files": [{"content": "from flask import request, current_app\nfrom flask_rest_jsonapi import ResourceDetail, ResourceList, ResourceRelationship\nfrom flask_rest_jsonapi.exceptions import ObjectNotFound\nfrom flask_jwt_extended import current_user, verify_jwt_in_request\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom app.api.bootstrap import api\nfrom app.api.helpers.db import safe_query\nfrom app.api.helpers.permission_manager import has_access\nfrom app.api.helpers.query import event_query\nfrom app.api.helpers.utilities import require_relationship\nfrom app.api.schema.tickets import TicketSchema, TicketSchemaPublic\nfrom app.models import db\nfrom app.models.access_code import AccessCode\nfrom app.models.discount_code import DiscountCode\nfrom app.models.order import Order\nfrom app.models.ticket import Ticket, TicketTag, ticket_tags_table\nfrom app.models.event import Event\nfrom app.models.ticket_holder import TicketHolder\nfrom app.api.helpers.exceptions import ConflictException, MethodNotAllowed, UnprocessableEntity\nfrom app.api.helpers.db import get_count\n\nclass TicketListPost(ResourceList):\n \"\"\"\n Create and List Tickets\n \"\"\"\n def before_post(self, args, kwargs, data):\n \"\"\"\n before post method to check for required relationship and proper permission\n :param args:\n :param kwargs:\n :param data:\n :return:\n \"\"\"\n require_relationship(['event'], data)\n if not has_access('is_coorganizer', event_id=data['event']):\n raise ObjectNotFound({'parameter': 'event_id'},\n \"Event: {} not found\".format(data['event']))\n\n if get_count(db.session.query(Ticket.id).filter_by(name=data['name'], event_id=int(data['event']),\n deleted_at=None)) > 0:\n raise ConflictException({'pointer': 
'/data/attributes/name'}, \"Ticket already exists\")\n\n def before_create_object(self, data, view_kwargs):\n \"\"\"\n before create method to check if paid ticket has a paymentMethod enabled\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n if data.get('event'):\n try:\n event = db.session.query(Event).filter_by(id=data['event'], deleted_at=None).one()\n except NoResultFound:\n raise UnprocessableEntity({'event_id': data['event']}, \"Event does not exist\")\n\n if data.get('type') == 'paid':\n if not event.is_payment_enabled():\n raise UnprocessableEntity(\n {'event_id': data['event']}, \"Event having paid ticket must have a payment method\")\n\n if data.get('sales_ends_at') > event.ends_at:\n raise UnprocessableEntity({'sales_ends_at': '/data/attributes/sales-ends-at'},\n \"Ticket end date cannot be greater than event end date\")\n\n schema = TicketSchema\n methods = ['POST', ]\n data_layer = {'session': db.session,\n 'model': Ticket,\n 'methods': {\n 'before_create_object': before_create_object,\n 'before_post': before_post\n }}\n\n\nclass TicketList(ResourceList):\n \"\"\"\n List Tickets based on different params\n \"\"\"\n def before_get(self, args, view_kwargs):\n \"\"\"\n before get method to get the resource id for assigning schema\n :param args:\n :param view_kwargs:\n :return:\n \"\"\"\n if view_kwargs.get('ticket_tag_id') or view_kwargs.get('access_code_id') or view_kwargs.get('order_identifier'):\n self.schema = TicketSchemaPublic\n\n def query(self, view_kwargs):\n \"\"\"\n query method for resource list\n :param view_kwargs:\n :return:\n \"\"\"\n\n if 'Authorization' in request.headers:\n verify_jwt_in_request()\n if current_user.is_super_admin or current_user.is_admin:\n query_ = self.session.query(Ticket)\n elif view_kwargs.get('event_id') and has_access('is_organizer', event_id=view_kwargs['event_id']):\n query_ = self.session.query(Ticket)\n else:\n query_ = self.session.query(Ticket).filter_by(is_hidden=False)\n else:\n query_ = self.session.query(Ticket).filter_by(is_hidden=False)\n\n if view_kwargs.get('ticket_tag_id'):\n ticket_tag = safe_query(self, TicketTag, 'id', view_kwargs['ticket_tag_id'], 'ticket_tag_id')\n query_ = query_.join(ticket_tags_table).filter_by(ticket_tag_id=ticket_tag.id)\n query_ = event_query(self, query_, view_kwargs)\n if view_kwargs.get('access_code_id'):\n access_code = safe_query(self, AccessCode, 'id', view_kwargs['access_code_id'], 'access_code_id')\n # access_code - ticket :: many-to-many relationship\n query_ = Ticket.query.filter(Ticket.access_codes.any(id=access_code.id))\n\n if view_kwargs.get('discount_code_id'):\n discount_code = safe_query(self, DiscountCode, 'id', view_kwargs['discount_code_id'], 'discount_code_id')\n # discount_code - ticket :: many-to-many relationship\n query_ = Ticket.query.filter(Ticket.discount_codes.any(id=discount_code.id))\n\n if view_kwargs.get('order_identifier'):\n order = safe_query(self, Order, 'identifier', view_kwargs['order_identifier'], 'order_identifier')\n ticket_ids = []\n for ticket in order.tickets:\n ticket_ids.append(ticket.id)\n query_ = query_.filter(Ticket.id.in_(tuple(ticket_ids)))\n\n return query_\n\n view_kwargs = True\n methods = ['GET', ]\n decorators = (api.has_permission('is_coorganizer', fetch='event_id',\n fetch_as=\"event_id\", model=Ticket, methods=\"POST\",\n check=lambda a: a.get('event_id') or a.get('event_identifier')),)\n schema = TicketSchema\n data_layer = {'session': db.session,\n 'model': Ticket,\n 'methods': {\n 'query': query,\n }}\n\n\nclass 
TicketDetail(ResourceDetail):\n \"\"\"\n Ticket Resource\n \"\"\"\n def before_get(self, args, view_kwargs):\n \"\"\"\n before get method to get the resource id for assigning schema\n :param args:\n :param view_kwargs:\n :return:\n \"\"\"\n if view_kwargs.get('attendee_id'):\n self.schema = TicketSchemaPublic\n\n def before_get_object(self, view_kwargs):\n \"\"\"\n before get object method to get the resource id for fetching details\n :param view_kwargs:\n :return:\n \"\"\"\n if view_kwargs.get('attendee_id') is not None:\n attendee = safe_query(self, TicketHolder, 'id', view_kwargs['attendee_id'], 'attendee_id')\n if attendee.ticket_id is not None:\n view_kwargs['id'] = attendee.ticket_id\n else:\n view_kwargs['id'] = None\n\n def before_update_object(self, ticket, data, view_kwargs):\n \"\"\"\n method to check if paid ticket has payment method before updating ticket object\n :param ticket:\n :param data:\n :param view_kwargs:\n :return:\n \"\"\"\n if ticket.type == 'paid':\n try:\n event = db.session.query(Event).filter_by(id=ticket.event.id, deleted_at=None).one()\n except NoResultFound:\n raise UnprocessableEntity({'event_id': ticket.event.id}, \"Event does not exist\")\n if not event.is_payment_enabled():\n raise UnprocessableEntity(\n {'event_id': ticket.event.id}, \"Event having paid ticket must have a payment method\")\n\n decorators = (api.has_permission('is_coorganizer', fetch='event_id',\n fetch_as=\"event_id\", model=Ticket, methods=\"PATCH,DELETE\"),)\n schema = TicketSchema\n data_layer = {'session': db.session,\n 'model': Ticket,\n 'methods': {\n 'before_get_object': before_get_object,\n 'before_update_object': before_update_object\n }}\n\n\nclass TicketRelationshipRequired(ResourceRelationship):\n \"\"\"\n Tickets Relationship (Required)\n \"\"\"\n decorators = (api.has_permission('is_coorganizer', fetch='event_id',\n fetch_as=\"event_id\", model=Ticket, methods=\"PATCH\"),)\n methods = ['GET', 'PATCH']\n schema = TicketSchema\n data_layer = {'session': db.session,\n 'model': Ticket}\n\n\nclass TicketRelationshipOptional(ResourceRelationship):\n \"\"\"\n Tickets Relationship (Optional)\n \"\"\"\n decorators = (api.has_permission('is_coorganizer', fetch='event_id',\n fetch_as=\"event_id\", model=Ticket, methods=\"PATCH,DELETE\"),)\n schema = TicketSchema\n data_layer = {'session': db.session,\n 'model': Ticket}\n", "path": "app/api/tickets.py"}]}
2,628
288
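The server-side check added by the golden diff above reduces to comparing the ticket's `sales_ends_at` with the parent event's `ends_at`. The sketch below isolates that comparison with plain datetimes; `validate_ticket_sales_end` and the use of `ValueError` are illustrative stand-ins for the project's `before_create_object` hook and its `UnprocessableEntity` error:

```python
from datetime import datetime, timedelta


# Reject a ticket whose sales window closes after the event itself has ended.
def validate_ticket_sales_end(sales_ends_at: datetime, event_ends_at: datetime) -> None:
    if sales_ends_at > event_ends_at:
        raise ValueError("Ticket end date cannot be greater than event end date")


event_ends_at = datetime(2020, 1, 10, 18, 0)

# Valid: sales close one day before the event ends.
validate_ticket_sales_end(event_ends_at - timedelta(days=1), event_ends_at)

# Invalid: sales close after the event ends, so the check raises.
try:
    validate_ticket_sales_end(event_ends_at + timedelta(hours=2), event_ends_at)
except ValueError as err:
    print(err)  # Ticket end date cannot be greater than event end date
```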
gh_patches_debug_35583
rasdani/github-patches
git_diff
lightly-ai__lightly-431
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Error: number of embeddings does not match number of samples I got an error while using `lightly-magic` on a larger dataset. My gut feeling tells me that this could be related to the number of workers or the batch size. Let's debug and fix the issue :) I used the following command: ``` lightly-magic token='TOKEN' dataset_id='DATASET_ID' input_dir=/datasets/data trainer.max_epochs=40 loader.batch_size=256 loader.num_workers=12 ``` The dataset consists of `74202` images. ``` Epoch 39: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████| 289/289 [02:24<00:00, 2.00it/s, loss=4.66, v_num=0] Best model is stored at: /datasets/lightly_outputs/2021-06-07/07-44-35/lightly_epoch_9.ckpt ########## Starting to embed your dataset. Compute efficiency: 0.03: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 290/290 [00:31<00:00, 9.25it/s] Embeddings are stored at /datasets/lightly_outputs/2021-06-07/07-44-35/embeddings.csv ########## Starting to upload your dataset to the Lightly platform. Uploading images (with 12 workers). 0%|▏ | 99/74202 [00:09<1:31:21, 13.52imgs/s]/opt/conda/envs/lightly/lib/python3.7/site-packages/lightly_utils/image_processing/metadata.py:53: RuntimeWarning: divide by zero encountered in double_scalars return float(np.where(std == 0., 0, mean / std)) 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 74202/74202 [3:47:52<00:00, 4.35imgs/s]Finished the upload of the dataset. Starting upload of embeddings. Traceback (most recent call last): File "/opt/conda/envs/lightly/lib/python3.7/site-packages/lightly/cli/lightly_cli.py", line 80, in lightly_cli return _lightly_cli(cfg) File "/opt/conda/envs/lightly/lib/python3.7/site-packages/lightly/cli/lightly_cli.py", line 38, in _lightly_cli _upload_cli(cfg) File "/opt/conda/envs/lightly/lib/python3.7/site-packages/lightly/cli/upload_cli.py", line 76, in _upload_cli path_to_embeddings_csv=path_to_embeddings, name=name File "/opt/conda/envs/lightly/lib/python3.7/site-packages/lightly/api/api_workflow_upload_embeddings.py", line 71, in upload_embeddings path_to_embeddings_csv=path_to_embeddings_csv) File "/opt/conda/envs/lightly/lib/python3.7/site-packages/lightly/api/api_workflow_upload_embeddings.py", line 104, in _order_csv_by_filenames raise ValueError(f'There are {len(filenames)} rows in the embedding file, but ' ValueError: There are 74202 rows in the embedding file, but 74208 filenames/samples on the server. Set the environment variable HYDRA_FULL_ERROR=1 for a complete stack trace. 100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 74202/74202 [3:48:50<00:00, 5.40imgs/s] ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `lightly/api/api_workflow_upload_dataset.py` Content: ``` 1 import os 2 import warnings 3 from concurrent.futures.thread import ThreadPoolExecutor 4 from typing import Union 5 6 import lightly_utils.image_processing 7 import tqdm 8 9 from lightly.openapi_generated.swagger_client import TagCreator, SamplesApi, SampleWriteUrls 10 from lightly.openapi_generated.swagger_client.models.sample_create_request import SampleCreateRequest 11 from lightly.api.utils import check_filename, PIL_to_bytes 12 from lightly.openapi_generated.swagger_client.models.initial_tag_create_request import InitialTagCreateRequest 13 from lightly.data.dataset import LightlyDataset 14 15 from lightly.api.utils import retry 16 17 from lightly_utils import image_processing 18 19 20 21 class _UploadDatasetMixin: 22 23 def upload_dataset(self, input: Union[str, LightlyDataset], max_workers: int = 8, 24 mode: str = 'thumbnails', verbose: bool = True): 25 """Uploads a dataset to to the Lightly cloud solution. 26 27 Args: 28 input: 29 one of the following: 30 - the path to the dataset, e.g. "path/to/dataset" 31 - the dataset in form of a LightlyDataset 32 max_workers: 33 Maximum number of workers uploading images in parallel. 34 max_requests: 35 Maximum number of requests a single worker can do before he has 36 to wait for the others. 37 mode: 38 One of [full, thumbnails, metadata]. Whether to upload thumbnails, 39 full images, or metadata only. 40 41 Raises: 42 ValueError if dataset is too large or input has the wrong type 43 RuntimeError if the connection to the server failed. 44 45 """ 46 no_tags_on_server = len(self._get_all_tags()) 47 if no_tags_on_server > 0: 48 warnings.warn(f"Dataset with id {self.dataset_id} has already been completely uploaded to the platform. 
Skipping upload.") 49 return 50 51 # Check input variable 'input' 52 if isinstance(input, str): 53 dataset = LightlyDataset(input_dir=input) 54 elif isinstance(input, LightlyDataset): 55 dataset = input 56 else: 57 raise ValueError(f"input must either be a LightlyDataset or the path to the dataset as str, " 58 f"but is of type {type(input)}") 59 60 # check the allowed dataset size 61 max_dataset_size_str = self.quota_api.get_quota_maximum_dataset_size() 62 max_dataset_size = int(max_dataset_size_str) 63 if len(dataset) > max_dataset_size: 64 msg = f'Your dataset has {len(dataset)} samples which' 65 msg += f' is more than the allowed maximum of {max_dataset_size}' 66 raise ValueError(msg) 67 68 # handle the case where len(dataset) < max_workers 69 max_workers = min(len(dataset), max_workers) 70 max_workers = max(max_workers, 1) 71 72 # upload the samples 73 if verbose: 74 print(f'Uploading images (with {max_workers} workers).', flush=True) 75 76 pbar = tqdm.tqdm(unit='imgs', total=len(dataset)) 77 tqdm_lock = tqdm.tqdm.get_lock() 78 79 # calculate the files size more efficiently 80 lightly_utils.image_processing.metadata._size_in_bytes = lambda img: 0 81 82 # define lambda function for concurrent upload 83 def lambda_(i): 84 # load image 85 image, label, filename = dataset[i] 86 filepath = dataset.get_filepath_from_filename(filename, image) 87 # try to upload image 88 try: 89 self._upload_single_image( 90 image=image, 91 label=label, 92 filename=filename, 93 filepath=filepath, 94 mode=mode, 95 ) 96 success = True 97 except Exception as e: 98 warnings.warn(f"Upload of image {filename} failed with error {e}") 99 success = False 100 101 # update the progress bar 102 tqdm_lock.acquire() # lock 103 pbar.update(1) # update 104 tqdm_lock.release() # unlock 105 # return whether the upload was successful 106 return success 107 108 with ThreadPoolExecutor(max_workers=max_workers) as executor: 109 results = list(executor.map( 110 lambda_, [i for i in range(len(dataset))], chunksize=1)) 111 112 if not all(results): 113 msg = 'Warning: Unsuccessful upload(s)! ' 114 msg += 'This could cause problems when uploading embeddings.' 115 msg += 'Failed at image: {}'.format(results.index(False)) 116 warnings.warn(msg) 117 118 # set image type of data and create initial tag 119 if mode == 'full': 120 img_type = 'full' 121 elif mode == 'thumbnails': 122 img_type = 'thumbnail' 123 else: 124 img_type = 'meta' 125 126 initial_tag_create_request = InitialTagCreateRequest(img_type=img_type, creator=TagCreator.USER_PIP) 127 self.tags_api.create_initial_tag_by_dataset_id(body=initial_tag_create_request, dataset_id=self.dataset_id) 128 129 def _upload_single_image(self, image, label, filename: str, filepath: str, mode): 130 """Uploads a single image to the Lightly platform. 
131 132 """ 133 self.samples_api: SamplesApi 134 135 # check whether the filename is too long 136 basename = filename 137 if not check_filename(basename): 138 msg = (f'Filename {basename} is longer than the allowed maximum of ' 139 'characters and will be skipped.') 140 warnings.warn(msg) 141 return False 142 143 # calculate metadata, and check if corrupted 144 metadata = image_processing.Metadata(image).to_dict() 145 metadata["sizeInBytes"] = os.path.getsize(filepath) 146 147 # try to get exif data 148 try: 149 exifdata = image_processing.Exifdata(image) 150 except Exception: 151 exifdata = None 152 153 # generate thumbnail if necessary 154 thumbname = None 155 if not metadata['is_corrupted'] and mode in ["thumbnails", "full"]: 156 thumbname = '.'.join(basename.split('.')[:-1]) + '_thumb.webp' 157 158 body = SampleCreateRequest( 159 file_name=basename, 160 thumb_name=thumbname, 161 meta_data=metadata, 162 exif=exifdata if exifdata is None else exifdata.to_dict(), 163 ) 164 sample_id = retry( 165 self.samples_api.create_sample_by_dataset_id, 166 body=body, 167 dataset_id=self.dataset_id 168 ).id 169 170 if not metadata['is_corrupted'] and mode in ["thumbnails", "full"]: 171 172 def upload_thumbnail(image, signed_url): 173 thumbnail = image_processing.Thumbnail(image) 174 image_to_upload = thumbnail.to_bytes() 175 retry( 176 self.upload_file_with_signed_url, 177 image_to_upload, 178 signed_url 179 ) 180 thumbnail.thumbnail.close() 181 182 def upload_full_image(filepath, signed_url): 183 with open(filepath, 'rb') as image_to_upload: 184 retry( 185 self.upload_file_with_signed_url, 186 image_to_upload, 187 signed_url 188 ) 189 190 if mode == "thumbnails": 191 thumbnail_url = retry( 192 self.samples_api.get_sample_image_write_url_by_id, 193 dataset_id=self.dataset_id, 194 sample_id=sample_id, 195 is_thumbnail=True 196 ) 197 upload_thumbnail(image, thumbnail_url) 198 elif mode == "full": 199 sample_write_urls: SampleWriteUrls = retry( 200 self.samples_api.get_sample_image_write_urls_by_id, dataset_id=self.dataset_id, sample_id=sample_id 201 ) 202 upload_thumbnail(image, sample_write_urls.thumb) 203 upload_full_image(filepath, sample_write_urls.full) 204 205 206 207 image.close() 208 209 210 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/lightly/api/api_workflow_upload_dataset.py b/lightly/api/api_workflow_upload_dataset.py --- a/lightly/api/api_workflow_upload_dataset.py +++ b/lightly/api/api_workflow_upload_dataset.py @@ -1,12 +1,12 @@ import os import warnings from concurrent.futures.thread import ThreadPoolExecutor -from typing import Union +from typing import Union, List import lightly_utils.image_processing import tqdm -from lightly.openapi_generated.swagger_client import TagCreator, SamplesApi, SampleWriteUrls +from lightly.openapi_generated.swagger_client import TagCreator, SamplesApi, SampleWriteUrls, SampleData from lightly.openapi_generated.swagger_client.models.sample_create_request import SampleCreateRequest from lightly.api.utils import check_filename, PIL_to_bytes from lightly.openapi_generated.swagger_client.models.initial_tag_create_request import InitialTagCreateRequest @@ -73,16 +73,29 @@ if verbose: print(f'Uploading images (with {max_workers} workers).', flush=True) - pbar = tqdm.tqdm(unit='imgs', total=len(dataset)) - tqdm_lock = tqdm.tqdm.get_lock() # calculate the files size more efficiently lightly_utils.image_processing.metadata._size_in_bytes = lambda img: 0 + # get the filenames of the samples already on the server + self.samples_api: SamplesApi + samples: List[SampleData] = self.samples_api.get_samples_by_dataset_id(dataset_id=self.dataset_id) + filenames = [sample.file_name for sample in samples] + if len(filenames) > 0: + print(f"Found {len(filenames)} images already on the server, they are skipped during the upload.") + filenames_set = set(filenames) + + pbar = tqdm.tqdm(unit='imgs', total=len(dataset)-len(filenames)) + tqdm_lock = tqdm.tqdm.get_lock() + # define lambda function for concurrent upload def lambda_(i): # load image image, label, filename = dataset[i] + if filename in filenames_set: + # the sample was already uploaded + return True + filepath = dataset.get_filepath_from_filename(filename, image) # try to upload image try:
{"golden_diff": "diff --git a/lightly/api/api_workflow_upload_dataset.py b/lightly/api/api_workflow_upload_dataset.py\n--- a/lightly/api/api_workflow_upload_dataset.py\n+++ b/lightly/api/api_workflow_upload_dataset.py\n@@ -1,12 +1,12 @@\n import os\n import warnings\n from concurrent.futures.thread import ThreadPoolExecutor\n-from typing import Union\n+from typing import Union, List\n \n import lightly_utils.image_processing\n import tqdm\n \n-from lightly.openapi_generated.swagger_client import TagCreator, SamplesApi, SampleWriteUrls\n+from lightly.openapi_generated.swagger_client import TagCreator, SamplesApi, SampleWriteUrls, SampleData\n from lightly.openapi_generated.swagger_client.models.sample_create_request import SampleCreateRequest\n from lightly.api.utils import check_filename, PIL_to_bytes\n from lightly.openapi_generated.swagger_client.models.initial_tag_create_request import InitialTagCreateRequest\n@@ -73,16 +73,29 @@\n if verbose:\n print(f'Uploading images (with {max_workers} workers).', flush=True)\n \n- pbar = tqdm.tqdm(unit='imgs', total=len(dataset))\n- tqdm_lock = tqdm.tqdm.get_lock()\n \n # calculate the files size more efficiently\n lightly_utils.image_processing.metadata._size_in_bytes = lambda img: 0\n \n+ # get the filenames of the samples already on the server\n+ self.samples_api: SamplesApi\n+ samples: List[SampleData] = self.samples_api.get_samples_by_dataset_id(dataset_id=self.dataset_id)\n+ filenames = [sample.file_name for sample in samples]\n+ if len(filenames) > 0:\n+ print(f\"Found {len(filenames)} images already on the server, they are skipped during the upload.\")\n+ filenames_set = set(filenames)\n+\n+ pbar = tqdm.tqdm(unit='imgs', total=len(dataset)-len(filenames))\n+ tqdm_lock = tqdm.tqdm.get_lock()\n+\n # define lambda function for concurrent upload\n def lambda_(i):\n # load image\n image, label, filename = dataset[i]\n+ if filename in filenames_set:\n+ # the sample was already uploaded\n+ return True\n+\n filepath = dataset.get_filepath_from_filename(filename, image)\n # try to upload image\n try:\n", "issue": "Error: number of embeddings does not match number of samples\nI got an error while using `lightly-magic` on a larger dataset.\r\nMy gut feeling tells me that this could be related to the number of workers or the batch size. 
Let's debug and fix the issue :)\r\n\r\nI used the following command:\r\n```\r\nlightly-magic token='TOKEN' dataset_id='DATASET_ID' input_dir=/datasets/data trainer.max_epochs=40 loader.batch_size=256 loader.num_workers=12\r\n```\r\n\r\nThe dataset consists of `74202` images.\r\n\r\n```\r\nEpoch 39: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 289/289 [02:24<00:00, 2.00it/s, loss=4.66, v_num=0]\r\nBest model is stored at: /datasets/lightly_outputs/2021-06-07/07-44-35/lightly_epoch_9.ckpt \r\n########## Starting to embed your dataset.\r\nCompute efficiency: 0.03: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 290/290 [00:31<00:00, 9.25it/s]\r\nEmbeddings are stored at /datasets/lightly_outputs/2021-06-07/07-44-35/embeddings.csv \r\n########## Starting to upload your dataset to the Lightly platform.\r\nUploading images (with 12 workers).\r\n 0%|\u258f | 99/74202 [00:09<1:31:21, 13.52imgs/s]/opt/conda/envs/lightly/lib/python3.7/site-packages/lightly_utils/image_processing/metadata.py:53: RuntimeWarning: divide by zero encountered in double_scalars \r\n return float(np.where(std == 0., 0, mean / std))\r\n100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 74202/74202 [3:47:52<00:00, 4.35imgs/s]Finished the upload of the dataset.\r\nStarting upload of embeddings.\r\nTraceback (most recent call last):\r\n File \"/opt/conda/envs/lightly/lib/python3.7/site-packages/lightly/cli/lightly_cli.py\", line 80, in lightly_cli \r\n return _lightly_cli(cfg)\r\n File \"/opt/conda/envs/lightly/lib/python3.7/site-packages/lightly/cli/lightly_cli.py\", 
line 38, in _lightly_cli \r\n _upload_cli(cfg)\r\n File \"/opt/conda/envs/lightly/lib/python3.7/site-packages/lightly/cli/upload_cli.py\", line 76, in _upload_cli \r\n path_to_embeddings_csv=path_to_embeddings, name=name\r\n File \"/opt/conda/envs/lightly/lib/python3.7/site-packages/lightly/api/api_workflow_upload_embeddings.py\", line 71, in upload_embeddings \r\n path_to_embeddings_csv=path_to_embeddings_csv)\r\n File \"/opt/conda/envs/lightly/lib/python3.7/site-packages/lightly/api/api_workflow_upload_embeddings.py\", line 104, in _order_csv_by_filenames \r\n raise ValueError(f'There are {len(filenames)} rows in the embedding file, but '\r\nValueError: There are 74202 rows in the embedding file, but 74208 filenames/samples on the server. \r\n\r\nSet the environment variable HYDRA_FULL_ERROR=1 for a complete stack trace.\r\n100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 74202/74202 [3:48:50<00:00, 5.40imgs/s]\r\n```\n", "before_files": [{"content": "import os\nimport warnings\nfrom concurrent.futures.thread import ThreadPoolExecutor\nfrom typing import Union\n\nimport lightly_utils.image_processing\nimport tqdm\n\nfrom lightly.openapi_generated.swagger_client import TagCreator, SamplesApi, SampleWriteUrls\nfrom lightly.openapi_generated.swagger_client.models.sample_create_request import SampleCreateRequest\nfrom lightly.api.utils import check_filename, PIL_to_bytes\nfrom lightly.openapi_generated.swagger_client.models.initial_tag_create_request import InitialTagCreateRequest\nfrom lightly.data.dataset import LightlyDataset\n\nfrom lightly.api.utils import retry\n\nfrom lightly_utils import image_processing\n\n\n\nclass _UploadDatasetMixin:\n\n def upload_dataset(self, input: Union[str, LightlyDataset], max_workers: int = 8,\n mode: str = 'thumbnails', verbose: bool = True):\n \"\"\"Uploads a dataset to to the Lightly cloud solution.\n\n Args:\n input:\n one of the following:\n - the path to the dataset, e.g. \"path/to/dataset\"\n - the dataset in form of a LightlyDataset\n max_workers:\n Maximum number of workers uploading images in parallel.\n max_requests:\n Maximum number of requests a single worker can do before he has\n to wait for the others.\n mode:\n One of [full, thumbnails, metadata]. Whether to upload thumbnails,\n full images, or metadata only.\n\n Raises:\n ValueError if dataset is too large or input has the wrong type\n RuntimeError if the connection to the server failed.\n\n \"\"\"\n no_tags_on_server = len(self._get_all_tags())\n if no_tags_on_server > 0:\n warnings.warn(f\"Dataset with id {self.dataset_id} has already been completely uploaded to the platform. 
Skipping upload.\")\n return\n\n # Check input variable 'input'\n if isinstance(input, str):\n dataset = LightlyDataset(input_dir=input)\n elif isinstance(input, LightlyDataset):\n dataset = input\n else:\n raise ValueError(f\"input must either be a LightlyDataset or the path to the dataset as str, \"\n f\"but is of type {type(input)}\")\n\n # check the allowed dataset size\n max_dataset_size_str = self.quota_api.get_quota_maximum_dataset_size()\n max_dataset_size = int(max_dataset_size_str)\n if len(dataset) > max_dataset_size:\n msg = f'Your dataset has {len(dataset)} samples which'\n msg += f' is more than the allowed maximum of {max_dataset_size}'\n raise ValueError(msg)\n\n # handle the case where len(dataset) < max_workers\n max_workers = min(len(dataset), max_workers)\n max_workers = max(max_workers, 1)\n\n # upload the samples\n if verbose:\n print(f'Uploading images (with {max_workers} workers).', flush=True)\n\n pbar = tqdm.tqdm(unit='imgs', total=len(dataset))\n tqdm_lock = tqdm.tqdm.get_lock()\n\n # calculate the files size more efficiently\n lightly_utils.image_processing.metadata._size_in_bytes = lambda img: 0\n\n # define lambda function for concurrent upload\n def lambda_(i):\n # load image\n image, label, filename = dataset[i]\n filepath = dataset.get_filepath_from_filename(filename, image)\n # try to upload image\n try:\n self._upload_single_image(\n image=image,\n label=label,\n filename=filename,\n filepath=filepath,\n mode=mode,\n )\n success = True\n except Exception as e:\n warnings.warn(f\"Upload of image {filename} failed with error {e}\")\n success = False\n\n # update the progress bar\n tqdm_lock.acquire() # lock\n pbar.update(1) # update\n tqdm_lock.release() # unlock\n # return whether the upload was successful\n return success\n\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n results = list(executor.map(\n lambda_, [i for i in range(len(dataset))], chunksize=1))\n\n if not all(results):\n msg = 'Warning: Unsuccessful upload(s)! 
'\n msg += 'This could cause problems when uploading embeddings.'\n msg += 'Failed at image: {}'.format(results.index(False))\n warnings.warn(msg)\n\n # set image type of data and create initial tag\n if mode == 'full':\n img_type = 'full'\n elif mode == 'thumbnails':\n img_type = 'thumbnail'\n else:\n img_type = 'meta'\n\n initial_tag_create_request = InitialTagCreateRequest(img_type=img_type, creator=TagCreator.USER_PIP)\n self.tags_api.create_initial_tag_by_dataset_id(body=initial_tag_create_request, dataset_id=self.dataset_id)\n\n def _upload_single_image(self, image, label, filename: str, filepath: str, mode):\n \"\"\"Uploads a single image to the Lightly platform.\n\n \"\"\"\n self.samples_api: SamplesApi\n\n # check whether the filename is too long\n basename = filename\n if not check_filename(basename):\n msg = (f'Filename {basename} is longer than the allowed maximum of '\n 'characters and will be skipped.')\n warnings.warn(msg)\n return False\n\n # calculate metadata, and check if corrupted\n metadata = image_processing.Metadata(image).to_dict()\n metadata[\"sizeInBytes\"] = os.path.getsize(filepath)\n\n # try to get exif data\n try:\n exifdata = image_processing.Exifdata(image)\n except Exception:\n exifdata = None\n\n # generate thumbnail if necessary\n thumbname = None\n if not metadata['is_corrupted'] and mode in [\"thumbnails\", \"full\"]:\n thumbname = '.'.join(basename.split('.')[:-1]) + '_thumb.webp'\n\n body = SampleCreateRequest(\n file_name=basename,\n thumb_name=thumbname,\n meta_data=metadata,\n exif=exifdata if exifdata is None else exifdata.to_dict(),\n )\n sample_id = retry(\n self.samples_api.create_sample_by_dataset_id,\n body=body,\n dataset_id=self.dataset_id\n ).id\n\n if not metadata['is_corrupted'] and mode in [\"thumbnails\", \"full\"]:\n\n def upload_thumbnail(image, signed_url):\n thumbnail = image_processing.Thumbnail(image)\n image_to_upload = thumbnail.to_bytes()\n retry(\n self.upload_file_with_signed_url,\n image_to_upload,\n signed_url\n )\n thumbnail.thumbnail.close()\n\n def upload_full_image(filepath, signed_url):\n with open(filepath, 'rb') as image_to_upload:\n retry(\n self.upload_file_with_signed_url,\n image_to_upload,\n signed_url\n )\n\n if mode == \"thumbnails\":\n thumbnail_url = retry(\n self.samples_api.get_sample_image_write_url_by_id,\n dataset_id=self.dataset_id,\n sample_id=sample_id,\n is_thumbnail=True\n )\n upload_thumbnail(image, thumbnail_url)\n elif mode == \"full\":\n sample_write_urls: SampleWriteUrls = retry(\n self.samples_api.get_sample_image_write_urls_by_id, dataset_id=self.dataset_id, sample_id=sample_id\n )\n upload_thumbnail(image, sample_write_urls.thumb)\n upload_full_image(filepath, sample_write_urls.full)\n\n\n\n image.close()\n\n\n", "path": "lightly/api/api_workflow_upload_dataset.py"}], "after_files": [{"content": "import os\nimport warnings\nfrom concurrent.futures.thread import ThreadPoolExecutor\nfrom typing import Union, List\n\nimport lightly_utils.image_processing\nimport tqdm\n\nfrom lightly.openapi_generated.swagger_client import TagCreator, SamplesApi, SampleWriteUrls, SampleData\nfrom lightly.openapi_generated.swagger_client.models.sample_create_request import SampleCreateRequest\nfrom lightly.api.utils import check_filename, PIL_to_bytes\nfrom lightly.openapi_generated.swagger_client.models.initial_tag_create_request import InitialTagCreateRequest\nfrom lightly.data.dataset import LightlyDataset\n\nfrom lightly.api.utils import retry\n\nfrom lightly_utils import image_processing\n\n\n\nclass 
_UploadDatasetMixin:\n\n def upload_dataset(self, input: Union[str, LightlyDataset], max_workers: int = 8,\n mode: str = 'thumbnails', verbose: bool = True):\n \"\"\"Uploads a dataset to to the Lightly cloud solution.\n\n Args:\n input:\n one of the following:\n - the path to the dataset, e.g. \"path/to/dataset\"\n - the dataset in form of a LightlyDataset\n max_workers:\n Maximum number of workers uploading images in parallel.\n max_requests:\n Maximum number of requests a single worker can do before he has\n to wait for the others.\n mode:\n One of [full, thumbnails, metadata]. Whether to upload thumbnails,\n full images, or metadata only.\n\n Raises:\n ValueError if dataset is too large or input has the wrong type\n RuntimeError if the connection to the server failed.\n\n \"\"\"\n no_tags_on_server = len(self._get_all_tags())\n if no_tags_on_server > 0:\n warnings.warn(f\"Dataset with id {self.dataset_id} has already been completely uploaded to the platform. Skipping upload.\")\n return\n\n # Check input variable 'input'\n if isinstance(input, str):\n dataset = LightlyDataset(input_dir=input)\n elif isinstance(input, LightlyDataset):\n dataset = input\n else:\n raise ValueError(f\"input must either be a LightlyDataset or the path to the dataset as str, \"\n f\"but is of type {type(input)}\")\n\n # check the allowed dataset size\n max_dataset_size_str = self.quota_api.get_quota_maximum_dataset_size()\n max_dataset_size = int(max_dataset_size_str)\n if len(dataset) > max_dataset_size:\n msg = f'Your dataset has {len(dataset)} samples which'\n msg += f' is more than the allowed maximum of {max_dataset_size}'\n raise ValueError(msg)\n\n # handle the case where len(dataset) < max_workers\n max_workers = min(len(dataset), max_workers)\n max_workers = max(max_workers, 1)\n\n # upload the samples\n if verbose:\n print(f'Uploading images (with {max_workers} workers).', flush=True)\n\n\n # calculate the files size more efficiently\n lightly_utils.image_processing.metadata._size_in_bytes = lambda img: 0\n\n # get the filenames of the samples already on the server\n self.samples_api: SamplesApi\n samples: List[SampleData] = self.samples_api.get_samples_by_dataset_id(dataset_id=self.dataset_id)\n filenames = [sample.file_name for sample in samples]\n if len(filenames) > 0:\n print(f\"Found {len(filenames)} images already on the server, they are skipped during the upload.\")\n filenames_set = set(filenames)\n\n pbar = tqdm.tqdm(unit='imgs', total=len(dataset)-len(filenames))\n tqdm_lock = tqdm.tqdm.get_lock()\n\n # define lambda function for concurrent upload\n def lambda_(i):\n # load image\n image, label, filename = dataset[i]\n if filename in filenames_set:\n # the sample was already uploaded\n return True\n\n filepath = dataset.get_filepath_from_filename(filename, image)\n # try to upload image\n try:\n self._upload_single_image(\n image=image,\n label=label,\n filename=filename,\n filepath=filepath,\n mode=mode,\n )\n success = True\n except Exception as e:\n warnings.warn(f\"Upload of image {filename} failed with error {e}\")\n success = False\n\n # update the progress bar\n tqdm_lock.acquire() # lock\n pbar.update(1) # update\n tqdm_lock.release() # unlock\n # return whether the upload was successful\n return success\n\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n results = list(executor.map(\n lambda_, [i for i in range(len(dataset))], chunksize=1))\n\n if not all(results):\n msg = 'Warning: Unsuccessful upload(s)! 
'\n msg += 'This could cause problems when uploading embeddings.'\n msg += 'Failed at image: {}'.format(results.index(False))\n warnings.warn(msg)\n\n # set image type of data and create initial tag\n if mode == 'full':\n img_type = 'full'\n elif mode == 'thumbnails':\n img_type = 'thumbnail'\n else:\n img_type = 'meta'\n\n initial_tag_create_request = InitialTagCreateRequest(img_type=img_type, creator=TagCreator.USER_PIP)\n self.tags_api.create_initial_tag_by_dataset_id(body=initial_tag_create_request, dataset_id=self.dataset_id)\n\n def _upload_single_image(self, image, label, filename: str, filepath: str, mode):\n \"\"\"Uploads a single image to the Lightly platform.\n\n \"\"\"\n self.samples_api: SamplesApi\n\n # check whether the filename is too long\n basename = filename\n if not check_filename(basename):\n msg = (f'Filename {basename} is longer than the allowed maximum of '\n 'characters and will be skipped.')\n warnings.warn(msg)\n return False\n\n # calculate metadata, and check if corrupted\n metadata = image_processing.Metadata(image).to_dict()\n metadata[\"sizeInBytes\"] = os.path.getsize(filepath)\n\n # try to get exif data\n try:\n exifdata = image_processing.Exifdata(image)\n except Exception:\n exifdata = None\n\n # generate thumbnail if necessary\n thumbname = None\n if not metadata['is_corrupted'] and mode in [\"thumbnails\", \"full\"]:\n thumbname = '.'.join(basename.split('.')[:-1]) + '_thumb.webp'\n\n body = SampleCreateRequest(\n file_name=basename,\n thumb_name=thumbname,\n meta_data=metadata,\n exif=exifdata if exifdata is None else exifdata.to_dict(),\n )\n sample_id = retry(\n self.samples_api.create_sample_by_dataset_id,\n body=body,\n dataset_id=self.dataset_id\n ).id\n\n if not metadata['is_corrupted'] and mode in [\"thumbnails\", \"full\"]:\n\n def upload_thumbnail(image, signed_url):\n thumbnail = image_processing.Thumbnail(image)\n image_to_upload = thumbnail.to_bytes()\n retry(\n self.upload_file_with_signed_url,\n image_to_upload,\n signed_url\n )\n thumbnail.thumbnail.close()\n\n def upload_full_image(filepath, signed_url):\n with open(filepath, 'rb') as image_to_upload:\n retry(\n self.upload_file_with_signed_url,\n image_to_upload,\n signed_url\n )\n\n if mode == \"thumbnails\":\n thumbnail_url = retry(\n self.samples_api.get_sample_image_write_url_by_id,\n dataset_id=self.dataset_id,\n sample_id=sample_id,\n is_thumbnail=True\n )\n upload_thumbnail(image, thumbnail_url)\n elif mode == \"full\":\n sample_write_urls: SampleWriteUrls = retry(\n self.samples_api.get_sample_image_write_urls_by_id, dataset_id=self.dataset_id, sample_id=sample_id\n )\n upload_thumbnail(image, sample_write_urls.thumb)\n upload_full_image(filepath, sample_write_urls.full)\n\n\n\n image.close()\n\n\n", "path": "lightly/api/api_workflow_upload_dataset.py"}]}
num_tokens: 3,293
num_tokens_diff: 483
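The patch recorded above boils down to making the upload resumable: before uploading, the client asks the server which filenames already exist, skips those files, and sizes the progress bar by the remaining count, so the embeddings CSV and the server-side sample list can no longer drift apart. A minimal standalone sketch of that skip-what-already-exists pattern is shown below; the function and callback names are illustrative placeholders, not the lightly client API.

```python
# Hypothetical helpers, not the lightly API: resume an upload by skipping
# filenames that are already present on the server.
from concurrent.futures import ThreadPoolExecutor
from typing import Callable, Iterable, List


def upload_missing(
    local_filenames: Iterable[str],
    fetch_remote_filenames: Callable[[], List[str]],
    upload_one: Callable[[str], bool],
    max_workers: int = 8,
) -> bool:
    remote = set(fetch_remote_filenames())  # filenames already on the server
    todo = [name for name in local_filenames if name not in remote]
    if remote:
        print(f"Found {len(remote)} files already on the server; skipping them.")
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        results = list(pool.map(upload_one, todo))  # upload only the missing files
    return all(results)
```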
problem_id: gh_patches_debug_1543
source: rasdani/github-patches
task_type: git_diff
in_source_id: ansible__molecule-659
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- If raw_env_vars contains a var whose value is not a string, molecule silently fails to execute Ansible # Issue Type - Bug report # Molecule and Ansible details ``` ansible --version 2.2.0.0 molecule --version 1.14.1.dev37 ``` Sha256 of commit is 8eed3f539f0b30b04d98f8ab418a2a5d55cce01f, molecule --version for source checkout seems to be stuck on 1.14 - Molecule installation method: source # Desired Behaviour There is no possible key value pair in the ``ansible: {raw_env_vars: {}}`` section that prevents Ansible from getting executed # Actual Behaviour (Bug report only) Except from molecule.yml: ``` raw_env_vars: RETRY_FILES_ENABLED: 0 ``` ``` molecule --debug syntax; echo $? --> Checking playbook's syntax... DEBUG: COMMAND /usr/bin/ansible-playbook tests/test.yml -vvvv --inventory-file=localhost, --syntax-check --limit=all --timeout=30 --diff 255 ``` ``` $ /usr/bin/ansible-playbook tests/test.yml -vvvv --inventory-file=localhost, --syntax-check --limit=all --timeout=30 --diff; echo $? Using $ROLE_DIR/ansible.cfg as config file Set default localhost to localhost 3 plays in tests/test.yml playbook: tests/test.yml 0 ``` The same happens with ``molecule converge``. This is especially bad since that one is really hard to debug, because there is no error message at all. I am not even sure if this is the fault of molecule, because [the doc to sh.pys `_env` arg only mentions it must be a dict, not that all values must be string types](https://amoffat.github.io/sh/sections/special_arguments.html?highlight=_env#env). --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `molecule/ansible_playbook.py` Content: ``` 1 # Copyright (c) 2015-2016 Cisco Systems, Inc. 2 # 3 # Permission is hereby granted, free of charge, to any person obtaining a copy 4 # of this software and associated documentation files (the "Software"), to 5 # deal in the Software without restriction, including without limitation the 6 # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or 7 # sell copies of the Software, and to permit persons to whom the Software is 8 # furnished to do so, subject to the following conditions: 9 # 10 # The above copyright notice and this permission notice shall be included in 11 # all copies or substantial portions of the Software. 12 # 13 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 14 # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 15 # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 16 # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 17 # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 18 # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 19 # DEALINGS IN THE SOFTWARE. 20 21 import os 22 import sh 23 24 from molecule import util 25 26 27 class AnsiblePlaybook(object): 28 def __init__(self, 29 args, 30 connection_params, 31 raw_ansible_args=None, 32 _env=None, 33 _out=util.callback_info, 34 _err=util.callback_error, 35 debug=False): 36 """ 37 Sets up requirements for ansible-playbook and returns None. 38 39 :param args: A dict containing arguments to pass to ansible-playbook. 40 :param connection_params: A dict containing driver specific connection 41 params to pass to ansible-playbook. 
42 :param _env: An optional environment to pass to underlying :func:`sh` 43 call. 44 :param _out: An optional function to process STDOUT for underlying 45 :func:`sh` call. 46 :param _err: An optional function to process STDERR for underlying 47 :func:`sh` call. 48 :param debug: An optional bool to toggle debug output. 49 :return: None 50 """ 51 self._playbook = None 52 self._ansible = None 53 self._cli = {} 54 self._cli_pos = [] 55 self._raw_ansible_args = raw_ansible_args 56 self._env = _env if _env else os.environ.copy() 57 self._debug = debug 58 59 for k, v in args.iteritems(): 60 self.parse_arg(k, v) 61 62 for k, v in connection_params.items(): 63 self.add_cli_arg(k, v) 64 65 self.add_env_arg('PYTHONUNBUFFERED', '1') 66 self.add_env_arg('ANSIBLE_FORCE_COLOR', 'true') 67 68 self.add_cli_arg('_out', _out) 69 self.add_cli_arg('_err', _err) 70 71 @property 72 def env(self): 73 return self._env 74 75 def bake(self): 76 """ 77 Bake ansible-playbook command so it's ready to execute and returns 78 None. 79 80 :return: None 81 """ 82 self._ansible = sh.ansible_playbook.bake( 83 self._playbook, *self._cli_pos, _env=self._env, **self._cli) 84 if self._raw_ansible_args: 85 self._ansible = self._ansible.bake(self._raw_ansible_args) 86 87 def parse_arg(self, name, value): 88 """ 89 Adds argument to CLI or environment and returns None. 90 91 :param name: A string containing the name of argument to be added. 92 :param value: The value of argument to be added. 93 :return: None 94 """ 95 96 if name == 'raw_env_vars': 97 for k, v in value.iteritems(): 98 self.add_env_arg(k, v) 99 return 100 101 if name == 'host_key_checking': 102 self.add_env_arg('ANSIBLE_HOST_KEY_CHECKING', str(value).lower()) 103 return 104 105 if name == 'raw_ssh_args': 106 self.add_env_arg('ANSIBLE_SSH_ARGS', ' '.join(value)) 107 return 108 109 if name == 'config_file': 110 self.add_env_arg('ANSIBLE_CONFIG', value) 111 return 112 113 if name == 'playbook': 114 self._playbook = value 115 return 116 117 if name == 'host_vars' or name == 'group_vars': 118 return 119 120 # verbose is weird, must be -vvvv not verbose=vvvv 121 if name == 'verbose' and value: 122 # for cases where someone passes in verbose: True 123 if value is True: 124 value = 'vvvv' 125 self._cli_pos.append('-' + value) 126 return 127 128 self.add_cli_arg(name, value) 129 130 def add_cli_arg(self, name, value): 131 """ 132 Adds argument to CLI passed to ansible-playbook and returns None. 133 134 :param name: A string containing the name of argument to be added. 135 :param value: The value of argument to be added. 136 :return: None 137 """ 138 if value: 139 self._cli[name] = value 140 141 def remove_cli_arg(self, name): 142 """ 143 Removes CLI argument and returns None. 144 145 :param name: A string containing the name of argument to be removed. 146 :return: None 147 """ 148 self._cli.pop(name, None) 149 150 def add_env_arg(self, name, value): 151 """ 152 Adds argument to environment passed to ansible-playbook and returns 153 None. 154 155 :param name: A string containing the name of argument to be added. 156 :param value: The value of argument to be added. 157 :return: None 158 """ 159 self._env[name] = value 160 161 def remove_env_arg(self, name): 162 """ 163 Removes environment argument and returns None. 164 165 :param name: A string containing the name of argument to be removed. 166 :return: None 167 """ 168 self._env.pop(name, None) 169 170 def execute(self, hide_errors=False): 171 """ 172 Executes ansible-playbook and returns command's stdout. 
173 174 :param hide_errors: An optional bool to toggle output of errors. 175 :return: The command's output, otherwise sys.exit on command failure. 176 """ 177 if self._ansible is None: 178 self.bake() 179 180 try: 181 return None, util.run_command( 182 self._ansible, debug=self._debug).stdout 183 except sh.ErrorReturnCode as e: 184 if not hide_errors: 185 util.print_error(str(e)) 186 187 return e.exit_code, None 188 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/molecule/ansible_playbook.py b/molecule/ansible_playbook.py --- a/molecule/ansible_playbook.py +++ b/molecule/ansible_playbook.py @@ -95,6 +95,8 @@ if name == 'raw_env_vars': for k, v in value.iteritems(): + if not isinstance(v, basestring): + v = unicode(v) self.add_env_arg(k, v) return
{"golden_diff": "diff --git a/molecule/ansible_playbook.py b/molecule/ansible_playbook.py\n--- a/molecule/ansible_playbook.py\n+++ b/molecule/ansible_playbook.py\n@@ -95,6 +95,8 @@\n \n if name == 'raw_env_vars':\n for k, v in value.iteritems():\n+ if not isinstance(v, basestring):\n+ v = unicode(v)\n self.add_env_arg(k, v)\n return\n", "issue": "If raw_env_vars contains a var whose value is not a string, molecule silently fails to execute Ansible\n# Issue Type\r\n\r\n- Bug report\r\n\r\n# Molecule and Ansible details\r\n\r\n```\r\nansible --version\r\n2.2.0.0\r\n\r\nmolecule --version\r\n\r\n1.14.1.dev37\r\n```\r\n\r\nSha256 of commit is 8eed3f539f0b30b04d98f8ab418a2a5d55cce01f, molecule --version for source checkout seems to be stuck on 1.14\r\n\r\n- Molecule installation method: source\r\n\r\n# Desired Behaviour\r\n\r\nThere is no possible key value pair in the ``ansible: {raw_env_vars: {}}`` section that prevents Ansible from getting executed\r\n\r\n\r\n# Actual Behaviour (Bug report only)\r\n\r\nExcept from molecule.yml:\r\n\r\n```\r\n raw_env_vars:\r\n RETRY_FILES_ENABLED: 0\r\n```\r\n\r\n```\r\nmolecule --debug syntax; echo $?\r\n--> Checking playbook's syntax...\r\nDEBUG: COMMAND\r\n/usr/bin/ansible-playbook tests/test.yml -vvvv --inventory-file=localhost, --syntax-check --limit=all --timeout=30 --diff\r\n255\r\n```\r\n\r\n```\r\n$ /usr/bin/ansible-playbook tests/test.yml -vvvv --inventory-file=localhost, --syntax-check --limit=all --timeout=30 --diff; echo $?\r\nUsing $ROLE_DIR/ansible.cfg as config file\r\nSet default localhost to localhost\r\n3 plays in tests/test.yml\r\n\r\nplaybook: tests/test.yml\r\n0\r\n```\r\nThe same happens with ``molecule converge``.\r\n\r\nThis is especially bad since that one is really hard to debug, because there is no error message at all.\r\n\r\nI am not even sure if this is the fault of molecule, because [the doc to sh.pys `_env` arg only mentions it must be a dict, not that all values must be string types](https://amoffat.github.io/sh/sections/special_arguments.html?highlight=_env#env).\n", "before_files": [{"content": "# Copyright (c) 2015-2016 Cisco Systems, Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to\n# deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n# sell copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nimport os\nimport sh\n\nfrom molecule import util\n\n\nclass AnsiblePlaybook(object):\n def __init__(self,\n args,\n connection_params,\n raw_ansible_args=None,\n _env=None,\n _out=util.callback_info,\n _err=util.callback_error,\n debug=False):\n \"\"\"\n Sets up requirements for ansible-playbook and returns None.\n\n :param args: A dict containing arguments to pass to ansible-playbook.\n :param connection_params: A dict containing driver specific connection\n params to pass to ansible-playbook.\n :param _env: An optional environment to pass to underlying :func:`sh`\n call.\n :param _out: An optional function to process STDOUT for underlying\n :func:`sh` call.\n :param _err: An optional function to process STDERR for underlying\n :func:`sh` call.\n :param debug: An optional bool to toggle debug output.\n :return: None\n \"\"\"\n self._playbook = None\n self._ansible = None\n self._cli = {}\n self._cli_pos = []\n self._raw_ansible_args = raw_ansible_args\n self._env = _env if _env else os.environ.copy()\n self._debug = debug\n\n for k, v in args.iteritems():\n self.parse_arg(k, v)\n\n for k, v in connection_params.items():\n self.add_cli_arg(k, v)\n\n self.add_env_arg('PYTHONUNBUFFERED', '1')\n self.add_env_arg('ANSIBLE_FORCE_COLOR', 'true')\n\n self.add_cli_arg('_out', _out)\n self.add_cli_arg('_err', _err)\n\n @property\n def env(self):\n return self._env\n\n def bake(self):\n \"\"\"\n Bake ansible-playbook command so it's ready to execute and returns\n None.\n\n :return: None\n \"\"\"\n self._ansible = sh.ansible_playbook.bake(\n self._playbook, *self._cli_pos, _env=self._env, **self._cli)\n if self._raw_ansible_args:\n self._ansible = self._ansible.bake(self._raw_ansible_args)\n\n def parse_arg(self, name, value):\n \"\"\"\n Adds argument to CLI or environment and returns None.\n\n :param name: A string containing the name of argument to be added.\n :param value: The value of argument to be added.\n :return: None\n \"\"\"\n\n if name == 'raw_env_vars':\n for k, v in value.iteritems():\n self.add_env_arg(k, v)\n return\n\n if name == 'host_key_checking':\n self.add_env_arg('ANSIBLE_HOST_KEY_CHECKING', str(value).lower())\n return\n\n if name == 'raw_ssh_args':\n self.add_env_arg('ANSIBLE_SSH_ARGS', ' '.join(value))\n return\n\n if name == 'config_file':\n self.add_env_arg('ANSIBLE_CONFIG', value)\n return\n\n if name == 'playbook':\n self._playbook = value\n return\n\n if name == 'host_vars' or name == 'group_vars':\n return\n\n # verbose is weird, must be -vvvv not verbose=vvvv\n if name == 'verbose' and value:\n # for cases where someone passes in verbose: True\n if value is True:\n value = 'vvvv'\n self._cli_pos.append('-' + value)\n return\n\n self.add_cli_arg(name, value)\n\n def add_cli_arg(self, name, value):\n \"\"\"\n Adds argument to CLI passed to ansible-playbook and returns None.\n\n :param name: A string containing the name of argument to be added.\n :param value: The value of argument to be added.\n :return: None\n \"\"\"\n if value:\n self._cli[name] = value\n\n def remove_cli_arg(self, name):\n \"\"\"\n Removes CLI argument and returns None.\n\n :param name: A string containing the name of argument to be removed.\n :return: None\n \"\"\"\n self._cli.pop(name, None)\n\n def add_env_arg(self, name, 
value):\n \"\"\"\n Adds argument to environment passed to ansible-playbook and returns\n None.\n\n :param name: A string containing the name of argument to be added.\n :param value: The value of argument to be added.\n :return: None\n \"\"\"\n self._env[name] = value\n\n def remove_env_arg(self, name):\n \"\"\"\n Removes environment argument and returns None.\n\n :param name: A string containing the name of argument to be removed.\n :return: None\n \"\"\"\n self._env.pop(name, None)\n\n def execute(self, hide_errors=False):\n \"\"\"\n Executes ansible-playbook and returns command's stdout.\n\n :param hide_errors: An optional bool to toggle output of errors.\n :return: The command's output, otherwise sys.exit on command failure.\n \"\"\"\n if self._ansible is None:\n self.bake()\n\n try:\n return None, util.run_command(\n self._ansible, debug=self._debug).stdout\n except sh.ErrorReturnCode as e:\n if not hide_errors:\n util.print_error(str(e))\n\n return e.exit_code, None\n", "path": "molecule/ansible_playbook.py"}], "after_files": [{"content": "# Copyright (c) 2015-2016 Cisco Systems, Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to\n# deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n# sell copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nimport os\nimport sh\n\nfrom molecule import util\n\n\nclass AnsiblePlaybook(object):\n def __init__(self,\n args,\n connection_params,\n raw_ansible_args=None,\n _env=None,\n _out=util.callback_info,\n _err=util.callback_error,\n debug=False):\n \"\"\"\n Sets up requirements for ansible-playbook and returns None.\n\n :param args: A dict containing arguments to pass to ansible-playbook.\n :param connection_params: A dict containing driver specific connection\n params to pass to ansible-playbook.\n :param _env: An optional environment to pass to underlying :func:`sh`\n call.\n :param _out: An optional function to process STDOUT for underlying\n :func:`sh` call.\n :param _err: An optional function to process STDERR for underlying\n :func:`sh` call.\n :param debug: An optional bool to toggle debug output.\n :return: None\n \"\"\"\n self._playbook = None\n self._ansible = None\n self._cli = {}\n self._cli_pos = []\n self._raw_ansible_args = raw_ansible_args\n self._env = _env if _env else os.environ.copy()\n self._debug = debug\n\n for k, v in args.iteritems():\n self.parse_arg(k, v)\n\n for k, v in connection_params.items():\n self.add_cli_arg(k, v)\n\n self.add_env_arg('PYTHONUNBUFFERED', '1')\n self.add_env_arg('ANSIBLE_FORCE_COLOR', 'true')\n\n self.add_cli_arg('_out', _out)\n self.add_cli_arg('_err', _err)\n\n @property\n def env(self):\n return self._env\n\n def bake(self):\n \"\"\"\n Bake ansible-playbook command so it's ready to execute and returns\n None.\n\n :return: None\n \"\"\"\n self._ansible = sh.ansible_playbook.bake(\n self._playbook, *self._cli_pos, _env=self._env, **self._cli)\n if self._raw_ansible_args:\n self._ansible = self._ansible.bake(self._raw_ansible_args)\n\n def parse_arg(self, name, value):\n \"\"\"\n Adds argument to CLI or environment and returns None.\n\n :param name: A string containing the name of argument to be added.\n :param value: The value of argument to be added.\n :return: None\n \"\"\"\n\n if name == 'raw_env_vars':\n for k, v in value.iteritems():\n if not isinstance(v, basestring):\n v = unicode(v)\n self.add_env_arg(k, v)\n return\n\n if name == 'host_key_checking':\n self.add_env_arg('ANSIBLE_HOST_KEY_CHECKING', str(value).lower())\n return\n\n if name == 'raw_ssh_args':\n self.add_env_arg('ANSIBLE_SSH_ARGS', ' '.join(value))\n return\n\n if name == 'config_file':\n self.add_env_arg('ANSIBLE_CONFIG', value)\n return\n\n if name == 'playbook':\n self._playbook = value\n return\n\n if name == 'host_vars' or name == 'group_vars':\n return\n\n # verbose is weird, must be -vvvv not verbose=vvvv\n if name == 'verbose' and value:\n # for cases where someone passes in verbose: True\n if value is True:\n value = 'vvvv'\n self._cli_pos.append('-' + value)\n return\n\n self.add_cli_arg(name, value)\n\n def add_cli_arg(self, name, value):\n \"\"\"\n Adds argument to CLI passed to ansible-playbook and returns None.\n\n :param name: A string containing the name of argument to be added.\n :param value: The value of argument to be added.\n :return: None\n \"\"\"\n if value:\n self._cli[name] = value\n\n def remove_cli_arg(self, name):\n \"\"\"\n Removes CLI argument and returns None.\n\n :param name: A string containing the name of argument to be removed.\n :return: None\n \"\"\"\n 
self._cli.pop(name, None)\n\n def add_env_arg(self, name, value):\n \"\"\"\n Adds argument to environment passed to ansible-playbook and returns\n None.\n\n :param name: A string containing the name of argument to be added.\n :param value: The value of argument to be added.\n :return: None\n \"\"\"\n self._env[name] = value\n\n def remove_env_arg(self, name):\n \"\"\"\n Removes environment argument and returns None.\n\n :param name: A string containing the name of argument to be removed.\n :return: None\n \"\"\"\n self._env.pop(name, None)\n\n def execute(self, hide_errors=False):\n \"\"\"\n Executes ansible-playbook and returns command's stdout.\n\n :param hide_errors: An optional bool to toggle output of errors.\n :return: The command's output, otherwise sys.exit on command failure.\n \"\"\"\n if self._ansible is None:\n self.bake()\n\n try:\n return None, util.run_command(\n self._ansible, debug=self._debug).stdout\n except sh.ErrorReturnCode as e:\n if not hide_errors:\n util.print_error(str(e))\n\n return e.exit_code, None\n", "path": "molecule/ansible_playbook.py"}]}
num_tokens: 2,561
num_tokens_diff: 100
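The one-line fix in the record above works because a process environment only accepts text: once YAML hands molecule `RETRY_FILES_ENABLED: 0` as an integer, the value has to be turned into a string before it reaches the wrapped command, otherwise the command fails without a useful error, as described in the issue. A rough illustration of the same guard using only the standard library follows; it uses plain `subprocess` rather than molecule's `sh`-based wrapper, so the names here are assumptions, not molecule's own.

```python
# Illustrative sketch: coerce non-string environment values to text before
# spawning a child process, mirroring what the patch does with unicode().
import os
import subprocess


def run_with_env(cmd, extra_env):
    env = os.environ.copy()
    for key, value in extra_env.items():
        # OS-level environments require string values; an int such as 0 would fail.
        env[key] = value if isinstance(value, str) else str(value)
    return subprocess.run(cmd, env=env, check=True)


# e.g. run_with_env(["ansible-playbook", "site.yml"], {"RETRY_FILES_ENABLED": 0})
```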
problem_id: gh_patches_debug_6408
source: rasdani/github-patches
task_type: git_diff
in_source_id: hpcaitech__ColossalAI-5228
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [tensor] fix some unittests [tensor] fix some unittests [tensor] fix some unittests --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `applications/Colossal-LLaMA-2/colossal_llama2/tokenizer/init_tokenizer.py` Content: ``` 1 #!/usr/bin/env python 2 # -*- encoding: utf-8 -*- 3 4 """ 5 Initialize new tokenizer for continual pre-training 6 """ 7 8 import argparse 9 import os 10 import json 11 from typing import List, Union 12 13 from transformers.models.llama.tokenization_llama import LlamaTokenizer 14 from sentencepiece import sentencepiece_model_pb2 as sp_pb2_model 15 16 from colossalai.logging import get_dist_logger 17 18 os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python" 19 20 logger = get_dist_logger() 21 22 23 def expand_vocab_tokenizer( 24 source_tokenizer_dir: Union[str, os.PathLike], target_tokenizer_dir: Union[str, os.PathLike], new_tokens: List[str] 25 ) -> None: 26 """Expand tokenizer for continue pre-training.""" 27 if os.path.exists(target_tokenizer_dir): 28 raise RuntimeError(f"Find existed directory {target_tokenizer_dir}") 29 30 source_tokenizer = LlamaTokenizer.from_pretrained(source_tokenizer_dir) 31 logger.info(source_tokenizer) 32 source_sp_processor = source_tokenizer.sp_model 33 source_spm = sp_pb2_model.ModelProto() 34 source_spm.ParseFromString(source_sp_processor.serialized_model_proto()) 35 36 logger.info(f"Source tokenizer size: {len(source_sp_processor)}") 37 38 # Add new tokens to source tokenizer. 39 source_spm_tokens = set([p.piece for p in source_spm.pieces]) 40 for piece in new_tokens: 41 assert isinstance(piece, str), f"Invalid token({piece}) type {type(piece)}" 42 if piece in source_spm_tokens: 43 # Skip existed token. 
44 continue 45 new_p = sp_pb2_model.ModelProto().SentencePiece() 46 new_p.piece = piece 47 new_p.score = 0 48 source_spm.pieces.append(new_p) 49 logger.info(f"Expand vocab from {len(source_spm_tokens)} to {len(source_spm.pieces)}") 50 51 # Save 52 os.makedirs(target_tokenizer_dir) 53 target_tokenizer_model_path = os.path.join(target_tokenizer_dir, "tokenizer.model") 54 with open(file=target_tokenizer_model_path, mode="wb") as fp: 55 fp.write(source_spm.SerializeToString()) 56 57 target_tokenizer = LlamaTokenizer(vocab_file=target_tokenizer_model_path) 58 target_tokenizer.save_pretrained(save_directory=target_tokenizer_dir) 59 logger.info(f"Successfully save expand tokenizer to {target_tokenizer_dir}") 60 61 62 def main(): 63 parser = argparse.ArgumentParser() 64 parser.add_argument( 65 "--source_tokenizer_dir", type=str, required=True, default=None, help="Source tokenizer directory" 66 ) 67 parser.add_argument( 68 "--target_tokenizer_dir", type=str, required=True, default=None, help="Target tokenizer directory" 69 ) 70 parser.add_argument( 71 "--expand_tokens_file", 72 type=str, 73 required=True, 74 default=None, 75 help="Path of the file containing tokens to be extended", 76 ) 77 args = parser.parse_args() 78 79 expand_tokens = [] 80 with open(file=args.expand_tokens_file, mode="r", encoding="utf-8") as fp_reader: 81 for line in fp_reader: 82 item = json.loads(line) 83 # e.g., {"piece": "你好"} 84 token = item["piece"] 85 if token in expand_tokens: 86 continue 87 expand_tokens.append(token) 88 expand_tokens.sort(key=lambda t: len(t), reverse=False) 89 90 expand_vocab_tokenizer( 91 source_tokenizer_dir=args.source_tokenizer_dir, 92 target_tokenizer_dir=args.target_tokenizer_dir, 93 new_tokens=expand_tokens, 94 ) 95 96 97 if __name__ == "__main__": 98 main() 99 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/applications/Colossal-LLaMA-2/colossal_llama2/tokenizer/init_tokenizer.py b/applications/Colossal-LLaMA-2/colossal_llama2/tokenizer/init_tokenizer.py --- a/applications/Colossal-LLaMA-2/colossal_llama2/tokenizer/init_tokenizer.py +++ b/applications/Colossal-LLaMA-2/colossal_llama2/tokenizer/init_tokenizer.py @@ -6,12 +6,12 @@ """ import argparse -import os import json +import os from typing import List, Union -from transformers.models.llama.tokenization_llama import LlamaTokenizer from sentencepiece import sentencepiece_model_pb2 as sp_pb2_model +from transformers.models.llama.tokenization_llama import LlamaTokenizer from colossalai.logging import get_dist_logger
{"golden_diff": "diff --git a/applications/Colossal-LLaMA-2/colossal_llama2/tokenizer/init_tokenizer.py b/applications/Colossal-LLaMA-2/colossal_llama2/tokenizer/init_tokenizer.py\n--- a/applications/Colossal-LLaMA-2/colossal_llama2/tokenizer/init_tokenizer.py\n+++ b/applications/Colossal-LLaMA-2/colossal_llama2/tokenizer/init_tokenizer.py\n@@ -6,12 +6,12 @@\n \"\"\"\n \n import argparse\n-import os\n import json\n+import os\n from typing import List, Union\n \n-from transformers.models.llama.tokenization_llama import LlamaTokenizer\n from sentencepiece import sentencepiece_model_pb2 as sp_pb2_model\n+from transformers.models.llama.tokenization_llama import LlamaTokenizer\n \n from colossalai.logging import get_dist_logger\n", "issue": "[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\n\"\"\"\nInitialize new tokenizer for continual pre-training\n\"\"\"\n\nimport argparse\nimport os\nimport json\nfrom typing import List, Union\n\nfrom transformers.models.llama.tokenization_llama import LlamaTokenizer\nfrom sentencepiece import sentencepiece_model_pb2 as sp_pb2_model\n\nfrom colossalai.logging import get_dist_logger\n\nos.environ[\"PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION\"] = \"python\"\n\nlogger = get_dist_logger()\n\n\ndef expand_vocab_tokenizer(\n source_tokenizer_dir: Union[str, os.PathLike], target_tokenizer_dir: Union[str, os.PathLike], new_tokens: List[str]\n) -> None:\n \"\"\"Expand tokenizer for continue pre-training.\"\"\"\n if os.path.exists(target_tokenizer_dir):\n raise RuntimeError(f\"Find existed directory {target_tokenizer_dir}\")\n\n source_tokenizer = LlamaTokenizer.from_pretrained(source_tokenizer_dir)\n logger.info(source_tokenizer)\n source_sp_processor = source_tokenizer.sp_model\n source_spm = sp_pb2_model.ModelProto()\n source_spm.ParseFromString(source_sp_processor.serialized_model_proto())\n\n logger.info(f\"Source tokenizer size: {len(source_sp_processor)}\")\n\n # Add new tokens to source tokenizer.\n source_spm_tokens = set([p.piece for p in source_spm.pieces])\n for piece in new_tokens:\n assert isinstance(piece, str), f\"Invalid token({piece}) type {type(piece)}\"\n if piece in source_spm_tokens:\n # Skip existed token.\n continue\n new_p = sp_pb2_model.ModelProto().SentencePiece()\n new_p.piece = piece\n new_p.score = 0\n source_spm.pieces.append(new_p)\n logger.info(f\"Expand vocab from {len(source_spm_tokens)} to {len(source_spm.pieces)}\")\n\n # Save\n os.makedirs(target_tokenizer_dir)\n target_tokenizer_model_path = os.path.join(target_tokenizer_dir, \"tokenizer.model\")\n with open(file=target_tokenizer_model_path, mode=\"wb\") as fp:\n fp.write(source_spm.SerializeToString())\n\n target_tokenizer = LlamaTokenizer(vocab_file=target_tokenizer_model_path)\n target_tokenizer.save_pretrained(save_directory=target_tokenizer_dir)\n logger.info(f\"Successfully save expand tokenizer to {target_tokenizer_dir}\")\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--source_tokenizer_dir\", type=str, required=True, default=None, help=\"Source tokenizer directory\"\n )\n parser.add_argument(\n \"--target_tokenizer_dir\", type=str, required=True, default=None, help=\"Target tokenizer directory\"\n )\n parser.add_argument(\n \"--expand_tokens_file\",\n type=str,\n required=True,\n default=None,\n help=\"Path of the file containing tokens to be extended\",\n )\n args = parser.parse_args()\n\n expand_tokens = []\n 
with open(file=args.expand_tokens_file, mode=\"r\", encoding=\"utf-8\") as fp_reader:\n for line in fp_reader:\n item = json.loads(line)\n # e.g., {\"piece\": \"\u4f60\u597d\"}\n token = item[\"piece\"]\n if token in expand_tokens:\n continue\n expand_tokens.append(token)\n expand_tokens.sort(key=lambda t: len(t), reverse=False)\n\n expand_vocab_tokenizer(\n source_tokenizer_dir=args.source_tokenizer_dir,\n target_tokenizer_dir=args.target_tokenizer_dir,\n new_tokens=expand_tokens,\n )\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "applications/Colossal-LLaMA-2/colossal_llama2/tokenizer/init_tokenizer.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\n\"\"\"\nInitialize new tokenizer for continual pre-training\n\"\"\"\n\nimport argparse\nimport json\nimport os\nfrom typing import List, Union\n\nfrom sentencepiece import sentencepiece_model_pb2 as sp_pb2_model\nfrom transformers.models.llama.tokenization_llama import LlamaTokenizer\n\nfrom colossalai.logging import get_dist_logger\n\nos.environ[\"PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION\"] = \"python\"\n\nlogger = get_dist_logger()\n\n\ndef expand_vocab_tokenizer(\n source_tokenizer_dir: Union[str, os.PathLike], target_tokenizer_dir: Union[str, os.PathLike], new_tokens: List[str]\n) -> None:\n \"\"\"Expand tokenizer for continue pre-training.\"\"\"\n if os.path.exists(target_tokenizer_dir):\n raise RuntimeError(f\"Find existed directory {target_tokenizer_dir}\")\n\n source_tokenizer = LlamaTokenizer.from_pretrained(source_tokenizer_dir)\n logger.info(source_tokenizer)\n source_sp_processor = source_tokenizer.sp_model\n source_spm = sp_pb2_model.ModelProto()\n source_spm.ParseFromString(source_sp_processor.serialized_model_proto())\n\n logger.info(f\"Source tokenizer size: {len(source_sp_processor)}\")\n\n # Add new tokens to source tokenizer.\n source_spm_tokens = set([p.piece for p in source_spm.pieces])\n for piece in new_tokens:\n assert isinstance(piece, str), f\"Invalid token({piece}) type {type(piece)}\"\n if piece in source_spm_tokens:\n # Skip existed token.\n continue\n new_p = sp_pb2_model.ModelProto().SentencePiece()\n new_p.piece = piece\n new_p.score = 0\n source_spm.pieces.append(new_p)\n logger.info(f\"Expand vocab from {len(source_spm_tokens)} to {len(source_spm.pieces)}\")\n\n # Save\n os.makedirs(target_tokenizer_dir)\n target_tokenizer_model_path = os.path.join(target_tokenizer_dir, \"tokenizer.model\")\n with open(file=target_tokenizer_model_path, mode=\"wb\") as fp:\n fp.write(source_spm.SerializeToString())\n\n target_tokenizer = LlamaTokenizer(vocab_file=target_tokenizer_model_path)\n target_tokenizer.save_pretrained(save_directory=target_tokenizer_dir)\n logger.info(f\"Successfully save expand tokenizer to {target_tokenizer_dir}\")\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--source_tokenizer_dir\", type=str, required=True, default=None, help=\"Source tokenizer directory\"\n )\n parser.add_argument(\n \"--target_tokenizer_dir\", type=str, required=True, default=None, help=\"Target tokenizer directory\"\n )\n parser.add_argument(\n \"--expand_tokens_file\",\n type=str,\n required=True,\n default=None,\n help=\"Path of the file containing tokens to be extended\",\n )\n args = parser.parse_args()\n\n expand_tokens = []\n with open(file=args.expand_tokens_file, mode=\"r\", encoding=\"utf-8\") as fp_reader:\n for line in fp_reader:\n item = json.loads(line)\n # e.g., {\"piece\": \"\u4f60\u597d\"}\n token = item[\"piece\"]\n if token in 
expand_tokens:\n continue\n expand_tokens.append(token)\n expand_tokens.sort(key=lambda t: len(t), reverse=False)\n\n expand_vocab_tokenizer(\n source_tokenizer_dir=args.source_tokenizer_dir,\n target_tokenizer_dir=args.target_tokenizer_dir,\n new_tokens=expand_tokens,\n )\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "applications/Colossal-LLaMA-2/colossal_llama2/tokenizer/init_tokenizer.py"}]}
num_tokens: 1,249
num_tokens_diff: 191
problem_id: gh_patches_debug_16830
source: rasdani/github-patches
task_type: git_diff
in_source_id: voicepaw__so-vits-svc-fork-45
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Prepare model for inference **Is your feature request related to a problem? Please describe.** The first time inference occurs takes the longest, where as the next inferences afterwards are faster. Since normally the first time is in the sounddevice callback, it's likely that audio will not be processed in time and will end up delayed. **Describe the solution you'd like** After loading the model, run an initial inference with some dummy data, perhaps torch.zeros of appropriate sizes. **Additional context** On my computer with a RTX 3050, the first time inference takes about 3 seconds to complete. Otherwise I get a Realtime coef of ~28 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/so_vits_svc_fork/inference_main.py` Content: ``` 1 from __future__ import annotations 2 3 from logging import getLogger 4 from pathlib import Path 5 from typing import Literal 6 7 import librosa 8 import numpy as np 9 import soundfile 10 import torch 11 12 from .inference.infer_tool import RealtimeVC, RealtimeVC2, Svc 13 14 LOG = getLogger(__name__) 15 16 17 def infer( 18 *, 19 # paths 20 input_path: Path | str, 21 output_path: Path | str, 22 model_path: Path | str, 23 config_path: Path | str, 24 # svc config 25 speaker: int | str, 26 cluster_model_path: Path | str | None = None, 27 transpose: int = 0, 28 auto_predict_f0: bool = False, 29 cluster_infer_ratio: float = 0, 30 noise_scale: float = 0.4, 31 f0_method: Literal["crepe", "parselmouth", "dio", "harvest"] = "crepe", 32 # slice config 33 db_thresh: int = -40, 34 pad_seconds: float = 0.5, 35 chunk_seconds: float = 0.5, 36 absolute_thresh: bool = False, 37 device: Literal["cpu", "cuda"] = "cuda" if torch.cuda.is_available() else "cpu", 38 ): 39 model_path = Path(model_path) 40 output_path = Path(output_path) 41 input_path = Path(input_path) 42 config_path = Path(config_path) 43 cluster_model_path = Path(cluster_model_path) if cluster_model_path else None 44 svc_model = Svc( 45 net_g_path=model_path.as_posix(), 46 config_path=config_path.as_posix(), 47 cluster_model_path=cluster_model_path.as_posix() 48 if cluster_model_path 49 else None, 50 device=device, 51 ) 52 53 audio, _ = librosa.load(input_path, sr=svc_model.target_sample) 54 audio = svc_model.infer_silence( 55 audio.astype(np.float32), 56 speaker=speaker, 57 transpose=transpose, 58 auto_predict_f0=auto_predict_f0, 59 cluster_infer_ratio=cluster_infer_ratio, 60 noise_scale=noise_scale, 61 f0_method=f0_method, 62 db_thresh=db_thresh, 63 pad_seconds=pad_seconds, 64 chunk_seconds=chunk_seconds, 65 absolute_thresh=absolute_thresh, 66 ) 67 68 soundfile.write(output_path, audio, svc_model.target_sample) 69 70 71 def realtime( 72 *, 73 # paths 74 model_path: Path | str, 75 config_path: Path | str, 76 # svc config 77 speaker: str, 78 cluster_model_path: Path | str | None = None, 79 transpose: int = 0, 80 auto_predict_f0: bool = False, 81 cluster_infer_ratio: float = 0, 82 noise_scale: float = 0.4, 83 f0_method: Literal["crepe", "parselmouth", "dio", "harvest"] = "crepe", 84 # slice config 85 db_thresh: int = -40, 86 pad_seconds: float = 0.5, 87 chunk_seconds: float = 0.5, 88 # realtime config 89 crossfade_seconds: float = 0.05, 90 block_seconds: float = 0.5, 91 version: int = 2, 92 input_device: int | str | None = None, 93 output_device: int | str | None = None, 94 device: Literal["cpu", "cuda"] = 
"cuda" if torch.cuda.is_available() else "cpu", 95 ): 96 import sounddevice as sd 97 98 model_path = Path(model_path) 99 config_path = Path(config_path) 100 cluster_model_path = Path(cluster_model_path) if cluster_model_path else None 101 svc_model = Svc( 102 net_g_path=model_path.as_posix(), 103 config_path=config_path.as_posix(), 104 cluster_model_path=cluster_model_path.as_posix() 105 if cluster_model_path 106 else None, 107 device=device, 108 ) 109 if version == 1: 110 model = RealtimeVC( 111 svc_model=svc_model, 112 crossfade_len=int(crossfade_seconds * svc_model.target_sample), 113 ) 114 else: 115 model = RealtimeVC2( 116 svc_model=svc_model, 117 ) 118 119 # LOG all device info 120 devices = sd.query_devices() 121 LOG.info(f"Device: {devices}") 122 if isinstance(input_device, str): 123 input_device_candidates = [ 124 i for i, d in enumerate(devices) if d["name"] == input_device 125 ] 126 if len(input_device_candidates) == 0: 127 LOG.warning(f"Input device {input_device} not found, using default") 128 input_device = None 129 else: 130 input_device = input_device_candidates[0] 131 if isinstance(output_device, str): 132 output_device_candidates = [ 133 i for i, d in enumerate(devices) if d["name"] == output_device 134 ] 135 if len(output_device_candidates) == 0: 136 LOG.warning(f"Output device {output_device} not found, using default") 137 output_device = None 138 else: 139 output_device = output_device_candidates[0] 140 if input_device is None or input_device >= len(devices): 141 input_device = sd.default.device[0] 142 if output_device is None or output_device >= len(devices): 143 output_device = sd.default.device[1] 144 LOG.info( 145 f"Input Device: {devices[input_device]['name']}, Output Device: {devices[output_device]['name']}" 146 ) 147 148 def callback( 149 indata: np.ndarray, 150 outdata: np.ndarray, 151 frames: int, 152 time: int, 153 status: sd.CallbackFlags, 154 ) -> None: 155 LOG.debug( 156 f"Frames: {frames}, Status: {status}, Shape: {indata.shape}, Time: {time}" 157 ) 158 159 kwargs = dict( 160 input_audio=indata.mean(axis=1).astype(np.float32), 161 # svc config 162 speaker=speaker, 163 transpose=transpose, 164 auto_predict_f0=auto_predict_f0, 165 cluster_infer_ratio=cluster_infer_ratio, 166 noise_scale=noise_scale, 167 f0_method=f0_method, 168 # slice config 169 db_thresh=db_thresh, 170 # pad_seconds=pad_seconds, 171 chunk_seconds=chunk_seconds, 172 ) 173 if version == 1: 174 kwargs["pad_seconds"] = pad_seconds 175 outdata[:] = model.process( 176 **kwargs, 177 ).reshape(-1, 1) 178 179 with sd.Stream( 180 device=(input_device, output_device), 181 channels=1, 182 callback=callback, 183 samplerate=svc_model.target_sample, 184 blocksize=int(block_seconds * svc_model.target_sample), 185 ): 186 while True: 187 sd.sleep(1) 188 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/so_vits_svc_fork/inference_main.py b/src/so_vits_svc_fork/inference_main.py --- a/src/so_vits_svc_fork/inference_main.py +++ b/src/so_vits_svc_fork/inference_main.py @@ -145,6 +145,21 @@ f"Input Device: {devices[input_device]['name']}, Output Device: {devices[output_device]['name']}" ) + # the model realtime coef is somewhat significantly low only in the first inference + # there could be no better way to warm up the model than to do a dummy inference + # (there are not differences in the behavior of the model between the first and the later inferences) + # so we do a dummy inference to warm up the model (1 second of audio) + LOG.info("Warming up the model...") + svc_model.infer( + speaker=speaker, + transpose=transpose, + auto_predict_f0=auto_predict_f0, + cluster_infer_ratio=cluster_infer_ratio, + noise_scale=noise_scale, + f0_method=f0_method, + audio=np.zeros(svc_model.target_sample, dtype=np.float32), + ) + def callback( indata: np.ndarray, outdata: np.ndarray,
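The change in the diff above addresses the warm-up problem described in the issue: one throwaway inference on silence right after loading pays the one-time CUDA and kernel-initialization cost before the audio callback ever fires. The same idea, sketched against a generic PyTorch module rather than the project's `Svc` class (treat the signature as an assumption; the real project calls `Svc.infer()` with `np.zeros`), looks like this:

```python
# Assumed generic torch.nn.Module; only the warm-up pattern itself is the point.
import torch


def warm_up(model: torch.nn.Module, sample_rate: int = 44100, device: str = "cpu") -> None:
    model.eval()
    dummy = torch.zeros(1, sample_rate, dtype=torch.float32, device=device)  # ~1 s of silence
    with torch.no_grad():
        model(dummy)  # output is discarded; only the initialization side effect matters
```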
{"golden_diff": "diff --git a/src/so_vits_svc_fork/inference_main.py b/src/so_vits_svc_fork/inference_main.py\n--- a/src/so_vits_svc_fork/inference_main.py\n+++ b/src/so_vits_svc_fork/inference_main.py\n@@ -145,6 +145,21 @@\n f\"Input Device: {devices[input_device]['name']}, Output Device: {devices[output_device]['name']}\"\n )\n \n+ # the model realtime coef is somewhat significantly low only in the first inference\n+ # there could be no better way to warm up the model than to do a dummy inference\n+ # (there are not differences in the behavior of the model between the first and the later inferences)\n+ # so we do a dummy inference to warm up the model (1 second of audio)\n+ LOG.info(\"Warming up the model...\")\n+ svc_model.infer(\n+ speaker=speaker,\n+ transpose=transpose,\n+ auto_predict_f0=auto_predict_f0,\n+ cluster_infer_ratio=cluster_infer_ratio,\n+ noise_scale=noise_scale,\n+ f0_method=f0_method,\n+ audio=np.zeros(svc_model.target_sample, dtype=np.float32),\n+ )\n+\n def callback(\n indata: np.ndarray,\n outdata: np.ndarray,\n", "issue": "Prepare model for inference\n**Is your feature request related to a problem? Please describe.**\r\nThe first time inference occurs takes the longest, where as the next inferences afterwards are faster. Since normally the first time is in the sounddevice callback, it's likely that audio will not be processed in time and will end up delayed.\r\n\r\n**Describe the solution you'd like**\r\nAfter loading the model, run an initial inference with some dummy data, perhaps torch.zeros of appropriate sizes.\r\n\r\n**Additional context**\r\nOn my computer with a RTX 3050, the first time inference takes about 3 seconds to complete. Otherwise I get a Realtime coef of ~28\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nfrom logging import getLogger\nfrom pathlib import Path\nfrom typing import Literal\n\nimport librosa\nimport numpy as np\nimport soundfile\nimport torch\n\nfrom .inference.infer_tool import RealtimeVC, RealtimeVC2, Svc\n\nLOG = getLogger(__name__)\n\n\ndef infer(\n *,\n # paths\n input_path: Path | str,\n output_path: Path | str,\n model_path: Path | str,\n config_path: Path | str,\n # svc config\n speaker: int | str,\n cluster_model_path: Path | str | None = None,\n transpose: int = 0,\n auto_predict_f0: bool = False,\n cluster_infer_ratio: float = 0,\n noise_scale: float = 0.4,\n f0_method: Literal[\"crepe\", \"parselmouth\", \"dio\", \"harvest\"] = \"crepe\",\n # slice config\n db_thresh: int = -40,\n pad_seconds: float = 0.5,\n chunk_seconds: float = 0.5,\n absolute_thresh: bool = False,\n device: Literal[\"cpu\", \"cuda\"] = \"cuda\" if torch.cuda.is_available() else \"cpu\",\n):\n model_path = Path(model_path)\n output_path = Path(output_path)\n input_path = Path(input_path)\n config_path = Path(config_path)\n cluster_model_path = Path(cluster_model_path) if cluster_model_path else None\n svc_model = Svc(\n net_g_path=model_path.as_posix(),\n config_path=config_path.as_posix(),\n cluster_model_path=cluster_model_path.as_posix()\n if cluster_model_path\n else None,\n device=device,\n )\n\n audio, _ = librosa.load(input_path, sr=svc_model.target_sample)\n audio = svc_model.infer_silence(\n audio.astype(np.float32),\n speaker=speaker,\n transpose=transpose,\n auto_predict_f0=auto_predict_f0,\n cluster_infer_ratio=cluster_infer_ratio,\n noise_scale=noise_scale,\n f0_method=f0_method,\n db_thresh=db_thresh,\n pad_seconds=pad_seconds,\n chunk_seconds=chunk_seconds,\n absolute_thresh=absolute_thresh,\n )\n\n 
soundfile.write(output_path, audio, svc_model.target_sample)\n\n\ndef realtime(\n *,\n # paths\n model_path: Path | str,\n config_path: Path | str,\n # svc config\n speaker: str,\n cluster_model_path: Path | str | None = None,\n transpose: int = 0,\n auto_predict_f0: bool = False,\n cluster_infer_ratio: float = 0,\n noise_scale: float = 0.4,\n f0_method: Literal[\"crepe\", \"parselmouth\", \"dio\", \"harvest\"] = \"crepe\",\n # slice config\n db_thresh: int = -40,\n pad_seconds: float = 0.5,\n chunk_seconds: float = 0.5,\n # realtime config\n crossfade_seconds: float = 0.05,\n block_seconds: float = 0.5,\n version: int = 2,\n input_device: int | str | None = None,\n output_device: int | str | None = None,\n device: Literal[\"cpu\", \"cuda\"] = \"cuda\" if torch.cuda.is_available() else \"cpu\",\n):\n import sounddevice as sd\n\n model_path = Path(model_path)\n config_path = Path(config_path)\n cluster_model_path = Path(cluster_model_path) if cluster_model_path else None\n svc_model = Svc(\n net_g_path=model_path.as_posix(),\n config_path=config_path.as_posix(),\n cluster_model_path=cluster_model_path.as_posix()\n if cluster_model_path\n else None,\n device=device,\n )\n if version == 1:\n model = RealtimeVC(\n svc_model=svc_model,\n crossfade_len=int(crossfade_seconds * svc_model.target_sample),\n )\n else:\n model = RealtimeVC2(\n svc_model=svc_model,\n )\n\n # LOG all device info\n devices = sd.query_devices()\n LOG.info(f\"Device: {devices}\")\n if isinstance(input_device, str):\n input_device_candidates = [\n i for i, d in enumerate(devices) if d[\"name\"] == input_device\n ]\n if len(input_device_candidates) == 0:\n LOG.warning(f\"Input device {input_device} not found, using default\")\n input_device = None\n else:\n input_device = input_device_candidates[0]\n if isinstance(output_device, str):\n output_device_candidates = [\n i for i, d in enumerate(devices) if d[\"name\"] == output_device\n ]\n if len(output_device_candidates) == 0:\n LOG.warning(f\"Output device {output_device} not found, using default\")\n output_device = None\n else:\n output_device = output_device_candidates[0]\n if input_device is None or input_device >= len(devices):\n input_device = sd.default.device[0]\n if output_device is None or output_device >= len(devices):\n output_device = sd.default.device[1]\n LOG.info(\n f\"Input Device: {devices[input_device]['name']}, Output Device: {devices[output_device]['name']}\"\n )\n\n def callback(\n indata: np.ndarray,\n outdata: np.ndarray,\n frames: int,\n time: int,\n status: sd.CallbackFlags,\n ) -> None:\n LOG.debug(\n f\"Frames: {frames}, Status: {status}, Shape: {indata.shape}, Time: {time}\"\n )\n\n kwargs = dict(\n input_audio=indata.mean(axis=1).astype(np.float32),\n # svc config\n speaker=speaker,\n transpose=transpose,\n auto_predict_f0=auto_predict_f0,\n cluster_infer_ratio=cluster_infer_ratio,\n noise_scale=noise_scale,\n f0_method=f0_method,\n # slice config\n db_thresh=db_thresh,\n # pad_seconds=pad_seconds,\n chunk_seconds=chunk_seconds,\n )\n if version == 1:\n kwargs[\"pad_seconds\"] = pad_seconds\n outdata[:] = model.process(\n **kwargs,\n ).reshape(-1, 1)\n\n with sd.Stream(\n device=(input_device, output_device),\n channels=1,\n callback=callback,\n samplerate=svc_model.target_sample,\n blocksize=int(block_seconds * svc_model.target_sample),\n ):\n while True:\n sd.sleep(1)\n", "path": "src/so_vits_svc_fork/inference_main.py"}], "after_files": [{"content": "from __future__ import annotations\n\nfrom logging import getLogger\nfrom pathlib import 
Path\nfrom typing import Literal\n\nimport librosa\nimport numpy as np\nimport soundfile\nimport torch\n\nfrom .inference.infer_tool import RealtimeVC, RealtimeVC2, Svc\n\nLOG = getLogger(__name__)\n\n\ndef infer(\n *,\n # paths\n input_path: Path | str,\n output_path: Path | str,\n model_path: Path | str,\n config_path: Path | str,\n # svc config\n speaker: int | str,\n cluster_model_path: Path | str | None = None,\n transpose: int = 0,\n auto_predict_f0: bool = False,\n cluster_infer_ratio: float = 0,\n noise_scale: float = 0.4,\n f0_method: Literal[\"crepe\", \"parselmouth\", \"dio\", \"harvest\"] = \"crepe\",\n # slice config\n db_thresh: int = -40,\n pad_seconds: float = 0.5,\n chunk_seconds: float = 0.5,\n absolute_thresh: bool = False,\n device: Literal[\"cpu\", \"cuda\"] = \"cuda\" if torch.cuda.is_available() else \"cpu\",\n):\n model_path = Path(model_path)\n output_path = Path(output_path)\n input_path = Path(input_path)\n config_path = Path(config_path)\n cluster_model_path = Path(cluster_model_path) if cluster_model_path else None\n svc_model = Svc(\n net_g_path=model_path.as_posix(),\n config_path=config_path.as_posix(),\n cluster_model_path=cluster_model_path.as_posix()\n if cluster_model_path\n else None,\n device=device,\n )\n\n audio, _ = librosa.load(input_path, sr=svc_model.target_sample)\n audio = svc_model.infer_silence(\n audio.astype(np.float32),\n speaker=speaker,\n transpose=transpose,\n auto_predict_f0=auto_predict_f0,\n cluster_infer_ratio=cluster_infer_ratio,\n noise_scale=noise_scale,\n f0_method=f0_method,\n db_thresh=db_thresh,\n pad_seconds=pad_seconds,\n chunk_seconds=chunk_seconds,\n absolute_thresh=absolute_thresh,\n )\n\n soundfile.write(output_path, audio, svc_model.target_sample)\n\n\ndef realtime(\n *,\n # paths\n model_path: Path | str,\n config_path: Path | str,\n # svc config\n speaker: str,\n cluster_model_path: Path | str | None = None,\n transpose: int = 0,\n auto_predict_f0: bool = False,\n cluster_infer_ratio: float = 0,\n noise_scale: float = 0.4,\n f0_method: Literal[\"crepe\", \"parselmouth\", \"dio\", \"harvest\"] = \"crepe\",\n # slice config\n db_thresh: int = -40,\n pad_seconds: float = 0.5,\n chunk_seconds: float = 0.5,\n # realtime config\n crossfade_seconds: float = 0.05,\n block_seconds: float = 0.5,\n version: int = 2,\n input_device: int | str | None = None,\n output_device: int | str | None = None,\n device: Literal[\"cpu\", \"cuda\"] = \"cuda\" if torch.cuda.is_available() else \"cpu\",\n):\n import sounddevice as sd\n\n model_path = Path(model_path)\n config_path = Path(config_path)\n cluster_model_path = Path(cluster_model_path) if cluster_model_path else None\n svc_model = Svc(\n net_g_path=model_path.as_posix(),\n config_path=config_path.as_posix(),\n cluster_model_path=cluster_model_path.as_posix()\n if cluster_model_path\n else None,\n device=device,\n )\n if version == 1:\n model = RealtimeVC(\n svc_model=svc_model,\n crossfade_len=int(crossfade_seconds * svc_model.target_sample),\n )\n else:\n model = RealtimeVC2(\n svc_model=svc_model,\n )\n\n # LOG all device info\n devices = sd.query_devices()\n LOG.info(f\"Device: {devices}\")\n if isinstance(input_device, str):\n input_device_candidates = [\n i for i, d in enumerate(devices) if d[\"name\"] == input_device\n ]\n if len(input_device_candidates) == 0:\n LOG.warning(f\"Input device {input_device} not found, using default\")\n input_device = None\n else:\n input_device = input_device_candidates[0]\n if isinstance(output_device, str):\n output_device_candidates = [\n i 
for i, d in enumerate(devices) if d[\"name\"] == output_device\n ]\n if len(output_device_candidates) == 0:\n LOG.warning(f\"Output device {output_device} not found, using default\")\n output_device = None\n else:\n output_device = output_device_candidates[0]\n if input_device is None or input_device >= len(devices):\n input_device = sd.default.device[0]\n if output_device is None or output_device >= len(devices):\n output_device = sd.default.device[1]\n LOG.info(\n f\"Input Device: {devices[input_device]['name']}, Output Device: {devices[output_device]['name']}\"\n )\n\n # the model realtime coef is somewhat significantly low only in the first inference\n # there could be no better way to warm up the model than to do a dummy inference\n # (there are not differences in the behavior of the model between the first and the later inferences)\n # so we do a dummy inference to warm up the model (1 second of audio)\n LOG.info(\"Warming up the model...\")\n svc_model.infer(\n speaker=speaker,\n transpose=transpose,\n auto_predict_f0=auto_predict_f0,\n cluster_infer_ratio=cluster_infer_ratio,\n noise_scale=noise_scale,\n f0_method=f0_method,\n audio=np.zeros(svc_model.target_sample, dtype=np.float32),\n )\n\n def callback(\n indata: np.ndarray,\n outdata: np.ndarray,\n frames: int,\n time: int,\n status: sd.CallbackFlags,\n ) -> None:\n LOG.debug(\n f\"Frames: {frames}, Status: {status}, Shape: {indata.shape}, Time: {time}\"\n )\n\n kwargs = dict(\n input_audio=indata.mean(axis=1).astype(np.float32),\n # svc config\n speaker=speaker,\n transpose=transpose,\n auto_predict_f0=auto_predict_f0,\n cluster_infer_ratio=cluster_infer_ratio,\n noise_scale=noise_scale,\n f0_method=f0_method,\n # slice config\n db_thresh=db_thresh,\n # pad_seconds=pad_seconds,\n chunk_seconds=chunk_seconds,\n )\n if version == 1:\n kwargs[\"pad_seconds\"] = pad_seconds\n outdata[:] = model.process(\n **kwargs,\n ).reshape(-1, 1)\n\n with sd.Stream(\n device=(input_device, output_device),\n channels=1,\n callback=callback,\n samplerate=svc_model.target_sample,\n blocksize=int(block_seconds * svc_model.target_sample),\n ):\n while True:\n sd.sleep(1)\n", "path": "src/so_vits_svc_fork/inference_main.py"}]}
2,300
295
gh_patches_debug_7274
rasdani/github-patches
git_diff
cupy__cupy-186
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Make cupy.sort support arrays with rank two or more. # Background Arrays sorted with `cupy.sort` operation have some properties such as dtype, rank, sorting axis and C/F-contiguousness. Currently, `cupy.sort` supports sorting arrays only with the rank of one because of its implementation reason, see #55. # Problem This issue addresses a problem that makes `cupy.sort` support sorting arrays with the rank of two or more, with the last axis and C-contiguousness. # Approach **Rank two** For an array with the rank of two, ``` [[4, 3] [2, 1]] ``` treating the array as flattened one, `[4, 3, 2 ,1]`, and providing the following comparator in pseudo code to underlying Thrust library: ``` if floor(i / 2) < floor(j / 2) then return true; else if floor(i / 2) > floor(j / 2) then return false; else return data[i] < data[j]; ``` where `i` and `j` are array indices, and `data[i]` represents `i` th element of array `data`, we get the C-contiguous array sorted with the last axis. ``` [[3, 4] [1, 2]] ``` **Rank N** Generalized to the rank of N with shape `(d_0, d_1, ..., d_n-1)`, the following comparator works: ``` if floor(i / d_n-1) < floor(j / d_n-1) then return true; else if floor(i / d_n-1) > floor(j / d_n-1) then return false; else return data[i] < data[j]; ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `cupy/sorting/sort.py` Content: ``` 1 import cupy 2 import numpy 3 4 if cupy.cuda.thrust_enabled: 5 from cupy.cuda import thrust 6 7 8 def sort(a): 9 """Returns a sorted copy of an array with a stable sorting algorithm. 10 11 Args: 12 a (cupy.ndarray): Array to be sorted. 13 14 Returns: 15 cupy.ndarray: Array of the same type and shape as ``a``. 16 17 .. note:: 18 For its implementation reason, ``cupy.sort`` currently supports only 19 arrays with their rank of one and does not support ``axis``, ``kind`` 20 and ``order`` parameters that ``numpy.sort`` does support. 21 22 .. seealso:: :func:`numpy.sort` 23 24 """ 25 ret = a.copy() 26 ret.sort() 27 return ret 28 29 30 def lexsort(keys): 31 """Perform an indirect sort using an array of keys. 32 33 Args: 34 keys (cupy.ndarray): ``(k, N)`` array containing ``k`` ``(N,)``-shaped 35 arrays. The ``k`` different "rows" to be sorted. The last row is 36 the primary sort key. 37 38 Returns: 39 cupy.ndarray: Array of indices that sort the keys. 40 41 .. note:: 42 For its implementation reason, ``cupy.lexsort`` currently supports only 43 keys with their rank of one or two and does not support ``axis`` 44 parameter that ``numpy.lexsort`` supports. 45 46 .. seealso:: :func:`numpy.lexsort` 47 48 """ 49 50 # TODO(takagi): Support axis argument. 51 52 if not cupy.cuda.thrust_enabled: 53 raise RuntimeError('Thrust is needed to use cupy.lexsort. Please ' 54 'install CUDA Toolkit with Thrust then reinstall ' 55 'CuPy after uninstalling it.') 56 57 if keys.ndim == (): 58 # as numpy.lexsort() raises 59 raise TypeError('need sequence of keys with len > 0 in lexsort') 60 61 if keys.ndim == 1: 62 return 0 63 64 # TODO(takagi): Support ranks of three or more. 
65 if keys.ndim > 2: 66 raise NotImplementedError('Keys with the rank of three or more is not ' 67 'supported in lexsort') 68 69 idx_array = cupy.ndarray(keys._shape[1:], dtype=numpy.intp) 70 k = keys._shape[0] 71 n = keys._shape[1] 72 thrust.lexsort(keys.dtype, idx_array.data.ptr, keys.data.ptr, k, n) 73 74 return idx_array 75 76 77 def argsort(a): 78 """Return the indices that would sort an array with a stable sorting. 79 80 Args: 81 a (cupy.ndarray): Array to sort. 82 83 Returns: 84 cupy.ndarray: Array of indices that sort ``a``. 85 86 .. note:: 87 For its implementation reason, ``cupy.argsort`` currently supports only 88 arrays with their rank of one and does not support ``axis``, ``kind`` 89 and ``order`` parameters that ``numpy.argsort`` supports. 90 91 .. seealso:: :func:`numpy.argsort` 92 93 """ 94 return a.argsort() 95 96 97 # TODO(okuta): Implement msort 98 99 100 # TODO(okuta): Implement sort_complex 101 102 103 # TODO(okuta): Implement partition 104 105 106 # TODO(okuta): Implement argpartition 107 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/cupy/sorting/sort.py b/cupy/sorting/sort.py --- a/cupy/sorting/sort.py +++ b/cupy/sorting/sort.py @@ -15,9 +15,9 @@ cupy.ndarray: Array of the same type and shape as ``a``. .. note:: - For its implementation reason, ``cupy.sort`` currently supports only - arrays with their rank of one and does not support ``axis``, ``kind`` - and ``order`` parameters that ``numpy.sort`` does support. + For its implementation reason, ``cupy.sort`` currently does not support + ``axis``, ``kind`` and ``order`` parameters that ``numpy.sort`` does + support. .. seealso:: :func:`numpy.sort`
{"golden_diff": "diff --git a/cupy/sorting/sort.py b/cupy/sorting/sort.py\n--- a/cupy/sorting/sort.py\n+++ b/cupy/sorting/sort.py\n@@ -15,9 +15,9 @@\n cupy.ndarray: Array of the same type and shape as ``a``.\n \n .. note::\n- For its implementation reason, ``cupy.sort`` currently supports only\n- arrays with their rank of one and does not support ``axis``, ``kind``\n- and ``order`` parameters that ``numpy.sort`` does support.\n+ For its implementation reason, ``cupy.sort`` currently does not support\n+ ``axis``, ``kind`` and ``order`` parameters that ``numpy.sort`` does\n+ support.\n \n .. seealso:: :func:`numpy.sort`\n", "issue": "Make cupy.sort support arrays with rank two or more.\n# Background\r\nArrays sorted with `cupy.sort` operation have some properties such as dtype, rank, sorting axis and C/F-contiguousness. Currently, `cupy.sort` supports sorting arrays only with the rank of one because of its implementation reason, see #55.\r\n\r\n# Problem\r\nThis issue addresses a problem that makes `cupy.sort` support sorting arrays with the rank of two or more, with the last axis and C-contiguousness.\r\n\r\n# Approach\r\n\r\n**Rank two**\r\n\r\nFor an array with the rank of two, \r\n\r\n```\r\n[[4, 3]\r\n [2, 1]]\r\n```\r\n\r\ntreating the array as flattened one, `[4, 3, 2 ,1]`, and providing the following comparator in pseudo code to underlying Thrust library:\r\n\r\n```\r\nif floor(i / 2) < floor(j / 2) then return true;\r\nelse if floor(i / 2) > floor(j / 2) then return false;\r\nelse return data[i] < data[j];\r\n```\r\n\r\nwhere `i` and `j` are array indices, and `data[i]` represents `i` th element of array `data`,\r\n\r\nwe get the C-contiguous array sorted with the last axis.\r\n\r\n```\r\n[[3, 4]\r\n [1, 2]]\r\n```\r\n\r\n**Rank N**\r\n\r\nGeneralized to the rank of N with shape `(d_0, d_1, ..., d_n-1)`, the following comparator works:\r\n\r\n```\r\nif floor(i / d_n-1) < floor(j / d_n-1) then return true;\r\nelse if floor(i / d_n-1) > floor(j / d_n-1) then return false;\r\nelse return data[i] < data[j];\r\n```\r\n\n", "before_files": [{"content": "import cupy\nimport numpy\n\nif cupy.cuda.thrust_enabled:\n from cupy.cuda import thrust\n\n\ndef sort(a):\n \"\"\"Returns a sorted copy of an array with a stable sorting algorithm.\n\n Args:\n a (cupy.ndarray): Array to be sorted.\n\n Returns:\n cupy.ndarray: Array of the same type and shape as ``a``.\n\n .. note::\n For its implementation reason, ``cupy.sort`` currently supports only\n arrays with their rank of one and does not support ``axis``, ``kind``\n and ``order`` parameters that ``numpy.sort`` does support.\n\n .. seealso:: :func:`numpy.sort`\n\n \"\"\"\n ret = a.copy()\n ret.sort()\n return ret\n\n\ndef lexsort(keys):\n \"\"\"Perform an indirect sort using an array of keys.\n\n Args:\n keys (cupy.ndarray): ``(k, N)`` array containing ``k`` ``(N,)``-shaped\n arrays. The ``k`` different \"rows\" to be sorted. The last row is\n the primary sort key.\n\n Returns:\n cupy.ndarray: Array of indices that sort the keys.\n\n .. note::\n For its implementation reason, ``cupy.lexsort`` currently supports only\n keys with their rank of one or two and does not support ``axis``\n parameter that ``numpy.lexsort`` supports.\n\n .. seealso:: :func:`numpy.lexsort`\n\n \"\"\"\n\n # TODO(takagi): Support axis argument.\n\n if not cupy.cuda.thrust_enabled:\n raise RuntimeError('Thrust is needed to use cupy.lexsort. 
Please '\n 'install CUDA Toolkit with Thrust then reinstall '\n 'CuPy after uninstalling it.')\n\n if keys.ndim == ():\n # as numpy.lexsort() raises\n raise TypeError('need sequence of keys with len > 0 in lexsort')\n\n if keys.ndim == 1:\n return 0\n\n # TODO(takagi): Support ranks of three or more.\n if keys.ndim > 2:\n raise NotImplementedError('Keys with the rank of three or more is not '\n 'supported in lexsort')\n\n idx_array = cupy.ndarray(keys._shape[1:], dtype=numpy.intp)\n k = keys._shape[0]\n n = keys._shape[1]\n thrust.lexsort(keys.dtype, idx_array.data.ptr, keys.data.ptr, k, n)\n\n return idx_array\n\n\ndef argsort(a):\n \"\"\"Return the indices that would sort an array with a stable sorting.\n\n Args:\n a (cupy.ndarray): Array to sort.\n\n Returns:\n cupy.ndarray: Array of indices that sort ``a``.\n\n .. note::\n For its implementation reason, ``cupy.argsort`` currently supports only\n arrays with their rank of one and does not support ``axis``, ``kind``\n and ``order`` parameters that ``numpy.argsort`` supports.\n\n .. seealso:: :func:`numpy.argsort`\n\n \"\"\"\n return a.argsort()\n\n\n# TODO(okuta): Implement msort\n\n\n# TODO(okuta): Implement sort_complex\n\n\n# TODO(okuta): Implement partition\n\n\n# TODO(okuta): Implement argpartition\n", "path": "cupy/sorting/sort.py"}], "after_files": [{"content": "import cupy\nimport numpy\n\nif cupy.cuda.thrust_enabled:\n from cupy.cuda import thrust\n\n\ndef sort(a):\n \"\"\"Returns a sorted copy of an array with a stable sorting algorithm.\n\n Args:\n a (cupy.ndarray): Array to be sorted.\n\n Returns:\n cupy.ndarray: Array of the same type and shape as ``a``.\n\n .. note::\n For its implementation reason, ``cupy.sort`` currently does not support\n ``axis``, ``kind`` and ``order`` parameters that ``numpy.sort`` does\n support.\n\n .. seealso:: :func:`numpy.sort`\n\n \"\"\"\n ret = a.copy()\n ret.sort()\n return ret\n\n\ndef lexsort(keys):\n \"\"\"Perform an indirect sort using an array of keys.\n\n Args:\n keys (cupy.ndarray): ``(k, N)`` array containing ``k`` ``(N,)``-shaped\n arrays. The ``k`` different \"rows\" to be sorted. The last row is\n the primary sort key.\n\n Returns:\n cupy.ndarray: Array of indices that sort the keys.\n\n .. note::\n For its implementation reason, ``cupy.lexsort`` currently supports only\n keys with their rank of one or two and does not support ``axis``\n parameter that ``numpy.lexsort`` supports.\n\n .. seealso:: :func:`numpy.lexsort`\n\n \"\"\"\n\n # TODO(takagi): Support axis argument.\n\n if not cupy.cuda.thrust_enabled:\n raise RuntimeError('Thrust is needed to use cupy.lexsort. Please '\n 'install CUDA Toolkit with Thrust then reinstall '\n 'CuPy after uninstalling it.')\n\n if keys.ndim == ():\n # as numpy.lexsort() raises\n raise TypeError('need sequence of keys with len > 0 in lexsort')\n\n if keys.ndim == 1:\n return 0\n\n # TODO(takagi): Support ranks of three or more.\n if keys.ndim > 2:\n raise NotImplementedError('Keys with the rank of three or more is not '\n 'supported in lexsort')\n\n idx_array = cupy.ndarray(keys._shape[1:], dtype=numpy.intp)\n k = keys._shape[0]\n n = keys._shape[1]\n thrust.lexsort(keys.dtype, idx_array.data.ptr, keys.data.ptr, k, n)\n\n return idx_array\n\n\ndef argsort(a):\n \"\"\"Return the indices that would sort an array with a stable sorting.\n\n Args:\n a (cupy.ndarray): Array to sort.\n\n Returns:\n cupy.ndarray: Array of indices that sort ``a``.\n\n .. 
note::\n For its implementation reason, ``cupy.argsort`` currently supports only\n arrays with their rank of one and does not support ``axis``, ``kind``\n and ``order`` parameters that ``numpy.argsort`` supports.\n\n .. seealso:: :func:`numpy.argsort`\n\n \"\"\"\n return a.argsort()\n\n\n# TODO(okuta): Implement msort\n\n\n# TODO(okuta): Implement sort_complex\n\n\n# TODO(okuta): Implement partition\n\n\n# TODO(okuta): Implement argpartition\n", "path": "cupy/sorting/sort.py"}]}
1,552
176
gh_patches_debug_28265
rasdani/github-patches
git_diff
Lightning-AI__pytorch-lightning-424
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Support for retain_graph=True **Is your feature request related to a problem? Please describe.** Some models require retain_graph=True, but it's not possible to set it in the .backward() call inside of Trainer.__run_training_batch(...) **Describe the solution you'd like** Add train_graph member function the LightningModule have the trainer read this option and then pass it into the .backward() call. **Describe alternatives you've considered** Driving a version of Trainer to support retain_graph=True is tough because __run_training_batch and other functions are name-mangled. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pytorch_lightning/trainer/train_loop_mixin.py` Content: ``` 1 import numpy as np 2 3 try: 4 from apex import amp 5 6 APEX_AVAILABLE = True 7 except ImportError: 8 APEX_AVAILABLE = False 9 10 11 class TrainerTrainLoopMixin(object): 12 13 def train(self): 14 # run all epochs 15 for epoch_nb in range(self.current_epoch, self.max_nb_epochs): 16 # set seed for distributed sampler (enables shuffling for each epoch) 17 if self.use_ddp and hasattr(self.get_train_dataloader().sampler, 'set_epoch'): 18 self.get_train_dataloader().sampler.set_epoch(epoch_nb) 19 20 # get model 21 model = self.get_model() 22 23 # update training progress in trainer and model 24 model.current_epoch = epoch_nb 25 self.current_epoch = epoch_nb 26 self.total_batches = self.nb_training_batches + self.nb_val_batches 27 self.batch_loss_value = 0 # accumulated grads 28 29 # limit the number of batches to 1 in fast_dev_run 30 if self.fast_dev_run: 31 self.total_batches = 1 32 33 # init progress_bar when requested 34 if self.show_progress_bar: 35 nb_iterations = self.total_batches 36 37 # for iterable train loader, the progress bar never ends 38 if self.is_iterable_train_dataloader: 39 nb_iterations = float('inf') 40 self.progress_bar.reset(nb_iterations) 41 42 # changing gradient according accumulation_scheduler 43 self.accumulation_scheduler.on_epoch_begin(epoch_nb, self) 44 45 # ----------------- 46 # RUN TNG EPOCH 47 # ----------------- 48 self.run_training_epoch() 49 50 # update LR schedulers 51 if self.lr_schedulers is not None: 52 for lr_scheduler in self.lr_schedulers: 53 lr_scheduler.step(self.current_epoch) 54 55 # early stopping 56 met_min_epochs = epoch_nb > self.min_nb_epochs 57 if self.enable_early_stop and (met_min_epochs or self.fast_dev_run): 58 should_stop = self.early_stop_callback.on_epoch_end(epoch=epoch_nb, 59 logs=self.callback_metrics) 60 # stop training 61 stop = should_stop and met_min_epochs 62 if stop: 63 return 64 65 if self.logger is not None: 66 self.logger.finalize("success") 67 68 def run_training_epoch(self): 69 # before epoch hook 70 if self.is_function_implemented('on_epoch_start'): 71 model = self.get_model() 72 model.on_epoch_start() 73 74 # run epoch 75 for batch_nb, batch in enumerate(self.get_train_dataloader()): 76 self.batch_nb = batch_nb 77 78 model = self.get_model() 79 model.global_step = self.global_step 80 81 # --------------- 82 # RUN TRAIN STEP 83 # --------------- 84 output = self.run_training_batch(batch, batch_nb) 85 batch_result, grad_norm_dic, batch_step_metrics = output 86 87 # when returning -1 from train_step, we end epoch early 88 early_stop_epoch = batch_result == -1 89 90 # --------------- 91 # RUN VAL STEP 92 # --------------- 93 
is_val_check_batch = (batch_nb + 1) % self.val_check_batch == 0 94 can_check_epoch = (self.current_epoch + 1) % self.check_val_every_n_epoch == 0 95 should_check_val = ((is_val_check_batch or early_stop_epoch) and can_check_epoch) 96 97 # fast_dev_run always forces val checking after train batch 98 if self.fast_dev_run or should_check_val: 99 self.run_evaluation(test=self.testing) 100 101 # when logs should be saved 102 should_save_log = (batch_nb + 1) % self.log_save_interval == 0 or early_stop_epoch 103 if should_save_log or self.fast_dev_run: 104 if self.proc_rank == 0 and self.logger is not None: 105 self.logger.save() 106 107 # when metrics should be logged 108 should_log_metrics = batch_nb % self.row_log_interval == 0 or early_stop_epoch 109 if should_log_metrics or self.fast_dev_run: 110 # logs user requested information to logger 111 self.log_metrics(batch_step_metrics, grad_norm_dic) 112 113 self.global_step += 1 114 self.total_batch_nb += 1 115 116 # end epoch early 117 # stop when the flag is changed or we've gone past the amount 118 # requested in the batches 119 if early_stop_epoch or self.fast_dev_run: 120 break 121 122 # stop epoch if we limited nb batches 123 met_batch_limit = batch_nb >= self.nb_training_batches 124 if met_batch_limit: 125 break 126 127 # epoch end hook 128 if self.is_function_implemented('on_epoch_end'): 129 model = self.get_model() 130 model.on_epoch_end() 131 132 def run_training_batch(self, batch, batch_nb): 133 # track grad norms 134 grad_norm_dic = {} 135 136 # track all metrics for callbacks 137 all_callback_metrics = [] 138 139 # track metrics to log 140 all_log_metrics = [] 141 142 if batch is None: 143 return 0, grad_norm_dic 144 145 # hook 146 if self.is_function_implemented('on_batch_start'): 147 model_ref = self.get_model() 148 response = model_ref.on_batch_start(batch) 149 150 if response == -1: 151 return -1, grad_norm_dic 152 153 if self.show_progress_bar: 154 self.progress_bar.update(1) 155 156 # call training_step once per optimizer 157 for opt_idx, optimizer in enumerate(self.optimizers): 158 159 # wrap the forward step in a closure so second order methods work 160 def optimizer_closure(): 161 # forward pass 162 output = self.training_forward(batch, batch_nb, opt_idx) 163 closure_loss, progress_bar_metrics, log_metrics, callback_metrics = output 164 165 # track metrics for callbacks 166 all_callback_metrics.append(callback_metrics) 167 168 # track progress bar metrics 169 self.add_tqdm_metrics(progress_bar_metrics) 170 all_log_metrics.append(log_metrics) 171 172 # accumulate loss 173 # (if accumulate_grad_batches = 1 no effect) 174 closure_loss = closure_loss / self.accumulate_grad_batches 175 176 # backward pass 177 if self.use_amp: 178 with amp.scale_loss(closure_loss, optimizer) as scaled_loss: 179 scaled_loss.backward() 180 else: 181 closure_loss.backward() 182 183 # insert after step hook 184 if self.is_function_implemented('on_after_backward'): 185 model_ref = self.get_model() 186 model_ref.on_after_backward() 187 188 return closure_loss 189 190 # calculate loss 191 loss = optimizer_closure() 192 193 # nan grads 194 if self.print_nan_grads: 195 self.print_nan_gradients() 196 197 # track total loss for logging (avoid mem leaks) 198 self.batch_loss_value += loss.item() 199 200 # gradient update with accumulated gradients 201 if (self.batch_nb + 1) % self.accumulate_grad_batches == 0: 202 203 # track gradient norms when requested 204 if batch_nb % self.row_log_interval == 0: 205 if self.track_grad_norm > 0: 206 model = 
self.get_model() 207 grad_norm_dic = model.grad_norm(self.track_grad_norm) 208 209 # clip gradients 210 self.clip_gradients() 211 212 # calls .step(), .zero_grad() 213 # override function to modify this behavior 214 model = self.get_model() 215 model.optimizer_step(self.current_epoch, batch_nb, 216 optimizer, opt_idx, optimizer_closure) 217 218 # calculate running loss for display 219 self.running_loss.append(self.batch_loss_value) 220 self.batch_loss_value = 0 221 self.avg_loss = np.mean(self.running_loss[-100:]) 222 223 # update progress bar 224 if self.show_progress_bar: 225 # add model specific metrics 226 tqdm_metrics = self.training_tqdm_dict 227 self.progress_bar.set_postfix(**tqdm_metrics) 228 229 # activate batch end hook 230 if self.is_function_implemented('on_batch_end'): 231 model = self.get_model() 232 model.on_batch_end() 233 234 # collapse all metrics into one dict 235 all_log_metrics = {k: v for d in all_log_metrics for k, v in d.items()} 236 237 # track all metrics for callbacks 238 self.callback_metrics = {k: v for d in all_callback_metrics for k, v in d.items()} 239 240 return 0, grad_norm_dic, all_log_metrics 241 242 def training_forward(self, batch, batch_nb, opt_idx): 243 """ 244 Handle forward for each training case (distributed, single gpu, etc...) 245 :param batch: 246 :param batch_nb: 247 :return: 248 """ 249 # --------------- 250 # FORWARD 251 # --------------- 252 # enable not needing to add opt_idx to training_step 253 args = [batch, batch_nb] 254 if len(self.optimizers) > 1: 255 args.append(opt_idx) 256 257 if self.use_ddp or self.use_ddp2: 258 output = self.model(*args) 259 elif self.use_dp: 260 output = self.model(*args) 261 elif self.single_gpu: 262 gpu_id = 0 263 if type(self.data_parallel_device_ids) is list: 264 gpu_id = self.data_parallel_device_ids[0] 265 batch = self.transfer_batch_to_gpu(batch, gpu_id) 266 args[0] = batch 267 output = self.model.training_step(*args) 268 269 else: 270 output = self.model.training_step(*args) 271 272 # format and reduce outputs accordingly 273 output = self.process_output(output, train=True) 274 loss, progress_bar_metrics, log_metrics, callback_metrics = output 275 return loss, progress_bar_metrics, log_metrics, callback_metrics 276 ``` Path: `pytorch_lightning/root_module/hooks.py` Content: ``` 1 import torch 2 3 4 class ModelHooks(torch.nn.Module): 5 6 def on_sanity_check_start(self): 7 """ 8 Called before starting evaluate 9 :return: 10 """ 11 pass 12 13 def on_batch_start(self, batch): 14 pass 15 16 def on_batch_end(self): 17 pass 18 19 def on_epoch_start(self): 20 pass 21 22 def on_epoch_end(self): 23 pass 24 25 def on_pre_performance_check(self): 26 pass 27 28 def on_post_performance_check(self): 29 pass 30 31 def on_before_zero_grad(self, optimizer): 32 """ 33 Called after optimizer.step() and before optimizer.zero_grad() 34 35 for optimizer in optimizers: 36 optimizer.step() 37 model.on_before_zero_grad(optimizer) # < ---- called here 38 optimizer.zero_grad 39 40 :param optimizer: 41 :return: 42 """ 43 pass 44 45 def on_after_backward(self): 46 """ 47 Called after loss.backward() and before optimizers do anything 48 :return: 49 """ 50 pass 51 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pytorch_lightning/root_module/hooks.py b/pytorch_lightning/root_module/hooks.py --- a/pytorch_lightning/root_module/hooks.py +++ b/pytorch_lightning/root_module/hooks.py @@ -1,6 +1,14 @@ import torch +try: + from apex import amp + + APEX_AVAILABLE = True +except ImportError: + APEX_AVAILABLE = False + + class ModelHooks(torch.nn.Module): def on_sanity_check_start(self): @@ -48,3 +56,17 @@ :return: """ pass + + def backward(self, use_amp, loss, optimizer): + """ + Override backward with your own implementation if you need to + :param use_amp: Whether amp was requested or not + :param loss: Loss is already scaled by accumulated grads + :param optimizer: Current optimizer being used + :return: + """ + if use_amp: + with amp.scale_loss(loss, optimizer) as scaled_loss: + scaled_loss.backward() + else: + loss.backward() diff --git a/pytorch_lightning/trainer/train_loop_mixin.py b/pytorch_lightning/trainer/train_loop_mixin.py --- a/pytorch_lightning/trainer/train_loop_mixin.py +++ b/pytorch_lightning/trainer/train_loop_mixin.py @@ -174,11 +174,9 @@ closure_loss = closure_loss / self.accumulate_grad_batches # backward pass - if self.use_amp: - with amp.scale_loss(closure_loss, optimizer) as scaled_loss: - scaled_loss.backward() - else: - closure_loss.backward() + # done in hook so user can overwrite if needed + model_ref = self.get_model() + model_ref.backward(self.use_amp, closure_loss, optimizer) # insert after step hook if self.is_function_implemented('on_after_backward'):
{"golden_diff": "diff --git a/pytorch_lightning/root_module/hooks.py b/pytorch_lightning/root_module/hooks.py\n--- a/pytorch_lightning/root_module/hooks.py\n+++ b/pytorch_lightning/root_module/hooks.py\n@@ -1,6 +1,14 @@\n import torch\n \n \n+try:\n+ from apex import amp\n+\n+ APEX_AVAILABLE = True\n+except ImportError:\n+ APEX_AVAILABLE = False\n+\n+\n class ModelHooks(torch.nn.Module):\n \n def on_sanity_check_start(self):\n@@ -48,3 +56,17 @@\n :return:\n \"\"\"\n pass\n+\n+ def backward(self, use_amp, loss, optimizer):\n+ \"\"\"\n+ Override backward with your own implementation if you need to\n+ :param use_amp: Whether amp was requested or not\n+ :param loss: Loss is already scaled by accumulated grads\n+ :param optimizer: Current optimizer being used\n+ :return:\n+ \"\"\"\n+ if use_amp:\n+ with amp.scale_loss(loss, optimizer) as scaled_loss:\n+ scaled_loss.backward()\n+ else:\n+ loss.backward()\ndiff --git a/pytorch_lightning/trainer/train_loop_mixin.py b/pytorch_lightning/trainer/train_loop_mixin.py\n--- a/pytorch_lightning/trainer/train_loop_mixin.py\n+++ b/pytorch_lightning/trainer/train_loop_mixin.py\n@@ -174,11 +174,9 @@\n closure_loss = closure_loss / self.accumulate_grad_batches\n \n # backward pass\n- if self.use_amp:\n- with amp.scale_loss(closure_loss, optimizer) as scaled_loss:\n- scaled_loss.backward()\n- else:\n- closure_loss.backward()\n+ # done in hook so user can overwrite if needed\n+ model_ref = self.get_model()\n+ model_ref.backward(self.use_amp, closure_loss, optimizer)\n \n # insert after step hook\n if self.is_function_implemented('on_after_backward'):\n", "issue": "Support for retain_graph=True\n**Is your feature request related to a problem? Please describe.**\r\nSome models require retain_graph=True, but it's not possible to set it in the .backward() call inside of Trainer.__run_training_batch(...)\r\n\r\n**Describe the solution you'd like**\r\nAdd train_graph member function the LightningModule have the trainer read this option and then pass it into the .backward() call.\r\n\r\n**Describe alternatives you've considered**\r\nDriving a version of Trainer to support retain_graph=True is tough because __run_training_batch and other functions are name-mangled.\r\n\r\n\n", "before_files": [{"content": "import numpy as np\n\ntry:\n from apex import amp\n\n APEX_AVAILABLE = True\nexcept ImportError:\n APEX_AVAILABLE = False\n\n\nclass TrainerTrainLoopMixin(object):\n\n def train(self):\n # run all epochs\n for epoch_nb in range(self.current_epoch, self.max_nb_epochs):\n # set seed for distributed sampler (enables shuffling for each epoch)\n if self.use_ddp and hasattr(self.get_train_dataloader().sampler, 'set_epoch'):\n self.get_train_dataloader().sampler.set_epoch(epoch_nb)\n\n # get model\n model = self.get_model()\n\n # update training progress in trainer and model\n model.current_epoch = epoch_nb\n self.current_epoch = epoch_nb\n self.total_batches = self.nb_training_batches + self.nb_val_batches\n self.batch_loss_value = 0 # accumulated grads\n\n # limit the number of batches to 1 in fast_dev_run\n if self.fast_dev_run:\n self.total_batches = 1\n\n # init progress_bar when requested\n if self.show_progress_bar:\n nb_iterations = self.total_batches\n\n # for iterable train loader, the progress bar never ends\n if self.is_iterable_train_dataloader:\n nb_iterations = float('inf')\n self.progress_bar.reset(nb_iterations)\n\n # changing gradient according accumulation_scheduler\n self.accumulation_scheduler.on_epoch_begin(epoch_nb, self)\n\n # -----------------\n # 
RUN TNG EPOCH\n # -----------------\n self.run_training_epoch()\n\n # update LR schedulers\n if self.lr_schedulers is not None:\n for lr_scheduler in self.lr_schedulers:\n lr_scheduler.step(self.current_epoch)\n\n # early stopping\n met_min_epochs = epoch_nb > self.min_nb_epochs\n if self.enable_early_stop and (met_min_epochs or self.fast_dev_run):\n should_stop = self.early_stop_callback.on_epoch_end(epoch=epoch_nb,\n logs=self.callback_metrics)\n # stop training\n stop = should_stop and met_min_epochs\n if stop:\n return\n\n if self.logger is not None:\n self.logger.finalize(\"success\")\n\n def run_training_epoch(self):\n # before epoch hook\n if self.is_function_implemented('on_epoch_start'):\n model = self.get_model()\n model.on_epoch_start()\n\n # run epoch\n for batch_nb, batch in enumerate(self.get_train_dataloader()):\n self.batch_nb = batch_nb\n\n model = self.get_model()\n model.global_step = self.global_step\n\n # ---------------\n # RUN TRAIN STEP\n # ---------------\n output = self.run_training_batch(batch, batch_nb)\n batch_result, grad_norm_dic, batch_step_metrics = output\n\n # when returning -1 from train_step, we end epoch early\n early_stop_epoch = batch_result == -1\n\n # ---------------\n # RUN VAL STEP\n # ---------------\n is_val_check_batch = (batch_nb + 1) % self.val_check_batch == 0\n can_check_epoch = (self.current_epoch + 1) % self.check_val_every_n_epoch == 0\n should_check_val = ((is_val_check_batch or early_stop_epoch) and can_check_epoch)\n\n # fast_dev_run always forces val checking after train batch\n if self.fast_dev_run or should_check_val:\n self.run_evaluation(test=self.testing)\n\n # when logs should be saved\n should_save_log = (batch_nb + 1) % self.log_save_interval == 0 or early_stop_epoch\n if should_save_log or self.fast_dev_run:\n if self.proc_rank == 0 and self.logger is not None:\n self.logger.save()\n\n # when metrics should be logged\n should_log_metrics = batch_nb % self.row_log_interval == 0 or early_stop_epoch\n if should_log_metrics or self.fast_dev_run:\n # logs user requested information to logger\n self.log_metrics(batch_step_metrics, grad_norm_dic)\n\n self.global_step += 1\n self.total_batch_nb += 1\n\n # end epoch early\n # stop when the flag is changed or we've gone past the amount\n # requested in the batches\n if early_stop_epoch or self.fast_dev_run:\n break\n\n # stop epoch if we limited nb batches\n met_batch_limit = batch_nb >= self.nb_training_batches\n if met_batch_limit:\n break\n\n # epoch end hook\n if self.is_function_implemented('on_epoch_end'):\n model = self.get_model()\n model.on_epoch_end()\n\n def run_training_batch(self, batch, batch_nb):\n # track grad norms\n grad_norm_dic = {}\n\n # track all metrics for callbacks\n all_callback_metrics = []\n\n # track metrics to log\n all_log_metrics = []\n\n if batch is None:\n return 0, grad_norm_dic\n\n # hook\n if self.is_function_implemented('on_batch_start'):\n model_ref = self.get_model()\n response = model_ref.on_batch_start(batch)\n\n if response == -1:\n return -1, grad_norm_dic\n\n if self.show_progress_bar:\n self.progress_bar.update(1)\n\n # call training_step once per optimizer\n for opt_idx, optimizer in enumerate(self.optimizers):\n\n # wrap the forward step in a closure so second order methods work\n def optimizer_closure():\n # forward pass\n output = self.training_forward(batch, batch_nb, opt_idx)\n closure_loss, progress_bar_metrics, log_metrics, callback_metrics = output\n\n # track metrics for callbacks\n 
all_callback_metrics.append(callback_metrics)\n\n # track progress bar metrics\n self.add_tqdm_metrics(progress_bar_metrics)\n all_log_metrics.append(log_metrics)\n\n # accumulate loss\n # (if accumulate_grad_batches = 1 no effect)\n closure_loss = closure_loss / self.accumulate_grad_batches\n\n # backward pass\n if self.use_amp:\n with amp.scale_loss(closure_loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n closure_loss.backward()\n\n # insert after step hook\n if self.is_function_implemented('on_after_backward'):\n model_ref = self.get_model()\n model_ref.on_after_backward()\n\n return closure_loss\n\n # calculate loss\n loss = optimizer_closure()\n\n # nan grads\n if self.print_nan_grads:\n self.print_nan_gradients()\n\n # track total loss for logging (avoid mem leaks)\n self.batch_loss_value += loss.item()\n\n # gradient update with accumulated gradients\n if (self.batch_nb + 1) % self.accumulate_grad_batches == 0:\n\n # track gradient norms when requested\n if batch_nb % self.row_log_interval == 0:\n if self.track_grad_norm > 0:\n model = self.get_model()\n grad_norm_dic = model.grad_norm(self.track_grad_norm)\n\n # clip gradients\n self.clip_gradients()\n\n # calls .step(), .zero_grad()\n # override function to modify this behavior\n model = self.get_model()\n model.optimizer_step(self.current_epoch, batch_nb,\n optimizer, opt_idx, optimizer_closure)\n\n # calculate running loss for display\n self.running_loss.append(self.batch_loss_value)\n self.batch_loss_value = 0\n self.avg_loss = np.mean(self.running_loss[-100:])\n\n # update progress bar\n if self.show_progress_bar:\n # add model specific metrics\n tqdm_metrics = self.training_tqdm_dict\n self.progress_bar.set_postfix(**tqdm_metrics)\n\n # activate batch end hook\n if self.is_function_implemented('on_batch_end'):\n model = self.get_model()\n model.on_batch_end()\n\n # collapse all metrics into one dict\n all_log_metrics = {k: v for d in all_log_metrics for k, v in d.items()}\n\n # track all metrics for callbacks\n self.callback_metrics = {k: v for d in all_callback_metrics for k, v in d.items()}\n\n return 0, grad_norm_dic, all_log_metrics\n\n def training_forward(self, batch, batch_nb, opt_idx):\n \"\"\"\n Handle forward for each training case (distributed, single gpu, etc...)\n :param batch:\n :param batch_nb:\n :return:\n \"\"\"\n # ---------------\n # FORWARD\n # ---------------\n # enable not needing to add opt_idx to training_step\n args = [batch, batch_nb]\n if len(self.optimizers) > 1:\n args.append(opt_idx)\n\n if self.use_ddp or self.use_ddp2:\n output = self.model(*args)\n elif self.use_dp:\n output = self.model(*args)\n elif self.single_gpu:\n gpu_id = 0\n if type(self.data_parallel_device_ids) is list:\n gpu_id = self.data_parallel_device_ids[0]\n batch = self.transfer_batch_to_gpu(batch, gpu_id)\n args[0] = batch\n output = self.model.training_step(*args)\n\n else:\n output = self.model.training_step(*args)\n\n # format and reduce outputs accordingly\n output = self.process_output(output, train=True)\n loss, progress_bar_metrics, log_metrics, callback_metrics = output\n return loss, progress_bar_metrics, log_metrics, callback_metrics\n", "path": "pytorch_lightning/trainer/train_loop_mixin.py"}, {"content": "import torch\n\n\nclass ModelHooks(torch.nn.Module):\n\n def on_sanity_check_start(self):\n \"\"\"\n Called before starting evaluate\n :return:\n \"\"\"\n pass\n\n def on_batch_start(self, batch):\n pass\n\n def on_batch_end(self):\n pass\n\n def on_epoch_start(self):\n pass\n\n def 
on_epoch_end(self):\n pass\n\n def on_pre_performance_check(self):\n pass\n\n def on_post_performance_check(self):\n pass\n\n def on_before_zero_grad(self, optimizer):\n \"\"\"\n Called after optimizer.step() and before optimizer.zero_grad()\n\n for optimizer in optimizers:\n optimizer.step()\n model.on_before_zero_grad(optimizer) # < ---- called here\n optimizer.zero_grad\n\n :param optimizer:\n :return:\n \"\"\"\n pass\n\n def on_after_backward(self):\n \"\"\"\n Called after loss.backward() and before optimizers do anything\n :return:\n \"\"\"\n pass\n", "path": "pytorch_lightning/root_module/hooks.py"}], "after_files": [{"content": "import numpy as np\n\ntry:\n from apex import amp\n\n APEX_AVAILABLE = True\nexcept ImportError:\n APEX_AVAILABLE = False\n\n\nclass TrainerTrainLoopMixin(object):\n\n def train(self):\n # run all epochs\n for epoch_nb in range(self.current_epoch, self.max_nb_epochs):\n # set seed for distributed sampler (enables shuffling for each epoch)\n if self.use_ddp and hasattr(self.get_train_dataloader().sampler, 'set_epoch'):\n self.get_train_dataloader().sampler.set_epoch(epoch_nb)\n\n # get model\n model = self.get_model()\n\n # update training progress in trainer and model\n model.current_epoch = epoch_nb\n self.current_epoch = epoch_nb\n self.total_batches = self.nb_training_batches + self.nb_val_batches\n self.batch_loss_value = 0 # accumulated grads\n\n # limit the number of batches to 1 in fast_dev_run\n if self.fast_dev_run:\n self.total_batches = 1\n\n # init progress_bar when requested\n if self.show_progress_bar:\n nb_iterations = self.total_batches\n\n # for iterable train loader, the progress bar never ends\n if self.is_iterable_train_dataloader:\n nb_iterations = float('inf')\n self.progress_bar.reset(nb_iterations)\n\n # changing gradient according accumulation_scheduler\n self.accumulation_scheduler.on_epoch_begin(epoch_nb, self)\n\n # -----------------\n # RUN TNG EPOCH\n # -----------------\n self.run_training_epoch()\n\n # update LR schedulers\n if self.lr_schedulers is not None:\n for lr_scheduler in self.lr_schedulers:\n lr_scheduler.step(self.current_epoch)\n\n # early stopping\n met_min_epochs = epoch_nb > self.min_nb_epochs\n if self.enable_early_stop and (met_min_epochs or self.fast_dev_run):\n should_stop = self.early_stop_callback.on_epoch_end(epoch=epoch_nb,\n logs=self.callback_metrics)\n # stop training\n stop = should_stop and met_min_epochs\n if stop:\n return\n\n if self.logger is not None:\n self.logger.finalize(\"success\")\n\n def run_training_epoch(self):\n # before epoch hook\n if self.is_function_implemented('on_epoch_start'):\n model = self.get_model()\n model.on_epoch_start()\n\n # run epoch\n for batch_nb, batch in enumerate(self.get_train_dataloader()):\n self.batch_nb = batch_nb\n\n model = self.get_model()\n model.global_step = self.global_step\n\n # ---------------\n # RUN TRAIN STEP\n # ---------------\n output = self.run_training_batch(batch, batch_nb)\n batch_result, grad_norm_dic, batch_step_metrics = output\n\n # when returning -1 from train_step, we end epoch early\n early_stop_epoch = batch_result == -1\n\n # ---------------\n # RUN VAL STEP\n # ---------------\n is_val_check_batch = (batch_nb + 1) % self.val_check_batch == 0\n can_check_epoch = (self.current_epoch + 1) % self.check_val_every_n_epoch == 0\n should_check_val = ((is_val_check_batch or early_stop_epoch) and can_check_epoch)\n\n # fast_dev_run always forces val checking after train batch\n if self.fast_dev_run or should_check_val:\n 
self.run_evaluation(test=self.testing)\n\n # when logs should be saved\n should_save_log = (batch_nb + 1) % self.log_save_interval == 0 or early_stop_epoch\n if should_save_log or self.fast_dev_run:\n if self.proc_rank == 0 and self.logger is not None:\n self.logger.save()\n\n # when metrics should be logged\n should_log_metrics = batch_nb % self.row_log_interval == 0 or early_stop_epoch\n if should_log_metrics or self.fast_dev_run:\n # logs user requested information to logger\n self.log_metrics(batch_step_metrics, grad_norm_dic)\n\n self.global_step += 1\n self.total_batch_nb += 1\n\n # end epoch early\n # stop when the flag is changed or we've gone past the amount\n # requested in the batches\n if early_stop_epoch or self.fast_dev_run:\n break\n\n # stop epoch if we limited nb batches\n met_batch_limit = batch_nb >= self.nb_training_batches\n if met_batch_limit:\n break\n\n # epoch end hook\n if self.is_function_implemented('on_epoch_end'):\n model = self.get_model()\n model.on_epoch_end()\n\n def run_training_batch(self, batch, batch_nb):\n # track grad norms\n grad_norm_dic = {}\n\n # track all metrics for callbacks\n all_callback_metrics = []\n\n # track metrics to log\n all_log_metrics = []\n\n if batch is None:\n return 0, grad_norm_dic\n\n # hook\n if self.is_function_implemented('on_batch_start'):\n model_ref = self.get_model()\n response = model_ref.on_batch_start(batch)\n\n if response == -1:\n return -1, grad_norm_dic\n\n if self.show_progress_bar:\n self.progress_bar.update(1)\n\n # call training_step once per optimizer\n for opt_idx, optimizer in enumerate(self.optimizers):\n\n # wrap the forward step in a closure so second order methods work\n def optimizer_closure():\n # forward pass\n output = self.training_forward(batch, batch_nb, opt_idx)\n closure_loss, progress_bar_metrics, log_metrics, callback_metrics = output\n\n # track metrics for callbacks\n all_callback_metrics.append(callback_metrics)\n\n # track progress bar metrics\n self.add_tqdm_metrics(progress_bar_metrics)\n all_log_metrics.append(log_metrics)\n\n # accumulate loss\n # (if accumulate_grad_batches = 1 no effect)\n closure_loss = closure_loss / self.accumulate_grad_batches\n\n # backward pass\n # done in hook so user can overwrite if needed\n model_ref = self.get_model()\n model_ref.backward(self.use_amp, closure_loss, optimizer)\n\n # insert after step hook\n if self.is_function_implemented('on_after_backward'):\n model_ref = self.get_model()\n model_ref.on_after_backward()\n\n return closure_loss\n\n # calculate loss\n loss = optimizer_closure()\n\n # nan grads\n if self.print_nan_grads:\n self.print_nan_gradients()\n\n # track total loss for logging (avoid mem leaks)\n self.batch_loss_value += loss.item()\n\n # gradient update with accumulated gradients\n if (self.batch_nb + 1) % self.accumulate_grad_batches == 0:\n\n # track gradient norms when requested\n if batch_nb % self.row_log_interval == 0:\n if self.track_grad_norm > 0:\n model = self.get_model()\n grad_norm_dic = model.grad_norm(self.track_grad_norm)\n\n # clip gradients\n self.clip_gradients()\n\n # calls .step(), .zero_grad()\n # override function to modify this behavior\n model = self.get_model()\n model.optimizer_step(self.current_epoch, batch_nb,\n optimizer, opt_idx, optimizer_closure)\n\n # calculate running loss for display\n self.running_loss.append(self.batch_loss_value)\n self.batch_loss_value = 0\n self.avg_loss = np.mean(self.running_loss[-100:])\n\n # update progress bar\n if self.show_progress_bar:\n # add model specific 
metrics\n tqdm_metrics = self.training_tqdm_dict\n self.progress_bar.set_postfix(**tqdm_metrics)\n\n # activate batch end hook\n if self.is_function_implemented('on_batch_end'):\n model = self.get_model()\n model.on_batch_end()\n\n # collapse all metrics into one dict\n all_log_metrics = {k: v for d in all_log_metrics for k, v in d.items()}\n\n # track all metrics for callbacks\n self.callback_metrics = {k: v for d in all_callback_metrics for k, v in d.items()}\n\n return 0, grad_norm_dic, all_log_metrics\n\n def training_forward(self, batch, batch_nb, opt_idx):\n \"\"\"\n Handle forward for each training case (distributed, single gpu, etc...)\n :param batch:\n :param batch_nb:\n :return:\n \"\"\"\n # ---------------\n # FORWARD\n # ---------------\n # enable not needing to add opt_idx to training_step\n args = [batch, batch_nb]\n if len(self.optimizers) > 1:\n args.append(opt_idx)\n\n if self.use_ddp or self.use_ddp2:\n output = self.model(*args)\n elif self.use_dp:\n output = self.model(*args)\n elif self.single_gpu:\n gpu_id = 0\n if type(self.data_parallel_device_ids) is list:\n gpu_id = self.data_parallel_device_ids[0]\n batch = self.transfer_batch_to_gpu(batch, gpu_id)\n args[0] = batch\n output = self.model.training_step(*args)\n\n else:\n output = self.model.training_step(*args)\n\n # format and reduce outputs accordingly\n output = self.process_output(output, train=True)\n loss, progress_bar_metrics, log_metrics, callback_metrics = output\n return loss, progress_bar_metrics, log_metrics, callback_metrics\n", "path": "pytorch_lightning/trainer/train_loop_mixin.py"}, {"content": "import torch\n\n\ntry:\n from apex import amp\n\n APEX_AVAILABLE = True\nexcept ImportError:\n APEX_AVAILABLE = False\n\n\nclass ModelHooks(torch.nn.Module):\n\n def on_sanity_check_start(self):\n \"\"\"\n Called before starting evaluate\n :return:\n \"\"\"\n pass\n\n def on_batch_start(self, batch):\n pass\n\n def on_batch_end(self):\n pass\n\n def on_epoch_start(self):\n pass\n\n def on_epoch_end(self):\n pass\n\n def on_pre_performance_check(self):\n pass\n\n def on_post_performance_check(self):\n pass\n\n def on_before_zero_grad(self, optimizer):\n \"\"\"\n Called after optimizer.step() and before optimizer.zero_grad()\n\n for optimizer in optimizers:\n optimizer.step()\n model.on_before_zero_grad(optimizer) # < ---- called here\n optimizer.zero_grad\n\n :param optimizer:\n :return:\n \"\"\"\n pass\n\n def on_after_backward(self):\n \"\"\"\n Called after loss.backward() and before optimizers do anything\n :return:\n \"\"\"\n pass\n\n def backward(self, use_amp, loss, optimizer):\n \"\"\"\n Override backward with your own implementation if you need to\n :param use_amp: Whether amp was requested or not\n :param loss: Loss is already scaled by accumulated grads\n :param optimizer: Current optimizer being used\n :return:\n \"\"\"\n if use_amp:\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n", "path": "pytorch_lightning/root_module/hooks.py"}]}
3,443
429
gh_patches_debug_34051
rasdani/github-patches
git_diff
Netflix__lemur-4595
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Session timeout can be reduced Currently the JWT Session Token currently has an excessive session timeout. The length of the timeout can be reduced to lower the risk of an attacker gaining access. If a user leaves their computer unattended, a nearby attacker could access the user’s computer and any open applications. Automatically logging a user out after an extended period of inactivity can limit the time that an attacker could make use of any hijacked sessions. References: [OWASP's Session Management Cheat Sheet](https://cheatsheetseries.owasp.org/cheatsheets/Session_Management_Cheat_Sheet.html) [OWASP's Page on Session Timeout](https://owasp.org/www-community/Session_Timeout) --- The referenced issue was found via a pen test conducted in collaboration with [Infor](https://www.infor.com/) and [Cobalt.io](https://www.cobalt.io/) --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `lemur/auth/service.py` Content: ``` 1 """ 2 .. module: lemur.auth.service 3 :platform: Unix 4 :synopsis: This module contains all of the authentication duties for 5 lemur 6 :copyright: (c) 2018 by Netflix Inc., see AUTHORS for more 7 :license: Apache, see LICENSE for more details. 8 .. moduleauthor:: Kevin Glisson <[email protected]> 9 10 """ 11 import jwt 12 import json 13 import binascii 14 15 from functools import wraps 16 from datetime import datetime, timedelta 17 18 from flask import g, current_app, jsonify, request 19 20 from flask_restful import Resource 21 from flask_principal import identity_loaded, RoleNeed, UserNeed 22 23 from flask_principal import Identity, identity_changed 24 25 from cryptography.hazmat.backends import default_backend 26 from cryptography.hazmat.primitives import serialization 27 from cryptography.hazmat.primitives.asymmetric.rsa import RSAPublicNumbers 28 29 from lemur.users import service as user_service 30 from lemur.api_keys import service as api_key_service 31 from lemur.auth.permissions import AuthorityCreatorNeed, RoleMemberNeed 32 33 34 def get_rsa_public_key(n, e): 35 """ 36 Retrieve an RSA public key based on a module and exponent as provided by the JWKS format. 37 38 :param n: 39 :param e: 40 :return: a RSA Public Key in PEM format 41 """ 42 n = int(binascii.hexlify(jwt.utils.base64url_decode(bytes(n, "utf-8"))), 16) 43 e = int(binascii.hexlify(jwt.utils.base64url_decode(bytes(e, "utf-8"))), 16) 44 45 pub = RSAPublicNumbers(e, n).public_key(default_backend()) 46 return pub.public_bytes( 47 encoding=serialization.Encoding.PEM, 48 format=serialization.PublicFormat.SubjectPublicKeyInfo, 49 ) 50 51 52 def create_token(user, aid=None, ttl=None): 53 """ 54 Create a valid JWT for a given user/api key, this token is then used to authenticate 55 sessions until the token expires. 56 57 :param user: 58 :return: 59 """ 60 expiration_delta = timedelta( 61 days=int(current_app.config.get("LEMUR_TOKEN_EXPIRATION", 1)) 62 ) 63 payload = {"iat": datetime.utcnow(), "exp": datetime.utcnow() + expiration_delta} 64 65 # Handle Just a User ID & User Object. 66 if isinstance(user, int): 67 payload["sub"] = user 68 else: 69 payload["sub"] = user.id 70 if aid is not None: 71 payload["aid"] = aid 72 # Custom TTLs are only supported on Access Keys. 73 if ttl is not None and aid is not None: 74 # Tokens that are forever until revoked. 
75 if ttl == -1: 76 del payload["exp"] 77 else: 78 payload["exp"] = datetime.utcnow() + timedelta(days=ttl) 79 token = jwt.encode(payload, current_app.config["LEMUR_TOKEN_SECRET"]) 80 return token 81 82 83 def login_required(f): 84 """ 85 Validates the JWT and ensures that is has not expired and the user is still active. 86 87 :param f: 88 :return: 89 """ 90 91 @wraps(f) 92 def decorated_function(*args, **kwargs): 93 if not request.headers.get("Authorization"): 94 response = jsonify(message="Missing authorization header") 95 response.status_code = 401 96 return response 97 98 try: 99 token = request.headers.get("Authorization").split()[1] 100 except Exception as e: 101 return dict(message="Token is invalid"), 403 102 103 try: 104 header_data = fetch_token_header(token) 105 payload = jwt.decode(token, current_app.config["LEMUR_TOKEN_SECRET"], algorithms=[header_data["alg"]]) 106 except jwt.DecodeError: 107 return dict(message="Token is invalid"), 403 108 except jwt.ExpiredSignatureError: 109 return dict(message="Token has expired"), 403 110 except jwt.InvalidTokenError: 111 return dict(message="Token is invalid"), 403 112 113 if "aid" in payload: 114 access_key = api_key_service.get(payload["aid"]) 115 if access_key.revoked: 116 return dict(message="Token has been revoked"), 403 117 if access_key.ttl != -1: 118 current_time = datetime.utcnow() 119 # API key uses days 120 expired_time = datetime.fromtimestamp(access_key.issued_at) + timedelta(days=access_key.ttl) 121 if current_time >= expired_time: 122 return dict(message="Token has expired"), 403 123 if access_key.application_name: 124 g.caller_application = access_key.application_name 125 126 user = user_service.get(payload["sub"]) 127 128 if not user.active: 129 return dict(message="User is not currently active"), 403 130 131 g.current_user = user 132 133 if not g.current_user: 134 return dict(message="You are not logged in"), 403 135 136 # Tell Flask-Principal the identity changed 137 identity_changed.send( 138 current_app._get_current_object(), identity=Identity(g.current_user.id) 139 ) 140 141 return f(*args, **kwargs) 142 143 return decorated_function 144 145 146 def fetch_token_header(token): 147 """ 148 Fetch the header out of the JWT token. 149 150 :param token: 151 :return: :raise jwt.DecodeError: 152 """ 153 token = token.encode("utf-8") 154 try: 155 signing_input, crypto_segment = token.rsplit(b".", 1) 156 header_segment, payload_segment = signing_input.split(b".", 1) 157 except ValueError: 158 raise jwt.DecodeError("Not enough segments") 159 160 try: 161 return json.loads(jwt.utils.base64url_decode(header_segment).decode("utf-8")) 162 except TypeError as e: 163 current_app.logger.exception(e) 164 raise jwt.DecodeError("Invalid header padding") 165 166 167 @identity_loaded.connect 168 def on_identity_loaded(sender, identity): 169 """ 170 Sets the identity of a given option, assigns additional permissions based on 171 the role that the user is a part of. 
172 173 :param sender: 174 :param identity: 175 """ 176 # load the user 177 user = user_service.get(identity.id) 178 179 # add the UserNeed to the identity 180 identity.provides.add(UserNeed(identity.id)) 181 182 # identity with the roles that the user provides 183 if hasattr(user, "roles"): 184 for role in user.roles: 185 identity.provides.add(RoleNeed(role.name)) 186 identity.provides.add(RoleMemberNeed(role.id)) 187 188 # apply ownership for authorities 189 if hasattr(user, "authorities"): 190 for authority in user.authorities: 191 identity.provides.add(AuthorityCreatorNeed(authority.id)) 192 193 g.user = user 194 195 196 class AuthenticatedResource(Resource): 197 """ 198 Inherited by all resources that need to be protected by authentication. 199 """ 200 201 method_decorators = [login_required] 202 203 def __init__(self): 204 super(AuthenticatedResource, self).__init__() 205 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/lemur/auth/service.py b/lemur/auth/service.py --- a/lemur/auth/service.py +++ b/lemur/auth/service.py @@ -8,27 +8,23 @@ .. moduleauthor:: Kevin Glisson <[email protected]> """ -import jwt import json -import binascii - -from functools import wraps from datetime import datetime, timedelta +from functools import wraps -from flask import g, current_app, jsonify, request - -from flask_restful import Resource -from flask_principal import identity_loaded, RoleNeed, UserNeed - -from flask_principal import Identity, identity_changed - +import binascii +import jwt from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import serialization from cryptography.hazmat.primitives.asymmetric.rsa import RSAPublicNumbers +from flask import g, current_app, jsonify, request +from flask_principal import Identity, identity_changed +from flask_principal import identity_loaded, RoleNeed, UserNeed +from flask_restful import Resource -from lemur.users import service as user_service from lemur.api_keys import service as api_key_service from lemur.auth.permissions import AuthorityCreatorNeed, RoleMemberNeed +from lemur.users import service as user_service def get_rsa_public_key(n, e): @@ -57,9 +53,21 @@ :param user: :return: """ - expiration_delta = timedelta( - days=int(current_app.config.get("LEMUR_TOKEN_EXPIRATION", 1)) - ) + expiration_delta = timedelta(days=1) + custom_expiry = current_app.config.get("LEMUR_TOKEN_EXPIRATION") + if custom_expiry: + if isinstance(custom_expiry, str) and custom_expiry.endswith("m"): + expiration_delta = timedelta( + minutes=int(custom_expiry.rstrip("m")) + ) + elif isinstance(custom_expiry, str) and custom_expiry.endswith("h"): + expiration_delta = timedelta( + hours=int(custom_expiry.rstrip("h")) + ) + else: + expiration_delta = timedelta( + days=int(custom_expiry) + ) payload = {"iat": datetime.utcnow(), "exp": datetime.utcnow() + expiration_delta} # Handle Just a User ID & User Object.
{"golden_diff": "diff --git a/lemur/auth/service.py b/lemur/auth/service.py\n--- a/lemur/auth/service.py\n+++ b/lemur/auth/service.py\n@@ -8,27 +8,23 @@\n .. moduleauthor:: Kevin Glisson <[email protected]>\n \n \"\"\"\n-import jwt\n import json\n-import binascii\n-\n-from functools import wraps\n from datetime import datetime, timedelta\n+from functools import wraps\n \n-from flask import g, current_app, jsonify, request\n-\n-from flask_restful import Resource\n-from flask_principal import identity_loaded, RoleNeed, UserNeed\n-\n-from flask_principal import Identity, identity_changed\n-\n+import binascii\n+import jwt\n from cryptography.hazmat.backends import default_backend\n from cryptography.hazmat.primitives import serialization\n from cryptography.hazmat.primitives.asymmetric.rsa import RSAPublicNumbers\n+from flask import g, current_app, jsonify, request\n+from flask_principal import Identity, identity_changed\n+from flask_principal import identity_loaded, RoleNeed, UserNeed\n+from flask_restful import Resource\n \n-from lemur.users import service as user_service\n from lemur.api_keys import service as api_key_service\n from lemur.auth.permissions import AuthorityCreatorNeed, RoleMemberNeed\n+from lemur.users import service as user_service\n \n \n def get_rsa_public_key(n, e):\n@@ -57,9 +53,21 @@\n :param user:\n :return:\n \"\"\"\n- expiration_delta = timedelta(\n- days=int(current_app.config.get(\"LEMUR_TOKEN_EXPIRATION\", 1))\n- )\n+ expiration_delta = timedelta(days=1)\n+ custom_expiry = current_app.config.get(\"LEMUR_TOKEN_EXPIRATION\")\n+ if custom_expiry:\n+ if isinstance(custom_expiry, str) and custom_expiry.endswith(\"m\"):\n+ expiration_delta = timedelta(\n+ minutes=int(custom_expiry.rstrip(\"m\"))\n+ )\n+ elif isinstance(custom_expiry, str) and custom_expiry.endswith(\"h\"):\n+ expiration_delta = timedelta(\n+ hours=int(custom_expiry.rstrip(\"h\"))\n+ )\n+ else:\n+ expiration_delta = timedelta(\n+ days=int(custom_expiry)\n+ )\n payload = {\"iat\": datetime.utcnow(), \"exp\": datetime.utcnow() + expiration_delta}\n \n # Handle Just a User ID & User Object.\n", "issue": "Session timeout can be reduced\nCurrently the JWT Session Token currently has an excessive session timeout. The length of the timeout can be reduced to lower the risk of an attacker gaining access.\r\n\r\nIf a user leaves their computer unattended, a nearby attacker could access the user\u2019s computer and any open applications. Automatically logging a user out after an extended period of inactivity can limit the time that an attacker could make use of any hijacked sessions.\r\n\r\nReferences:\r\n[OWASP's Session Management Cheat Sheet](https://cheatsheetseries.owasp.org/cheatsheets/Session_Management_Cheat_Sheet.html)\r\n[OWASP's Page on Session Timeout](https://owasp.org/www-community/Session_Timeout)\r\n\r\n---\r\n\r\nThe referenced issue was found via a pen test conducted in collaboration with [Infor](https://www.infor.com/) and [Cobalt.io](https://www.cobalt.io/)\n", "before_files": [{"content": "\"\"\"\n.. module: lemur.auth.service\n :platform: Unix\n :synopsis: This module contains all of the authentication duties for\n lemur\n :copyright: (c) 2018 by Netflix Inc., see AUTHORS for more\n :license: Apache, see LICENSE for more details.\n.. 
moduleauthor:: Kevin Glisson <[email protected]>\n\n\"\"\"\nimport jwt\nimport json\nimport binascii\n\nfrom functools import wraps\nfrom datetime import datetime, timedelta\n\nfrom flask import g, current_app, jsonify, request\n\nfrom flask_restful import Resource\nfrom flask_principal import identity_loaded, RoleNeed, UserNeed\n\nfrom flask_principal import Identity, identity_changed\n\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import serialization\nfrom cryptography.hazmat.primitives.asymmetric.rsa import RSAPublicNumbers\n\nfrom lemur.users import service as user_service\nfrom lemur.api_keys import service as api_key_service\nfrom lemur.auth.permissions import AuthorityCreatorNeed, RoleMemberNeed\n\n\ndef get_rsa_public_key(n, e):\n \"\"\"\n Retrieve an RSA public key based on a module and exponent as provided by the JWKS format.\n\n :param n:\n :param e:\n :return: a RSA Public Key in PEM format\n \"\"\"\n n = int(binascii.hexlify(jwt.utils.base64url_decode(bytes(n, \"utf-8\"))), 16)\n e = int(binascii.hexlify(jwt.utils.base64url_decode(bytes(e, \"utf-8\"))), 16)\n\n pub = RSAPublicNumbers(e, n).public_key(default_backend())\n return pub.public_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PublicFormat.SubjectPublicKeyInfo,\n )\n\n\ndef create_token(user, aid=None, ttl=None):\n \"\"\"\n Create a valid JWT for a given user/api key, this token is then used to authenticate\n sessions until the token expires.\n\n :param user:\n :return:\n \"\"\"\n expiration_delta = timedelta(\n days=int(current_app.config.get(\"LEMUR_TOKEN_EXPIRATION\", 1))\n )\n payload = {\"iat\": datetime.utcnow(), \"exp\": datetime.utcnow() + expiration_delta}\n\n # Handle Just a User ID & User Object.\n if isinstance(user, int):\n payload[\"sub\"] = user\n else:\n payload[\"sub\"] = user.id\n if aid is not None:\n payload[\"aid\"] = aid\n # Custom TTLs are only supported on Access Keys.\n if ttl is not None and aid is not None:\n # Tokens that are forever until revoked.\n if ttl == -1:\n del payload[\"exp\"]\n else:\n payload[\"exp\"] = datetime.utcnow() + timedelta(days=ttl)\n token = jwt.encode(payload, current_app.config[\"LEMUR_TOKEN_SECRET\"])\n return token\n\n\ndef login_required(f):\n \"\"\"\n Validates the JWT and ensures that is has not expired and the user is still active.\n\n :param f:\n :return:\n \"\"\"\n\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if not request.headers.get(\"Authorization\"):\n response = jsonify(message=\"Missing authorization header\")\n response.status_code = 401\n return response\n\n try:\n token = request.headers.get(\"Authorization\").split()[1]\n except Exception as e:\n return dict(message=\"Token is invalid\"), 403\n\n try:\n header_data = fetch_token_header(token)\n payload = jwt.decode(token, current_app.config[\"LEMUR_TOKEN_SECRET\"], algorithms=[header_data[\"alg\"]])\n except jwt.DecodeError:\n return dict(message=\"Token is invalid\"), 403\n except jwt.ExpiredSignatureError:\n return dict(message=\"Token has expired\"), 403\n except jwt.InvalidTokenError:\n return dict(message=\"Token is invalid\"), 403\n\n if \"aid\" in payload:\n access_key = api_key_service.get(payload[\"aid\"])\n if access_key.revoked:\n return dict(message=\"Token has been revoked\"), 403\n if access_key.ttl != -1:\n current_time = datetime.utcnow()\n # API key uses days\n expired_time = datetime.fromtimestamp(access_key.issued_at) + timedelta(days=access_key.ttl)\n if current_time >= expired_time:\n return 
dict(message=\"Token has expired\"), 403\n if access_key.application_name:\n g.caller_application = access_key.application_name\n\n user = user_service.get(payload[\"sub\"])\n\n if not user.active:\n return dict(message=\"User is not currently active\"), 403\n\n g.current_user = user\n\n if not g.current_user:\n return dict(message=\"You are not logged in\"), 403\n\n # Tell Flask-Principal the identity changed\n identity_changed.send(\n current_app._get_current_object(), identity=Identity(g.current_user.id)\n )\n\n return f(*args, **kwargs)\n\n return decorated_function\n\n\ndef fetch_token_header(token):\n \"\"\"\n Fetch the header out of the JWT token.\n\n :param token:\n :return: :raise jwt.DecodeError:\n \"\"\"\n token = token.encode(\"utf-8\")\n try:\n signing_input, crypto_segment = token.rsplit(b\".\", 1)\n header_segment, payload_segment = signing_input.split(b\".\", 1)\n except ValueError:\n raise jwt.DecodeError(\"Not enough segments\")\n\n try:\n return json.loads(jwt.utils.base64url_decode(header_segment).decode(\"utf-8\"))\n except TypeError as e:\n current_app.logger.exception(e)\n raise jwt.DecodeError(\"Invalid header padding\")\n\n\n@identity_loaded.connect\ndef on_identity_loaded(sender, identity):\n \"\"\"\n Sets the identity of a given option, assigns additional permissions based on\n the role that the user is a part of.\n\n :param sender:\n :param identity:\n \"\"\"\n # load the user\n user = user_service.get(identity.id)\n\n # add the UserNeed to the identity\n identity.provides.add(UserNeed(identity.id))\n\n # identity with the roles that the user provides\n if hasattr(user, \"roles\"):\n for role in user.roles:\n identity.provides.add(RoleNeed(role.name))\n identity.provides.add(RoleMemberNeed(role.id))\n\n # apply ownership for authorities\n if hasattr(user, \"authorities\"):\n for authority in user.authorities:\n identity.provides.add(AuthorityCreatorNeed(authority.id))\n\n g.user = user\n\n\nclass AuthenticatedResource(Resource):\n \"\"\"\n Inherited by all resources that need to be protected by authentication.\n \"\"\"\n\n method_decorators = [login_required]\n\n def __init__(self):\n super(AuthenticatedResource, self).__init__()\n", "path": "lemur/auth/service.py"}], "after_files": [{"content": "\"\"\"\n.. module: lemur.auth.service\n :platform: Unix\n :synopsis: This module contains all of the authentication duties for\n lemur\n :copyright: (c) 2018 by Netflix Inc., see AUTHORS for more\n :license: Apache, see LICENSE for more details.\n.. 
moduleauthor:: Kevin Glisson <[email protected]>\n\n\"\"\"\nimport json\nfrom datetime import datetime, timedelta\nfrom functools import wraps\n\nimport binascii\nimport jwt\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import serialization\nfrom cryptography.hazmat.primitives.asymmetric.rsa import RSAPublicNumbers\nfrom flask import g, current_app, jsonify, request\nfrom flask_principal import Identity, identity_changed\nfrom flask_principal import identity_loaded, RoleNeed, UserNeed\nfrom flask_restful import Resource\n\nfrom lemur.api_keys import service as api_key_service\nfrom lemur.auth.permissions import AuthorityCreatorNeed, RoleMemberNeed\nfrom lemur.users import service as user_service\n\n\ndef get_rsa_public_key(n, e):\n \"\"\"\n Retrieve an RSA public key based on a module and exponent as provided by the JWKS format.\n\n :param n:\n :param e:\n :return: a RSA Public Key in PEM format\n \"\"\"\n n = int(binascii.hexlify(jwt.utils.base64url_decode(bytes(n, \"utf-8\"))), 16)\n e = int(binascii.hexlify(jwt.utils.base64url_decode(bytes(e, \"utf-8\"))), 16)\n\n pub = RSAPublicNumbers(e, n).public_key(default_backend())\n return pub.public_bytes(\n encoding=serialization.Encoding.PEM,\n format=serialization.PublicFormat.SubjectPublicKeyInfo,\n )\n\n\ndef create_token(user, aid=None, ttl=None):\n \"\"\"\n Create a valid JWT for a given user/api key, this token is then used to authenticate\n sessions until the token expires.\n\n :param user:\n :return:\n \"\"\"\n expiration_delta = timedelta(days=1)\n custom_expiry = current_app.config.get(\"LEMUR_TOKEN_EXPIRATION\")\n if custom_expiry:\n if isinstance(custom_expiry, str) and custom_expiry.endswith(\"m\"):\n expiration_delta = timedelta(\n minutes=int(custom_expiry.rstrip(\"m\"))\n )\n elif isinstance(custom_expiry, str) and custom_expiry.endswith(\"h\"):\n expiration_delta = timedelta(\n hours=int(custom_expiry.rstrip(\"h\"))\n )\n else:\n expiration_delta = timedelta(\n days=int(custom_expiry)\n )\n payload = {\"iat\": datetime.utcnow(), \"exp\": datetime.utcnow() + expiration_delta}\n\n # Handle Just a User ID & User Object.\n if isinstance(user, int):\n payload[\"sub\"] = user\n else:\n payload[\"sub\"] = user.id\n if aid is not None:\n payload[\"aid\"] = aid\n # Custom TTLs are only supported on Access Keys.\n if ttl is not None and aid is not None:\n # Tokens that are forever until revoked.\n if ttl == -1:\n del payload[\"exp\"]\n else:\n payload[\"exp\"] = datetime.utcnow() + timedelta(days=ttl)\n token = jwt.encode(payload, current_app.config[\"LEMUR_TOKEN_SECRET\"])\n return token\n\n\ndef login_required(f):\n \"\"\"\n Validates the JWT and ensures that is has not expired and the user is still active.\n\n :param f:\n :return:\n \"\"\"\n\n @wraps(f)\n def decorated_function(*args, **kwargs):\n if not request.headers.get(\"Authorization\"):\n response = jsonify(message=\"Missing authorization header\")\n response.status_code = 401\n return response\n\n try:\n token = request.headers.get(\"Authorization\").split()[1]\n except Exception as e:\n return dict(message=\"Token is invalid\"), 403\n\n try:\n header_data = fetch_token_header(token)\n payload = jwt.decode(token, current_app.config[\"LEMUR_TOKEN_SECRET\"], algorithms=[header_data[\"alg\"]])\n except jwt.DecodeError:\n return dict(message=\"Token is invalid\"), 403\n except jwt.ExpiredSignatureError:\n return dict(message=\"Token has expired\"), 403\n except jwt.InvalidTokenError:\n return dict(message=\"Token is invalid\"), 
403\n\n if \"aid\" in payload:\n access_key = api_key_service.get(payload[\"aid\"])\n if access_key.revoked:\n return dict(message=\"Token has been revoked\"), 403\n if access_key.ttl != -1:\n current_time = datetime.utcnow()\n # API key uses days\n expired_time = datetime.fromtimestamp(access_key.issued_at) + timedelta(days=access_key.ttl)\n if current_time >= expired_time:\n return dict(message=\"Token has expired\"), 403\n if access_key.application_name:\n g.caller_application = access_key.application_name\n\n user = user_service.get(payload[\"sub\"])\n\n if not user.active:\n return dict(message=\"User is not currently active\"), 403\n\n g.current_user = user\n\n if not g.current_user:\n return dict(message=\"You are not logged in\"), 403\n\n # Tell Flask-Principal the identity changed\n identity_changed.send(\n current_app._get_current_object(), identity=Identity(g.current_user.id)\n )\n\n return f(*args, **kwargs)\n\n return decorated_function\n\n\ndef fetch_token_header(token):\n \"\"\"\n Fetch the header out of the JWT token.\n\n :param token:\n :return: :raise jwt.DecodeError:\n \"\"\"\n token = token.encode(\"utf-8\")\n try:\n signing_input, crypto_segment = token.rsplit(b\".\", 1)\n header_segment, payload_segment = signing_input.split(b\".\", 1)\n except ValueError:\n raise jwt.DecodeError(\"Not enough segments\")\n\n try:\n return json.loads(jwt.utils.base64url_decode(header_segment).decode(\"utf-8\"))\n except TypeError as e:\n current_app.logger.exception(e)\n raise jwt.DecodeError(\"Invalid header padding\")\n\n\n@identity_loaded.connect\ndef on_identity_loaded(sender, identity):\n \"\"\"\n Sets the identity of a given option, assigns additional permissions based on\n the role that the user is a part of.\n\n :param sender:\n :param identity:\n \"\"\"\n # load the user\n user = user_service.get(identity.id)\n\n # add the UserNeed to the identity\n identity.provides.add(UserNeed(identity.id))\n\n # identity with the roles that the user provides\n if hasattr(user, \"roles\"):\n for role in user.roles:\n identity.provides.add(RoleNeed(role.name))\n identity.provides.add(RoleMemberNeed(role.id))\n\n # apply ownership for authorities\n if hasattr(user, \"authorities\"):\n for authority in user.authorities:\n identity.provides.add(AuthorityCreatorNeed(authority.id))\n\n g.user = user\n\n\nclass AuthenticatedResource(Resource):\n \"\"\"\n Inherited by all resources that need to be protected by authentication.\n \"\"\"\n\n method_decorators = [login_required]\n\n def __init__(self):\n super(AuthenticatedResource, self).__init__()\n", "path": "lemur/auth/service.py"}]}
2,429
495
gh_patches_debug_14378
rasdani/github-patches
git_diff
Lightning-AI__torchmetrics-1649
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Pearson Correlation Coefficient raises error when 2D tensor but single task ## 🐛 Bug I have a regression based modelling repository where the predictions can be multi-output or single-output based on configuration. My network outputs `[n_samples, n_tasks]` where `n_task` varies according to the task. If `n_task` is 1 then trying, `torchmetrics.functional.pearson_corrcoef(predictions, targets)` gives the error, ```bash ValueError: Expected argument `num_outputs` to match the second dimension of input, but got 1 and 1 ``` Changing the output shape for a single task specifically just so as to fit the metric function does not seem like a good solution. I think a simple change should be able to fix it. My current workout around, ```python import torchmetrics.functional as Fm # predictions are [n, 1] for single task/output Fm.pearson_corrcoef(predictions, targets) if predictions.shape[1] > 1 else Fm.pearson_corrcoef(predictions[:, 0], targets[:, 0]) ``` There are other metrics that handle this, ```python metrics = { "mse": Fm.mean_squared_error(predictions, targets, squared=True), "rmse": Fm.mean_squared_error(predictions, targets, squared=False), "mae": Fm.mean_absolute_error(predictions, targets), "r2": Fm.r2_score(predictions, targets, multioutput="raw_values"), "mape": Fm.mean_absolute_percentage_error(predictions, targets), # TODO: Raise issue on torchmetrics "pcc": ( Fm.pearson_corrcoef(predictions, targets) if predictions.shape[1] > 1 else Fm.pearson_corrcoef(predictions[:, 0], targets[:, 0]) ), } ``` <!-- A clear and concise description of what the bug is. --> ### To Reproduce Steps to reproduce the behavior... <!-- If you have a code sample, error messages, stack traces, please provide it here as well --> <details> <summary>Code sample</summary> <!-- Ideally attach a minimal code sample to reproduce the decried issue. Minimal means having the shortest code but still preserving the bug. --> </details> ### Expected behavior <!-- A clear and concise description of what you expected to happen. --> ### Environment - TorchMetrics version (and how you installed TM, e.g. `conda`, `pip`, build from source): - Python & PyTorch Version (e.g., 1.0): - Any other relevant information such as OS (e.g., Linux): ### Additional context <!-- Add any other context about the problem here. --> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/torchmetrics/functional/regression/utils.py` Content: ``` 1 # Copyright The Lightning team. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 
14 from torch import Tensor 15 16 17 def _check_data_shape_to_num_outputs(preds: Tensor, target: Tensor, num_outputs: int) -> None: 18 """Check that predictions and target have the correct shape, else raise error.""" 19 if preds.ndim > 2 or target.ndim > 2: 20 raise ValueError( 21 f"Expected both predictions and target to be either 1- or 2-dimensional tensors," 22 f" but got {target.ndim} and {preds.ndim}." 23 ) 24 if (num_outputs == 1 and preds.ndim != 1) or (num_outputs > 1 and num_outputs != preds.shape[1]): 25 raise ValueError( 26 f"Expected argument `num_outputs` to match the second dimension of input, but got {num_outputs}" 27 f" and {preds.shape[1]}." 28 ) 29 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/torchmetrics/functional/regression/utils.py b/src/torchmetrics/functional/regression/utils.py --- a/src/torchmetrics/functional/regression/utils.py +++ b/src/torchmetrics/functional/regression/utils.py @@ -21,7 +21,9 @@ f"Expected both predictions and target to be either 1- or 2-dimensional tensors," f" but got {target.ndim} and {preds.ndim}." ) - if (num_outputs == 1 and preds.ndim != 1) or (num_outputs > 1 and num_outputs != preds.shape[1]): + cond1 = num_outputs == 1 and not (preds.ndim == 1 or preds.shape[1] == 1) + cond2 = num_outputs > 1 and num_outputs != preds.shape[1] + if cond1 or cond2: raise ValueError( f"Expected argument `num_outputs` to match the second dimension of input, but got {num_outputs}" f" and {preds.shape[1]}."
{"golden_diff": "diff --git a/src/torchmetrics/functional/regression/utils.py b/src/torchmetrics/functional/regression/utils.py\n--- a/src/torchmetrics/functional/regression/utils.py\n+++ b/src/torchmetrics/functional/regression/utils.py\n@@ -21,7 +21,9 @@\n f\"Expected both predictions and target to be either 1- or 2-dimensional tensors,\"\n f\" but got {target.ndim} and {preds.ndim}.\"\n )\n- if (num_outputs == 1 and preds.ndim != 1) or (num_outputs > 1 and num_outputs != preds.shape[1]):\n+ cond1 = num_outputs == 1 and not (preds.ndim == 1 or preds.shape[1] == 1)\n+ cond2 = num_outputs > 1 and num_outputs != preds.shape[1]\n+ if cond1 or cond2:\n raise ValueError(\n f\"Expected argument `num_outputs` to match the second dimension of input, but got {num_outputs}\"\n f\" and {preds.shape[1]}.\"\n", "issue": "Pearson Correlation Coefficient raises error when 2D tensor but single task\n## \ud83d\udc1b Bug\r\n\r\nI have a regression based modelling repository where the predictions can be multi-output or single-output based on configuration. My network outputs `[n_samples, n_tasks]` where `n_task` varies according to the task. If `n_task` is 1 then trying, `torchmetrics.functional.pearson_corrcoef(predictions, targets)` gives the error,\r\n\r\n```bash\r\nValueError: Expected argument `num_outputs` to match the second dimension of input, but got 1 and 1\r\n```\r\n\r\nChanging the output shape for a single task specifically just so as to fit the metric function does not seem like a good solution. I think a simple change should be able to fix it.\r\nMy current workout around,\r\n```python\r\nimport torchmetrics.functional as Fm\r\n\r\n# predictions are [n, 1] for single task/output\r\nFm.pearson_corrcoef(predictions, targets) if predictions.shape[1] > 1 else Fm.pearson_corrcoef(predictions[:, 0], targets[:, 0])\r\n```\r\n\r\nThere are other metrics that handle this,\r\n```python\r\n metrics = {\r\n \"mse\": Fm.mean_squared_error(predictions, targets, squared=True),\r\n \"rmse\": Fm.mean_squared_error(predictions, targets, squared=False),\r\n \"mae\": Fm.mean_absolute_error(predictions, targets),\r\n \"r2\": Fm.r2_score(predictions, targets, multioutput=\"raw_values\"),\r\n \"mape\": Fm.mean_absolute_percentage_error(predictions, targets),\r\n # TODO: Raise issue on torchmetrics\r\n \"pcc\": (\r\n Fm.pearson_corrcoef(predictions, targets) if predictions.shape[1] > 1 else\r\n Fm.pearson_corrcoef(predictions[:, 0], targets[:, 0])\r\n ),\r\n }\r\n```\r\n\r\n\r\n<!-- A clear and concise description of what the bug is. -->\r\n\r\n### To Reproduce\r\n\r\nSteps to reproduce the behavior...\r\n\r\n<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->\r\n\r\n<details>\r\n <summary>Code sample</summary>\r\n\r\n<!-- Ideally attach a minimal code sample to reproduce the decried issue.\r\nMinimal means having the shortest code but still preserving the bug. -->\r\n\r\n</details>\r\n\r\n### Expected behavior\r\n\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n\r\n### Environment\r\n\r\n- TorchMetrics version (and how you installed TM, e.g. `conda`, `pip`, build from source):\r\n- Python & PyTorch Version (e.g., 1.0):\r\n- Any other relevant information such as OS (e.g., Linux):\r\n\r\n### Additional context\r\n\r\n<!-- Add any other context about the problem here. 
-->\r\n\n", "before_files": [{"content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom torch import Tensor\n\n\ndef _check_data_shape_to_num_outputs(preds: Tensor, target: Tensor, num_outputs: int) -> None:\n \"\"\"Check that predictions and target have the correct shape, else raise error.\"\"\"\n if preds.ndim > 2 or target.ndim > 2:\n raise ValueError(\n f\"Expected both predictions and target to be either 1- or 2-dimensional tensors,\"\n f\" but got {target.ndim} and {preds.ndim}.\"\n )\n if (num_outputs == 1 and preds.ndim != 1) or (num_outputs > 1 and num_outputs != preds.shape[1]):\n raise ValueError(\n f\"Expected argument `num_outputs` to match the second dimension of input, but got {num_outputs}\"\n f\" and {preds.shape[1]}.\"\n )\n", "path": "src/torchmetrics/functional/regression/utils.py"}], "after_files": [{"content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom torch import Tensor\n\n\ndef _check_data_shape_to_num_outputs(preds: Tensor, target: Tensor, num_outputs: int) -> None:\n \"\"\"Check that predictions and target have the correct shape, else raise error.\"\"\"\n if preds.ndim > 2 or target.ndim > 2:\n raise ValueError(\n f\"Expected both predictions and target to be either 1- or 2-dimensional tensors,\"\n f\" but got {target.ndim} and {preds.ndim}.\"\n )\n cond1 = num_outputs == 1 and not (preds.ndim == 1 or preds.shape[1] == 1)\n cond2 = num_outputs > 1 and num_outputs != preds.shape[1]\n if cond1 or cond2:\n raise ValueError(\n f\"Expected argument `num_outputs` to match the second dimension of input, but got {num_outputs}\"\n f\" and {preds.shape[1]}.\"\n )\n", "path": "src/torchmetrics/functional/regression/utils.py"}]}
1,188
231
gh_patches_debug_27807
rasdani/github-patches
git_diff
nilearn__nilearn-2214
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Python 3.5 deprecation FutureWarning in Nilearn 0.6.0 Python 3.5 will be EOL'd in September 2020. I will add a FutureWarning before release of Nilearn 0.6.0 stable, and we can drop support for it for Nilearn 0.8.0 stable. @GaelVaroquaux --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `nilearn/__init__.py` Content: ``` 1 """ 2 Machine Learning module for NeuroImaging in python 3 -------------------------------------------------- 4 5 Documentation is available in the docstrings and online at 6 http://nilearn.github.io. 7 8 Contents 9 -------- 10 Nilearn aims at simplifying the use of the scikit-learn package in the context of 11 neuroimaging. It provides specific input/output functions, algorithms and 12 visualization tools. 13 14 Submodules 15 --------- 16 datasets --- Utilities to download NeuroImaging datasets 17 decoding --- Decoding tools and algorithms 18 decomposition --- Includes a subject level variant of the ICA 19 algorithm called Canonical ICA 20 connectome --- Set of tools for computing functional connectivity matrices 21 and for sparse multi-subjects learning of Gaussian graphical models 22 image --- Set of functions defining mathematical operations 23 working on Niimg-like objects 24 input_data --- includes scikit-learn tranformers and tools to 25 preprocess neuro-imaging data 26 masking --- Utilities to compute and operate on brain masks 27 mass_univariate --- Defines a Massively Univariate Linear Model 28 estimated with OLS and permutation test 29 plotting --- Plotting code for nilearn 30 region --- Set of functions for extracting region-defined 31 signals, clustering methods, connected regions extraction 32 signal --- Set of preprocessing functions for time series 33 """ 34 35 import gzip 36 import sys 37 import warnings 38 import os 39 40 from distutils.version import LooseVersion 41 42 from .version import _check_module_dependencies, __version__ 43 44 # Workaround issue discovered in intel-openmp 2019.5: 45 # https://github.com/ContinuumIO/anaconda-issues/issues/11294 46 # 47 # see also https://github.com/scikit-learn/scikit-learn/pull/15020 48 os.environ.setdefault("KMP_INIT_AT_FORK", "FALSE") 49 50 def _py2_deprecation_warning(): 51 py2_warning = ('Python2 support is deprecated and will be removed in ' 52 'the next release. Consider switching to Python 3.6 or 3.7.' 53 ) 54 warnings.filterwarnings('once', message=py2_warning) 55 warnings.warn(message=py2_warning, 56 category=DeprecationWarning, 57 stacklevel=3, 58 ) 59 60 def _py34_deprecation_warning(): 61 py34_warning = ('Python 3.4 support is deprecated and will be removed in ' 62 'the next release. Consider switching to Python 3.6 or 3.7.' 63 ) 64 warnings.filterwarnings('once', message=py34_warning) 65 warnings.warn(message=py34_warning, 66 category=DeprecationWarning, 67 stacklevel=3, 68 ) 69 70 71 def _python_deprecation_warnings(): 72 if sys.version_info.major == 2: 73 _py2_deprecation_warning() 74 elif sys.version_info.major == 3 and sys.version_info.minor == 4: 75 _py34_deprecation_warning() 76 77 78 _check_module_dependencies() 79 _python_deprecation_warnings() 80 81 # Temporary work around to address formatting issues in doc tests 82 # with NumPy 1.14. NumPy had made more consistent str/repr formatting 83 # of numpy arrays. Hence we print the options to old versions. 
84 import numpy as np 85 if LooseVersion(np.__version__) >= LooseVersion("1.14"): 86 # See issue #1600 in nilearn for reason to add try and except 87 try: 88 from ._utils.testing import is_nose_running 89 if is_nose_running(): 90 np.set_printoptions(legacy='1.13') 91 except ImportError: 92 pass 93 94 # Monkey-patch gzip to have faster reads on large gzip files 95 if hasattr(gzip.GzipFile, 'max_read_chunk'): 96 gzip.GzipFile.max_read_chunk = 100 * 1024 * 1024 # 100Mb 97 98 # Boolean controlling the default globbing technique when using check_niimg 99 # and the os.path.expanduser usage in CacheMixin. 100 # Default value it True, set it to False to completely deactivate this 101 # behavior. 102 EXPAND_PATH_WILDCARDS = True 103 104 # Boolean controlling whether the joblib caches should be 105 # flushed if the version of certain modules changes (eg nibabel, as it 106 # does not respect the backward compatibility in some of its internal 107 # structures 108 # This is used in nilearn._utils.cache_mixin 109 CHECK_CACHE_VERSION = True 110 111 # list all submodules available in nilearn and version 112 __all__ = ['datasets', 'decoding', 'decomposition', 'connectome', 113 'image', 'input_data', 'masking', 'mass_univariate', 'plotting', 114 'region', 'signal', 'surface', 'parcellations', '__version__'] 115 116 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/nilearn/__init__.py b/nilearn/__init__.py --- a/nilearn/__init__.py +++ b/nilearn/__init__.py @@ -47,32 +47,21 @@ # see also https://github.com/scikit-learn/scikit-learn/pull/15020 os.environ.setdefault("KMP_INIT_AT_FORK", "FALSE") -def _py2_deprecation_warning(): - py2_warning = ('Python2 support is deprecated and will be removed in ' - 'the next release. Consider switching to Python 3.6 or 3.7.' - ) - warnings.filterwarnings('once', message=py2_warning) - warnings.warn(message=py2_warning, - category=DeprecationWarning, - stacklevel=3, - ) -def _py34_deprecation_warning(): - py34_warning = ('Python 3.4 support is deprecated and will be removed in ' - 'the next release. Consider switching to Python 3.6 or 3.7.' - ) - warnings.filterwarnings('once', message=py34_warning) - warnings.warn(message=py34_warning, - category=DeprecationWarning, +def _py35_deprecation_warning(): + py35_warning = ('Python 3.5 support is deprecated and will be removed in ' + 'a future release. Consider switching to Python 3.6 or 3.7' + ) + warnings.filterwarnings('once', message=py35_warning) + warnings.warn(message=py35_warning, + category=FutureWarning, stacklevel=3, ) def _python_deprecation_warnings(): - if sys.version_info.major == 2: - _py2_deprecation_warning() - elif sys.version_info.major == 3 and sys.version_info.minor == 4: - _py34_deprecation_warning() + if sys.version_info.major == 3 and sys.version_info.minor == 5: + _py35_deprecation_warning() _check_module_dependencies()
{"golden_diff": "diff --git a/nilearn/__init__.py b/nilearn/__init__.py\n--- a/nilearn/__init__.py\n+++ b/nilearn/__init__.py\n@@ -47,32 +47,21 @@\n # see also https://github.com/scikit-learn/scikit-learn/pull/15020\n os.environ.setdefault(\"KMP_INIT_AT_FORK\", \"FALSE\")\n \n-def _py2_deprecation_warning():\n- py2_warning = ('Python2 support is deprecated and will be removed in '\n- 'the next release. Consider switching to Python 3.6 or 3.7.'\n- )\n- warnings.filterwarnings('once', message=py2_warning)\n- warnings.warn(message=py2_warning,\n- category=DeprecationWarning,\n- stacklevel=3,\n- )\n \n-def _py34_deprecation_warning():\n- py34_warning = ('Python 3.4 support is deprecated and will be removed in '\n- 'the next release. Consider switching to Python 3.6 or 3.7.'\n- )\n- warnings.filterwarnings('once', message=py34_warning)\n- warnings.warn(message=py34_warning,\n- category=DeprecationWarning,\n+def _py35_deprecation_warning():\n+ py35_warning = ('Python 3.5 support is deprecated and will be removed in '\n+ 'a future release. Consider switching to Python 3.6 or 3.7'\n+ )\n+ warnings.filterwarnings('once', message=py35_warning)\n+ warnings.warn(message=py35_warning,\n+ category=FutureWarning,\n stacklevel=3,\n )\n \n \n def _python_deprecation_warnings():\n- if sys.version_info.major == 2:\n- _py2_deprecation_warning()\n- elif sys.version_info.major == 3 and sys.version_info.minor == 4:\n- _py34_deprecation_warning()\n+ if sys.version_info.major == 3 and sys.version_info.minor == 5:\n+ _py35_deprecation_warning()\n \n \n _check_module_dependencies()\n", "issue": "Python 3.5 deprecation FutureWarning in Nilearn 0.6.0\nPython 3.5 will be EOL'd in September 2020. I will add a FutureWarning before release of Nilearn 0.6.0 stable, and we can drop support for it for Nilearn 0.8.0 stable.\r\n@GaelVaroquaux \n", "before_files": [{"content": "\"\"\"\nMachine Learning module for NeuroImaging in python\n--------------------------------------------------\n\nDocumentation is available in the docstrings and online at\nhttp://nilearn.github.io.\n\nContents\n--------\nNilearn aims at simplifying the use of the scikit-learn package in the context of\nneuroimaging. 
It provides specific input/output functions, algorithms and\nvisualization tools.\n\nSubmodules\n---------\ndatasets --- Utilities to download NeuroImaging datasets\ndecoding --- Decoding tools and algorithms\ndecomposition --- Includes a subject level variant of the ICA\n algorithm called Canonical ICA\nconnectome --- Set of tools for computing functional connectivity matrices\n and for sparse multi-subjects learning of Gaussian graphical models\nimage --- Set of functions defining mathematical operations\n working on Niimg-like objects\ninput_data --- includes scikit-learn tranformers and tools to\n preprocess neuro-imaging data\nmasking --- Utilities to compute and operate on brain masks\nmass_univariate --- Defines a Massively Univariate Linear Model\n estimated with OLS and permutation test\nplotting --- Plotting code for nilearn\nregion --- Set of functions for extracting region-defined\n signals, clustering methods, connected regions extraction\nsignal --- Set of preprocessing functions for time series\n\"\"\"\n\nimport gzip\nimport sys\nimport warnings\nimport os\n\nfrom distutils.version import LooseVersion\n\nfrom .version import _check_module_dependencies, __version__\n\n# Workaround issue discovered in intel-openmp 2019.5:\n# https://github.com/ContinuumIO/anaconda-issues/issues/11294\n#\n# see also https://github.com/scikit-learn/scikit-learn/pull/15020\nos.environ.setdefault(\"KMP_INIT_AT_FORK\", \"FALSE\")\n\ndef _py2_deprecation_warning():\n py2_warning = ('Python2 support is deprecated and will be removed in '\n 'the next release. Consider switching to Python 3.6 or 3.7.'\n )\n warnings.filterwarnings('once', message=py2_warning)\n warnings.warn(message=py2_warning,\n category=DeprecationWarning,\n stacklevel=3,\n )\n\ndef _py34_deprecation_warning():\n py34_warning = ('Python 3.4 support is deprecated and will be removed in '\n 'the next release. Consider switching to Python 3.6 or 3.7.'\n )\n warnings.filterwarnings('once', message=py34_warning)\n warnings.warn(message=py34_warning,\n category=DeprecationWarning,\n stacklevel=3,\n )\n\n\ndef _python_deprecation_warnings():\n if sys.version_info.major == 2:\n _py2_deprecation_warning()\n elif sys.version_info.major == 3 and sys.version_info.minor == 4:\n _py34_deprecation_warning()\n\n\n_check_module_dependencies()\n_python_deprecation_warnings()\n\n# Temporary work around to address formatting issues in doc tests\n# with NumPy 1.14. NumPy had made more consistent str/repr formatting\n# of numpy arrays. 
Hence we print the options to old versions.\nimport numpy as np\nif LooseVersion(np.__version__) >= LooseVersion(\"1.14\"):\n # See issue #1600 in nilearn for reason to add try and except\n try:\n from ._utils.testing import is_nose_running\n if is_nose_running():\n np.set_printoptions(legacy='1.13')\n except ImportError:\n pass\n\n# Monkey-patch gzip to have faster reads on large gzip files\nif hasattr(gzip.GzipFile, 'max_read_chunk'):\n gzip.GzipFile.max_read_chunk = 100 * 1024 * 1024 # 100Mb\n\n# Boolean controlling the default globbing technique when using check_niimg\n# and the os.path.expanduser usage in CacheMixin.\n# Default value it True, set it to False to completely deactivate this\n# behavior.\nEXPAND_PATH_WILDCARDS = True\n\n# Boolean controlling whether the joblib caches should be\n# flushed if the version of certain modules changes (eg nibabel, as it\n# does not respect the backward compatibility in some of its internal\n# structures\n# This is used in nilearn._utils.cache_mixin\nCHECK_CACHE_VERSION = True\n\n# list all submodules available in nilearn and version\n__all__ = ['datasets', 'decoding', 'decomposition', 'connectome',\n 'image', 'input_data', 'masking', 'mass_univariate', 'plotting',\n 'region', 'signal', 'surface', 'parcellations', '__version__']\n\n", "path": "nilearn/__init__.py"}], "after_files": [{"content": "\"\"\"\nMachine Learning module for NeuroImaging in python\n--------------------------------------------------\n\nDocumentation is available in the docstrings and online at\nhttp://nilearn.github.io.\n\nContents\n--------\nNilearn aims at simplifying the use of the scikit-learn package in the context of\nneuroimaging. It provides specific input/output functions, algorithms and\nvisualization tools.\n\nSubmodules\n---------\ndatasets --- Utilities to download NeuroImaging datasets\ndecoding --- Decoding tools and algorithms\ndecomposition --- Includes a subject level variant of the ICA\n algorithm called Canonical ICA\nconnectome --- Set of tools for computing functional connectivity matrices\n and for sparse multi-subjects learning of Gaussian graphical models\nimage --- Set of functions defining mathematical operations\n working on Niimg-like objects\ninput_data --- includes scikit-learn tranformers and tools to\n preprocess neuro-imaging data\nmasking --- Utilities to compute and operate on brain masks\nmass_univariate --- Defines a Massively Univariate Linear Model\n estimated with OLS and permutation test\nplotting --- Plotting code for nilearn\nregion --- Set of functions for extracting region-defined\n signals, clustering methods, connected regions extraction\nsignal --- Set of preprocessing functions for time series\n\"\"\"\n\nimport gzip\nimport sys\nimport warnings\nimport os\n\nfrom distutils.version import LooseVersion\n\nfrom .version import _check_module_dependencies, __version__\n\n# Workaround issue discovered in intel-openmp 2019.5:\n# https://github.com/ContinuumIO/anaconda-issues/issues/11294\n#\n# see also https://github.com/scikit-learn/scikit-learn/pull/15020\nos.environ.setdefault(\"KMP_INIT_AT_FORK\", \"FALSE\")\n\n\ndef _py35_deprecation_warning():\n py35_warning = ('Python 3.5 support is deprecated and will be removed in '\n 'a future release. 
Consider switching to Python 3.6 or 3.7'\n )\n warnings.filterwarnings('once', message=py35_warning)\n warnings.warn(message=py35_warning,\n category=FutureWarning,\n stacklevel=3,\n )\n\n\ndef _python_deprecation_warnings():\n if sys.version_info.major == 3 and sys.version_info.minor == 5:\n _py35_deprecation_warning()\n\n\n_check_module_dependencies()\n_python_deprecation_warnings()\n\n# Temporary work around to address formatting issues in doc tests\n# with NumPy 1.14. NumPy had made more consistent str/repr formatting\n# of numpy arrays. Hence we print the options to old versions.\nimport numpy as np\nif LooseVersion(np.__version__) >= LooseVersion(\"1.14\"):\n # See issue #1600 in nilearn for reason to add try and except\n try:\n from ._utils.testing import is_nose_running\n if is_nose_running():\n np.set_printoptions(legacy='1.13')\n except ImportError:\n pass\n\n# Monkey-patch gzip to have faster reads on large gzip files\nif hasattr(gzip.GzipFile, 'max_read_chunk'):\n gzip.GzipFile.max_read_chunk = 100 * 1024 * 1024 # 100Mb\n\n# Boolean controlling the default globbing technique when using check_niimg\n# and the os.path.expanduser usage in CacheMixin.\n# Default value it True, set it to False to completely deactivate this\n# behavior.\nEXPAND_PATH_WILDCARDS = True\n\n# Boolean controlling whether the joblib caches should be\n# flushed if the version of certain modules changes (eg nibabel, as it\n# does not respect the backward compatibility in some of its internal\n# structures\n# This is used in nilearn._utils.cache_mixin\nCHECK_CACHE_VERSION = True\n\n# list all submodules available in nilearn and version\n__all__ = ['datasets', 'decoding', 'decomposition', 'connectome',\n 'image', 'input_data', 'masking', 'mass_univariate', 'plotting',\n 'region', 'signal', 'surface', 'parcellations', '__version__']\n\n", "path": "nilearn/__init__.py"}]}
1,599
468
gh_patches_debug_350
rasdani/github-patches
git_diff
scikit-image__scikit-image-1124
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- NameError on del version when init has ImportError In `__init__.py`, `del version` gives `NameError` when `ImportError` happens. ``` try: from .version import version as __version__ except ImportError: __version__ = "unbuilt-dev" del version ``` should be ``` try: from .version import version as __version__ except ImportError: __version__ = "unbuilt-dev" else: del version ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `skimage/__init__.py` Content: ``` 1 """Image Processing SciKit (Toolbox for SciPy) 2 3 ``scikit-image`` (a.k.a. ``skimage``) is a collection of algorithms for image 4 processing and computer vision. 5 6 The main package of ``skimage`` only provides a few utilities for converting 7 between image data types; for most features, you need to import one of the 8 following subpackages: 9 10 Subpackages 11 ----------- 12 color 13 Color space conversion. 14 data 15 Test images and example data. 16 draw 17 Drawing primitives (lines, text, etc.) that operate on NumPy arrays. 18 exposure 19 Image intensity adjustment, e.g., histogram equalization, etc. 20 feature 21 Feature detection and extraction, e.g., texture analysis corners, etc. 22 filter 23 Sharpening, edge finding, rank filters, thresholding, etc. 24 graph 25 Graph-theoretic operations, e.g., shortest paths. 26 io 27 Reading, saving, and displaying images and video. 28 measure 29 Measurement of image properties, e.g., similarity and contours. 30 morphology 31 Morphological operations, e.g., opening or skeletonization. 32 novice 33 Simplified interface for teaching purposes. 34 restoration 35 Restoration algorithms, e.g., deconvolution algorithms, denoising, etc. 36 segmentation 37 Partitioning an image into multiple regions. 38 transform 39 Geometric and other transforms, e.g., rotation or the Radon transform. 40 util 41 Generic utilities. 42 viewer 43 A simple graphical user interface for visualizing results and exploring 44 parameters. 45 46 Utility Functions 47 ----------------- 48 img_as_float 49 Convert an image to floating point format, with values in [0, 1]. 50 img_as_uint 51 Convert an image to unsigned integer format, with values in [0, 65535]. 52 img_as_int 53 Convert an image to signed integer format, with values in [-32768, 32767]. 54 img_as_ubyte 55 Convert an image to unsigned byte format, with values in [0, 255]. 56 57 """ 58 59 import os.path as _osp 60 import imp as _imp 61 import functools as _functools 62 import warnings as _warnings 63 from skimage._shared.utils import deprecated as _deprecated 64 65 pkg_dir = _osp.abspath(_osp.dirname(__file__)) 66 data_dir = _osp.join(pkg_dir, 'data') 67 68 try: 69 from .version import version as __version__ 70 except ImportError: 71 __version__ = "unbuilt-dev" 72 del version 73 74 75 try: 76 _imp.find_module('nose') 77 except ImportError: 78 def _test(verbose=False): 79 """This would run all unit tests, but nose couldn't be 80 imported so the test suite can not run. 81 """ 82 raise ImportError("Could not load nose. Unit tests not available.") 83 84 def _doctest(verbose=False): 85 """This would run all doc tests, but nose couldn't be 86 imported so the test suite can not run. 87 """ 88 raise ImportError("Could not load nose. 
Doctests not available.") 89 else: 90 def _test(doctest=False, verbose=False): 91 """Run all unit tests.""" 92 import nose 93 args = ['', pkg_dir, '--exe', '--ignore-files=^_test'] 94 if verbose: 95 args.extend(['-v', '-s']) 96 if doctest: 97 args.extend(['--with-doctest', '--ignore-files=^\.', 98 '--ignore-files=^setup\.py$$', '--ignore-files=test']) 99 # Make sure warnings do not break the doc tests 100 with _warnings.catch_warnings(): 101 _warnings.simplefilter("ignore") 102 success = nose.run('skimage', argv=args) 103 else: 104 success = nose.run('skimage', argv=args) 105 # Return sys.exit code 106 if success: 107 return 0 108 else: 109 return 1 110 111 112 # do not use `test` as function name as this leads to a recursion problem with 113 # the nose test suite 114 test = _test 115 test_verbose = _functools.partial(test, verbose=True) 116 test_verbose.__doc__ = test.__doc__ 117 doctest = _functools.partial(test, doctest=True) 118 doctest.__doc__ = doctest.__doc__ 119 doctest_verbose = _functools.partial(test, doctest=True, verbose=True) 120 doctest_verbose.__doc__ = doctest.__doc__ 121 122 123 class _Log(Warning): 124 pass 125 126 127 class _FakeLog(object): 128 def __init__(self, name): 129 """ 130 Parameters 131 ---------- 132 name : str 133 Name of the log. 134 repeat : bool 135 Whether to print repeating messages more than once (False by 136 default). 137 """ 138 self._name = name 139 140 warnings.simplefilter("always", _Log) 141 142 self._warnings = _warnings 143 144 def _warn(self, msg, wtype): 145 self._warnings.warn('%s: %s' % (wtype, msg), _Log) 146 147 def debug(self, msg): 148 self._warn(msg, 'DEBUG') 149 150 def info(self, msg): 151 self._warn(msg, 'INFO') 152 153 def warning(self, msg): 154 self._warn(msg, 'WARNING') 155 156 warn = warning 157 158 def error(self, msg): 159 self._warn(msg, 'ERROR') 160 161 def critical(self, msg): 162 self._warn(msg, 'CRITICAL') 163 164 def addHandler(*args): 165 pass 166 167 def setLevel(*args): 168 pass 169 170 171 from .util.dtype import * 172 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/skimage/__init__.py b/skimage/__init__.py
--- a/skimage/__init__.py
+++ b/skimage/__init__.py
@@ -69,7 +69,8 @@
     from .version import version as __version__
 except ImportError:
    __version__ = "unbuilt-dev"
-del version
+else:
+    del version
 
 
 try:
{"golden_diff": "diff --git a/skimage/__init__.py b/skimage/__init__.py\n--- a/skimage/__init__.py\n+++ b/skimage/__init__.py\n@@ -69,7 +69,8 @@\n from .version import version as __version__\n except ImportError:\n __version__ = \"unbuilt-dev\"\n-del version\n+else:\n+ del version\n \n \n try:\n", "issue": "NameError on del version when init has ImportError\nIn `__init__.py`, `del version` gives `NameError` when `ImportError` happens.\n\n```\ntry:\n from .version import version as __version__\nexcept ImportError:\n __version__ = \"unbuilt-dev\"\ndel version\n```\n\nshould be\n\n```\ntry:\n from .version import version as __version__\nexcept ImportError:\n __version__ = \"unbuilt-dev\"\nelse:\n del version\n```\n\n", "before_files": [{"content": "\"\"\"Image Processing SciKit (Toolbox for SciPy)\n\n``scikit-image`` (a.k.a. ``skimage``) is a collection of algorithms for image\nprocessing and computer vision.\n\nThe main package of ``skimage`` only provides a few utilities for converting\nbetween image data types; for most features, you need to import one of the\nfollowing subpackages:\n\nSubpackages\n-----------\ncolor\n Color space conversion.\ndata\n Test images and example data.\ndraw\n Drawing primitives (lines, text, etc.) that operate on NumPy arrays.\nexposure\n Image intensity adjustment, e.g., histogram equalization, etc.\nfeature\n Feature detection and extraction, e.g., texture analysis corners, etc.\nfilter\n Sharpening, edge finding, rank filters, thresholding, etc.\ngraph\n Graph-theoretic operations, e.g., shortest paths.\nio\n Reading, saving, and displaying images and video.\nmeasure\n Measurement of image properties, e.g., similarity and contours.\nmorphology\n Morphological operations, e.g., opening or skeletonization.\nnovice\n Simplified interface for teaching purposes.\nrestoration\n Restoration algorithms, e.g., deconvolution algorithms, denoising, etc.\nsegmentation\n Partitioning an image into multiple regions.\ntransform\n Geometric and other transforms, e.g., rotation or the Radon transform.\nutil\n Generic utilities.\nviewer\n A simple graphical user interface for visualizing results and exploring\n parameters.\n\nUtility Functions\n-----------------\nimg_as_float\n Convert an image to floating point format, with values in [0, 1].\nimg_as_uint\n Convert an image to unsigned integer format, with values in [0, 65535].\nimg_as_int\n Convert an image to signed integer format, with values in [-32768, 32767].\nimg_as_ubyte\n Convert an image to unsigned byte format, with values in [0, 255].\n\n\"\"\"\n\nimport os.path as _osp\nimport imp as _imp\nimport functools as _functools\nimport warnings as _warnings\nfrom skimage._shared.utils import deprecated as _deprecated\n\npkg_dir = _osp.abspath(_osp.dirname(__file__))\ndata_dir = _osp.join(pkg_dir, 'data')\n\ntry:\n from .version import version as __version__\nexcept ImportError:\n __version__ = \"unbuilt-dev\"\ndel version\n\n\ntry:\n _imp.find_module('nose')\nexcept ImportError:\n def _test(verbose=False):\n \"\"\"This would run all unit tests, but nose couldn't be\n imported so the test suite can not run.\n \"\"\"\n raise ImportError(\"Could not load nose. Unit tests not available.\")\n\n def _doctest(verbose=False):\n \"\"\"This would run all doc tests, but nose couldn't be\n imported so the test suite can not run.\n \"\"\"\n raise ImportError(\"Could not load nose. 
Doctests not available.\")\nelse:\n def _test(doctest=False, verbose=False):\n \"\"\"Run all unit tests.\"\"\"\n import nose\n args = ['', pkg_dir, '--exe', '--ignore-files=^_test']\n if verbose:\n args.extend(['-v', '-s'])\n if doctest:\n args.extend(['--with-doctest', '--ignore-files=^\\.',\n '--ignore-files=^setup\\.py$$', '--ignore-files=test'])\n # Make sure warnings do not break the doc tests\n with _warnings.catch_warnings():\n _warnings.simplefilter(\"ignore\")\n success = nose.run('skimage', argv=args)\n else:\n success = nose.run('skimage', argv=args)\n # Return sys.exit code\n if success:\n return 0\n else:\n return 1\n\n\n# do not use `test` as function name as this leads to a recursion problem with\n# the nose test suite\ntest = _test\ntest_verbose = _functools.partial(test, verbose=True)\ntest_verbose.__doc__ = test.__doc__\ndoctest = _functools.partial(test, doctest=True)\ndoctest.__doc__ = doctest.__doc__\ndoctest_verbose = _functools.partial(test, doctest=True, verbose=True)\ndoctest_verbose.__doc__ = doctest.__doc__\n\n\nclass _Log(Warning):\n pass\n\n\nclass _FakeLog(object):\n def __init__(self, name):\n \"\"\"\n Parameters\n ----------\n name : str\n Name of the log.\n repeat : bool\n Whether to print repeating messages more than once (False by\n default).\n \"\"\"\n self._name = name\n\n warnings.simplefilter(\"always\", _Log)\n\n self._warnings = _warnings\n\n def _warn(self, msg, wtype):\n self._warnings.warn('%s: %s' % (wtype, msg), _Log)\n\n def debug(self, msg):\n self._warn(msg, 'DEBUG')\n\n def info(self, msg):\n self._warn(msg, 'INFO')\n\n def warning(self, msg):\n self._warn(msg, 'WARNING')\n\n warn = warning\n\n def error(self, msg):\n self._warn(msg, 'ERROR')\n\n def critical(self, msg):\n self._warn(msg, 'CRITICAL')\n\n def addHandler(*args):\n pass\n\n def setLevel(*args):\n pass\n\n\nfrom .util.dtype import *\n", "path": "skimage/__init__.py"}], "after_files": [{"content": "\"\"\"Image Processing SciKit (Toolbox for SciPy)\n\n``scikit-image`` (a.k.a. ``skimage``) is a collection of algorithms for image\nprocessing and computer vision.\n\nThe main package of ``skimage`` only provides a few utilities for converting\nbetween image data types; for most features, you need to import one of the\nfollowing subpackages:\n\nSubpackages\n-----------\ncolor\n Color space conversion.\ndata\n Test images and example data.\ndraw\n Drawing primitives (lines, text, etc.) 
that operate on NumPy arrays.\nexposure\n Image intensity adjustment, e.g., histogram equalization, etc.\nfeature\n Feature detection and extraction, e.g., texture analysis corners, etc.\nfilter\n Sharpening, edge finding, rank filters, thresholding, etc.\ngraph\n Graph-theoretic operations, e.g., shortest paths.\nio\n Reading, saving, and displaying images and video.\nmeasure\n Measurement of image properties, e.g., similarity and contours.\nmorphology\n Morphological operations, e.g., opening or skeletonization.\nnovice\n Simplified interface for teaching purposes.\nrestoration\n Restoration algorithms, e.g., deconvolution algorithms, denoising, etc.\nsegmentation\n Partitioning an image into multiple regions.\ntransform\n Geometric and other transforms, e.g., rotation or the Radon transform.\nutil\n Generic utilities.\nviewer\n A simple graphical user interface for visualizing results and exploring\n parameters.\n\nUtility Functions\n-----------------\nimg_as_float\n Convert an image to floating point format, with values in [0, 1].\nimg_as_uint\n Convert an image to unsigned integer format, with values in [0, 65535].\nimg_as_int\n Convert an image to signed integer format, with values in [-32768, 32767].\nimg_as_ubyte\n Convert an image to unsigned byte format, with values in [0, 255].\n\n\"\"\"\n\nimport os.path as _osp\nimport imp as _imp\nimport functools as _functools\nimport warnings as _warnings\nfrom skimage._shared.utils import deprecated as _deprecated\n\npkg_dir = _osp.abspath(_osp.dirname(__file__))\ndata_dir = _osp.join(pkg_dir, 'data')\n\ntry:\n from .version import version as __version__\nexcept ImportError:\n __version__ = \"unbuilt-dev\"\nelse:\n del version\n\n\ntry:\n _imp.find_module('nose')\nexcept ImportError:\n def _test(verbose=False):\n \"\"\"This would run all unit tests, but nose couldn't be\n imported so the test suite can not run.\n \"\"\"\n raise ImportError(\"Could not load nose. Unit tests not available.\")\n\n def _doctest(verbose=False):\n \"\"\"This would run all doc tests, but nose couldn't be\n imported so the test suite can not run.\n \"\"\"\n raise ImportError(\"Could not load nose. 
Doctests not available.\")\nelse:\n def _test(doctest=False, verbose=False):\n \"\"\"Run all unit tests.\"\"\"\n import nose\n args = ['', pkg_dir, '--exe', '--ignore-files=^_test']\n if verbose:\n args.extend(['-v', '-s'])\n if doctest:\n args.extend(['--with-doctest', '--ignore-files=^\\.',\n '--ignore-files=^setup\\.py$$', '--ignore-files=test'])\n # Make sure warnings do not break the doc tests\n with _warnings.catch_warnings():\n _warnings.simplefilter(\"ignore\")\n success = nose.run('skimage', argv=args)\n else:\n success = nose.run('skimage', argv=args)\n # Return sys.exit code\n if success:\n return 0\n else:\n return 1\n\n\n# do not use `test` as function name as this leads to a recursion problem with\n# the nose test suite\ntest = _test\ntest_verbose = _functools.partial(test, verbose=True)\ntest_verbose.__doc__ = test.__doc__\ndoctest = _functools.partial(test, doctest=True)\ndoctest.__doc__ = doctest.__doc__\ndoctest_verbose = _functools.partial(test, doctest=True, verbose=True)\ndoctest_verbose.__doc__ = doctest.__doc__\n\n\nclass _Log(Warning):\n pass\n\n\nclass _FakeLog(object):\n def __init__(self, name):\n \"\"\"\n Parameters\n ----------\n name : str\n Name of the log.\n repeat : bool\n Whether to print repeating messages more than once (False by\n default).\n \"\"\"\n self._name = name\n\n warnings.simplefilter(\"always\", _Log)\n\n self._warnings = _warnings\n\n def _warn(self, msg, wtype):\n self._warnings.warn('%s: %s' % (wtype, msg), _Log)\n\n def debug(self, msg):\n self._warn(msg, 'DEBUG')\n\n def info(self, msg):\n self._warn(msg, 'INFO')\n\n def warning(self, msg):\n self._warn(msg, 'WARNING')\n\n warn = warning\n\n def error(self, msg):\n self._warn(msg, 'ERROR')\n\n def critical(self, msg):\n self._warn(msg, 'CRITICAL')\n\n def addHandler(*args):\n pass\n\n def setLevel(*args):\n pass\n\n\nfrom .util.dtype import *\n", "path": "skimage/__init__.py"}]}
1,935
89
gh_patches_debug_21874
rasdani/github-patches
git_diff
streamlink__streamlink-3459
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- No man page with pip install ### Checklist - [ ] This is a bug report. - [x] This is a feature request. - [ ] This is a plugin (improvement) request. - [x] I have read the contribution guidelines. ### Description When installing streamlink with pip, no man page gets installed ### Expected / Actual behavior a man page gets installed during installation of streamlink with pip ### Reproduction steps / Explicit stream URLs to test 1. ``pip install --user streamlink`` 2. ``man streamlink`` 3. ``No manual entry for streamlink`` 4. I get the same results when using ``pip install streamlink`` ### Logs ``` [cli][debug] OS: Linux-4.13.0-43-generic-x86_64-with-Ubuntu-17.10-artful [cli][debug] Python: 3.6.3 [cli][debug] Streamlink: 0.12.1 [cli][debug] Requests(2.18.4), Socks(1.6.7), Websocket(0.47.0) usage: streamlink [OPTIONS] <URL> [STREAM] ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `setup.py` Content: ``` 1 #!/usr/bin/env python 2 import codecs 3 from os import environ, path 4 from sys import argv, path as sys_path 5 6 from setuptools import find_packages, setup 7 8 import versioneer 9 10 11 deps = [ 12 "requests>=2.21.0,<3.0", 13 "isodate", 14 "websocket-client", 15 # Support for SOCKS proxies 16 "PySocks!=1.5.7,>=1.5.6", 17 ] 18 19 # for encrypted streams 20 if environ.get("STREAMLINK_USE_PYCRYPTO"): 21 deps.append("pycrypto") 22 else: 23 # this version of pycryptodome is known to work and has a Windows wheel for py2.7, py3.3-3.6 24 deps.append("pycryptodome>=3.4.3,<4") 25 26 # for localization 27 if environ.get("STREAMLINK_USE_PYCOUNTRY"): 28 deps.append("pycountry") 29 else: 30 deps.append("iso-639") 31 deps.append("iso3166") 32 33 # When we build an egg for the Win32 bootstrap we don"t want dependency 34 # information built into it. 
35 if environ.get("NO_DEPS"): 36 deps = [] 37 38 this_directory = path.abspath(path.dirname(__file__)) 39 srcdir = path.join(this_directory, "src/") 40 sys_path.insert(0, srcdir) 41 42 with codecs.open(path.join(this_directory, "README.md"), 'r', "utf8") as f: 43 long_description = f.read() 44 45 46 def is_wheel_for_windows(): 47 if "bdist_wheel" in argv: 48 names = ["win32", "win-amd64", "cygwin"] 49 length = len(argv) 50 for pos in range(argv.index("bdist_wheel") + 1, length): 51 if argv[pos] == "--plat-name" and pos + 1 < length: 52 return argv[pos + 1] in names 53 elif argv[pos][:12] == "--plat-name=": 54 return argv[pos][12:] in names 55 return False 56 57 58 entry_points = { 59 "console_scripts": ["streamlink=streamlink_cli.main:main"] 60 } 61 62 if is_wheel_for_windows(): 63 entry_points["gui_scripts"] = ["streamlinkw=streamlink_cli.main:main"] 64 65 66 setup(name="streamlink", 67 version=versioneer.get_version(), 68 cmdclass=versioneer.get_cmdclass(), 69 description="Streamlink is a command-line utility that extracts streams " 70 "from various services and pipes them into a video player of " 71 "choice.", 72 long_description=long_description, 73 long_description_content_type="text/markdown", 74 url="https://github.com/streamlink/streamlink", 75 project_urls={ 76 "Documentation": "https://streamlink.github.io/", 77 "Tracker": "https://github.com/streamlink/streamlink/issues", 78 "Source": "https://github.com/streamlink/streamlink", 79 "Funding": "https://opencollective.com/streamlink" 80 }, 81 author="Streamlink", 82 # temp until we have a mailing list / global email 83 author_email="[email protected]", 84 license="Simplified BSD", 85 packages=find_packages("src"), 86 package_dir={"": "src"}, 87 entry_points=entry_points, 88 install_requires=deps, 89 test_suite="tests", 90 python_requires=">=3.6, <4", 91 classifiers=["Development Status :: 5 - Production/Stable", 92 "License :: OSI Approved :: BSD License", 93 "Environment :: Console", 94 "Intended Audience :: End Users/Desktop", 95 "Operating System :: POSIX", 96 "Operating System :: Microsoft :: Windows", 97 "Operating System :: MacOS", 98 "Programming Language :: Python :: 3", 99 "Programming Language :: Python :: 3 :: Only", 100 "Programming Language :: Python :: 3.6", 101 "Programming Language :: Python :: 3.7", 102 "Programming Language :: Python :: 3.8", 103 "Programming Language :: Python :: 3.9", 104 "Topic :: Internet :: WWW/HTTP", 105 "Topic :: Multimedia :: Sound/Audio", 106 "Topic :: Multimedia :: Video", 107 "Topic :: Utilities"]) 108 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -8,6 +8,7 @@
 import versioneer
 
 
+data_files = []
 deps = [
     "requests>=2.21.0,<3.0",
     "isodate",
@@ -63,6 +64,19 @@
     entry_points["gui_scripts"] = ["streamlinkw=streamlink_cli.main:main"]
 
 
+additional_files = [
+    ("share/man/man1", ["docs/_build/man/streamlink.1"])
+]
+
+for destdir, srcfiles in additional_files:
+    files = []
+    for srcfile in srcfiles:
+        if path.exists(srcfile):
+            files.append(srcfile)
+    if files:
+        data_files.append((destdir, files))
+
+
 setup(name="streamlink",
       version=versioneer.get_version(),
       cmdclass=versioneer.get_cmdclass(),
@@ -85,6 +99,7 @@
       packages=find_packages("src"),
       package_dir={"": "src"},
       entry_points=entry_points,
+      data_files=data_files,
       install_requires=deps,
       test_suite="tests",
       python_requires=">=3.6, <4",
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -8,6 +8,7 @@\n import versioneer\n \n \n+data_files = []\n deps = [\n \"requests>=2.21.0,<3.0\",\n \"isodate\",\n@@ -63,6 +64,19 @@\n entry_points[\"gui_scripts\"] = [\"streamlinkw=streamlink_cli.main:main\"]\n \n \n+additional_files = [\n+ (\"share/man/man1\", [\"docs/_build/man/streamlink.1\"])\n+]\n+\n+for destdir, srcfiles in additional_files:\n+ files = []\n+ for srcfile in srcfiles:\n+ if path.exists(srcfile):\n+ files.append(srcfile)\n+ if files:\n+ data_files.append((destdir, files))\n+\n+\n setup(name=\"streamlink\",\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n@@ -85,6 +99,7 @@\n packages=find_packages(\"src\"),\n package_dir={\"\": \"src\"},\n entry_points=entry_points,\n+ data_files=data_files,\n install_requires=deps,\n test_suite=\"tests\",\n python_requires=\">=3.6, <4\",\n", "issue": "No man page with pip install\n### Checklist\r\n\r\n- [ ] This is a bug report.\r\n- [x] This is a feature request.\r\n- [ ] This is a plugin (improvement) request.\r\n- [x] I have read the contribution guidelines.\r\n\r\n### Description\r\n\r\nWhen installing streamlink with pip, no man page gets installed\r\n\r\n### Expected / Actual behavior\r\n\r\na man page gets installed during installation of streamlink with pip\r\n\r\n### Reproduction steps / Explicit stream URLs to test\r\n\r\n1. ``pip install --user streamlink``\r\n2. ``man streamlink``\r\n3. ``No manual entry for streamlink``\r\n4. I get the same results when using ``pip install streamlink``\r\n\r\n### Logs\r\n\r\n```\r\n[cli][debug] OS: Linux-4.13.0-43-generic-x86_64-with-Ubuntu-17.10-artful\r\n[cli][debug] Python: 3.6.3\r\n[cli][debug] Streamlink: 0.12.1\r\n[cli][debug] Requests(2.18.4), Socks(1.6.7), Websocket(0.47.0)\r\nusage: streamlink [OPTIONS] <URL> [STREAM]\r\n\r\n```\n", "before_files": [{"content": "#!/usr/bin/env python\nimport codecs\nfrom os import environ, path\nfrom sys import argv, path as sys_path\n\nfrom setuptools import find_packages, setup\n\nimport versioneer\n\n\ndeps = [\n \"requests>=2.21.0,<3.0\",\n \"isodate\",\n \"websocket-client\",\n # Support for SOCKS proxies\n \"PySocks!=1.5.7,>=1.5.6\",\n]\n\n# for encrypted streams\nif environ.get(\"STREAMLINK_USE_PYCRYPTO\"):\n deps.append(\"pycrypto\")\nelse:\n # this version of pycryptodome is known to work and has a Windows wheel for py2.7, py3.3-3.6\n deps.append(\"pycryptodome>=3.4.3,<4\")\n\n# for localization\nif environ.get(\"STREAMLINK_USE_PYCOUNTRY\"):\n deps.append(\"pycountry\")\nelse:\n deps.append(\"iso-639\")\n deps.append(\"iso3166\")\n\n# When we build an egg for the Win32 bootstrap we don\"t want dependency\n# information built into it.\nif environ.get(\"NO_DEPS\"):\n deps = []\n\nthis_directory = path.abspath(path.dirname(__file__))\nsrcdir = path.join(this_directory, \"src/\")\nsys_path.insert(0, srcdir)\n\nwith codecs.open(path.join(this_directory, \"README.md\"), 'r', \"utf8\") as f:\n long_description = f.read()\n\n\ndef is_wheel_for_windows():\n if \"bdist_wheel\" in argv:\n names = [\"win32\", \"win-amd64\", \"cygwin\"]\n length = len(argv)\n for pos in range(argv.index(\"bdist_wheel\") + 1, length):\n if argv[pos] == \"--plat-name\" and pos + 1 < length:\n return argv[pos + 1] in names\n elif argv[pos][:12] == \"--plat-name=\":\n return argv[pos][12:] in names\n return False\n\n\nentry_points = {\n \"console_scripts\": [\"streamlink=streamlink_cli.main:main\"]\n}\n\nif is_wheel_for_windows():\n entry_points[\"gui_scripts\"] 
= [\"streamlinkw=streamlink_cli.main:main\"]\n\n\nsetup(name=\"streamlink\",\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n description=\"Streamlink is a command-line utility that extracts streams \"\n \"from various services and pipes them into a video player of \"\n \"choice.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/streamlink/streamlink\",\n project_urls={\n \"Documentation\": \"https://streamlink.github.io/\",\n \"Tracker\": \"https://github.com/streamlink/streamlink/issues\",\n \"Source\": \"https://github.com/streamlink/streamlink\",\n \"Funding\": \"https://opencollective.com/streamlink\"\n },\n author=\"Streamlink\",\n # temp until we have a mailing list / global email\n author_email=\"[email protected]\",\n license=\"Simplified BSD\",\n packages=find_packages(\"src\"),\n package_dir={\"\": \"src\"},\n entry_points=entry_points,\n install_requires=deps,\n test_suite=\"tests\",\n python_requires=\">=3.6, <4\",\n classifiers=[\"Development Status :: 5 - Production/Stable\",\n \"License :: OSI Approved :: BSD License\",\n \"Environment :: Console\",\n \"Intended Audience :: End Users/Desktop\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Multimedia :: Sound/Audio\",\n \"Topic :: Multimedia :: Video\",\n \"Topic :: Utilities\"])\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\nimport codecs\nfrom os import environ, path\nfrom sys import argv, path as sys_path\n\nfrom setuptools import find_packages, setup\n\nimport versioneer\n\n\ndata_files = []\ndeps = [\n \"requests>=2.21.0,<3.0\",\n \"isodate\",\n \"websocket-client\",\n # Support for SOCKS proxies\n \"PySocks!=1.5.7,>=1.5.6\",\n]\n\n# for encrypted streams\nif environ.get(\"STREAMLINK_USE_PYCRYPTO\"):\n deps.append(\"pycrypto\")\nelse:\n # this version of pycryptodome is known to work and has a Windows wheel for py2.7, py3.3-3.6\n deps.append(\"pycryptodome>=3.4.3,<4\")\n\n# for localization\nif environ.get(\"STREAMLINK_USE_PYCOUNTRY\"):\n deps.append(\"pycountry\")\nelse:\n deps.append(\"iso-639\")\n deps.append(\"iso3166\")\n\n# When we build an egg for the Win32 bootstrap we don\"t want dependency\n# information built into it.\nif environ.get(\"NO_DEPS\"):\n deps = []\n\nthis_directory = path.abspath(path.dirname(__file__))\nsrcdir = path.join(this_directory, \"src/\")\nsys_path.insert(0, srcdir)\n\nwith codecs.open(path.join(this_directory, \"README.md\"), 'r', \"utf8\") as f:\n long_description = f.read()\n\n\ndef is_wheel_for_windows():\n if \"bdist_wheel\" in argv:\n names = [\"win32\", \"win-amd64\", \"cygwin\"]\n length = len(argv)\n for pos in range(argv.index(\"bdist_wheel\") + 1, length):\n if argv[pos] == \"--plat-name\" and pos + 1 < length:\n return argv[pos + 1] in names\n elif argv[pos][:12] == \"--plat-name=\":\n return argv[pos][12:] in names\n return False\n\n\nentry_points = {\n \"console_scripts\": [\"streamlink=streamlink_cli.main:main\"]\n}\n\nif is_wheel_for_windows():\n entry_points[\"gui_scripts\"] = [\"streamlinkw=streamlink_cli.main:main\"]\n\n\nadditional_files = [\n 
(\"share/man/man1\", [\"docs/_build/man/streamlink.1\"])\n]\n\nfor destdir, srcfiles in additional_files:\n files = []\n for srcfile in srcfiles:\n if path.exists(srcfile):\n files.append(srcfile)\n if files:\n data_files.append((destdir, files))\n\n\nsetup(name=\"streamlink\",\n version=versioneer.get_version(),\n cmdclass=versioneer.get_cmdclass(),\n description=\"Streamlink is a command-line utility that extracts streams \"\n \"from various services and pipes them into a video player of \"\n \"choice.\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/streamlink/streamlink\",\n project_urls={\n \"Documentation\": \"https://streamlink.github.io/\",\n \"Tracker\": \"https://github.com/streamlink/streamlink/issues\",\n \"Source\": \"https://github.com/streamlink/streamlink\",\n \"Funding\": \"https://opencollective.com/streamlink\"\n },\n author=\"Streamlink\",\n # temp until we have a mailing list / global email\n author_email=\"[email protected]\",\n license=\"Simplified BSD\",\n packages=find_packages(\"src\"),\n package_dir={\"\": \"src\"},\n entry_points=entry_points,\n data_files=data_files,\n install_requires=deps,\n test_suite=\"tests\",\n python_requires=\">=3.6, <4\",\n classifiers=[\"Development Status :: 5 - Production/Stable\",\n \"License :: OSI Approved :: BSD License\",\n \"Environment :: Console\",\n \"Intended Audience :: End Users/Desktop\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: MacOS\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Multimedia :: Sound/Audio\",\n \"Topic :: Multimedia :: Video\",\n \"Topic :: Utilities\"])\n", "path": "setup.py"}]}
1,631
272
gh_patches_debug_6747
rasdani/github-patches
git_diff
sanic-org__sanic-2770
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Circular import in target file accidentally triggers 'No module named ... found' ### Is there an existing issue for this? - [X] I have searched the existing issues ### Describe the bug While developing it appears that I accidentally caused a circular import which Python traces back to starting in the file I have my app in. As a result, Python outputs an error such as the following: ``` ImportError: cannot import name 'constants' from partially initialized module 'app' (most likely due to a circular import) (/api/app/__init__.py) ``` In this case my module I pass to the sanic server is `app:app`, from within `/api`. ### Code snippet _No response_ ### Expected Behavior I had this in the back of my mind the entire time, but found it very difficult to troubleshoot due to Sanic swallowing the error. As a result I ended up the rabbit hole of accidental breaking changes and tried commenting out different changes. An hour later I finally found the right import. It would help if Sanic continued to output the specific import error, on the off-chance that it isn't an incorrectly setup module. The alternative would be to use more fine-grained `importlib` and manually call some functions rather than use their help functions. As a result there should be a different call which finds the file (an `ImportError` here hints at an incorrectly setup module), than the one which loads it (user error). ### How do you run Sanic? Sanic CLI ### Operating System Windows (Docker, Python:3.11) ### Sanic Version 23.3 ### Additional context _No response_ Circular import in target file accidentally triggers 'No module named ... found' ### Is there an existing issue for this? - [X] I have searched the existing issues ### Describe the bug While developing it appears that I accidentally caused a circular import which Python traces back to starting in the file I have my app in. As a result, Python outputs an error such as the following: ``` ImportError: cannot import name 'constants' from partially initialized module 'app' (most likely due to a circular import) (/api/app/__init__.py) ``` In this case my module I pass to the sanic server is `app:app`, from within `/api`. ### Code snippet _No response_ ### Expected Behavior I had this in the back of my mind the entire time, but found it very difficult to troubleshoot due to Sanic swallowing the error. As a result I ended up the rabbit hole of accidental breaking changes and tried commenting out different changes. An hour later I finally found the right import. It would help if Sanic continued to output the specific import error, on the off-chance that it isn't an incorrectly setup module. The alternative would be to use more fine-grained `importlib` and manually call some functions rather than use their help functions. As a result there should be a different call which finds the file (an `ImportError` here hints at an incorrectly setup module), than the one which loads it (user error). ### How do you run Sanic? Sanic CLI ### Operating System Windows (Docker, Python:3.11) ### Sanic Version 23.3 ### Additional context _No response_ --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `sanic/cli/app.py` Content: ``` 1 import os 2 import shutil 3 import sys 4 5 from argparse import Namespace 6 from functools import partial 7 from textwrap import indent 8 from typing import List, Union 9 10 from sanic.app import Sanic 11 from sanic.application.logo import get_logo 12 from sanic.cli.arguments import Group 13 from sanic.cli.base import SanicArgumentParser, SanicHelpFormatter 14 from sanic.cli.inspector import make_inspector_parser 15 from sanic.cli.inspector_client import InspectorClient 16 from sanic.log import error_logger 17 from sanic.worker.loader import AppLoader 18 19 20 class SanicCLI: 21 DESCRIPTION = indent( 22 f""" 23 {get_logo(True)} 24 25 To start running a Sanic application, provide a path to the module, where 26 app is a Sanic() instance in the global scope: 27 28 $ sanic path.to.server:app 29 30 If the Sanic instance variable is called 'app', you can leave off the last 31 part, and only provide a path to the module where the instance is: 32 33 $ sanic path.to.server 34 35 Or, a path to a callable that returns a Sanic() instance: 36 37 $ sanic path.to.factory:create_app 38 39 Or, a path to a directory to run as a simple HTTP server: 40 41 $ sanic ./path/to/static 42 """, 43 prefix=" ", 44 ) 45 46 def __init__(self) -> None: 47 width = shutil.get_terminal_size().columns 48 self.parser = SanicArgumentParser( 49 prog="sanic", 50 description=self.DESCRIPTION, 51 formatter_class=lambda prog: SanicHelpFormatter( 52 prog, 53 max_help_position=36 if width > 96 else 24, 54 indent_increment=4, 55 width=None, 56 ), 57 ) 58 self.parser._positionals.title = "Required\n========\n Positional" 59 self.parser._optionals.title = "Optional\n========\n General" 60 self.main_process = ( 61 os.environ.get("SANIC_RELOADER_PROCESS", "") != "true" 62 ) 63 self.args: Namespace = Namespace() 64 self.groups: List[Group] = [] 65 self.inspecting = False 66 67 def attach(self): 68 if len(sys.argv) > 1 and sys.argv[1] == "inspect": 69 self.inspecting = True 70 self.parser.description = get_logo(True) 71 make_inspector_parser(self.parser) 72 return 73 74 for group in Group._registry: 75 instance = group.create(self.parser) 76 instance.attach() 77 self.groups.append(instance) 78 79 def run(self, parse_args=None): 80 if self.inspecting: 81 self._inspector() 82 return 83 84 legacy_version = False 85 if not parse_args: 86 # This is to provide backwards compat -v to display version 87 legacy_version = len(sys.argv) == 2 and sys.argv[-1] == "-v" 88 parse_args = ["--version"] if legacy_version else None 89 elif parse_args == ["-v"]: 90 parse_args = ["--version"] 91 92 if not legacy_version: 93 parsed, unknown = self.parser.parse_known_args(args=parse_args) 94 if unknown and parsed.factory: 95 for arg in unknown: 96 if arg.startswith("--"): 97 self.parser.add_argument(arg.split("=")[0]) 98 99 self.args = self.parser.parse_args(args=parse_args) 100 self._precheck() 101 app_loader = AppLoader( 102 self.args.target, self.args.factory, self.args.simple, self.args 103 ) 104 105 try: 106 app = self._get_app(app_loader) 107 kwargs = self._build_run_kwargs() 108 except ValueError as e: 109 error_logger.exception(f"Failed to run app: {e}") 110 else: 111 for http_version in self.args.http: 112 app.prepare(**kwargs, version=http_version) 113 if self.args.single: 114 serve = Sanic.serve_single 115 else: 116 serve = partial(Sanic.serve, app_loader=app_loader) 117 serve(app) 118 119 def _inspector(self): 120 args = sys.argv[2:] 121 self.args, unknown = 
self.parser.parse_known_args(args=args) 122 if unknown: 123 for arg in unknown: 124 if arg.startswith("--"): 125 try: 126 key, value = arg.split("=") 127 key = key.lstrip("-") 128 except ValueError: 129 value = False if arg.startswith("--no-") else True 130 key = ( 131 arg.replace("--no-", "") 132 .lstrip("-") 133 .replace("-", "_") 134 ) 135 setattr(self.args, key, value) 136 137 kwargs = {**self.args.__dict__} 138 host = kwargs.pop("host") 139 port = kwargs.pop("port") 140 secure = kwargs.pop("secure") 141 raw = kwargs.pop("raw") 142 action = kwargs.pop("action") or "info" 143 api_key = kwargs.pop("api_key") 144 positional = kwargs.pop("positional", None) 145 if action == "<custom>" and positional: 146 action = positional[0] 147 if len(positional) > 1: 148 kwargs["args"] = positional[1:] 149 InspectorClient(host, port, secure, raw, api_key).do(action, **kwargs) 150 151 def _precheck(self): 152 # Custom TLS mismatch handling for better diagnostics 153 if self.main_process and ( 154 # one of cert/key missing 155 bool(self.args.cert) != bool(self.args.key) 156 # new and old style self.args used together 157 or self.args.tls 158 and self.args.cert 159 # strict host checking without certs would always fail 160 or self.args.tlshost 161 and not self.args.tls 162 and not self.args.cert 163 ): 164 self.parser.print_usage(sys.stderr) 165 message = ( 166 "TLS certificates must be specified by either of:\n" 167 " --cert certdir/fullchain.pem --key certdir/privkey.pem\n" 168 " --tls certdir (equivalent to the above)" 169 ) 170 error_logger.error(message) 171 sys.exit(1) 172 173 def _get_app(self, app_loader: AppLoader): 174 try: 175 app = app_loader.load() 176 except ImportError as e: 177 if app_loader.module_name.startswith(e.name): # type: ignore 178 error_logger.error( 179 f"No module named {e.name} found.\n" 180 " Example File: project/sanic_server.py -> app\n" 181 " Example Module: project.sanic_server.app" 182 ) 183 sys.exit(1) 184 else: 185 raise e 186 return app 187 188 def _build_run_kwargs(self): 189 for group in self.groups: 190 group.prepare(self.args) 191 ssl: Union[None, dict, str, list] = [] 192 if self.args.tlshost: 193 ssl.append(None) 194 if self.args.cert is not None or self.args.key is not None: 195 ssl.append(dict(cert=self.args.cert, key=self.args.key)) 196 if self.args.tls: 197 ssl += self.args.tls 198 if not ssl: 199 ssl = None 200 elif len(ssl) == 1 and ssl[0] is not None: 201 # Use only one cert, no TLSSelector. 202 ssl = ssl[0] 203 204 kwargs = { 205 "access_log": self.args.access_log, 206 "coffee": self.args.coffee, 207 "debug": self.args.debug, 208 "fast": self.args.fast, 209 "host": self.args.host, 210 "motd": self.args.motd, 211 "noisy_exceptions": self.args.noisy_exceptions, 212 "port": self.args.port, 213 "ssl": ssl, 214 "unix": self.args.unix, 215 "verbosity": self.args.verbosity or 0, 216 "workers": self.args.workers, 217 "auto_tls": self.args.auto_tls, 218 "single_process": self.args.single, 219 } 220 221 for maybe_arg in ("auto_reload", "dev"): 222 if getattr(self.args, maybe_arg, False): 223 kwargs[maybe_arg] = True 224 225 if self.args.path: 226 kwargs["auto_reload"] = True 227 kwargs["reload_dir"] = self.args.path 228 229 return kwargs 230 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sanic/cli/app.py b/sanic/cli/app.py
--- a/sanic/cli/app.py
+++ b/sanic/cli/app.py
@@ -180,6 +180,10 @@
                     "  Example File: project/sanic_server.py -> app\n"
                     "  Example Module: project.sanic_server.app"
                 )
+                error_logger.error(
+                    "\nThe error below might have caused the above one:\n"
+                    f"{e.msg}"
+                )
                 sys.exit(1)
             else:
                 raise e
{"golden_diff": "diff --git a/sanic/cli/app.py b/sanic/cli/app.py\n--- a/sanic/cli/app.py\n+++ b/sanic/cli/app.py\n@@ -180,6 +180,10 @@\n \" Example File: project/sanic_server.py -> app\\n\"\n \" Example Module: project.sanic_server.app\"\n )\n+ error_logger.error(\n+ \"\\nThe error below might have caused the above one:\\n\"\n+ f\"{e.msg}\"\n+ )\n sys.exit(1)\n else:\n raise e\n", "issue": "Circular import in target file accidentally triggers 'No module named ... found'\n### Is there an existing issue for this?\n\n- [X] I have searched the existing issues\n\n### Describe the bug\n\nWhile developing it appears that I accidentally caused a circular import which Python traces back to starting in the file I have my app in. As a result, Python outputs an error such as the following:\r\n```\r\nImportError: cannot import name 'constants' from partially initialized module 'app' (most likely due to a circular import) (/api/app/__init__.py)\r\n```\r\nIn this case my module I pass to the sanic server is `app:app`, from within `/api`.\n\n### Code snippet\n\n_No response_\n\n### Expected Behavior\n\nI had this in the back of my mind the entire time, but found it very difficult to troubleshoot due to Sanic swallowing the error. As a result I ended up the rabbit hole of accidental breaking changes and tried commenting out different changes. An hour later I finally found the right import.\r\n\r\nIt would help if Sanic continued to output the specific import error, on the off-chance that it isn't an incorrectly setup module. The alternative would be to use more fine-grained `importlib` and manually call some functions rather than use their help functions. As a result there should be a different call which finds the file (an `ImportError` here hints at an incorrectly setup module), than the one which loads it (user error).\n\n### How do you run Sanic?\n\nSanic CLI\n\n### Operating System\n\nWindows (Docker, Python:3.11)\n\n### Sanic Version\n\n23.3\n\n### Additional context\n\n_No response_\nCircular import in target file accidentally triggers 'No module named ... found'\n### Is there an existing issue for this?\n\n- [X] I have searched the existing issues\n\n### Describe the bug\n\nWhile developing it appears that I accidentally caused a circular import which Python traces back to starting in the file I have my app in. As a result, Python outputs an error such as the following:\r\n```\r\nImportError: cannot import name 'constants' from partially initialized module 'app' (most likely due to a circular import) (/api/app/__init__.py)\r\n```\r\nIn this case my module I pass to the sanic server is `app:app`, from within `/api`.\n\n### Code snippet\n\n_No response_\n\n### Expected Behavior\n\nI had this in the back of my mind the entire time, but found it very difficult to troubleshoot due to Sanic swallowing the error. As a result I ended up the rabbit hole of accidental breaking changes and tried commenting out different changes. An hour later I finally found the right import.\r\n\r\nIt would help if Sanic continued to output the specific import error, on the off-chance that it isn't an incorrectly setup module. The alternative would be to use more fine-grained `importlib` and manually call some functions rather than use their help functions. 
As a result there should be a different call which finds the file (an `ImportError` here hints at an incorrectly setup module), than the one which loads it (user error).\n\n### How do you run Sanic?\n\nSanic CLI\n\n### Operating System\n\nWindows (Docker, Python:3.11)\n\n### Sanic Version\n\n23.3\n\n### Additional context\n\n_No response_\n", "before_files": [{"content": "import os\nimport shutil\nimport sys\n\nfrom argparse import Namespace\nfrom functools import partial\nfrom textwrap import indent\nfrom typing import List, Union\n\nfrom sanic.app import Sanic\nfrom sanic.application.logo import get_logo\nfrom sanic.cli.arguments import Group\nfrom sanic.cli.base import SanicArgumentParser, SanicHelpFormatter\nfrom sanic.cli.inspector import make_inspector_parser\nfrom sanic.cli.inspector_client import InspectorClient\nfrom sanic.log import error_logger\nfrom sanic.worker.loader import AppLoader\n\n\nclass SanicCLI:\n DESCRIPTION = indent(\n f\"\"\"\n{get_logo(True)}\n\nTo start running a Sanic application, provide a path to the module, where\napp is a Sanic() instance in the global scope:\n\n $ sanic path.to.server:app\n\nIf the Sanic instance variable is called 'app', you can leave off the last\npart, and only provide a path to the module where the instance is:\n\n $ sanic path.to.server\n\nOr, a path to a callable that returns a Sanic() instance:\n\n $ sanic path.to.factory:create_app\n\nOr, a path to a directory to run as a simple HTTP server:\n\n $ sanic ./path/to/static\n\"\"\",\n prefix=\" \",\n )\n\n def __init__(self) -> None:\n width = shutil.get_terminal_size().columns\n self.parser = SanicArgumentParser(\n prog=\"sanic\",\n description=self.DESCRIPTION,\n formatter_class=lambda prog: SanicHelpFormatter(\n prog,\n max_help_position=36 if width > 96 else 24,\n indent_increment=4,\n width=None,\n ),\n )\n self.parser._positionals.title = \"Required\\n========\\n Positional\"\n self.parser._optionals.title = \"Optional\\n========\\n General\"\n self.main_process = (\n os.environ.get(\"SANIC_RELOADER_PROCESS\", \"\") != \"true\"\n )\n self.args: Namespace = Namespace()\n self.groups: List[Group] = []\n self.inspecting = False\n\n def attach(self):\n if len(sys.argv) > 1 and sys.argv[1] == \"inspect\":\n self.inspecting = True\n self.parser.description = get_logo(True)\n make_inspector_parser(self.parser)\n return\n\n for group in Group._registry:\n instance = group.create(self.parser)\n instance.attach()\n self.groups.append(instance)\n\n def run(self, parse_args=None):\n if self.inspecting:\n self._inspector()\n return\n\n legacy_version = False\n if not parse_args:\n # This is to provide backwards compat -v to display version\n legacy_version = len(sys.argv) == 2 and sys.argv[-1] == \"-v\"\n parse_args = [\"--version\"] if legacy_version else None\n elif parse_args == [\"-v\"]:\n parse_args = [\"--version\"]\n\n if not legacy_version:\n parsed, unknown = self.parser.parse_known_args(args=parse_args)\n if unknown and parsed.factory:\n for arg in unknown:\n if arg.startswith(\"--\"):\n self.parser.add_argument(arg.split(\"=\")[0])\n\n self.args = self.parser.parse_args(args=parse_args)\n self._precheck()\n app_loader = AppLoader(\n self.args.target, self.args.factory, self.args.simple, self.args\n )\n\n try:\n app = self._get_app(app_loader)\n kwargs = self._build_run_kwargs()\n except ValueError as e:\n error_logger.exception(f\"Failed to run app: {e}\")\n else:\n for http_version in self.args.http:\n app.prepare(**kwargs, version=http_version)\n if self.args.single:\n serve = 
Sanic.serve_single\n else:\n serve = partial(Sanic.serve, app_loader=app_loader)\n serve(app)\n\n def _inspector(self):\n args = sys.argv[2:]\n self.args, unknown = self.parser.parse_known_args(args=args)\n if unknown:\n for arg in unknown:\n if arg.startswith(\"--\"):\n try:\n key, value = arg.split(\"=\")\n key = key.lstrip(\"-\")\n except ValueError:\n value = False if arg.startswith(\"--no-\") else True\n key = (\n arg.replace(\"--no-\", \"\")\n .lstrip(\"-\")\n .replace(\"-\", \"_\")\n )\n setattr(self.args, key, value)\n\n kwargs = {**self.args.__dict__}\n host = kwargs.pop(\"host\")\n port = kwargs.pop(\"port\")\n secure = kwargs.pop(\"secure\")\n raw = kwargs.pop(\"raw\")\n action = kwargs.pop(\"action\") or \"info\"\n api_key = kwargs.pop(\"api_key\")\n positional = kwargs.pop(\"positional\", None)\n if action == \"<custom>\" and positional:\n action = positional[0]\n if len(positional) > 1:\n kwargs[\"args\"] = positional[1:]\n InspectorClient(host, port, secure, raw, api_key).do(action, **kwargs)\n\n def _precheck(self):\n # Custom TLS mismatch handling for better diagnostics\n if self.main_process and (\n # one of cert/key missing\n bool(self.args.cert) != bool(self.args.key)\n # new and old style self.args used together\n or self.args.tls\n and self.args.cert\n # strict host checking without certs would always fail\n or self.args.tlshost\n and not self.args.tls\n and not self.args.cert\n ):\n self.parser.print_usage(sys.stderr)\n message = (\n \"TLS certificates must be specified by either of:\\n\"\n \" --cert certdir/fullchain.pem --key certdir/privkey.pem\\n\"\n \" --tls certdir (equivalent to the above)\"\n )\n error_logger.error(message)\n sys.exit(1)\n\n def _get_app(self, app_loader: AppLoader):\n try:\n app = app_loader.load()\n except ImportError as e:\n if app_loader.module_name.startswith(e.name): # type: ignore\n error_logger.error(\n f\"No module named {e.name} found.\\n\"\n \" Example File: project/sanic_server.py -> app\\n\"\n \" Example Module: project.sanic_server.app\"\n )\n sys.exit(1)\n else:\n raise e\n return app\n\n def _build_run_kwargs(self):\n for group in self.groups:\n group.prepare(self.args)\n ssl: Union[None, dict, str, list] = []\n if self.args.tlshost:\n ssl.append(None)\n if self.args.cert is not None or self.args.key is not None:\n ssl.append(dict(cert=self.args.cert, key=self.args.key))\n if self.args.tls:\n ssl += self.args.tls\n if not ssl:\n ssl = None\n elif len(ssl) == 1 and ssl[0] is not None:\n # Use only one cert, no TLSSelector.\n ssl = ssl[0]\n\n kwargs = {\n \"access_log\": self.args.access_log,\n \"coffee\": self.args.coffee,\n \"debug\": self.args.debug,\n \"fast\": self.args.fast,\n \"host\": self.args.host,\n \"motd\": self.args.motd,\n \"noisy_exceptions\": self.args.noisy_exceptions,\n \"port\": self.args.port,\n \"ssl\": ssl,\n \"unix\": self.args.unix,\n \"verbosity\": self.args.verbosity or 0,\n \"workers\": self.args.workers,\n \"auto_tls\": self.args.auto_tls,\n \"single_process\": self.args.single,\n }\n\n for maybe_arg in (\"auto_reload\", \"dev\"):\n if getattr(self.args, maybe_arg, False):\n kwargs[maybe_arg] = True\n\n if self.args.path:\n kwargs[\"auto_reload\"] = True\n kwargs[\"reload_dir\"] = self.args.path\n\n return kwargs\n", "path": "sanic/cli/app.py"}], "after_files": [{"content": "import os\nimport shutil\nimport sys\n\nfrom argparse import Namespace\nfrom functools import partial\nfrom textwrap import indent\nfrom typing import List, Union\n\nfrom sanic.app import Sanic\nfrom sanic.application.logo import 
get_logo\nfrom sanic.cli.arguments import Group\nfrom sanic.cli.base import SanicArgumentParser, SanicHelpFormatter\nfrom sanic.cli.inspector import make_inspector_parser\nfrom sanic.cli.inspector_client import InspectorClient\nfrom sanic.log import error_logger\nfrom sanic.worker.loader import AppLoader\n\n\nclass SanicCLI:\n DESCRIPTION = indent(\n f\"\"\"\n{get_logo(True)}\n\nTo start running a Sanic application, provide a path to the module, where\napp is a Sanic() instance in the global scope:\n\n $ sanic path.to.server:app\n\nIf the Sanic instance variable is called 'app', you can leave off the last\npart, and only provide a path to the module where the instance is:\n\n $ sanic path.to.server\n\nOr, a path to a callable that returns a Sanic() instance:\n\n $ sanic path.to.factory:create_app\n\nOr, a path to a directory to run as a simple HTTP server:\n\n $ sanic ./path/to/static\n\"\"\",\n prefix=\" \",\n )\n\n def __init__(self) -> None:\n width = shutil.get_terminal_size().columns\n self.parser = SanicArgumentParser(\n prog=\"sanic\",\n description=self.DESCRIPTION,\n formatter_class=lambda prog: SanicHelpFormatter(\n prog,\n max_help_position=36 if width > 96 else 24,\n indent_increment=4,\n width=None,\n ),\n )\n self.parser._positionals.title = \"Required\\n========\\n Positional\"\n self.parser._optionals.title = \"Optional\\n========\\n General\"\n self.main_process = (\n os.environ.get(\"SANIC_RELOADER_PROCESS\", \"\") != \"true\"\n )\n self.args: Namespace = Namespace()\n self.groups: List[Group] = []\n self.inspecting = False\n\n def attach(self):\n if len(sys.argv) > 1 and sys.argv[1] == \"inspect\":\n self.inspecting = True\n self.parser.description = get_logo(True)\n make_inspector_parser(self.parser)\n return\n\n for group in Group._registry:\n instance = group.create(self.parser)\n instance.attach()\n self.groups.append(instance)\n\n def run(self, parse_args=None):\n if self.inspecting:\n self._inspector()\n return\n\n legacy_version = False\n if not parse_args:\n # This is to provide backwards compat -v to display version\n legacy_version = len(sys.argv) == 2 and sys.argv[-1] == \"-v\"\n parse_args = [\"--version\"] if legacy_version else None\n elif parse_args == [\"-v\"]:\n parse_args = [\"--version\"]\n\n if not legacy_version:\n parsed, unknown = self.parser.parse_known_args(args=parse_args)\n if unknown and parsed.factory:\n for arg in unknown:\n if arg.startswith(\"--\"):\n self.parser.add_argument(arg.split(\"=\")[0])\n\n self.args = self.parser.parse_args(args=parse_args)\n self._precheck()\n app_loader = AppLoader(\n self.args.target, self.args.factory, self.args.simple, self.args\n )\n\n try:\n app = self._get_app(app_loader)\n kwargs = self._build_run_kwargs()\n except ValueError as e:\n error_logger.exception(f\"Failed to run app: {e}\")\n else:\n for http_version in self.args.http:\n app.prepare(**kwargs, version=http_version)\n if self.args.single:\n serve = Sanic.serve_single\n else:\n serve = partial(Sanic.serve, app_loader=app_loader)\n serve(app)\n\n def _inspector(self):\n args = sys.argv[2:]\n self.args, unknown = self.parser.parse_known_args(args=args)\n if unknown:\n for arg in unknown:\n if arg.startswith(\"--\"):\n try:\n key, value = arg.split(\"=\")\n key = key.lstrip(\"-\")\n except ValueError:\n value = False if arg.startswith(\"--no-\") else True\n key = (\n arg.replace(\"--no-\", \"\")\n .lstrip(\"-\")\n .replace(\"-\", \"_\")\n )\n setattr(self.args, key, value)\n\n kwargs = {**self.args.__dict__}\n host = kwargs.pop(\"host\")\n port = 
kwargs.pop(\"port\")\n secure = kwargs.pop(\"secure\")\n raw = kwargs.pop(\"raw\")\n action = kwargs.pop(\"action\") or \"info\"\n api_key = kwargs.pop(\"api_key\")\n positional = kwargs.pop(\"positional\", None)\n if action == \"<custom>\" and positional:\n action = positional[0]\n if len(positional) > 1:\n kwargs[\"args\"] = positional[1:]\n InspectorClient(host, port, secure, raw, api_key).do(action, **kwargs)\n\n def _precheck(self):\n # Custom TLS mismatch handling for better diagnostics\n if self.main_process and (\n # one of cert/key missing\n bool(self.args.cert) != bool(self.args.key)\n # new and old style self.args used together\n or self.args.tls\n and self.args.cert\n # strict host checking without certs would always fail\n or self.args.tlshost\n and not self.args.tls\n and not self.args.cert\n ):\n self.parser.print_usage(sys.stderr)\n message = (\n \"TLS certificates must be specified by either of:\\n\"\n \" --cert certdir/fullchain.pem --key certdir/privkey.pem\\n\"\n \" --tls certdir (equivalent to the above)\"\n )\n error_logger.error(message)\n sys.exit(1)\n\n def _get_app(self, app_loader: AppLoader):\n try:\n app = app_loader.load()\n except ImportError as e:\n if app_loader.module_name.startswith(e.name): # type: ignore\n error_logger.error(\n f\"No module named {e.name} found.\\n\"\n \" Example File: project/sanic_server.py -> app\\n\"\n \" Example Module: project.sanic_server.app\"\n )\n error_logger.error(\n \"\\nThe error below might have caused the above one:\\n\"\n f\"{e.msg}\"\n )\n sys.exit(1)\n else:\n raise e\n return app\n\n def _build_run_kwargs(self):\n for group in self.groups:\n group.prepare(self.args)\n ssl: Union[None, dict, str, list] = []\n if self.args.tlshost:\n ssl.append(None)\n if self.args.cert is not None or self.args.key is not None:\n ssl.append(dict(cert=self.args.cert, key=self.args.key))\n if self.args.tls:\n ssl += self.args.tls\n if not ssl:\n ssl = None\n elif len(ssl) == 1 and ssl[0] is not None:\n # Use only one cert, no TLSSelector.\n ssl = ssl[0]\n\n kwargs = {\n \"access_log\": self.args.access_log,\n \"coffee\": self.args.coffee,\n \"debug\": self.args.debug,\n \"fast\": self.args.fast,\n \"host\": self.args.host,\n \"motd\": self.args.motd,\n \"noisy_exceptions\": self.args.noisy_exceptions,\n \"port\": self.args.port,\n \"ssl\": ssl,\n \"unix\": self.args.unix,\n \"verbosity\": self.args.verbosity or 0,\n \"workers\": self.args.workers,\n \"auto_tls\": self.args.auto_tls,\n \"single_process\": self.args.single,\n }\n\n for maybe_arg in (\"auto_reload\", \"dev\"):\n if getattr(self.args, maybe_arg, False):\n kwargs[maybe_arg] = True\n\n if self.args.path:\n kwargs[\"auto_reload\"] = True\n kwargs[\"reload_dir\"] = self.args.path\n\n return kwargs\n", "path": "sanic/cli/app.py"}]}
3,251
120
gh_patches_debug_25782
rasdani/github-patches
git_diff
archlinux__archinstall-1851
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Incorrect paths used for steps in keyfile creation From issue #1828 opened by @Reflux0301 ### keyfile path https://github.com/archlinux/archinstall/blob/bc4f80441f4c4c9ba084d4cba29cfaedf30b5490/archinstall/lib/luks.py#L190-L191 ❌ <code><i>target_path</i>/etc/cryptsetup-keys.d/_name_/.key</code> ✔️ <code><i>target_path</i>/etc/cryptsetup-keys.d/_name_.key</code> ### mkdir https://github.com/archlinux/archinstall/blob/bc4f80441f4c4c9ba084d4cba29cfaedf30b5490/archinstall/lib/luks.py#L201 ❌ <code><i>target_path</i>/etc/cryptsetup-keys.d/_name_</code> ✔️ <code><i>target_path</i>/etc/cryptsetup-keys.d</code> ### chmod https://github.com/archlinux/archinstall/blob/bc4f80441f4c4c9ba084d4cba29cfaedf30b5490/archinstall/lib/luks.py#L206 ❌ <code><i>target_path</i>/etc/cryptsetup-keys.d/_name_</code> ✔️ <code><i>target_path</i>/etc/cryptsetup-keys.d/_name_.key</code> ### crypttab https://github.com/archlinux/archinstall/blob/bc4f80441f4c4c9ba084d4cba29cfaedf30b5490/archinstall/lib/luks.py#L209 ❌ <code><i>target_path</i>/etc/cryptsetup-keys.d/_name_/.key</code> ✔️ <code>/etc/cryptsetup-keys.d/_name_.key</code> ### References - https://wiki.archlinux.org/title/Dm-crypt/System_configuration#Unlocking_with_a_keyfile - https://wiki.archlinux.org/title/Dm-crypt/Device_encryption#passphrase --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `archinstall/lib/luks.py` Content: ``` 1 from __future__ import annotations 2 3 import shlex 4 import time 5 from dataclasses import dataclass 6 from pathlib import Path 7 from typing import Optional, List 8 9 from . import disk 10 from .general import SysCommand, generate_password, SysCommandWorker 11 from .output import info, debug 12 from .exceptions import SysCallError, DiskError 13 from .storage import storage 14 15 16 @dataclass 17 class Luks2: 18 luks_dev_path: Path 19 mapper_name: Optional[str] = None 20 password: Optional[str] = None 21 key_file: Optional[Path] = None 22 auto_unmount: bool = False 23 24 # will be set internally after unlocking the device 25 _mapper_dev: Optional[Path] = None 26 27 @property 28 def mapper_dev(self) -> Optional[Path]: 29 if self.mapper_name: 30 return Path(f'/dev/mapper/{self.mapper_name}') 31 return None 32 33 def __post_init__(self): 34 if self.luks_dev_path is None: 35 raise ValueError('Partition must have a path set') 36 37 def __enter__(self): 38 self.unlock(self.key_file) 39 40 def __exit__(self, *args: str, **kwargs: str): 41 if self.auto_unmount: 42 self.lock() 43 44 def _default_key_file(self) -> Path: 45 return Path(f'/tmp/{self.luks_dev_path.name}.disk_pw') 46 47 def _password_bytes(self) -> bytes: 48 if not self.password: 49 raise ValueError('Password for luks2 device was not specified') 50 51 if isinstance(self.password, bytes): 52 return self.password 53 else: 54 return bytes(self.password, 'UTF-8') 55 56 def encrypt( 57 self, 58 key_size: int = 512, 59 hash_type: str = 'sha512', 60 iter_time: int = 10000, 61 key_file: Optional[Path] = None 62 ) -> Path: 63 info(f'Luks2 encrypting: {self.luks_dev_path}') 64 65 byte_password = self._password_bytes() 66 67 if not key_file: 68 if self.key_file: 69 key_file = self.key_file 70 else: 71 key_file = self._default_key_file() 72 73 with open(key_file, 'wb') as fh: 74 fh.write(byte_password) 75 76 cryptsetup_args = shlex.join([ 77 '/usr/bin/cryptsetup', 78 '--batch-mode', 79 
'--verbose', 80 '--type', 'luks2', 81 '--pbkdf', 'argon2id', 82 '--hash', hash_type, 83 '--key-size', str(key_size), 84 '--iter-time', str(iter_time), 85 '--key-file', str(key_file), 86 '--use-urandom', 87 'luksFormat', str(self.luks_dev_path), 88 ]) 89 90 # Retry formatting the volume because archinstall can some times be too quick 91 # which generates a "Device /dev/sdX does not exist or access denied." between 92 # setting up partitions and us trying to encrypt it. 93 for retry_attempt in range(storage['DISK_RETRY_ATTEMPTS']): 94 try: 95 SysCommand(cryptsetup_args) 96 break 97 except SysCallError as err: 98 time.sleep(storage['DISK_TIMEOUTS']) 99 100 if retry_attempt != storage['DISK_RETRY_ATTEMPTS'] - 1: 101 continue 102 103 if err.exit_code == 1: 104 info(f'luks2 partition currently in use: {self.luks_dev_path}') 105 info('Attempting to unmount, crypt-close and trying encryption again') 106 107 self.lock() 108 # Then try again to set up the crypt-device 109 SysCommand(cryptsetup_args) 110 else: 111 raise DiskError(f'Could not encrypt volume "{self.luks_dev_path}": {err}') 112 113 return key_file 114 115 def _get_luks_uuid(self) -> str: 116 command = f'/usr/bin/cryptsetup luksUUID {self.luks_dev_path}' 117 118 try: 119 return SysCommand(command).decode().strip() # type: ignore 120 except SysCallError as err: 121 info(f'Unable to get UUID for Luks device: {self.luks_dev_path}') 122 raise err 123 124 def is_unlocked(self) -> bool: 125 return self.mapper_name is not None and Path(f'/dev/mapper/{self.mapper_name}').exists() 126 127 def unlock(self, key_file: Optional[Path] = None): 128 """ 129 Unlocks the luks device, an optional key file location for unlocking can be specified, 130 otherwise a default location for the key file will be used. 131 132 :param key_file: An alternative key file 133 :type key_file: Path 134 """ 135 debug(f'Unlocking luks2 device: {self.luks_dev_path}') 136 137 if not self.mapper_name: 138 raise ValueError('mapper name missing') 139 140 byte_password = self._password_bytes() 141 142 if not key_file: 143 if self.key_file: 144 key_file = self.key_file 145 else: 146 key_file = self._default_key_file() 147 148 with open(key_file, 'wb') as fh: 149 fh.write(byte_password) 150 151 wait_timer = time.time() 152 while Path(self.luks_dev_path).exists() is False and time.time() - wait_timer < 10: 153 time.sleep(0.025) 154 155 SysCommand(f'/usr/bin/cryptsetup open {self.luks_dev_path} {self.mapper_name} --key-file {key_file} --type luks2') 156 157 if not self.mapper_dev or not self.mapper_dev.is_symlink(): 158 raise DiskError(f'Failed to open luks2 device: {self.luks_dev_path}') 159 160 def lock(self): 161 disk.device_handler.umount(self.luks_dev_path) 162 163 # Get crypt-information about the device by doing a reverse lookup starting with the partition path 164 # For instance: /dev/sda 165 disk.device_handler.partprobe(self.luks_dev_path) 166 lsblk_info = disk.get_lsblk_info(self.luks_dev_path) 167 168 # For each child (sub-partition/sub-device) 169 for child in lsblk_info.children: 170 # Unmount the child location 171 for mountpoint in child.mountpoints: 172 debug(f'Unmounting {mountpoint}') 173 disk.device_handler.umount(mountpoint, recursive=True) 174 175 # And close it if possible. 
176 debug(f"Closing crypt device {child.name}") 177 SysCommand(f"cryptsetup close {child.name}") 178 179 self._mapper_dev = None 180 181 def create_keyfile(self, target_path: Path, override: bool = False): 182 """ 183 Routine to create keyfiles, so it can be moved elsewhere 184 """ 185 if self.mapper_name is None: 186 raise ValueError('Mapper name must be provided') 187 188 # Once we store the key as ../xyzloop.key systemd-cryptsetup can 189 # automatically load this key if we name the device to "xyzloop" 190 key_file_path = target_path / 'etc/cryptsetup-keys.d/' / self.mapper_name 191 key_file = key_file_path / '.key' 192 crypttab_path = target_path / 'etc/crypttab' 193 194 if key_file.exists(): 195 if not override: 196 info(f'Key file {key_file} already exists, keeping existing') 197 return 198 else: 199 info(f'Key file {key_file} already exists, overriding') 200 201 key_file_path.mkdir(parents=True, exist_ok=True) 202 203 with open(key_file, "w") as keyfile: 204 keyfile.write(generate_password(length=512)) 205 206 key_file_path.chmod(0o400) 207 208 self._add_key(key_file) 209 self._crypttab(crypttab_path, key_file, options=["luks", "key-slot=1"]) 210 211 def _add_key(self, key_file: Path): 212 info(f'Adding additional key-file {key_file}') 213 214 command = f'/usr/bin/cryptsetup -q -v luksAddKey {self.luks_dev_path} {key_file}' 215 worker = SysCommandWorker(command, environment_vars={'LC_ALL': 'C'}) 216 pw_injected = False 217 218 while worker.is_alive(): 219 if b'Enter any existing passphrase' in worker and pw_injected is False: 220 worker.write(self._password_bytes()) 221 pw_injected = True 222 223 if worker.exit_code != 0: 224 raise DiskError(f'Could not add encryption key {key_file} to {self.luks_dev_path}: {worker.decode()}') 225 226 def _crypttab( 227 self, 228 crypttab_path: Path, 229 key_file: Path, 230 options: List[str] 231 ) -> None: 232 info(f'Adding crypttab entry for key {key_file}') 233 234 with open(crypttab_path, 'a') as crypttab: 235 opt = ','.join(options) 236 uuid = self._get_luks_uuid() 237 row = f"{self.mapper_name} UUID={uuid} {key_file} {opt}\n" 238 crypttab.write(row) 239 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/archinstall/lib/luks.py b/archinstall/lib/luks.py --- a/archinstall/lib/luks.py +++ b/archinstall/lib/luks.py @@ -187,8 +187,8 @@ # Once we store the key as ../xyzloop.key systemd-cryptsetup can # automatically load this key if we name the device to "xyzloop" - key_file_path = target_path / 'etc/cryptsetup-keys.d/' / self.mapper_name - key_file = key_file_path / '.key' + kf_path = Path(f'/etc/cryptsetup-keys.d/{self.mapper_name}.key') + key_file = target_path / kf_path.relative_to(kf_path.root) crypttab_path = target_path / 'etc/crypttab' if key_file.exists(): @@ -198,15 +198,15 @@ else: info(f'Key file {key_file} already exists, overriding') - key_file_path.mkdir(parents=True, exist_ok=True) + key_file.parent.mkdir(parents=True, exist_ok=True) with open(key_file, "w") as keyfile: keyfile.write(generate_password(length=512)) - key_file_path.chmod(0o400) + key_file.chmod(0o400) self._add_key(key_file) - self._crypttab(crypttab_path, key_file, options=["luks", "key-slot=1"]) + self._crypttab(crypttab_path, kf_path, options=["luks", "key-slot=1"]) def _add_key(self, key_file: Path): info(f'Adding additional key-file {key_file}')
{"golden_diff": "diff --git a/archinstall/lib/luks.py b/archinstall/lib/luks.py\n--- a/archinstall/lib/luks.py\n+++ b/archinstall/lib/luks.py\n@@ -187,8 +187,8 @@\n \n \t\t# Once we store the key as ../xyzloop.key systemd-cryptsetup can\n \t\t# automatically load this key if we name the device to \"xyzloop\"\n-\t\tkey_file_path = target_path / 'etc/cryptsetup-keys.d/' / self.mapper_name\n-\t\tkey_file = key_file_path / '.key'\n+\t\tkf_path = Path(f'/etc/cryptsetup-keys.d/{self.mapper_name}.key')\n+\t\tkey_file = target_path / kf_path.relative_to(kf_path.root)\n \t\tcrypttab_path = target_path / 'etc/crypttab'\n \n \t\tif key_file.exists():\n@@ -198,15 +198,15 @@\n \t\t\telse:\n \t\t\t\tinfo(f'Key file {key_file} already exists, overriding')\n \n-\t\tkey_file_path.mkdir(parents=True, exist_ok=True)\n+\t\tkey_file.parent.mkdir(parents=True, exist_ok=True)\n \n \t\twith open(key_file, \"w\") as keyfile:\n \t\t\tkeyfile.write(generate_password(length=512))\n \n-\t\tkey_file_path.chmod(0o400)\n+\t\tkey_file.chmod(0o400)\n \n \t\tself._add_key(key_file)\n-\t\tself._crypttab(crypttab_path, key_file, options=[\"luks\", \"key-slot=1\"])\n+\t\tself._crypttab(crypttab_path, kf_path, options=[\"luks\", \"key-slot=1\"])\n \n \tdef _add_key(self, key_file: Path):\n \t\tinfo(f'Adding additional key-file {key_file}')\n", "issue": "Incorrect paths used for steps in keyfile creation\nFrom issue #1828 opened by @Reflux0301\r\n\r\n### keyfile path\r\n\r\nhttps://github.com/archlinux/archinstall/blob/bc4f80441f4c4c9ba084d4cba29cfaedf30b5490/archinstall/lib/luks.py#L190-L191\r\n\r\n\u274c <code><i>target_path</i>/etc/cryptsetup-keys.d/_name_/.key</code>\r\n\u2714\ufe0f <code><i>target_path</i>/etc/cryptsetup-keys.d/_name_.key</code>\r\n\r\n### mkdir\r\n\r\nhttps://github.com/archlinux/archinstall/blob/bc4f80441f4c4c9ba084d4cba29cfaedf30b5490/archinstall/lib/luks.py#L201\r\n\r\n\u274c <code><i>target_path</i>/etc/cryptsetup-keys.d/_name_</code>\r\n\u2714\ufe0f <code><i>target_path</i>/etc/cryptsetup-keys.d</code>\r\n### chmod\r\n\r\nhttps://github.com/archlinux/archinstall/blob/bc4f80441f4c4c9ba084d4cba29cfaedf30b5490/archinstall/lib/luks.py#L206\r\n\r\n\u274c <code><i>target_path</i>/etc/cryptsetup-keys.d/_name_</code>\r\n\u2714\ufe0f <code><i>target_path</i>/etc/cryptsetup-keys.d/_name_.key</code>\r\n\r\n### crypttab\r\n\r\nhttps://github.com/archlinux/archinstall/blob/bc4f80441f4c4c9ba084d4cba29cfaedf30b5490/archinstall/lib/luks.py#L209\r\n\r\n\u274c <code><i>target_path</i>/etc/cryptsetup-keys.d/_name_/.key</code>\r\n\u2714\ufe0f <code>/etc/cryptsetup-keys.d/_name_.key</code>\r\n\r\n### References\r\n\r\n- https://wiki.archlinux.org/title/Dm-crypt/System_configuration#Unlocking_with_a_keyfile\r\n- https://wiki.archlinux.org/title/Dm-crypt/Device_encryption#passphrase\n", "before_files": [{"content": "from __future__ import annotations\n\nimport shlex\nimport time\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Optional, List\n\nfrom . 
import disk\nfrom .general import SysCommand, generate_password, SysCommandWorker\nfrom .output import info, debug\nfrom .exceptions import SysCallError, DiskError\nfrom .storage import storage\n\n\n@dataclass\nclass Luks2:\n\tluks_dev_path: Path\n\tmapper_name: Optional[str] = None\n\tpassword: Optional[str] = None\n\tkey_file: Optional[Path] = None\n\tauto_unmount: bool = False\n\n\t# will be set internally after unlocking the device\n\t_mapper_dev: Optional[Path] = None\n\n\t@property\n\tdef mapper_dev(self) -> Optional[Path]:\n\t\tif self.mapper_name:\n\t\t\treturn Path(f'/dev/mapper/{self.mapper_name}')\n\t\treturn None\n\n\tdef __post_init__(self):\n\t\tif self.luks_dev_path is None:\n\t\t\traise ValueError('Partition must have a path set')\n\n\tdef __enter__(self):\n\t\tself.unlock(self.key_file)\n\n\tdef __exit__(self, *args: str, **kwargs: str):\n\t\tif self.auto_unmount:\n\t\t\tself.lock()\n\n\tdef _default_key_file(self) -> Path:\n\t\treturn Path(f'/tmp/{self.luks_dev_path.name}.disk_pw')\n\n\tdef _password_bytes(self) -> bytes:\n\t\tif not self.password:\n\t\t\traise ValueError('Password for luks2 device was not specified')\n\n\t\tif isinstance(self.password, bytes):\n\t\t\treturn self.password\n\t\telse:\n\t\t\treturn bytes(self.password, 'UTF-8')\n\n\tdef encrypt(\n\t\tself,\n\t\tkey_size: int = 512,\n\t\thash_type: str = 'sha512',\n\t\titer_time: int = 10000,\n\t\tkey_file: Optional[Path] = None\n\t) -> Path:\n\t\tinfo(f'Luks2 encrypting: {self.luks_dev_path}')\n\n\t\tbyte_password = self._password_bytes()\n\n\t\tif not key_file:\n\t\t\tif self.key_file:\n\t\t\t\tkey_file = self.key_file\n\t\t\telse:\n\t\t\t\tkey_file = self._default_key_file()\n\n\t\t\t\twith open(key_file, 'wb') as fh:\n\t\t\t\t\tfh.write(byte_password)\n\n\t\tcryptsetup_args = shlex.join([\n\t\t\t'/usr/bin/cryptsetup',\n\t\t\t'--batch-mode',\n\t\t\t'--verbose',\n\t\t\t'--type', 'luks2',\n\t\t\t'--pbkdf', 'argon2id',\n\t\t\t'--hash', hash_type,\n\t\t\t'--key-size', str(key_size),\n\t\t\t'--iter-time', str(iter_time),\n\t\t\t'--key-file', str(key_file),\n\t\t\t'--use-urandom',\n\t\t\t'luksFormat', str(self.luks_dev_path),\n\t\t])\n\n\t\t# Retry formatting the volume because archinstall can some times be too quick\n\t\t# which generates a \"Device /dev/sdX does not exist or access denied.\" between\n\t\t# setting up partitions and us trying to encrypt it.\n\t\tfor retry_attempt in range(storage['DISK_RETRY_ATTEMPTS']):\n\t\t\ttry:\n\t\t\t\tSysCommand(cryptsetup_args)\n\t\t\t\tbreak\n\t\t\texcept SysCallError as err:\n\t\t\t\ttime.sleep(storage['DISK_TIMEOUTS'])\n\n\t\t\t\tif retry_attempt != storage['DISK_RETRY_ATTEMPTS'] - 1:\n\t\t\t\t\tcontinue\n\n\t\t\t\tif err.exit_code == 1:\n\t\t\t\t\tinfo(f'luks2 partition currently in use: {self.luks_dev_path}')\n\t\t\t\t\tinfo('Attempting to unmount, crypt-close and trying encryption again')\n\n\t\t\t\t\tself.lock()\n\t\t\t\t\t# Then try again to set up the crypt-device\n\t\t\t\t\tSysCommand(cryptsetup_args)\n\t\t\t\telse:\n\t\t\t\t\traise DiskError(f'Could not encrypt volume \"{self.luks_dev_path}\": {err}')\n\n\t\treturn key_file\n\n\tdef _get_luks_uuid(self) -> str:\n\t\tcommand = f'/usr/bin/cryptsetup luksUUID {self.luks_dev_path}'\n\n\t\ttry:\n\t\t\treturn SysCommand(command).decode().strip() # type: ignore\n\t\texcept SysCallError as err:\n\t\t\tinfo(f'Unable to get UUID for Luks device: {self.luks_dev_path}')\n\t\t\traise err\n\n\tdef is_unlocked(self) -> bool:\n\t\treturn self.mapper_name is not None and 
Path(f'/dev/mapper/{self.mapper_name}').exists()\n\n\tdef unlock(self, key_file: Optional[Path] = None):\n\t\t\"\"\"\n\t\tUnlocks the luks device, an optional key file location for unlocking can be specified,\n\t\totherwise a default location for the key file will be used.\n\n\t\t:param key_file: An alternative key file\n\t\t:type key_file: Path\n\t\t\"\"\"\n\t\tdebug(f'Unlocking luks2 device: {self.luks_dev_path}')\n\n\t\tif not self.mapper_name:\n\t\t\traise ValueError('mapper name missing')\n\n\t\tbyte_password = self._password_bytes()\n\n\t\tif not key_file:\n\t\t\tif self.key_file:\n\t\t\t\tkey_file = self.key_file\n\t\t\telse:\n\t\t\t\tkey_file = self._default_key_file()\n\n\t\t\t\twith open(key_file, 'wb') as fh:\n\t\t\t\t\tfh.write(byte_password)\n\n\t\twait_timer = time.time()\n\t\twhile Path(self.luks_dev_path).exists() is False and time.time() - wait_timer < 10:\n\t\t\ttime.sleep(0.025)\n\n\t\tSysCommand(f'/usr/bin/cryptsetup open {self.luks_dev_path} {self.mapper_name} --key-file {key_file} --type luks2')\n\n\t\tif not self.mapper_dev or not self.mapper_dev.is_symlink():\n\t\t\traise DiskError(f'Failed to open luks2 device: {self.luks_dev_path}')\n\n\tdef lock(self):\n\t\tdisk.device_handler.umount(self.luks_dev_path)\n\n\t\t# Get crypt-information about the device by doing a reverse lookup starting with the partition path\n\t\t# For instance: /dev/sda\n\t\tdisk.device_handler.partprobe(self.luks_dev_path)\n\t\tlsblk_info = disk.get_lsblk_info(self.luks_dev_path)\n\n\t\t# For each child (sub-partition/sub-device)\n\t\tfor child in lsblk_info.children:\n\t\t\t# Unmount the child location\n\t\t\tfor mountpoint in child.mountpoints:\n\t\t\t\tdebug(f'Unmounting {mountpoint}')\n\t\t\t\tdisk.device_handler.umount(mountpoint, recursive=True)\n\n\t\t\t# And close it if possible.\n\t\t\tdebug(f\"Closing crypt device {child.name}\")\n\t\t\tSysCommand(f\"cryptsetup close {child.name}\")\n\n\t\tself._mapper_dev = None\n\n\tdef create_keyfile(self, target_path: Path, override: bool = False):\n\t\t\"\"\"\n\t\tRoutine to create keyfiles, so it can be moved elsewhere\n\t\t\"\"\"\n\t\tif self.mapper_name is None:\n\t\t\traise ValueError('Mapper name must be provided')\n\n\t\t# Once we store the key as ../xyzloop.key systemd-cryptsetup can\n\t\t# automatically load this key if we name the device to \"xyzloop\"\n\t\tkey_file_path = target_path / 'etc/cryptsetup-keys.d/' / self.mapper_name\n\t\tkey_file = key_file_path / '.key'\n\t\tcrypttab_path = target_path / 'etc/crypttab'\n\n\t\tif key_file.exists():\n\t\t\tif not override:\n\t\t\t\tinfo(f'Key file {key_file} already exists, keeping existing')\n\t\t\t\treturn\n\t\t\telse:\n\t\t\t\tinfo(f'Key file {key_file} already exists, overriding')\n\n\t\tkey_file_path.mkdir(parents=True, exist_ok=True)\n\n\t\twith open(key_file, \"w\") as keyfile:\n\t\t\tkeyfile.write(generate_password(length=512))\n\n\t\tkey_file_path.chmod(0o400)\n\n\t\tself._add_key(key_file)\n\t\tself._crypttab(crypttab_path, key_file, options=[\"luks\", \"key-slot=1\"])\n\n\tdef _add_key(self, key_file: Path):\n\t\tinfo(f'Adding additional key-file {key_file}')\n\n\t\tcommand = f'/usr/bin/cryptsetup -q -v luksAddKey {self.luks_dev_path} {key_file}'\n\t\tworker = SysCommandWorker(command, environment_vars={'LC_ALL': 'C'})\n\t\tpw_injected = False\n\n\t\twhile worker.is_alive():\n\t\t\tif b'Enter any existing passphrase' in worker and pw_injected is False:\n\t\t\t\tworker.write(self._password_bytes())\n\t\t\t\tpw_injected = True\n\n\t\tif worker.exit_code != 0:\n\t\t\traise 
DiskError(f'Could not add encryption key {key_file} to {self.luks_dev_path}: {worker.decode()}')\n\n\tdef _crypttab(\n\t\tself,\n\t\tcrypttab_path: Path,\n\t\tkey_file: Path,\n\t\toptions: List[str]\n\t) -> None:\n\t\tinfo(f'Adding crypttab entry for key {key_file}')\n\n\t\twith open(crypttab_path, 'a') as crypttab:\n\t\t\topt = ','.join(options)\n\t\t\tuuid = self._get_luks_uuid()\n\t\t\trow = f\"{self.mapper_name} UUID={uuid} {key_file} {opt}\\n\"\n\t\t\tcrypttab.write(row)\n", "path": "archinstall/lib/luks.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport shlex\nimport time\nfrom dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Optional, List\n\nfrom . import disk\nfrom .general import SysCommand, generate_password, SysCommandWorker\nfrom .output import info, debug\nfrom .exceptions import SysCallError, DiskError\nfrom .storage import storage\n\n\n@dataclass\nclass Luks2:\n\tluks_dev_path: Path\n\tmapper_name: Optional[str] = None\n\tpassword: Optional[str] = None\n\tkey_file: Optional[Path] = None\n\tauto_unmount: bool = False\n\n\t# will be set internally after unlocking the device\n\t_mapper_dev: Optional[Path] = None\n\n\t@property\n\tdef mapper_dev(self) -> Optional[Path]:\n\t\tif self.mapper_name:\n\t\t\treturn Path(f'/dev/mapper/{self.mapper_name}')\n\t\treturn None\n\n\tdef __post_init__(self):\n\t\tif self.luks_dev_path is None:\n\t\t\traise ValueError('Partition must have a path set')\n\n\tdef __enter__(self):\n\t\tself.unlock(self.key_file)\n\n\tdef __exit__(self, *args: str, **kwargs: str):\n\t\tif self.auto_unmount:\n\t\t\tself.lock()\n\n\tdef _default_key_file(self) -> Path:\n\t\treturn Path(f'/tmp/{self.luks_dev_path.name}.disk_pw')\n\n\tdef _password_bytes(self) -> bytes:\n\t\tif not self.password:\n\t\t\traise ValueError('Password for luks2 device was not specified')\n\n\t\tif isinstance(self.password, bytes):\n\t\t\treturn self.password\n\t\telse:\n\t\t\treturn bytes(self.password, 'UTF-8')\n\n\tdef encrypt(\n\t\tself,\n\t\tkey_size: int = 512,\n\t\thash_type: str = 'sha512',\n\t\titer_time: int = 10000,\n\t\tkey_file: Optional[Path] = None\n\t) -> Path:\n\t\tinfo(f'Luks2 encrypting: {self.luks_dev_path}')\n\n\t\tbyte_password = self._password_bytes()\n\n\t\tif not key_file:\n\t\t\tif self.key_file:\n\t\t\t\tkey_file = self.key_file\n\t\t\telse:\n\t\t\t\tkey_file = self._default_key_file()\n\n\t\t\t\twith open(key_file, 'wb') as fh:\n\t\t\t\t\tfh.write(byte_password)\n\n\t\tcryptsetup_args = shlex.join([\n\t\t\t'/usr/bin/cryptsetup',\n\t\t\t'--batch-mode',\n\t\t\t'--verbose',\n\t\t\t'--type', 'luks2',\n\t\t\t'--pbkdf', 'argon2id',\n\t\t\t'--hash', hash_type,\n\t\t\t'--key-size', str(key_size),\n\t\t\t'--iter-time', str(iter_time),\n\t\t\t'--key-file', str(key_file),\n\t\t\t'--use-urandom',\n\t\t\t'luksFormat', str(self.luks_dev_path),\n\t\t])\n\n\t\t# Retry formatting the volume because archinstall can some times be too quick\n\t\t# which generates a \"Device /dev/sdX does not exist or access denied.\" between\n\t\t# setting up partitions and us trying to encrypt it.\n\t\tfor retry_attempt in range(storage['DISK_RETRY_ATTEMPTS']):\n\t\t\ttry:\n\t\t\t\tSysCommand(cryptsetup_args)\n\t\t\t\tbreak\n\t\t\texcept SysCallError as err:\n\t\t\t\ttime.sleep(storage['DISK_TIMEOUTS'])\n\n\t\t\t\tif retry_attempt != storage['DISK_RETRY_ATTEMPTS'] - 1:\n\t\t\t\t\tcontinue\n\n\t\t\t\tif err.exit_code == 1:\n\t\t\t\t\tinfo(f'luks2 partition currently in use: {self.luks_dev_path}')\n\t\t\t\t\tinfo('Attempting to unmount, 
crypt-close and trying encryption again')\n\n\t\t\t\t\tself.lock()\n\t\t\t\t\t# Then try again to set up the crypt-device\n\t\t\t\t\tSysCommand(cryptsetup_args)\n\t\t\t\telse:\n\t\t\t\t\traise DiskError(f'Could not encrypt volume \"{self.luks_dev_path}\": {err}')\n\n\t\treturn key_file\n\n\tdef _get_luks_uuid(self) -> str:\n\t\tcommand = f'/usr/bin/cryptsetup luksUUID {self.luks_dev_path}'\n\n\t\ttry:\n\t\t\treturn SysCommand(command).decode().strip() # type: ignore\n\t\texcept SysCallError as err:\n\t\t\tinfo(f'Unable to get UUID for Luks device: {self.luks_dev_path}')\n\t\t\traise err\n\n\tdef is_unlocked(self) -> bool:\n\t\treturn self.mapper_name is not None and Path(f'/dev/mapper/{self.mapper_name}').exists()\n\n\tdef unlock(self, key_file: Optional[Path] = None):\n\t\t\"\"\"\n\t\tUnlocks the luks device, an optional key file location for unlocking can be specified,\n\t\totherwise a default location for the key file will be used.\n\n\t\t:param key_file: An alternative key file\n\t\t:type key_file: Path\n\t\t\"\"\"\n\t\tdebug(f'Unlocking luks2 device: {self.luks_dev_path}')\n\n\t\tif not self.mapper_name:\n\t\t\traise ValueError('mapper name missing')\n\n\t\tbyte_password = self._password_bytes()\n\n\t\tif not key_file:\n\t\t\tif self.key_file:\n\t\t\t\tkey_file = self.key_file\n\t\t\telse:\n\t\t\t\tkey_file = self._default_key_file()\n\n\t\t\t\twith open(key_file, 'wb') as fh:\n\t\t\t\t\tfh.write(byte_password)\n\n\t\twait_timer = time.time()\n\t\twhile Path(self.luks_dev_path).exists() is False and time.time() - wait_timer < 10:\n\t\t\ttime.sleep(0.025)\n\n\t\tSysCommand(f'/usr/bin/cryptsetup open {self.luks_dev_path} {self.mapper_name} --key-file {key_file} --type luks2')\n\n\t\tif not self.mapper_dev or not self.mapper_dev.is_symlink():\n\t\t\traise DiskError(f'Failed to open luks2 device: {self.luks_dev_path}')\n\n\tdef lock(self):\n\t\tdisk.device_handler.umount(self.luks_dev_path)\n\n\t\t# Get crypt-information about the device by doing a reverse lookup starting with the partition path\n\t\t# For instance: /dev/sda\n\t\tdisk.device_handler.partprobe(self.luks_dev_path)\n\t\tlsblk_info = disk.get_lsblk_info(self.luks_dev_path)\n\n\t\t# For each child (sub-partition/sub-device)\n\t\tfor child in lsblk_info.children:\n\t\t\t# Unmount the child location\n\t\t\tfor mountpoint in child.mountpoints:\n\t\t\t\tdebug(f'Unmounting {mountpoint}')\n\t\t\t\tdisk.device_handler.umount(mountpoint, recursive=True)\n\n\t\t\t# And close it if possible.\n\t\t\tdebug(f\"Closing crypt device {child.name}\")\n\t\t\tSysCommand(f\"cryptsetup close {child.name}\")\n\n\t\tself._mapper_dev = None\n\n\tdef create_keyfile(self, target_path: Path, override: bool = False):\n\t\t\"\"\"\n\t\tRoutine to create keyfiles, so it can be moved elsewhere\n\t\t\"\"\"\n\t\tif self.mapper_name is None:\n\t\t\traise ValueError('Mapper name must be provided')\n\n\t\t# Once we store the key as ../xyzloop.key systemd-cryptsetup can\n\t\t# automatically load this key if we name the device to \"xyzloop\"\n\t\tkf_path = Path(f'/etc/cryptsetup-keys.d/{self.mapper_name}.key')\n\t\tkey_file = target_path / kf_path.relative_to(kf_path.root)\n\t\tcrypttab_path = target_path / 'etc/crypttab'\n\n\t\tif key_file.exists():\n\t\t\tif not override:\n\t\t\t\tinfo(f'Key file {key_file} already exists, keeping existing')\n\t\t\t\treturn\n\t\t\telse:\n\t\t\t\tinfo(f'Key file {key_file} already exists, overriding')\n\n\t\tkey_file.parent.mkdir(parents=True, exist_ok=True)\n\n\t\twith open(key_file, \"w\") as 
keyfile:\n\t\t\tkeyfile.write(generate_password(length=512))\n\n\t\tkey_file.chmod(0o400)\n\n\t\tself._add_key(key_file)\n\t\tself._crypttab(crypttab_path, kf_path, options=[\"luks\", \"key-slot=1\"])\n\n\tdef _add_key(self, key_file: Path):\n\t\tinfo(f'Adding additional key-file {key_file}')\n\n\t\tcommand = f'/usr/bin/cryptsetup -q -v luksAddKey {self.luks_dev_path} {key_file}'\n\t\tworker = SysCommandWorker(command, environment_vars={'LC_ALL': 'C'})\n\t\tpw_injected = False\n\n\t\twhile worker.is_alive():\n\t\t\tif b'Enter any existing passphrase' in worker and pw_injected is False:\n\t\t\t\tworker.write(self._password_bytes())\n\t\t\t\tpw_injected = True\n\n\t\tif worker.exit_code != 0:\n\t\t\traise DiskError(f'Could not add encryption key {key_file} to {self.luks_dev_path}: {worker.decode()}')\n\n\tdef _crypttab(\n\t\tself,\n\t\tcrypttab_path: Path,\n\t\tkey_file: Path,\n\t\toptions: List[str]\n\t) -> None:\n\t\tinfo(f'Adding crypttab entry for key {key_file}')\n\n\t\twith open(crypttab_path, 'a') as crypttab:\n\t\t\topt = ','.join(options)\n\t\t\tuuid = self._get_luks_uuid()\n\t\t\trow = f\"{self.mapper_name} UUID={uuid} {key_file} {opt}\\n\"\n\t\t\tcrypttab.write(row)\n", "path": "archinstall/lib/luks.py"}]}
3,383
388
gh_patches_debug_34582
rasdani/github-patches
git_diff
netbox-community__netbox-14903
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- User Dashboard can become permanently broken when misconfiguring DEFAULT_DASHBOARD ### Deployment Type Self-hosted ### NetBox Version v3.7.0 ### Python Version 3.9 ### Steps to Reproduce 1. User johndoe modifies their dashboard over the GUI 2. Administrator changes DEFAULT_DASHBOARD in configuration.py with a broken configuration (Assigned a wrong value in a bookmarks widget) 3. Bookmark widget looks fine without logging in since it won't load any data 4. After johndoe logs in, they reset their dashboard 5. Site crashes with an Field Error Exception since the dashboard is misconfigured 6. johndoe can't reset their dashboard anymore, even after the error in DEFAULT_DASHBOARD is corrected ### Expected Behavior When resetting the dashboard, I would expect it to now always follow the changes of DEFAULT_DASHBOARD. ### Observed Behavior Resetting the dashboard seems to copy the current state of DEFAULT_DASHBOARD. If that state is broken, a user cannot reset it anymore. Only the main page with the dashboard crashes. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `netbox/extras/dashboard/utils.py` Content: ``` 1 import uuid 2 3 from django.conf import settings 4 from django.core.exceptions import ObjectDoesNotExist 5 6 from netbox.registry import registry 7 from extras.constants import DEFAULT_DASHBOARD 8 9 __all__ = ( 10 'get_dashboard', 11 'get_default_dashboard', 12 'get_widget_class', 13 'register_widget', 14 ) 15 16 17 def register_widget(cls): 18 """ 19 Decorator for registering a DashboardWidget class. 20 """ 21 app_label = cls.__module__.split('.', maxsplit=1)[0] 22 label = f'{app_label}.{cls.__name__}' 23 registry['widgets'][label] = cls 24 25 return cls 26 27 28 def get_widget_class(name): 29 """ 30 Return a registered DashboardWidget class identified by its name. 31 """ 32 try: 33 return registry['widgets'][name] 34 except KeyError: 35 raise ValueError(f"Unregistered widget class: {name}") 36 37 38 def get_dashboard(user): 39 """ 40 Return the Dashboard for a given User if one exists, or generate a default dashboard. 
41 """ 42 if user.is_anonymous: 43 dashboard = get_default_dashboard() 44 else: 45 try: 46 dashboard = user.dashboard 47 except ObjectDoesNotExist: 48 # Create a dashboard for this user 49 dashboard = get_default_dashboard() 50 dashboard.user = user 51 dashboard.save() 52 53 return dashboard 54 55 56 def get_default_dashboard(): 57 from extras.models import Dashboard 58 59 dashboard = Dashboard() 60 default_config = settings.DEFAULT_DASHBOARD or DEFAULT_DASHBOARD 61 62 for widget in default_config: 63 id = str(uuid.uuid4()) 64 dashboard.layout.append({ 65 'id': id, 66 'w': widget['width'], 67 'h': widget['height'], 68 'x': widget.get('x'), 69 'y': widget.get('y'), 70 }) 71 dashboard.config[id] = { 72 'class': widget['widget'], 73 'title': widget.get('title'), 74 'color': widget.get('color'), 75 'config': widget.get('config', {}), 76 } 77 78 return dashboard 79 ``` Path: `netbox/netbox/views/misc.py` Content: ``` 1 import re 2 from collections import namedtuple 3 4 from django.conf import settings 5 from django.contrib.contenttypes.models import ContentType 6 from django.core.cache import cache 7 from django.shortcuts import redirect, render 8 from django.views.generic import View 9 from django_tables2 import RequestConfig 10 from packaging import version 11 12 from extras.dashboard.utils import get_dashboard 13 from netbox.forms import SearchForm 14 from netbox.search import LookupTypes 15 from netbox.search.backends import search_backend 16 from netbox.tables import SearchTable 17 from utilities.htmx import is_htmx 18 from utilities.paginator import EnhancedPaginator, get_paginate_count 19 20 __all__ = ( 21 'HomeView', 22 'SearchView', 23 ) 24 25 Link = namedtuple('Link', ('label', 'viewname', 'permission', 'count')) 26 27 28 class HomeView(View): 29 template_name = 'home.html' 30 31 def get(self, request): 32 if settings.LOGIN_REQUIRED and not request.user.is_authenticated: 33 return redirect('login') 34 35 # Construct the user's custom dashboard layout 36 dashboard = get_dashboard(request.user).get_layout() 37 38 # Check whether a new release is available. (Only for staff/superusers.) 
39 new_release = None 40 if request.user.is_staff or request.user.is_superuser: 41 latest_release = cache.get('latest_release') 42 if latest_release: 43 release_version, release_url = latest_release 44 if release_version > version.parse(settings.VERSION): 45 new_release = { 46 'version': str(release_version), 47 'url': release_url, 48 } 49 50 return render(request, self.template_name, { 51 'dashboard': dashboard, 52 'new_release': new_release, 53 }) 54 55 56 class SearchView(View): 57 58 def get(self, request): 59 results = [] 60 highlight = None 61 62 # Initialize search form 63 form = SearchForm(request.GET) if 'q' in request.GET else SearchForm() 64 65 if form.is_valid(): 66 67 # Restrict results by object type 68 object_types = [] 69 for obj_type in form.cleaned_data['obj_types']: 70 app_label, model_name = obj_type.split('.') 71 object_types.append(ContentType.objects.get_by_natural_key(app_label, model_name)) 72 73 lookup = form.cleaned_data['lookup'] or LookupTypes.PARTIAL 74 results = search_backend.search( 75 form.cleaned_data['q'], 76 user=request.user, 77 object_types=object_types, 78 lookup=lookup 79 ) 80 81 # If performing a regex search, pass the highlight value as a compiled pattern 82 if form.cleaned_data['lookup'] == LookupTypes.REGEX: 83 try: 84 highlight = re.compile(f"({form.cleaned_data['q']})", flags=re.IGNORECASE) 85 except re.error: 86 pass 87 elif form.cleaned_data['lookup'] != LookupTypes.EXACT: 88 highlight = form.cleaned_data['q'] 89 90 table = SearchTable(results, highlight=highlight) 91 92 # Paginate the table results 93 RequestConfig(request, { 94 'paginator_class': EnhancedPaginator, 95 'per_page': get_paginate_count(request) 96 }).configure(table) 97 98 # If this is an HTMX request, return only the rendered table HTML 99 if is_htmx(request): 100 return render(request, 'htmx/table.html', { 101 'table': table, 102 }) 103 104 return render(request, 'search.html', { 105 'form': form, 106 'table': table, 107 }) 108 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/netbox/extras/dashboard/utils.py b/netbox/extras/dashboard/utils.py --- a/netbox/extras/dashboard/utils.py +++ b/netbox/extras/dashboard/utils.py @@ -53,13 +53,13 @@ return dashboard -def get_default_dashboard(): +def get_default_dashboard(config=None): from extras.models import Dashboard dashboard = Dashboard() - default_config = settings.DEFAULT_DASHBOARD or DEFAULT_DASHBOARD + config = config or settings.DEFAULT_DASHBOARD or DEFAULT_DASHBOARD - for widget in default_config: + for widget in config: id = str(uuid.uuid4()) dashboard.layout.append({ 'id': id, diff --git a/netbox/netbox/views/misc.py b/netbox/netbox/views/misc.py --- a/netbox/netbox/views/misc.py +++ b/netbox/netbox/views/misc.py @@ -2,14 +2,17 @@ from collections import namedtuple from django.conf import settings +from django.contrib import messages from django.contrib.contenttypes.models import ContentType from django.core.cache import cache from django.shortcuts import redirect, render +from django.utils.translation import gettext_lazy as _ from django.views.generic import View from django_tables2 import RequestConfig from packaging import version -from extras.dashboard.utils import get_dashboard +from extras.constants import DEFAULT_DASHBOARD +from extras.dashboard.utils import get_dashboard, get_default_dashboard from netbox.forms import SearchForm from netbox.search import LookupTypes from netbox.search.backends import search_backend @@ -33,7 +36,13 @@ return redirect('login') # Construct the user's custom dashboard layout - dashboard = get_dashboard(request.user).get_layout() + try: + dashboard = get_dashboard(request.user).get_layout() + except Exception: + messages.error(request, _( + "There was an error loading the dashboard configuration. A default dashboard is in use." + )) + dashboard = get_default_dashboard(config=DEFAULT_DASHBOARD).get_layout() # Check whether a new release is available. (Only for staff/superusers.) new_release = None
{"golden_diff": "diff --git a/netbox/extras/dashboard/utils.py b/netbox/extras/dashboard/utils.py\n--- a/netbox/extras/dashboard/utils.py\n+++ b/netbox/extras/dashboard/utils.py\n@@ -53,13 +53,13 @@\n return dashboard\n \n \n-def get_default_dashboard():\n+def get_default_dashboard(config=None):\n from extras.models import Dashboard\n \n dashboard = Dashboard()\n- default_config = settings.DEFAULT_DASHBOARD or DEFAULT_DASHBOARD\n+ config = config or settings.DEFAULT_DASHBOARD or DEFAULT_DASHBOARD\n \n- for widget in default_config:\n+ for widget in config:\n id = str(uuid.uuid4())\n dashboard.layout.append({\n 'id': id,\ndiff --git a/netbox/netbox/views/misc.py b/netbox/netbox/views/misc.py\n--- a/netbox/netbox/views/misc.py\n+++ b/netbox/netbox/views/misc.py\n@@ -2,14 +2,17 @@\n from collections import namedtuple\n \n from django.conf import settings\n+from django.contrib import messages\n from django.contrib.contenttypes.models import ContentType\n from django.core.cache import cache\n from django.shortcuts import redirect, render\n+from django.utils.translation import gettext_lazy as _\n from django.views.generic import View\n from django_tables2 import RequestConfig\n from packaging import version\n \n-from extras.dashboard.utils import get_dashboard\n+from extras.constants import DEFAULT_DASHBOARD\n+from extras.dashboard.utils import get_dashboard, get_default_dashboard\n from netbox.forms import SearchForm\n from netbox.search import LookupTypes\n from netbox.search.backends import search_backend\n@@ -33,7 +36,13 @@\n return redirect('login')\n \n # Construct the user's custom dashboard layout\n- dashboard = get_dashboard(request.user).get_layout()\n+ try:\n+ dashboard = get_dashboard(request.user).get_layout()\n+ except Exception:\n+ messages.error(request, _(\n+ \"There was an error loading the dashboard configuration. A default dashboard is in use.\"\n+ ))\n+ dashboard = get_default_dashboard(config=DEFAULT_DASHBOARD).get_layout()\n \n # Check whether a new release is available. (Only for staff/superusers.)\n new_release = None\n", "issue": "User Dashboard can become permanently broken when misconfiguring DEFAULT_DASHBOARD\n### Deployment Type\n\nSelf-hosted\n\n### NetBox Version\n\nv3.7.0\n\n### Python Version\n\n3.9\n\n### Steps to Reproduce\n\n1. User johndoe modifies their dashboard over the GUI\r\n2. Administrator changes DEFAULT_DASHBOARD in configuration.py with a broken configuration (Assigned a wrong value in a bookmarks widget)\r\n3. Bookmark widget looks fine without logging in since it won't load any data\r\n4. After johndoe logs in, they reset their dashboard\r\n5. Site crashes with an Field Error Exception since the dashboard is misconfigured\r\n6. johndoe can't reset their dashboard anymore, even after the error in DEFAULT_DASHBOARD is corrected\n\n### Expected Behavior\n\nWhen resetting the dashboard, I would expect it to now always follow the changes of DEFAULT_DASHBOARD.\n\n### Observed Behavior\n\nResetting the dashboard seems to copy the current state of DEFAULT_DASHBOARD. 
If that state is broken, a user cannot reset it anymore.\r\nOnly the main page with the dashboard crashes.\n", "before_files": [{"content": "import uuid\n\nfrom django.conf import settings\nfrom django.core.exceptions import ObjectDoesNotExist\n\nfrom netbox.registry import registry\nfrom extras.constants import DEFAULT_DASHBOARD\n\n__all__ = (\n 'get_dashboard',\n 'get_default_dashboard',\n 'get_widget_class',\n 'register_widget',\n)\n\n\ndef register_widget(cls):\n \"\"\"\n Decorator for registering a DashboardWidget class.\n \"\"\"\n app_label = cls.__module__.split('.', maxsplit=1)[0]\n label = f'{app_label}.{cls.__name__}'\n registry['widgets'][label] = cls\n\n return cls\n\n\ndef get_widget_class(name):\n \"\"\"\n Return a registered DashboardWidget class identified by its name.\n \"\"\"\n try:\n return registry['widgets'][name]\n except KeyError:\n raise ValueError(f\"Unregistered widget class: {name}\")\n\n\ndef get_dashboard(user):\n \"\"\"\n Return the Dashboard for a given User if one exists, or generate a default dashboard.\n \"\"\"\n if user.is_anonymous:\n dashboard = get_default_dashboard()\n else:\n try:\n dashboard = user.dashboard\n except ObjectDoesNotExist:\n # Create a dashboard for this user\n dashboard = get_default_dashboard()\n dashboard.user = user\n dashboard.save()\n\n return dashboard\n\n\ndef get_default_dashboard():\n from extras.models import Dashboard\n\n dashboard = Dashboard()\n default_config = settings.DEFAULT_DASHBOARD or DEFAULT_DASHBOARD\n\n for widget in default_config:\n id = str(uuid.uuid4())\n dashboard.layout.append({\n 'id': id,\n 'w': widget['width'],\n 'h': widget['height'],\n 'x': widget.get('x'),\n 'y': widget.get('y'),\n })\n dashboard.config[id] = {\n 'class': widget['widget'],\n 'title': widget.get('title'),\n 'color': widget.get('color'),\n 'config': widget.get('config', {}),\n }\n\n return dashboard\n", "path": "netbox/extras/dashboard/utils.py"}, {"content": "import re\nfrom collections import namedtuple\n\nfrom django.conf import settings\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.cache import cache\nfrom django.shortcuts import redirect, render\nfrom django.views.generic import View\nfrom django_tables2 import RequestConfig\nfrom packaging import version\n\nfrom extras.dashboard.utils import get_dashboard\nfrom netbox.forms import SearchForm\nfrom netbox.search import LookupTypes\nfrom netbox.search.backends import search_backend\nfrom netbox.tables import SearchTable\nfrom utilities.htmx import is_htmx\nfrom utilities.paginator import EnhancedPaginator, get_paginate_count\n\n__all__ = (\n 'HomeView',\n 'SearchView',\n)\n\nLink = namedtuple('Link', ('label', 'viewname', 'permission', 'count'))\n\n\nclass HomeView(View):\n template_name = 'home.html'\n\n def get(self, request):\n if settings.LOGIN_REQUIRED and not request.user.is_authenticated:\n return redirect('login')\n\n # Construct the user's custom dashboard layout\n dashboard = get_dashboard(request.user).get_layout()\n\n # Check whether a new release is available. 
(Only for staff/superusers.)\n new_release = None\n if request.user.is_staff or request.user.is_superuser:\n latest_release = cache.get('latest_release')\n if latest_release:\n release_version, release_url = latest_release\n if release_version > version.parse(settings.VERSION):\n new_release = {\n 'version': str(release_version),\n 'url': release_url,\n }\n\n return render(request, self.template_name, {\n 'dashboard': dashboard,\n 'new_release': new_release,\n })\n\n\nclass SearchView(View):\n\n def get(self, request):\n results = []\n highlight = None\n\n # Initialize search form\n form = SearchForm(request.GET) if 'q' in request.GET else SearchForm()\n\n if form.is_valid():\n\n # Restrict results by object type\n object_types = []\n for obj_type in form.cleaned_data['obj_types']:\n app_label, model_name = obj_type.split('.')\n object_types.append(ContentType.objects.get_by_natural_key(app_label, model_name))\n\n lookup = form.cleaned_data['lookup'] or LookupTypes.PARTIAL\n results = search_backend.search(\n form.cleaned_data['q'],\n user=request.user,\n object_types=object_types,\n lookup=lookup\n )\n\n # If performing a regex search, pass the highlight value as a compiled pattern\n if form.cleaned_data['lookup'] == LookupTypes.REGEX:\n try:\n highlight = re.compile(f\"({form.cleaned_data['q']})\", flags=re.IGNORECASE)\n except re.error:\n pass\n elif form.cleaned_data['lookup'] != LookupTypes.EXACT:\n highlight = form.cleaned_data['q']\n\n table = SearchTable(results, highlight=highlight)\n\n # Paginate the table results\n RequestConfig(request, {\n 'paginator_class': EnhancedPaginator,\n 'per_page': get_paginate_count(request)\n }).configure(table)\n\n # If this is an HTMX request, return only the rendered table HTML\n if is_htmx(request):\n return render(request, 'htmx/table.html', {\n 'table': table,\n })\n\n return render(request, 'search.html', {\n 'form': form,\n 'table': table,\n })\n", "path": "netbox/netbox/views/misc.py"}], "after_files": [{"content": "import uuid\n\nfrom django.conf import settings\nfrom django.core.exceptions import ObjectDoesNotExist\n\nfrom netbox.registry import registry\nfrom extras.constants import DEFAULT_DASHBOARD\n\n__all__ = (\n 'get_dashboard',\n 'get_default_dashboard',\n 'get_widget_class',\n 'register_widget',\n)\n\n\ndef register_widget(cls):\n \"\"\"\n Decorator for registering a DashboardWidget class.\n \"\"\"\n app_label = cls.__module__.split('.', maxsplit=1)[0]\n label = f'{app_label}.{cls.__name__}'\n registry['widgets'][label] = cls\n\n return cls\n\n\ndef get_widget_class(name):\n \"\"\"\n Return a registered DashboardWidget class identified by its name.\n \"\"\"\n try:\n return registry['widgets'][name]\n except KeyError:\n raise ValueError(f\"Unregistered widget class: {name}\")\n\n\ndef get_dashboard(user):\n \"\"\"\n Return the Dashboard for a given User if one exists, or generate a default dashboard.\n \"\"\"\n if user.is_anonymous:\n dashboard = get_default_dashboard()\n else:\n try:\n dashboard = user.dashboard\n except ObjectDoesNotExist:\n # Create a dashboard for this user\n dashboard = get_default_dashboard()\n dashboard.user = user\n dashboard.save()\n\n return dashboard\n\n\ndef get_default_dashboard(config=None):\n from extras.models import Dashboard\n\n dashboard = Dashboard()\n config = config or settings.DEFAULT_DASHBOARD or DEFAULT_DASHBOARD\n\n for widget in config:\n id = str(uuid.uuid4())\n dashboard.layout.append({\n 'id': id,\n 'w': widget['width'],\n 'h': widget['height'],\n 'x': widget.get('x'),\n 'y': 
widget.get('y'),\n })\n dashboard.config[id] = {\n 'class': widget['widget'],\n 'title': widget.get('title'),\n 'color': widget.get('color'),\n 'config': widget.get('config', {}),\n }\n\n return dashboard\n", "path": "netbox/extras/dashboard/utils.py"}, {"content": "import re\nfrom collections import namedtuple\n\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.cache import cache\nfrom django.shortcuts import redirect, render\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views.generic import View\nfrom django_tables2 import RequestConfig\nfrom packaging import version\n\nfrom extras.constants import DEFAULT_DASHBOARD\nfrom extras.dashboard.utils import get_dashboard, get_default_dashboard\nfrom netbox.forms import SearchForm\nfrom netbox.search import LookupTypes\nfrom netbox.search.backends import search_backend\nfrom netbox.tables import SearchTable\nfrom utilities.htmx import is_htmx\nfrom utilities.paginator import EnhancedPaginator, get_paginate_count\n\n__all__ = (\n 'HomeView',\n 'SearchView',\n)\n\nLink = namedtuple('Link', ('label', 'viewname', 'permission', 'count'))\n\n\nclass HomeView(View):\n template_name = 'home.html'\n\n def get(self, request):\n if settings.LOGIN_REQUIRED and not request.user.is_authenticated:\n return redirect('login')\n\n # Construct the user's custom dashboard layout\n try:\n dashboard = get_dashboard(request.user).get_layout()\n except Exception:\n messages.error(request, _(\n \"There was an error loading the dashboard configuration. A default dashboard is in use.\"\n ))\n dashboard = get_default_dashboard(config=DEFAULT_DASHBOARD).get_layout()\n\n # Check whether a new release is available. 
(Only for staff/superusers.)\n new_release = None\n if request.user.is_staff or request.user.is_superuser:\n latest_release = cache.get('latest_release')\n if latest_release:\n release_version, release_url = latest_release\n if release_version > version.parse(settings.VERSION):\n new_release = {\n 'version': str(release_version),\n 'url': release_url,\n }\n\n return render(request, self.template_name, {\n 'dashboard': dashboard,\n 'new_release': new_release,\n })\n\n\nclass SearchView(View):\n\n def get(self, request):\n results = []\n highlight = None\n\n # Initialize search form\n form = SearchForm(request.GET) if 'q' in request.GET else SearchForm()\n\n if form.is_valid():\n\n # Restrict results by object type\n object_types = []\n for obj_type in form.cleaned_data['obj_types']:\n app_label, model_name = obj_type.split('.')\n object_types.append(ContentType.objects.get_by_natural_key(app_label, model_name))\n\n lookup = form.cleaned_data['lookup'] or LookupTypes.PARTIAL\n results = search_backend.search(\n form.cleaned_data['q'],\n user=request.user,\n object_types=object_types,\n lookup=lookup\n )\n\n # If performing a regex search, pass the highlight value as a compiled pattern\n if form.cleaned_data['lookup'] == LookupTypes.REGEX:\n try:\n highlight = re.compile(f\"({form.cleaned_data['q']})\", flags=re.IGNORECASE)\n except re.error:\n pass\n elif form.cleaned_data['lookup'] != LookupTypes.EXACT:\n highlight = form.cleaned_data['q']\n\n table = SearchTable(results, highlight=highlight)\n\n # Paginate the table results\n RequestConfig(request, {\n 'paginator_class': EnhancedPaginator,\n 'per_page': get_paginate_count(request)\n }).configure(table)\n\n # If this is an HTMX request, return only the rendered table HTML\n if is_htmx(request):\n return render(request, 'htmx/table.html', {\n 'table': table,\n })\n\n return render(request, 'search.html', {\n 'form': form,\n 'table': table,\n })\n", "path": "netbox/netbox/views/misc.py"}]}
2,002
460
gh_patches_debug_38759
rasdani/github-patches
git_diff
readthedocs__readthedocs.org-2501
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Update django-filter to 1.0 ## Details Sorry for deleting the issue template: This is about technical debt :) It may not be immediately critical, but the advice from the author of django-filter is that it's worth it. django-filter 1.0 has changes that are backwards incompatible. The release notes are here: http://django-filter.readthedocs.io/en/latest/migration.html It means, amongst other this, that all where `Filter` object instances are iterated on, we have to [add the `.qs` method](http://django-filter.readthedocs.io/en/latest/migration.html#queryset-methods-are-no-longer-proxied). Pin django-filter The new 1.0 series is incompatible, and I've opened #2498 for this purpose. Meanwhile, as the current master is broken because of this, the version should be pinned - I guess it's sort of bad practice to use the `master` branch anyways, am thinking it's possibly also an outdated decision now. This fixes #2495 and #2490 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `readthedocs/builds/filters.py` Content: ``` 1 from django.utils.translation import ugettext_lazy as _ 2 3 import django_filters 4 5 from readthedocs.builds import constants 6 from readthedocs.builds.models import Build, Version 7 8 9 ANY_REPO = ( 10 ('', _('Any')), 11 ) 12 13 BUILD_TYPES = ANY_REPO + constants.BUILD_TYPES 14 15 16 class VersionSlugFilter(django_filters.FilterSet): 17 18 class Meta: 19 model = Version 20 fields = { 21 'identifier': ['icontains'], 22 'slug': ['icontains'], 23 } 24 25 26 class VersionFilter(django_filters.FilterSet): 27 project = django_filters.CharFilter(name='project__slug') 28 # Allow filtering on slug= or version= 29 slug = django_filters.CharFilter(label=_("Name"), name='slug', 30 lookup_type='exact') 31 version = django_filters.CharFilter(label=_("Version"), name='slug', 32 lookup_type='exact') 33 34 class Meta: 35 model = Version 36 fields = ['project', 'slug', 'version'] 37 38 39 class BuildFilter(django_filters.FilterSet): 40 date = django_filters.DateRangeFilter(label=_("Build Date"), name="date", lookup_type='range') 41 type = django_filters.ChoiceFilter(label=_("Build Type"), 42 choices=BUILD_TYPES) 43 44 class Meta: 45 model = Build 46 fields = ['type', 'date', 'success'] 47 ``` Path: `readthedocs/projects/filters.py` Content: ``` 1 """Project query filters""" 2 3 from django.utils.translation import ugettext_lazy as _ 4 5 import django_filters 6 7 from readthedocs.projects import constants 8 from readthedocs.projects.models import Project, Domain 9 10 ANY_REPO = ( 11 ('', _('Any')), 12 ) 13 14 REPO_CHOICES = ANY_REPO + constants.REPO_CHOICES 15 16 17 def sort_slug(queryset, query): 18 """Fuzzy filter for slug fields 19 20 Returns sorted queryset where slug approximates ``query`` 21 """ 22 queryset = queryset.filter(slug__icontains=query) 23 ret = [] 24 ret.extend([q.pk for q in queryset 25 if q.slug == query]) 26 ret.extend([q.pk for q in queryset 27 if q.slug.startswith(query) and q.pk not in ret]) 28 ret.extend([q.pk for q in queryset 29 if q.slug.endswith(query) and q.pk not in ret]) 30 ret.extend([q.pk for q in queryset 31 if q.pk not in ret]) 32 33 # Create a QS preserving ordering 34 clauses = ' '.join(['WHEN projects_project.id=%s THEN %s' % (pk, i) 35 for i, pk in enumerate(ret)]) 36 ordering = 'CASE %s END' % clauses 37 ret_queryset = 
Project.objects.filter(pk__in=ret).extra( 38 select={'ordering': ordering}, order_by=('ordering',)) 39 return ret_queryset 40 41 42 class ProjectFilter(django_filters.FilterSet): 43 44 """Project filter for filter views""" 45 46 name = django_filters.CharFilter(label=_("Name"), name='name', 47 lookup_type='icontains') 48 slug = django_filters.CharFilter(label=_("Slug"), name='slug', 49 lookup_type='icontains') 50 pub_date = django_filters.DateRangeFilter(label=_("Created Date"), 51 name="pub_date") 52 repo = django_filters.CharFilter(label=_("Repository URL"), name='repo', 53 lookup_type='icontains') 54 repo_type = django_filters.ChoiceFilter( 55 label=_("Repository Type"), 56 name='repo', 57 lookup_type='icontains', 58 choices=REPO_CHOICES, 59 ) 60 61 class Meta: 62 model = Project 63 fields = ['name', 'slug', 'pub_date', 'repo', 'repo_type'] 64 65 66 class DomainFilter(django_filters.FilterSet): 67 project = django_filters.CharFilter(label=_("Project"), name='project__slug', 68 lookup_type='exact') 69 70 class Meta: 71 model = Domain 72 fields = ['domain', 'project', 'canonical'] 73 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/readthedocs/builds/filters.py b/readthedocs/builds/filters.py --- a/readthedocs/builds/filters.py +++ b/readthedocs/builds/filters.py @@ -27,9 +27,9 @@ project = django_filters.CharFilter(name='project__slug') # Allow filtering on slug= or version= slug = django_filters.CharFilter(label=_("Name"), name='slug', - lookup_type='exact') + lookup_expr='exact') version = django_filters.CharFilter(label=_("Version"), name='slug', - lookup_type='exact') + lookup_expr='exact') class Meta: model = Version @@ -37,7 +37,7 @@ class BuildFilter(django_filters.FilterSet): - date = django_filters.DateRangeFilter(label=_("Build Date"), name="date", lookup_type='range') + date = django_filters.DateRangeFilter(label=_("Build Date"), name="date", lookup_expr='range') type = django_filters.ChoiceFilter(label=_("Build Type"), choices=BUILD_TYPES) diff --git a/readthedocs/projects/filters.py b/readthedocs/projects/filters.py --- a/readthedocs/projects/filters.py +++ b/readthedocs/projects/filters.py @@ -44,17 +44,17 @@ """Project filter for filter views""" name = django_filters.CharFilter(label=_("Name"), name='name', - lookup_type='icontains') + lookup_expr='icontains') slug = django_filters.CharFilter(label=_("Slug"), name='slug', - lookup_type='icontains') + lookup_expr='icontains') pub_date = django_filters.DateRangeFilter(label=_("Created Date"), name="pub_date") repo = django_filters.CharFilter(label=_("Repository URL"), name='repo', - lookup_type='icontains') + lookup_expr='icontains') repo_type = django_filters.ChoiceFilter( label=_("Repository Type"), name='repo', - lookup_type='icontains', + lookup_expr='icontains', choices=REPO_CHOICES, ) @@ -65,7 +65,7 @@ class DomainFilter(django_filters.FilterSet): project = django_filters.CharFilter(label=_("Project"), name='project__slug', - lookup_type='exact') + lookup_expr='exact') class Meta: model = Domain
{"golden_diff": "diff --git a/readthedocs/builds/filters.py b/readthedocs/builds/filters.py\n--- a/readthedocs/builds/filters.py\n+++ b/readthedocs/builds/filters.py\n@@ -27,9 +27,9 @@\n project = django_filters.CharFilter(name='project__slug')\n # Allow filtering on slug= or version=\n slug = django_filters.CharFilter(label=_(\"Name\"), name='slug',\n- lookup_type='exact')\n+ lookup_expr='exact')\n version = django_filters.CharFilter(label=_(\"Version\"), name='slug',\n- lookup_type='exact')\n+ lookup_expr='exact')\n \n class Meta:\n model = Version\n@@ -37,7 +37,7 @@\n \n \n class BuildFilter(django_filters.FilterSet):\n- date = django_filters.DateRangeFilter(label=_(\"Build Date\"), name=\"date\", lookup_type='range')\n+ date = django_filters.DateRangeFilter(label=_(\"Build Date\"), name=\"date\", lookup_expr='range')\n type = django_filters.ChoiceFilter(label=_(\"Build Type\"),\n choices=BUILD_TYPES)\n \ndiff --git a/readthedocs/projects/filters.py b/readthedocs/projects/filters.py\n--- a/readthedocs/projects/filters.py\n+++ b/readthedocs/projects/filters.py\n@@ -44,17 +44,17 @@\n \"\"\"Project filter for filter views\"\"\"\n \n name = django_filters.CharFilter(label=_(\"Name\"), name='name',\n- lookup_type='icontains')\n+ lookup_expr='icontains')\n slug = django_filters.CharFilter(label=_(\"Slug\"), name='slug',\n- lookup_type='icontains')\n+ lookup_expr='icontains')\n pub_date = django_filters.DateRangeFilter(label=_(\"Created Date\"),\n name=\"pub_date\")\n repo = django_filters.CharFilter(label=_(\"Repository URL\"), name='repo',\n- lookup_type='icontains')\n+ lookup_expr='icontains')\n repo_type = django_filters.ChoiceFilter(\n label=_(\"Repository Type\"),\n name='repo',\n- lookup_type='icontains',\n+ lookup_expr='icontains',\n choices=REPO_CHOICES,\n )\n \n@@ -65,7 +65,7 @@\n \n class DomainFilter(django_filters.FilterSet):\n project = django_filters.CharFilter(label=_(\"Project\"), name='project__slug',\n- lookup_type='exact')\n+ lookup_expr='exact')\n \n class Meta:\n model = Domain\n", "issue": "Update django-filter to 1.0\n## Details\r\n\r\nSorry for deleting the issue template: This is about technical debt :) It may not be immediately critical, but the advice from the author of django-filter is that it's worth it.\r\n\r\ndjango-filter 1.0 has changes that are backwards incompatible. 
The release notes are here:\r\n\r\nhttp://django-filter.readthedocs.io/en/latest/migration.html\r\n\r\nIt means, amongst other this, that all where `Filter` object instances are iterated on, we have to [add the `.qs` method](http://django-filter.readthedocs.io/en/latest/migration.html#queryset-methods-are-no-longer-proxied).\nPin django-filter\nThe new 1.0 series is incompatible, and I've opened #2498 for this purpose.\r\n\r\nMeanwhile, as the current master is broken because of this, the version should be pinned - I guess it's sort of bad practice to use the `master` branch anyways, am thinking it's possibly also an outdated decision now.\r\n\r\nThis fixes #2495 and #2490\n", "before_files": [{"content": "from django.utils.translation import ugettext_lazy as _\n\nimport django_filters\n\nfrom readthedocs.builds import constants\nfrom readthedocs.builds.models import Build, Version\n\n\nANY_REPO = (\n ('', _('Any')),\n)\n\nBUILD_TYPES = ANY_REPO + constants.BUILD_TYPES\n\n\nclass VersionSlugFilter(django_filters.FilterSet):\n\n class Meta:\n model = Version\n fields = {\n 'identifier': ['icontains'],\n 'slug': ['icontains'],\n }\n\n\nclass VersionFilter(django_filters.FilterSet):\n project = django_filters.CharFilter(name='project__slug')\n # Allow filtering on slug= or version=\n slug = django_filters.CharFilter(label=_(\"Name\"), name='slug',\n lookup_type='exact')\n version = django_filters.CharFilter(label=_(\"Version\"), name='slug',\n lookup_type='exact')\n\n class Meta:\n model = Version\n fields = ['project', 'slug', 'version']\n\n\nclass BuildFilter(django_filters.FilterSet):\n date = django_filters.DateRangeFilter(label=_(\"Build Date\"), name=\"date\", lookup_type='range')\n type = django_filters.ChoiceFilter(label=_(\"Build Type\"),\n choices=BUILD_TYPES)\n\n class Meta:\n model = Build\n fields = ['type', 'date', 'success']\n", "path": "readthedocs/builds/filters.py"}, {"content": "\"\"\"Project query filters\"\"\"\n\nfrom django.utils.translation import ugettext_lazy as _\n\nimport django_filters\n\nfrom readthedocs.projects import constants\nfrom readthedocs.projects.models import Project, Domain\n\nANY_REPO = (\n ('', _('Any')),\n)\n\nREPO_CHOICES = ANY_REPO + constants.REPO_CHOICES\n\n\ndef sort_slug(queryset, query):\n \"\"\"Fuzzy filter for slug fields\n\n Returns sorted queryset where slug approximates ``query``\n \"\"\"\n queryset = queryset.filter(slug__icontains=query)\n ret = []\n ret.extend([q.pk for q in queryset\n if q.slug == query])\n ret.extend([q.pk for q in queryset\n if q.slug.startswith(query) and q.pk not in ret])\n ret.extend([q.pk for q in queryset\n if q.slug.endswith(query) and q.pk not in ret])\n ret.extend([q.pk for q in queryset\n if q.pk not in ret])\n\n # Create a QS preserving ordering\n clauses = ' '.join(['WHEN projects_project.id=%s THEN %s' % (pk, i)\n for i, pk in enumerate(ret)])\n ordering = 'CASE %s END' % clauses\n ret_queryset = Project.objects.filter(pk__in=ret).extra(\n select={'ordering': ordering}, order_by=('ordering',))\n return ret_queryset\n\n\nclass ProjectFilter(django_filters.FilterSet):\n\n \"\"\"Project filter for filter views\"\"\"\n\n name = django_filters.CharFilter(label=_(\"Name\"), name='name',\n lookup_type='icontains')\n slug = django_filters.CharFilter(label=_(\"Slug\"), name='slug',\n lookup_type='icontains')\n pub_date = django_filters.DateRangeFilter(label=_(\"Created Date\"),\n name=\"pub_date\")\n repo = django_filters.CharFilter(label=_(\"Repository URL\"), name='repo',\n lookup_type='icontains')\n repo_type 
= django_filters.ChoiceFilter(\n label=_(\"Repository Type\"),\n name='repo',\n lookup_type='icontains',\n choices=REPO_CHOICES,\n )\n\n class Meta:\n model = Project\n fields = ['name', 'slug', 'pub_date', 'repo', 'repo_type']\n\n\nclass DomainFilter(django_filters.FilterSet):\n project = django_filters.CharFilter(label=_(\"Project\"), name='project__slug',\n lookup_type='exact')\n\n class Meta:\n model = Domain\n fields = ['domain', 'project', 'canonical']\n", "path": "readthedocs/projects/filters.py"}], "after_files": [{"content": "from django.utils.translation import ugettext_lazy as _\n\nimport django_filters\n\nfrom readthedocs.builds import constants\nfrom readthedocs.builds.models import Build, Version\n\n\nANY_REPO = (\n ('', _('Any')),\n)\n\nBUILD_TYPES = ANY_REPO + constants.BUILD_TYPES\n\n\nclass VersionSlugFilter(django_filters.FilterSet):\n\n class Meta:\n model = Version\n fields = {\n 'identifier': ['icontains'],\n 'slug': ['icontains'],\n }\n\n\nclass VersionFilter(django_filters.FilterSet):\n project = django_filters.CharFilter(name='project__slug')\n # Allow filtering on slug= or version=\n slug = django_filters.CharFilter(label=_(\"Name\"), name='slug',\n lookup_expr='exact')\n version = django_filters.CharFilter(label=_(\"Version\"), name='slug',\n lookup_expr='exact')\n\n class Meta:\n model = Version\n fields = ['project', 'slug', 'version']\n\n\nclass BuildFilter(django_filters.FilterSet):\n date = django_filters.DateRangeFilter(label=_(\"Build Date\"), name=\"date\", lookup_expr='range')\n type = django_filters.ChoiceFilter(label=_(\"Build Type\"),\n choices=BUILD_TYPES)\n\n class Meta:\n model = Build\n fields = ['type', 'date', 'success']\n", "path": "readthedocs/builds/filters.py"}, {"content": "\"\"\"Project query filters\"\"\"\n\nfrom django.utils.translation import ugettext_lazy as _\n\nimport django_filters\n\nfrom readthedocs.projects import constants\nfrom readthedocs.projects.models import Project, Domain\n\nANY_REPO = (\n ('', _('Any')),\n)\n\nREPO_CHOICES = ANY_REPO + constants.REPO_CHOICES\n\n\ndef sort_slug(queryset, query):\n \"\"\"Fuzzy filter for slug fields\n\n Returns sorted queryset where slug approximates ``query``\n \"\"\"\n queryset = queryset.filter(slug__icontains=query)\n ret = []\n ret.extend([q.pk for q in queryset\n if q.slug == query])\n ret.extend([q.pk for q in queryset\n if q.slug.startswith(query) and q.pk not in ret])\n ret.extend([q.pk for q in queryset\n if q.slug.endswith(query) and q.pk not in ret])\n ret.extend([q.pk for q in queryset\n if q.pk not in ret])\n\n # Create a QS preserving ordering\n clauses = ' '.join(['WHEN projects_project.id=%s THEN %s' % (pk, i)\n for i, pk in enumerate(ret)])\n ordering = 'CASE %s END' % clauses\n ret_queryset = Project.objects.filter(pk__in=ret).extra(\n select={'ordering': ordering}, order_by=('ordering',))\n return ret_queryset\n\n\nclass ProjectFilter(django_filters.FilterSet):\n\n \"\"\"Project filter for filter views\"\"\"\n\n name = django_filters.CharFilter(label=_(\"Name\"), name='name',\n lookup_expr='icontains')\n slug = django_filters.CharFilter(label=_(\"Slug\"), name='slug',\n lookup_expr='icontains')\n pub_date = django_filters.DateRangeFilter(label=_(\"Created Date\"),\n name=\"pub_date\")\n repo = django_filters.CharFilter(label=_(\"Repository URL\"), name='repo',\n lookup_expr='icontains')\n repo_type = django_filters.ChoiceFilter(\n label=_(\"Repository Type\"),\n name='repo',\n lookup_expr='icontains',\n choices=REPO_CHOICES,\n )\n\n class Meta:\n model = Project\n 
fields = ['name', 'slug', 'pub_date', 'repo', 'repo_type']\n\n\nclass DomainFilter(django_filters.FilterSet):\n project = django_filters.CharFilter(label=_(\"Project\"), name='project__slug',\n lookup_expr='exact')\n\n class Meta:\n model = Domain\n fields = ['domain', 'project', 'canonical']\n", "path": "readthedocs/projects/filters.py"}]}
1,519
514
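Aside from the mechanical rename shown in the diff above, the issue text also warns that django-filter 1.0 stopped proxying queryset methods on FilterSet objects. A minimal sketch of both points, assuming django-filter >= 1.0 and the Project model imported in the patched files, could look like this:

```python
import django_filters

from readthedocs.projects.models import Project  # model used by the patched FilterSet


class ProjectFilter(django_filters.FilterSet):
    # django-filter >= 1.0 renamed the ``lookup_type`` keyword to ``lookup_expr``.
    name = django_filters.CharFilter(name='name', lookup_expr='icontains')

    class Meta:
        model = Project
        fields = ['name']


# FilterSets no longer proxy queryset methods in 1.0, so results are read
# through the ``.qs`` property instead of iterating the filter directly.
def matching_projects(queryset, data):
    return list(ProjectFilter(data, queryset=queryset).qs)
```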
gh_patches_debug_11048
rasdani/github-patches
git_diff
kubeflow__pipelines-2986
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- KFP 0.2.1 SDK client broken when inverse proxy host is used This bug is introduced in #2896 Under the hood `subprocess.check_output()` return bytes instead of string. However in `kfp_server_api/configuration.py` it was assumed to be string, which caused error like the following: ``` ~/.local/lib/python3.5/site-packages/kfp_server_api/configuration.py in get_api_key_with_prefix(self, identifier) 203 if (self.api_key.get(identifier) and 204 self.api_key_prefix.get(identifier)): --> 205 return self.api_key_prefix[identifier] + ' ' + self.api_key[identifier] # noqa: E501 206 elif self.api_key.get(identifier): 207 return self.api_key[identifier] TypeError: Can't convert 'bytes' object to str implicitly ``` This impacts some CUJs involving TFX CLI and CAIP notebook experiences. Will send out a fix today. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `sdk/python/kfp/_auth.py` Content: ``` 1 # Copyright 2018 Google LLC 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import logging 16 import os 17 import subprocess 18 import google.auth 19 import google.auth.app_engine 20 import google.auth.compute_engine.credentials 21 import google.auth.iam 22 from google.auth.transport.requests import Request 23 import google.oauth2.credentials 24 import google.oauth2.service_account 25 import requests_toolbelt.adapters.appengine 26 from webbrowser import open_new_tab 27 import requests 28 import json 29 30 IAM_SCOPE = 'https://www.googleapis.com/auth/iam' 31 OAUTH_TOKEN_URI = 'https://www.googleapis.com/oauth2/v4/token' 32 LOCAL_KFP_CREDENTIAL = os.path.expanduser('~/.config/kfp/credentials.json') 33 34 def get_gcp_access_token(): 35 """Get and return GCP access token for the current Application Default 36 Credentials. If not set, returns None. For more information, see 37 https://cloud.google.com/sdk/gcloud/reference/auth/application-default/print-access-token 38 """ 39 args = ['gcloud', 'auth', 'print-access-token'] 40 return subprocess.check_output(args).rstrip() 41 42 def get_auth_token(client_id, other_client_id, other_client_secret): 43 """Gets auth token from default service account or user account.""" 44 if os.path.exists(LOCAL_KFP_CREDENTIAL): 45 # fetch IAP auth token using the locally stored credentials. 46 with open(LOCAL_KFP_CREDENTIAL, 'r') as f: 47 credentials = json.load(f) 48 if client_id in credentials: 49 return id_token_from_refresh_token(credentials[client_id]['other_client_id'], 50 credentials[client_id]['other_client_secret'], 51 credentials[client_id]['refresh_token'], 52 client_id) 53 if other_client_id is None or other_client_secret is None: 54 # fetch IAP auth token: service accounts 55 token = get_auth_token_from_sa(client_id) 56 else: 57 # fetch IAP auth token: user account 58 # Obtain the ID token for provided Client ID with user accounts. 
59 # Flow: get authorization code -> exchange for refresh token -> obtain and return ID token 60 refresh_token = get_refresh_token_from_client_id(other_client_id, other_client_secret) 61 credentials = {} 62 if os.path.exists(LOCAL_KFP_CREDENTIAL): 63 with open(LOCAL_KFP_CREDENTIAL, 'r') as f: 64 credentials = json.load(f) 65 credentials[client_id] = {} 66 credentials[client_id]['other_client_id'] = other_client_id 67 credentials[client_id]['other_client_secret'] = other_client_secret 68 credentials[client_id]['refresh_token'] = refresh_token 69 #TODO: handle the case when the refresh_token expires. 70 # which only happens if the refresh_token is not used once for six months. 71 if not os.path.exists(os.path.dirname(LOCAL_KFP_CREDENTIAL)): 72 os.makedirs(os.path.dirname(LOCAL_KFP_CREDENTIAL)) 73 with open(LOCAL_KFP_CREDENTIAL, 'w') as f: 74 json.dump(credentials, f) 75 token = id_token_from_refresh_token(other_client_id, other_client_secret, refresh_token, client_id) 76 return token 77 78 def get_auth_token_from_sa(client_id): 79 """Gets auth token from default service account. 80 81 If no service account credential is found, returns None. 82 """ 83 service_account_credentials = get_service_account_credentials(client_id) 84 if service_account_credentials: 85 return get_google_open_id_connect_token(service_account_credentials) 86 return None 87 88 def get_service_account_credentials(client_id): 89 # Figure out what environment we're running in and get some preliminary 90 # information about the service account. 91 bootstrap_credentials, _ = google.auth.default( 92 scopes=[IAM_SCOPE]) 93 if isinstance(bootstrap_credentials, 94 google.oauth2.credentials.Credentials): 95 logging.info('Found OAuth2 credentials and skip SA auth.') 96 return None 97 elif isinstance(bootstrap_credentials, 98 google.auth.app_engine.Credentials): 99 requests_toolbelt.adapters.appengine.monkeypatch() 100 101 # For service account's using the Compute Engine metadata service, 102 # service_account_email isn't available until refresh is called. 103 bootstrap_credentials.refresh(Request()) 104 signer_email = bootstrap_credentials.service_account_email 105 if isinstance(bootstrap_credentials, 106 google.auth.compute_engine.credentials.Credentials): 107 # Since the Compute Engine metadata service doesn't expose the service 108 # account key, we use the IAM signBlob API to sign instead. 109 # In order for this to work: 110 # 111 # 1. Your VM needs the https://www.googleapis.com/auth/iam scope. 112 # You can specify this specific scope when creating a VM 113 # through the API or gcloud. When using Cloud Console, 114 # you'll need to specify the "full access to all Cloud APIs" 115 # scope. A VM's scopes can only be specified at creation time. 116 # 117 # 2. The VM's default service account needs the "Service Account Actor" 118 # role. This can be found under the "Project" category in Cloud 119 # Console, or roles/iam.serviceAccountActor in gcloud. 120 signer = google.auth.iam.Signer( 121 Request(), bootstrap_credentials, signer_email) 122 else: 123 # A Signer object can sign a JWT using the service account's key. 124 signer = bootstrap_credentials.signer 125 126 # Construct OAuth 2.0 service account credentials using the signer 127 # and email acquired from the bootstrap credentials. 
128 return google.oauth2.service_account.Credentials( 129 signer, signer_email, token_uri=OAUTH_TOKEN_URI, additional_claims={ 130 'target_audience': client_id 131 }) 132 133 def get_google_open_id_connect_token(service_account_credentials): 134 """Get an OpenID Connect token issued by Google for the service account. 135 This function: 136 1. Generates a JWT signed with the service account's private key 137 containing a special "target_audience" claim. 138 2. Sends it to the OAUTH_TOKEN_URI endpoint. Because the JWT in #1 139 has a target_audience claim, that endpoint will respond with 140 an OpenID Connect token for the service account -- in other words, 141 a JWT signed by *Google*. The aud claim in this JWT will be 142 set to the value from the target_audience claim in #1. 143 For more information, see 144 https://developers.google.com/identity/protocols/OAuth2ServiceAccount . 145 The HTTP/REST example on that page describes the JWT structure and 146 demonstrates how to call the token endpoint. (The example on that page 147 shows how to get an OAuth2 access token; this code is using a 148 modified version of it to get an OpenID Connect token.) 149 """ 150 151 service_account_jwt = ( 152 service_account_credentials._make_authorization_grant_assertion()) 153 request = google.auth.transport.requests.Request() 154 body = { 155 'assertion': service_account_jwt, 156 'grant_type': google.oauth2._client._JWT_GRANT_TYPE, 157 } 158 token_response = google.oauth2._client._token_endpoint_request( 159 request, OAUTH_TOKEN_URI, body) 160 return token_response['id_token'] 161 162 def get_refresh_token_from_client_id(client_id, client_secret): 163 """Obtain the ID token for provided Client ID with user accounts. 164 Flow: get authorization code -> exchange for refresh token -> obtain and return ID token 165 """ 166 auth_code = get_auth_code(client_id) 167 return get_refresh_token_from_code(auth_code, client_id, client_secret) 168 169 def get_auth_code(client_id): 170 auth_url = "https://accounts.google.com/o/oauth2/v2/auth?client_id=%s&response_type=code&scope=openid%%20email&access_type=offline&redirect_uri=urn:ietf:wg:oauth:2.0:oob"%client_id 171 print(auth_url) 172 open_new_tab(auth_url) 173 return input("Authorization code: ") 174 175 def get_refresh_token_from_code(auth_code, client_id, client_secret): 176 payload = {"code": auth_code, "client_id": client_id, "client_secret": client_secret, 177 "redirect_uri": "urn:ietf:wg:oauth:2.0:oob", "grant_type": "authorization_code"} 178 res = requests.post(OAUTH_TOKEN_URI, data=payload) 179 return (str(json.loads(res.text)[u"refresh_token"])) 180 181 def id_token_from_refresh_token(client_id, client_secret, refresh_token, audience): 182 payload = {"client_id": client_id, "client_secret": client_secret, 183 "refresh_token": refresh_token, "grant_type": "refresh_token", 184 "audience": audience} 185 res = requests.post(OAUTH_TOKEN_URI, data=payload) 186 return (str(json.loads(res.text)[u"id_token"])) 187 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sdk/python/kfp/_auth.py b/sdk/python/kfp/_auth.py
--- a/sdk/python/kfp/_auth.py
+++ b/sdk/python/kfp/_auth.py
@@ -37,7 +37,8 @@
     https://cloud.google.com/sdk/gcloud/reference/auth/application-default/print-access-token
     """
     args = ['gcloud', 'auth', 'print-access-token']
-    return subprocess.check_output(args).rstrip()
+    # Casting to string to accommodate API server request schema.
+    return subprocess.check_output(args).rstrip().decode("utf-8")
 
 def get_auth_token(client_id, other_client_id, other_client_secret):
     """Gets auth token from default service account or user account."""
{"golden_diff": "diff --git a/sdk/python/kfp/_auth.py b/sdk/python/kfp/_auth.py\n--- a/sdk/python/kfp/_auth.py\n+++ b/sdk/python/kfp/_auth.py\n@@ -37,7 +37,8 @@\n https://cloud.google.com/sdk/gcloud/reference/auth/application-default/print-access-token\n \"\"\"\n args = ['gcloud', 'auth', 'print-access-token']\n- return subprocess.check_output(args).rstrip()\n+ # Casting to string to accommodate API server request schema.\n+ return subprocess.check_output(args).rstrip().decode(\"utf-8\")\n \n def get_auth_token(client_id, other_client_id, other_client_secret):\n \"\"\"Gets auth token from default service account or user account.\"\"\"\n", "issue": "KFP 0.2.1 SDK client broken when inverse proxy host is used\nThis bug is introduced in #2896 \r\n\r\nUnder the hood `subprocess.check_output()` return bytes instead of string. However in `kfp_server_api/configuration.py` it was assumed to be string, which caused error like the following:\r\n\r\n```\r\n~/.local/lib/python3.5/site-packages/kfp_server_api/configuration.py in get_api_key_with_prefix(self, identifier)\r\n 203 if (self.api_key.get(identifier) and\r\n 204 self.api_key_prefix.get(identifier)):\r\n--> 205 return self.api_key_prefix[identifier] + ' ' + self.api_key[identifier] # noqa: E501\r\n 206 elif self.api_key.get(identifier):\r\n 207 return self.api_key[identifier]\r\n\r\nTypeError: Can't convert 'bytes' object to str implicitly\r\n```\r\nThis impacts some CUJs involving TFX CLI and CAIP notebook experiences. Will send out a fix today.\r\n\r\n\n", "before_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport os\nimport subprocess\nimport google.auth\nimport google.auth.app_engine\nimport google.auth.compute_engine.credentials\nimport google.auth.iam\nfrom google.auth.transport.requests import Request\nimport google.oauth2.credentials\nimport google.oauth2.service_account\nimport requests_toolbelt.adapters.appengine\nfrom webbrowser import open_new_tab\nimport requests\nimport json\n\nIAM_SCOPE = 'https://www.googleapis.com/auth/iam'\nOAUTH_TOKEN_URI = 'https://www.googleapis.com/oauth2/v4/token'\nLOCAL_KFP_CREDENTIAL = os.path.expanduser('~/.config/kfp/credentials.json')\n\ndef get_gcp_access_token():\n \"\"\"Get and return GCP access token for the current Application Default\n Credentials. If not set, returns None. 
For more information, see\n https://cloud.google.com/sdk/gcloud/reference/auth/application-default/print-access-token\n \"\"\"\n args = ['gcloud', 'auth', 'print-access-token']\n return subprocess.check_output(args).rstrip()\n\ndef get_auth_token(client_id, other_client_id, other_client_secret):\n \"\"\"Gets auth token from default service account or user account.\"\"\"\n if os.path.exists(LOCAL_KFP_CREDENTIAL):\n # fetch IAP auth token using the locally stored credentials.\n with open(LOCAL_KFP_CREDENTIAL, 'r') as f:\n credentials = json.load(f)\n if client_id in credentials:\n return id_token_from_refresh_token(credentials[client_id]['other_client_id'],\n credentials[client_id]['other_client_secret'],\n credentials[client_id]['refresh_token'],\n client_id)\n if other_client_id is None or other_client_secret is None:\n # fetch IAP auth token: service accounts\n token = get_auth_token_from_sa(client_id)\n else:\n # fetch IAP auth token: user account\n # Obtain the ID token for provided Client ID with user accounts.\n # Flow: get authorization code -> exchange for refresh token -> obtain and return ID token\n refresh_token = get_refresh_token_from_client_id(other_client_id, other_client_secret)\n credentials = {}\n if os.path.exists(LOCAL_KFP_CREDENTIAL):\n with open(LOCAL_KFP_CREDENTIAL, 'r') as f:\n credentials = json.load(f)\n credentials[client_id] = {}\n credentials[client_id]['other_client_id'] = other_client_id\n credentials[client_id]['other_client_secret'] = other_client_secret\n credentials[client_id]['refresh_token'] = refresh_token\n #TODO: handle the case when the refresh_token expires.\n # which only happens if the refresh_token is not used once for six months.\n if not os.path.exists(os.path.dirname(LOCAL_KFP_CREDENTIAL)):\n os.makedirs(os.path.dirname(LOCAL_KFP_CREDENTIAL))\n with open(LOCAL_KFP_CREDENTIAL, 'w') as f:\n json.dump(credentials, f)\n token = id_token_from_refresh_token(other_client_id, other_client_secret, refresh_token, client_id)\n return token\n\ndef get_auth_token_from_sa(client_id):\n \"\"\"Gets auth token from default service account.\n\n If no service account credential is found, returns None.\n \"\"\"\n service_account_credentials = get_service_account_credentials(client_id)\n if service_account_credentials:\n return get_google_open_id_connect_token(service_account_credentials)\n return None\n\ndef get_service_account_credentials(client_id):\n # Figure out what environment we're running in and get some preliminary\n # information about the service account.\n bootstrap_credentials, _ = google.auth.default(\n scopes=[IAM_SCOPE])\n if isinstance(bootstrap_credentials,\n google.oauth2.credentials.Credentials):\n logging.info('Found OAuth2 credentials and skip SA auth.')\n return None\n elif isinstance(bootstrap_credentials,\n google.auth.app_engine.Credentials):\n requests_toolbelt.adapters.appengine.monkeypatch()\n\n # For service account's using the Compute Engine metadata service,\n # service_account_email isn't available until refresh is called.\n bootstrap_credentials.refresh(Request())\n signer_email = bootstrap_credentials.service_account_email\n if isinstance(bootstrap_credentials,\n google.auth.compute_engine.credentials.Credentials):\n # Since the Compute Engine metadata service doesn't expose the service\n # account key, we use the IAM signBlob API to sign instead.\n # In order for this to work:\n #\n # 1. 
Your VM needs the https://www.googleapis.com/auth/iam scope.\n # You can specify this specific scope when creating a VM\n # through the API or gcloud. When using Cloud Console,\n # you'll need to specify the \"full access to all Cloud APIs\"\n # scope. A VM's scopes can only be specified at creation time.\n #\n # 2. The VM's default service account needs the \"Service Account Actor\"\n # role. This can be found under the \"Project\" category in Cloud\n # Console, or roles/iam.serviceAccountActor in gcloud.\n signer = google.auth.iam.Signer(\n Request(), bootstrap_credentials, signer_email)\n else:\n # A Signer object can sign a JWT using the service account's key.\n signer = bootstrap_credentials.signer\n\n # Construct OAuth 2.0 service account credentials using the signer\n # and email acquired from the bootstrap credentials.\n return google.oauth2.service_account.Credentials(\n signer, signer_email, token_uri=OAUTH_TOKEN_URI, additional_claims={\n 'target_audience': client_id\n })\n\ndef get_google_open_id_connect_token(service_account_credentials):\n \"\"\"Get an OpenID Connect token issued by Google for the service account.\n This function:\n 1. Generates a JWT signed with the service account's private key\n containing a special \"target_audience\" claim.\n 2. Sends it to the OAUTH_TOKEN_URI endpoint. Because the JWT in #1\n has a target_audience claim, that endpoint will respond with\n an OpenID Connect token for the service account -- in other words,\n a JWT signed by *Google*. The aud claim in this JWT will be\n set to the value from the target_audience claim in #1.\n For more information, see\n https://developers.google.com/identity/protocols/OAuth2ServiceAccount .\n The HTTP/REST example on that page describes the JWT structure and\n demonstrates how to call the token endpoint. 
(The example on that page\n shows how to get an OAuth2 access token; this code is using a\n modified version of it to get an OpenID Connect token.)\n \"\"\"\n\n service_account_jwt = (\n service_account_credentials._make_authorization_grant_assertion())\n request = google.auth.transport.requests.Request()\n body = {\n 'assertion': service_account_jwt,\n 'grant_type': google.oauth2._client._JWT_GRANT_TYPE,\n }\n token_response = google.oauth2._client._token_endpoint_request(\n request, OAUTH_TOKEN_URI, body)\n return token_response['id_token']\n\ndef get_refresh_token_from_client_id(client_id, client_secret):\n \"\"\"Obtain the ID token for provided Client ID with user accounts.\n Flow: get authorization code -> exchange for refresh token -> obtain and return ID token\n \"\"\"\n auth_code = get_auth_code(client_id)\n return get_refresh_token_from_code(auth_code, client_id, client_secret)\n\ndef get_auth_code(client_id):\n auth_url = \"https://accounts.google.com/o/oauth2/v2/auth?client_id=%s&response_type=code&scope=openid%%20email&access_type=offline&redirect_uri=urn:ietf:wg:oauth:2.0:oob\"%client_id\n print(auth_url)\n open_new_tab(auth_url)\n return input(\"Authorization code: \")\n\ndef get_refresh_token_from_code(auth_code, client_id, client_secret):\n payload = {\"code\": auth_code, \"client_id\": client_id, \"client_secret\": client_secret,\n \"redirect_uri\": \"urn:ietf:wg:oauth:2.0:oob\", \"grant_type\": \"authorization_code\"}\n res = requests.post(OAUTH_TOKEN_URI, data=payload)\n return (str(json.loads(res.text)[u\"refresh_token\"]))\n\ndef id_token_from_refresh_token(client_id, client_secret, refresh_token, audience):\n payload = {\"client_id\": client_id, \"client_secret\": client_secret,\n \"refresh_token\": refresh_token, \"grant_type\": \"refresh_token\",\n \"audience\": audience}\n res = requests.post(OAUTH_TOKEN_URI, data=payload)\n return (str(json.loads(res.text)[u\"id_token\"]))\n", "path": "sdk/python/kfp/_auth.py"}], "after_files": [{"content": "# Copyright 2018 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport os\nimport subprocess\nimport google.auth\nimport google.auth.app_engine\nimport google.auth.compute_engine.credentials\nimport google.auth.iam\nfrom google.auth.transport.requests import Request\nimport google.oauth2.credentials\nimport google.oauth2.service_account\nimport requests_toolbelt.adapters.appengine\nfrom webbrowser import open_new_tab\nimport requests\nimport json\n\nIAM_SCOPE = 'https://www.googleapis.com/auth/iam'\nOAUTH_TOKEN_URI = 'https://www.googleapis.com/oauth2/v4/token'\nLOCAL_KFP_CREDENTIAL = os.path.expanduser('~/.config/kfp/credentials.json')\n\ndef get_gcp_access_token():\n \"\"\"Get and return GCP access token for the current Application Default\n Credentials. If not set, returns None. 
For more information, see\n https://cloud.google.com/sdk/gcloud/reference/auth/application-default/print-access-token\n \"\"\"\n args = ['gcloud', 'auth', 'print-access-token']\n # Casting to string to accommodate API server request schema.\n return subprocess.check_output(args).rstrip().decode(\"utf-8\")\n\ndef get_auth_token(client_id, other_client_id, other_client_secret):\n \"\"\"Gets auth token from default service account or user account.\"\"\"\n if os.path.exists(LOCAL_KFP_CREDENTIAL):\n # fetch IAP auth token using the locally stored credentials.\n with open(LOCAL_KFP_CREDENTIAL, 'r') as f:\n credentials = json.load(f)\n if client_id in credentials:\n return id_token_from_refresh_token(credentials[client_id]['other_client_id'],\n credentials[client_id]['other_client_secret'],\n credentials[client_id]['refresh_token'],\n client_id)\n if other_client_id is None or other_client_secret is None:\n # fetch IAP auth token: service accounts\n token = get_auth_token_from_sa(client_id)\n else:\n # fetch IAP auth token: user account\n # Obtain the ID token for provided Client ID with user accounts.\n # Flow: get authorization code -> exchange for refresh token -> obtain and return ID token\n refresh_token = get_refresh_token_from_client_id(other_client_id, other_client_secret)\n credentials = {}\n if os.path.exists(LOCAL_KFP_CREDENTIAL):\n with open(LOCAL_KFP_CREDENTIAL, 'r') as f:\n credentials = json.load(f)\n credentials[client_id] = {}\n credentials[client_id]['other_client_id'] = other_client_id\n credentials[client_id]['other_client_secret'] = other_client_secret\n credentials[client_id]['refresh_token'] = refresh_token\n #TODO: handle the case when the refresh_token expires.\n # which only happens if the refresh_token is not used once for six months.\n if not os.path.exists(os.path.dirname(LOCAL_KFP_CREDENTIAL)):\n os.makedirs(os.path.dirname(LOCAL_KFP_CREDENTIAL))\n with open(LOCAL_KFP_CREDENTIAL, 'w') as f:\n json.dump(credentials, f)\n token = id_token_from_refresh_token(other_client_id, other_client_secret, refresh_token, client_id)\n return token\n\ndef get_auth_token_from_sa(client_id):\n \"\"\"Gets auth token from default service account.\n\n If no service account credential is found, returns None.\n \"\"\"\n service_account_credentials = get_service_account_credentials(client_id)\n if service_account_credentials:\n return get_google_open_id_connect_token(service_account_credentials)\n return None\n\ndef get_service_account_credentials(client_id):\n # Figure out what environment we're running in and get some preliminary\n # information about the service account.\n bootstrap_credentials, _ = google.auth.default(\n scopes=[IAM_SCOPE])\n if isinstance(bootstrap_credentials,\n google.oauth2.credentials.Credentials):\n logging.info('Found OAuth2 credentials and skip SA auth.')\n return None\n elif isinstance(bootstrap_credentials,\n google.auth.app_engine.Credentials):\n requests_toolbelt.adapters.appengine.monkeypatch()\n\n # For service account's using the Compute Engine metadata service,\n # service_account_email isn't available until refresh is called.\n bootstrap_credentials.refresh(Request())\n signer_email = bootstrap_credentials.service_account_email\n if isinstance(bootstrap_credentials,\n google.auth.compute_engine.credentials.Credentials):\n # Since the Compute Engine metadata service doesn't expose the service\n # account key, we use the IAM signBlob API to sign instead.\n # In order for this to work:\n #\n # 1. 
Your VM needs the https://www.googleapis.com/auth/iam scope.\n # You can specify this specific scope when creating a VM\n # through the API or gcloud. When using Cloud Console,\n # you'll need to specify the \"full access to all Cloud APIs\"\n # scope. A VM's scopes can only be specified at creation time.\n #\n # 2. The VM's default service account needs the \"Service Account Actor\"\n # role. This can be found under the \"Project\" category in Cloud\n # Console, or roles/iam.serviceAccountActor in gcloud.\n signer = google.auth.iam.Signer(\n Request(), bootstrap_credentials, signer_email)\n else:\n # A Signer object can sign a JWT using the service account's key.\n signer = bootstrap_credentials.signer\n\n # Construct OAuth 2.0 service account credentials using the signer\n # and email acquired from the bootstrap credentials.\n return google.oauth2.service_account.Credentials(\n signer, signer_email, token_uri=OAUTH_TOKEN_URI, additional_claims={\n 'target_audience': client_id\n })\n\ndef get_google_open_id_connect_token(service_account_credentials):\n \"\"\"Get an OpenID Connect token issued by Google for the service account.\n This function:\n 1. Generates a JWT signed with the service account's private key\n containing a special \"target_audience\" claim.\n 2. Sends it to the OAUTH_TOKEN_URI endpoint. Because the JWT in #1\n has a target_audience claim, that endpoint will respond with\n an OpenID Connect token for the service account -- in other words,\n a JWT signed by *Google*. The aud claim in this JWT will be\n set to the value from the target_audience claim in #1.\n For more information, see\n https://developers.google.com/identity/protocols/OAuth2ServiceAccount .\n The HTTP/REST example on that page describes the JWT structure and\n demonstrates how to call the token endpoint. 
(The example on that page\n shows how to get an OAuth2 access token; this code is using a\n modified version of it to get an OpenID Connect token.)\n \"\"\"\n\n service_account_jwt = (\n service_account_credentials._make_authorization_grant_assertion())\n request = google.auth.transport.requests.Request()\n body = {\n 'assertion': service_account_jwt,\n 'grant_type': google.oauth2._client._JWT_GRANT_TYPE,\n }\n token_response = google.oauth2._client._token_endpoint_request(\n request, OAUTH_TOKEN_URI, body)\n return token_response['id_token']\n\ndef get_refresh_token_from_client_id(client_id, client_secret):\n \"\"\"Obtain the ID token for provided Client ID with user accounts.\n Flow: get authorization code -> exchange for refresh token -> obtain and return ID token\n \"\"\"\n auth_code = get_auth_code(client_id)\n return get_refresh_token_from_code(auth_code, client_id, client_secret)\n\ndef get_auth_code(client_id):\n auth_url = \"https://accounts.google.com/o/oauth2/v2/auth?client_id=%s&response_type=code&scope=openid%%20email&access_type=offline&redirect_uri=urn:ietf:wg:oauth:2.0:oob\"%client_id\n print(auth_url)\n open_new_tab(auth_url)\n return input(\"Authorization code: \")\n\ndef get_refresh_token_from_code(auth_code, client_id, client_secret):\n payload = {\"code\": auth_code, \"client_id\": client_id, \"client_secret\": client_secret,\n \"redirect_uri\": \"urn:ietf:wg:oauth:2.0:oob\", \"grant_type\": \"authorization_code\"}\n res = requests.post(OAUTH_TOKEN_URI, data=payload)\n return (str(json.loads(res.text)[u\"refresh_token\"]))\n\ndef id_token_from_refresh_token(client_id, client_secret, refresh_token, audience):\n payload = {\"client_id\": client_id, \"client_secret\": client_secret,\n \"refresh_token\": refresh_token, \"grant_type\": \"refresh_token\",\n \"audience\": audience}\n res = requests.post(OAUTH_TOKEN_URI, data=payload)\n return (str(json.loads(res.text)[u\"id_token\"]))\n", "path": "sdk/python/kfp/_auth.py"}]}
2,888
154
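The failure in the traceback above comes down to `subprocess.check_output` returning `bytes` under Python 3, which cannot be concatenated with the `str` prefix built by the generated API client. A small self-contained sketch of the failure and of the decode applied in the golden diff, using `echo` as a stand-in for the `gcloud` call (so it only runs where a POSIX `echo` is available):

```python
import subprocess

# Stand-in for ['gcloud', 'auth', 'print-access-token']: check_output returns
# bytes on Python 3 regardless of which command produced the output.
raw = subprocess.check_output(['echo', 'fake-token']).rstrip()
assert isinstance(raw, bytes)

try:
    header = 'Bearer' + ' ' + raw  # mirrors api_key_prefix + ' ' + api_key
except TypeError as exc:
    print('concatenation fails:', exc)

token = raw.decode('utf-8')  # the fix applied in _auth.py
header = 'Bearer' + ' ' + token
print(header)
```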
gh_patches_debug_25884
rasdani/github-patches
git_diff
searxng__searxng-2089
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Bug: bing engine crashes sometimes **Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG** Repository: https://github.com/searxng/searxng Branch: master Version: 2023.01.08-4e735b28 **How did you install SearXNG?** Docker **What happened?** Bing searches sometimes don't work. **How To Reproduce** `!bi certbot` (does not work at all for me) `!bi current events` (works ~50% of the time) **Expected behavior** Search works **Screenshots & Logs** ``` 2023-01-08 13:07:27,885 WARNING:searx.engines.bing: ErrorContext('searx/engines/bing.py', 89, "link = eval_xpath(result, './/h2/a')[0]", 'IndexError', None, ()) False 2023-01-08 13:07:27,885 ERROR:searx.engines.bing: exception : list index out of range Traceback (most recent call last): File "/usr/local/searxng/searx/search/processors/online.py", line 160, in search search_results = self._search_basic(query, params) File "/usr/local/searxng/searx/search/processors/online.py", line 148, in _search_basic return self.engine.response(response) File "/usr/local/searxng/searx/engines/bing.py", line 89, in response link = eval_xpath(result, './/h2/a')[0] IndexError: list index out of range ``` **Additional context** Many searches *do* work. Reproducible on some*, but not most, public instances running `2023.01.08-4e735b28`. At least using the specific repro searches above. \* https://search.rhscz.eu , https://xo.wtf , out of the 7 running the current version --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `searx/engines/bing.py` Content: ``` 1 # SPDX-License-Identifier: AGPL-3.0-or-later 2 # lint: pylint 3 """Bing (Web) 4 5 - https://github.com/searx/searx/issues/2019#issuecomment-648227442 6 """ 7 # pylint: disable=too-many-branches 8 9 import re 10 from urllib.parse import urlencode, urlparse, parse_qs 11 from lxml import html 12 from searx.utils import eval_xpath, extract_text, eval_xpath_list, match_language 13 from searx.network import multi_requests, Request 14 15 about = { 16 "website": 'https://www.bing.com', 17 "wikidata_id": 'Q182496', 18 "official_api_documentation": 'https://www.microsoft.com/en-us/bing/apis/bing-web-search-api', 19 "use_official_api": False, 20 "require_api_key": False, 21 "results": 'HTML', 22 } 23 24 # engine dependent config 25 categories = ['general', 'web'] 26 paging = True 27 time_range_support = False 28 safesearch = False 29 send_accept_language_header = True 30 supported_languages_url = 'https://www.bing.com/account/general' 31 language_aliases = {} 32 33 # search-url 34 base_url = 'https://www.bing.com/' 35 36 # initial query: https://www.bing.com/search?q=foo&search=&form=QBLH 37 inital_query = 'search?{query}&search=&form=QBLH' 38 39 # following queries: https://www.bing.com/search?q=foo&search=&first=11&FORM=PERE 40 page_query = 'search?{query}&search=&first={offset}&FORM=PERE' 41 42 43 def _get_offset_from_pageno(pageno): 44 return (pageno - 1) * 10 + 1 45 46 47 def request(query, params): 48 49 offset = _get_offset_from_pageno(params.get('pageno', 1)) 50 51 # logger.debug("params['pageno'] --> %s", params.get('pageno')) 52 # logger.debug(" offset --> %s", offset) 53 54 search_string = page_query 55 if offset == 1: 56 search_string = inital_query 57 58 if params['language'] == 'all': 59 lang = 'EN' 60 else: 61 lang = match_language(params['language'], 
supported_languages, language_aliases) 62 63 query = 'language:{} {}'.format(lang.split('-')[0].upper(), query) 64 65 search_path = search_string.format(query=urlencode({'q': query}), offset=offset) 66 67 if offset > 1: 68 referer = base_url + inital_query.format(query=urlencode({'q': query})) 69 params['headers']['Referer'] = referer 70 logger.debug("headers.Referer --> %s", referer) 71 72 params['url'] = base_url + search_path 73 params['headers']['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8' 74 return params 75 76 77 def response(resp): 78 results = [] 79 result_len = 0 80 81 dom = html.fromstring(resp.text) 82 83 # parse results again if nothing is found yet 84 85 url_to_resolve = [] 86 url_to_resolve_index = [] 87 for i, result in enumerate(eval_xpath_list(dom, '//li[contains(@class, "b_algo")]')): 88 89 link = eval_xpath(result, './/h2/a')[0] 90 url = link.attrib.get('href') 91 title = extract_text(link) 92 93 # Make sure that the element is free of <a href> links and <span class='algoSlug_icon'> 94 content = eval_xpath(result, '(.//p)[1]') 95 for p in content: 96 for e in p.xpath('.//a'): 97 e.getparent().remove(e) 98 for e in p.xpath('.//span[@class="algoSlug_icon"]'): 99 e.getparent().remove(e) 100 content = extract_text(content) 101 102 # get the real URL either using the URL shown to user or following the Bing URL 103 if url.startswith('https://www.bing.com/ck/a?'): 104 url_cite = extract_text(eval_xpath(result, './/div[@class="b_attribution"]/cite')) 105 # Bing can shorten the URL either at the end or in the middle of the string 106 if ( 107 url_cite.startswith('https://') 108 and '…' not in url_cite 109 and '...' not in url_cite 110 and '›' not in url_cite 111 ): 112 # no need for an additional HTTP request 113 url = url_cite 114 else: 115 # resolve the URL with an additional HTTP request 116 url_to_resolve.append(url.replace('&ntb=1', '&ntb=F')) 117 url_to_resolve_index.append(i) 118 url = None # remove the result if the HTTP Bing redirect raise an exception 119 120 # append result 121 results.append({'url': url, 'title': title, 'content': content}) 122 123 # resolve all Bing redirections in parallel 124 request_list = [ 125 Request.get(u, allow_redirects=False, headers=resp.search_params['headers']) for u in url_to_resolve 126 ] 127 response_list = multi_requests(request_list) 128 for i, redirect_response in enumerate(response_list): 129 if not isinstance(redirect_response, Exception): 130 results[url_to_resolve_index[i]]['url'] = redirect_response.headers['location'] 131 132 # get number_of_results 133 try: 134 result_len_container = "".join(eval_xpath(dom, '//span[@class="sb_count"]//text()')) 135 if "-" in result_len_container: 136 137 # Remove the part "from-to" for paginated request ... 
138 result_len_container = result_len_container[result_len_container.find("-") * 2 + 2 :] 139 140 result_len_container = re.sub('[^0-9]', '', result_len_container) 141 142 if len(result_len_container) > 0: 143 result_len = int(result_len_container) 144 145 except Exception as e: # pylint: disable=broad-except 146 logger.debug('result error :\n%s', e) 147 148 if result_len and _get_offset_from_pageno(resp.search_params.get("pageno", 0)) > result_len: 149 return [] 150 151 results.append({'number_of_results': result_len}) 152 return results 153 154 155 # get supported languages from their site 156 def _fetch_supported_languages(resp): 157 158 lang_tags = set() 159 160 dom = html.fromstring(resp.text) 161 lang_links = eval_xpath(dom, '//div[@id="language-section"]//li') 162 163 for _li in lang_links: 164 165 href = eval_xpath(_li, './/@href')[0] 166 (_scheme, _netloc, _path, _params, query, _fragment) = urlparse(href) 167 query = parse_qs(query, keep_blank_values=True) 168 169 # fmt: off 170 setlang = query.get('setlang', [None, ])[0] 171 # example: 'mn-Cyrl-MN' --> '['mn', 'Cyrl-MN'] 172 lang, nation = (setlang.split('-', maxsplit=1) + [None,])[:2] # fmt: skip 173 # fmt: on 174 175 tag = lang + '-' + nation if nation else lang 176 lang_tags.add(tag) 177 178 return list(lang_tags) 179 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/searx/engines/bing.py b/searx/engines/bing.py
--- a/searx/engines/bing.py
+++ b/searx/engines/bing.py
@@ -9,7 +9,7 @@
 import re
 from urllib.parse import urlencode, urlparse, parse_qs
 from lxml import html
-from searx.utils import eval_xpath, extract_text, eval_xpath_list, match_language
+from searx.utils import eval_xpath, extract_text, eval_xpath_list, match_language, eval_xpath_getindex
 from searx.network import multi_requests, Request
 
 about = {
@@ -84,9 +84,12 @@
 
     url_to_resolve = []
     url_to_resolve_index = []
-    for i, result in enumerate(eval_xpath_list(dom, '//li[contains(@class, "b_algo")]')):
+    i = 0
+    for result in eval_xpath_list(dom, '//ol[@id="b_results"]/li[contains(@class, "b_algo")]'):
 
-        link = eval_xpath(result, './/h2/a')[0]
+        link = eval_xpath_getindex(result, './/h2/a', 0, None)
+        if link is None:
+            continue
         url = link.attrib.get('href')
         title = extract_text(link)
 
@@ -119,6 +122,8 @@
 
         # append result
         results.append({'url': url, 'title': title, 'content': content})
+        # increment result pointer for the next iteration in this loop
+        i += 1
 
     # resolve all Bing redirections in parallel
     request_list = [
{"golden_diff": "diff --git a/searx/engines/bing.py b/searx/engines/bing.py\n--- a/searx/engines/bing.py\n+++ b/searx/engines/bing.py\n@@ -9,7 +9,7 @@\n import re\n from urllib.parse import urlencode, urlparse, parse_qs\n from lxml import html\n-from searx.utils import eval_xpath, extract_text, eval_xpath_list, match_language\n+from searx.utils import eval_xpath, extract_text, eval_xpath_list, match_language, eval_xpath_getindex\n from searx.network import multi_requests, Request\n \n about = {\n@@ -84,9 +84,12 @@\n \n url_to_resolve = []\n url_to_resolve_index = []\n- for i, result in enumerate(eval_xpath_list(dom, '//li[contains(@class, \"b_algo\")]')):\n+ i = 0\n+ for result in eval_xpath_list(dom, '//ol[@id=\"b_results\"]/li[contains(@class, \"b_algo\")]'):\n \n- link = eval_xpath(result, './/h2/a')[0]\n+ link = eval_xpath_getindex(result, './/h2/a', 0, None)\n+ if link is None:\n+ continue\n url = link.attrib.get('href')\n title = extract_text(link)\n \n@@ -119,6 +122,8 @@\n \n # append result\n results.append({'url': url, 'title': title, 'content': content})\n+ # increment result pointer for the next iteration in this loop\n+ i += 1\n \n # resolve all Bing redirections in parallel\n request_list = [\n", "issue": "Bug: bing engine crashes sometimes\n**Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG**\r\nRepository: https://github.com/searxng/searxng\r\nBranch: master\r\nVersion: 2023.01.08-4e735b28\r\n\r\n**How did you install SearXNG?**\r\nDocker\r\n**What happened?**\r\nBing searches sometimes don't work.\r\n\r\n**How To Reproduce**\r\n`!bi certbot` (does not work at all for me) \r\n`!bi current events` (works ~50% of the time)\r\n\r\n**Expected behavior**\r\nSearch works\r\n\r\n**Screenshots & Logs**\r\n```\r\n2023-01-08 13:07:27,885 WARNING:searx.engines.bing: ErrorContext('searx/engines/bing.py', 89, \"link = eval_xpath(result, './/h2/a')[0]\", 'IndexError', None, ()) False\r\n2023-01-08 13:07:27,885 ERROR:searx.engines.bing: exception : list index out of range\r\nTraceback (most recent call last):\r\n File \"/usr/local/searxng/searx/search/processors/online.py\", line 160, in search\r\n search_results = self._search_basic(query, params)\r\n File \"/usr/local/searxng/searx/search/processors/online.py\", line 148, in _search_basic\r\n return self.engine.response(response)\r\n File \"/usr/local/searxng/searx/engines/bing.py\", line 89, in response\r\n link = eval_xpath(result, './/h2/a')[0]\r\nIndexError: list index out of range\r\n```\r\n\r\n**Additional context**\r\nMany searches *do* work.\r\n\r\nReproducible on some*, but not most, public instances running `2023.01.08-4e735b28`. 
At least using the specific repro searches above.\r\n\r\n\\* https://search.rhscz.eu , https://xo.wtf , out of the 7 running the current version\n", "before_files": [{"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n# lint: pylint\n\"\"\"Bing (Web)\n\n- https://github.com/searx/searx/issues/2019#issuecomment-648227442\n\"\"\"\n# pylint: disable=too-many-branches\n\nimport re\nfrom urllib.parse import urlencode, urlparse, parse_qs\nfrom lxml import html\nfrom searx.utils import eval_xpath, extract_text, eval_xpath_list, match_language\nfrom searx.network import multi_requests, Request\n\nabout = {\n \"website\": 'https://www.bing.com',\n \"wikidata_id\": 'Q182496',\n \"official_api_documentation\": 'https://www.microsoft.com/en-us/bing/apis/bing-web-search-api',\n \"use_official_api\": False,\n \"require_api_key\": False,\n \"results\": 'HTML',\n}\n\n# engine dependent config\ncategories = ['general', 'web']\npaging = True\ntime_range_support = False\nsafesearch = False\nsend_accept_language_header = True\nsupported_languages_url = 'https://www.bing.com/account/general'\nlanguage_aliases = {}\n\n# search-url\nbase_url = 'https://www.bing.com/'\n\n# initial query: https://www.bing.com/search?q=foo&search=&form=QBLH\ninital_query = 'search?{query}&search=&form=QBLH'\n\n# following queries: https://www.bing.com/search?q=foo&search=&first=11&FORM=PERE\npage_query = 'search?{query}&search=&first={offset}&FORM=PERE'\n\n\ndef _get_offset_from_pageno(pageno):\n return (pageno - 1) * 10 + 1\n\n\ndef request(query, params):\n\n offset = _get_offset_from_pageno(params.get('pageno', 1))\n\n # logger.debug(\"params['pageno'] --> %s\", params.get('pageno'))\n # logger.debug(\" offset --> %s\", offset)\n\n search_string = page_query\n if offset == 1:\n search_string = inital_query\n\n if params['language'] == 'all':\n lang = 'EN'\n else:\n lang = match_language(params['language'], supported_languages, language_aliases)\n\n query = 'language:{} {}'.format(lang.split('-')[0].upper(), query)\n\n search_path = search_string.format(query=urlencode({'q': query}), offset=offset)\n\n if offset > 1:\n referer = base_url + inital_query.format(query=urlencode({'q': query}))\n params['headers']['Referer'] = referer\n logger.debug(\"headers.Referer --> %s\", referer)\n\n params['url'] = base_url + search_path\n params['headers']['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'\n return params\n\n\ndef response(resp):\n results = []\n result_len = 0\n\n dom = html.fromstring(resp.text)\n\n # parse results again if nothing is found yet\n\n url_to_resolve = []\n url_to_resolve_index = []\n for i, result in enumerate(eval_xpath_list(dom, '//li[contains(@class, \"b_algo\")]')):\n\n link = eval_xpath(result, './/h2/a')[0]\n url = link.attrib.get('href')\n title = extract_text(link)\n\n # Make sure that the element is free of <a href> links and <span class='algoSlug_icon'>\n content = eval_xpath(result, '(.//p)[1]')\n for p in content:\n for e in p.xpath('.//a'):\n e.getparent().remove(e)\n for e in p.xpath('.//span[@class=\"algoSlug_icon\"]'):\n e.getparent().remove(e)\n content = extract_text(content)\n\n # get the real URL either using the URL shown to user or following the Bing URL\n if url.startswith('https://www.bing.com/ck/a?'):\n url_cite = extract_text(eval_xpath(result, './/div[@class=\"b_attribution\"]/cite'))\n # Bing can shorten the URL either at the end or in the middle of the string\n if (\n url_cite.startswith('https://')\n and '\u2026' not in url_cite\n and 
'...' not in url_cite\n and '\u203a' not in url_cite\n ):\n # no need for an additional HTTP request\n url = url_cite\n else:\n # resolve the URL with an additional HTTP request\n url_to_resolve.append(url.replace('&ntb=1', '&ntb=F'))\n url_to_resolve_index.append(i)\n url = None # remove the result if the HTTP Bing redirect raise an exception\n\n # append result\n results.append({'url': url, 'title': title, 'content': content})\n\n # resolve all Bing redirections in parallel\n request_list = [\n Request.get(u, allow_redirects=False, headers=resp.search_params['headers']) for u in url_to_resolve\n ]\n response_list = multi_requests(request_list)\n for i, redirect_response in enumerate(response_list):\n if not isinstance(redirect_response, Exception):\n results[url_to_resolve_index[i]]['url'] = redirect_response.headers['location']\n\n # get number_of_results\n try:\n result_len_container = \"\".join(eval_xpath(dom, '//span[@class=\"sb_count\"]//text()'))\n if \"-\" in result_len_container:\n\n # Remove the part \"from-to\" for paginated request ...\n result_len_container = result_len_container[result_len_container.find(\"-\") * 2 + 2 :]\n\n result_len_container = re.sub('[^0-9]', '', result_len_container)\n\n if len(result_len_container) > 0:\n result_len = int(result_len_container)\n\n except Exception as e: # pylint: disable=broad-except\n logger.debug('result error :\\n%s', e)\n\n if result_len and _get_offset_from_pageno(resp.search_params.get(\"pageno\", 0)) > result_len:\n return []\n\n results.append({'number_of_results': result_len})\n return results\n\n\n# get supported languages from their site\ndef _fetch_supported_languages(resp):\n\n lang_tags = set()\n\n dom = html.fromstring(resp.text)\n lang_links = eval_xpath(dom, '//div[@id=\"language-section\"]//li')\n\n for _li in lang_links:\n\n href = eval_xpath(_li, './/@href')[0]\n (_scheme, _netloc, _path, _params, query, _fragment) = urlparse(href)\n query = parse_qs(query, keep_blank_values=True)\n\n # fmt: off\n setlang = query.get('setlang', [None, ])[0]\n # example: 'mn-Cyrl-MN' --> '['mn', 'Cyrl-MN']\n lang, nation = (setlang.split('-', maxsplit=1) + [None,])[:2] # fmt: skip\n # fmt: on\n\n tag = lang + '-' + nation if nation else lang\n lang_tags.add(tag)\n\n return list(lang_tags)\n", "path": "searx/engines/bing.py"}], "after_files": [{"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n# lint: pylint\n\"\"\"Bing (Web)\n\n- https://github.com/searx/searx/issues/2019#issuecomment-648227442\n\"\"\"\n# pylint: disable=too-many-branches\n\nimport re\nfrom urllib.parse import urlencode, urlparse, parse_qs\nfrom lxml import html\nfrom searx.utils import eval_xpath, extract_text, eval_xpath_list, match_language, eval_xpath_getindex\nfrom searx.network import multi_requests, Request\n\nabout = {\n \"website\": 'https://www.bing.com',\n \"wikidata_id\": 'Q182496',\n \"official_api_documentation\": 'https://www.microsoft.com/en-us/bing/apis/bing-web-search-api',\n \"use_official_api\": False,\n \"require_api_key\": False,\n \"results\": 'HTML',\n}\n\n# engine dependent config\ncategories = ['general', 'web']\npaging = True\ntime_range_support = False\nsafesearch = False\nsend_accept_language_header = True\nsupported_languages_url = 'https://www.bing.com/account/general'\nlanguage_aliases = {}\n\n# search-url\nbase_url = 'https://www.bing.com/'\n\n# initial query: https://www.bing.com/search?q=foo&search=&form=QBLH\ninital_query = 'search?{query}&search=&form=QBLH'\n\n# following queries: 
https://www.bing.com/search?q=foo&search=&first=11&FORM=PERE\npage_query = 'search?{query}&search=&first={offset}&FORM=PERE'\n\n\ndef _get_offset_from_pageno(pageno):\n return (pageno - 1) * 10 + 1\n\n\ndef request(query, params):\n\n offset = _get_offset_from_pageno(params.get('pageno', 1))\n\n # logger.debug(\"params['pageno'] --> %s\", params.get('pageno'))\n # logger.debug(\" offset --> %s\", offset)\n\n search_string = page_query\n if offset == 1:\n search_string = inital_query\n\n if params['language'] == 'all':\n lang = 'EN'\n else:\n lang = match_language(params['language'], supported_languages, language_aliases)\n\n query = 'language:{} {}'.format(lang.split('-')[0].upper(), query)\n\n search_path = search_string.format(query=urlencode({'q': query}), offset=offset)\n\n if offset > 1:\n referer = base_url + inital_query.format(query=urlencode({'q': query}))\n params['headers']['Referer'] = referer\n logger.debug(\"headers.Referer --> %s\", referer)\n\n params['url'] = base_url + search_path\n params['headers']['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'\n return params\n\n\ndef response(resp):\n results = []\n result_len = 0\n\n dom = html.fromstring(resp.text)\n\n # parse results again if nothing is found yet\n\n url_to_resolve = []\n url_to_resolve_index = []\n i = 0\n for result in eval_xpath_list(dom, '//ol[@id=\"b_results\"]/li[contains(@class, \"b_algo\")]'):\n\n link = eval_xpath_getindex(result, './/h2/a', 0, None)\n if link is None:\n continue\n url = link.attrib.get('href')\n title = extract_text(link)\n\n # Make sure that the element is free of <a href> links and <span class='algoSlug_icon'>\n content = eval_xpath(result, '(.//p)[1]')\n for p in content:\n for e in p.xpath('.//a'):\n e.getparent().remove(e)\n for e in p.xpath('.//span[@class=\"algoSlug_icon\"]'):\n e.getparent().remove(e)\n content = extract_text(content)\n\n # get the real URL either using the URL shown to user or following the Bing URL\n if url.startswith('https://www.bing.com/ck/a?'):\n url_cite = extract_text(eval_xpath(result, './/div[@class=\"b_attribution\"]/cite'))\n # Bing can shorten the URL either at the end or in the middle of the string\n if (\n url_cite.startswith('https://')\n and '\u2026' not in url_cite\n and '...' 
not in url_cite\n and '\u203a' not in url_cite\n ):\n # no need for an additional HTTP request\n url = url_cite\n else:\n # resolve the URL with an additional HTTP request\n url_to_resolve.append(url.replace('&ntb=1', '&ntb=F'))\n url_to_resolve_index.append(i)\n url = None # remove the result if the HTTP Bing redirect raise an exception\n\n # append result\n results.append({'url': url, 'title': title, 'content': content})\n # increment result pointer for the next iteration in this loop\n i += 1\n\n # resolve all Bing redirections in parallel\n request_list = [\n Request.get(u, allow_redirects=False, headers=resp.search_params['headers']) for u in url_to_resolve\n ]\n response_list = multi_requests(request_list)\n for i, redirect_response in enumerate(response_list):\n if not isinstance(redirect_response, Exception):\n results[url_to_resolve_index[i]]['url'] = redirect_response.headers['location']\n\n # get number_of_results\n try:\n result_len_container = \"\".join(eval_xpath(dom, '//span[@class=\"sb_count\"]//text()'))\n if \"-\" in result_len_container:\n\n # Remove the part \"from-to\" for paginated request ...\n result_len_container = result_len_container[result_len_container.find(\"-\") * 2 + 2 :]\n\n result_len_container = re.sub('[^0-9]', '', result_len_container)\n\n if len(result_len_container) > 0:\n result_len = int(result_len_container)\n\n except Exception as e: # pylint: disable=broad-except\n logger.debug('result error :\\n%s', e)\n\n if result_len and _get_offset_from_pageno(resp.search_params.get(\"pageno\", 0)) > result_len:\n return []\n\n results.append({'number_of_results': result_len})\n return results\n\n\n# get supported languages from their site\ndef _fetch_supported_languages(resp):\n\n lang_tags = set()\n\n dom = html.fromstring(resp.text)\n lang_links = eval_xpath(dom, '//div[@id=\"language-section\"]//li')\n\n for _li in lang_links:\n\n href = eval_xpath(_li, './/@href')[0]\n (_scheme, _netloc, _path, _params, query, _fragment) = urlparse(href)\n query = parse_qs(query, keep_blank_values=True)\n\n # fmt: off\n setlang = query.get('setlang', [None, ])[0]\n # example: 'mn-Cyrl-MN' --> '['mn', 'Cyrl-MN']\n lang, nation = (setlang.split('-', maxsplit=1) + [None,])[:2] # fmt: skip\n # fmt: on\n\n tag = lang + '-' + nation if nation else lang\n lang_tags.add(tag)\n\n return list(lang_tags)\n", "path": "searx/engines/bing.py"}]}
num_tokens: 2,771
num_tokens_diff: 361
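The fix in the searx bing.py record above comes down to one piece of bookkeeping: the parsing loop records positions in `url_to_resolve_index` while it appends to `results`, so the running index has to be advanced on every iteration, not only when a URL is queued for later resolution; otherwise the recorded positions drift away from the entries they are meant to patch. Below is a minimal, self-contained sketch of that deferred-patching pattern. The item shapes and the fake redirect prefix are hypothetical stand-ins, not taken from the record.

```python
# Minimal sketch of the deferred-patching pattern from the bing.py record above.
# The item tuples and the "redirect.example" prefix are hypothetical stand-ins.

def collect(items):
    results = []
    to_resolve = []        # URLs that need a second pass
    to_resolve_index = []  # position of each such entry inside `results`

    i = 0
    for url, title in items:
        if url.startswith("https://redirect.example/"):
            to_resolve.append(url)
            to_resolve_index.append(i)
            url = None  # placeholder until the second pass fills it in
        results.append({"url": url, "title": title})
        i += 1  # must advance for every appended result, queued or not

    # Second pass: patch the placeholders back into the matching slots.
    for idx, original in zip(to_resolve_index, to_resolve):
        results[idx]["url"] = original.replace("https://redirect.example/", "https://")
    return results


if __name__ == "__main__":
    sample = [
        ("https://a.example/", "A"),
        ("https://redirect.example/b.example/", "B"),
        ("https://c.example/", "C"),
    ]
    for row in collect(sample):
        print(row)  # only index 1 gets patched; 0 and 2 keep their URLs
```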
problem_id: gh_patches_debug_39315
source: rasdani/github-patches
task_type: git_diff
in_source_id: pypa__pipenv-2641
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- pipenv shell does not work when running Git for Windows' bash on Windows ##### Issue description Running `pipenv shell` on windows when running bash in Git for Windows does not activate the virtual env successfully. It appears the actual activate script with the correct values is not being used. ##### Expected result I would successfully be in the virtualenv ##### Actual result The `VIRTUAL_ENV` and `PATH` variables are in the `C:` notation instead of `/c/` notation which would have worked. ``` declare -x VIRTUAL_ENV="C:\\Users\\user\\.virtualenvs\\project-OJ2s3Ey8" declare -x PATH="C:/Users/user/.virtualenvs/project-OJ2s3Ey8/Scripts:/c/Users/user/bin:/mingw64/bin:/usr/local/bin:/usr/bin:/usr/bin:/mingw64/bin:/usr/bin:/c/Users/andy/bin:/c/Python37/Scripts:/c/Python37" etc... ``` Sourcing the virtualenv activate variable does work, even though Git for Windows uses `msys` and not `cygwin`. Did not test cygwin ##### Steps to replicate 1. `pipenv install sphinx` 1. `echo $$` 1. `pipenv shell` 1. `echo $$` 1. `pip freeze` 1. `echo $PATH` Shows that the pip environment is the original python environment, not the virtualenv. The different pid verifies that a new shell is indeed being run, but the virtualenv is not activating Also tried 1. `PIPENV_EMULATOR=bash pipenv shell` Identical results <details><summary>$ pipenv --support</summary> Pipenv version: `'2018.7.1'` Pipenv location: `'c:\\python37\\lib\\site-packages\\pipenv'` Python location: `'c:\\python37\\python.exe'` Other Python installations in `PATH`: - `3.7`: `C:\Python37\python.exe` - `3.7.0`: `C:\Python37\python.exe` - `3.7.0`: `C:\Windows\py.exe` PEP 508 Information: ``` {'implementation_name': 'cpython', 'implementation_version': '3.7.0', 'os_name': 'nt', 'platform_machine': 'AMD64', 'platform_python_implementation': 'CPython', 'platform_release': '10', 'platform_system': 'Windows', 'platform_version': '10.0.17134', 'python_full_version': '3.7.0', 'python_version': '3.7', 'sys_platform': 'win32'} ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pipenv/shells.py` Content: ``` 1 import collections 2 import contextlib 3 import os 4 import signal 5 import subprocess 6 import sys 7 8 from ._compat import get_terminal_size, Path 9 from .environments import PIPENV_SHELL_EXPLICIT, PIPENV_SHELL, PIPENV_EMULATOR 10 from .utils import temp_environ 11 from .vendor import shellingham 12 13 14 ShellDetectionFailure = shellingham.ShellDetectionFailure 15 16 17 def _build_info(value): 18 return (os.path.splitext(os.path.basename(value))[0], value) 19 20 21 def detect_info(): 22 if PIPENV_SHELL_EXPLICIT: 23 return _build_info(PIPENV_SHELL_EXPLICIT) 24 try: 25 return shellingham.detect_shell() 26 except (shellingham.ShellDetectionFailure, TypeError): 27 if PIPENV_SHELL: 28 return _build_info(PIPENV_SHELL) 29 raise ShellDetectionFailure 30 31 32 def _get_activate_script(venv): 33 """Returns the string to activate a virtualenv. 34 35 This is POSIX-only at the moment since the compat (pexpect-based) shell 36 does not work elsewhere anyway. 37 """ 38 # Suffix and source command for other shells. 39 # Support for fish shell. 40 if PIPENV_SHELL and "fish" in PIPENV_SHELL: 41 suffix = ".fish" 42 command = "source" 43 # Support for csh shell. 
44 elif PIPENV_SHELL and "csh" in PIPENV_SHELL: 45 suffix = ".csh" 46 command = "source" 47 else: 48 suffix = "" 49 command = "." 50 # Escape any spaces located within the virtualenv path to allow 51 # for proper activation. 52 venv_location = str(venv).replace(" ", r"\ ") 53 # The leading space can make history cleaner in some shells. 54 return " {2} {0}/bin/activate{1}".format(venv_location, suffix, command) 55 56 57 def _handover(cmd, args): 58 args = [cmd] + args 59 if os.name != "nt": 60 os.execvp(cmd, args) 61 else: 62 sys.exit(subprocess.call(args, shell=True, universal_newlines=True)) 63 64 65 class Shell(object): 66 def __init__(self, cmd): 67 self.cmd = cmd 68 self.args = [] 69 70 def __repr__(self): 71 return '{type}(cmd={cmd!r})'.format( 72 type=type(self).__name__, 73 cmd=self.cmd, 74 ) 75 76 @contextlib.contextmanager 77 def inject_path(self, venv): 78 with temp_environ(): 79 os.environ["PATH"] = "{0}{1}{2}".format( 80 os.pathsep.join(str(p.parent) for p in _iter_python(venv)), 81 os.pathsep, 82 os.environ["PATH"], 83 ) 84 yield 85 86 def fork(self, venv, cwd, args): 87 # FIXME: This isn't necessarily the correct prompt. We should read the 88 # actual prompt by peeking into the activation script. 89 name = os.path.basename(venv) 90 os.environ["VIRTUAL_ENV"] = str(venv) 91 if "PROMPT" in os.environ: 92 os.environ["PROMPT"] = "({0}) {1}".format(name, os.environ["PROMPT"]) 93 if "PS1" in os.environ: 94 os.environ["PS1"] = "({0}) {1}".format(name, os.environ["PS1"]) 95 with self.inject_path(venv): 96 os.chdir(cwd) 97 _handover(self.cmd, self.args + list(args)) 98 99 def fork_compat(self, venv, cwd, args): 100 from .vendor import pexpect 101 102 # Grab current terminal dimensions to replace the hardcoded default 103 # dimensions of pexpect. 104 dims = get_terminal_size() 105 with temp_environ(): 106 c = pexpect.spawn(self.cmd, ["-i"], dimensions=(dims.lines, dims.columns)) 107 c.sendline(_get_activate_script(venv)) 108 if args: 109 c.sendline(" ".join(args)) 110 111 # Handler for terminal resizing events 112 # Must be defined here to have the shell process in its context, since 113 # we can't pass it as an argument 114 def sigwinch_passthrough(sig, data): 115 dims = get_terminal_size() 116 c.setwinsize(dims.lines, dims.columns) 117 118 signal.signal(signal.SIGWINCH, sigwinch_passthrough) 119 120 # Interact with the new shell. 121 c.interact(escape_character=None) 122 c.close() 123 sys.exit(c.exitstatus) 124 125 126 POSSIBLE_ENV_PYTHON = [Path("bin", "python"), Path("Scripts", "python.exe")] 127 128 129 def _iter_python(venv): 130 for path in POSSIBLE_ENV_PYTHON: 131 full_path = Path(venv, path) 132 if full_path.is_file(): 133 yield full_path 134 135 136 class Bash(Shell): 137 # The usual PATH injection technique does not work with Bash. 
138 # https://github.com/berdario/pew/issues/58#issuecomment-102182346 139 @contextlib.contextmanager 140 def inject_path(self, venv): 141 from ._compat import NamedTemporaryFile 142 143 bashrc_path = Path.home().joinpath(".bashrc") 144 with NamedTemporaryFile("w+") as rcfile: 145 if bashrc_path.is_file(): 146 base_rc_src = 'source "{0}"\n'.format(bashrc_path.as_posix()) 147 rcfile.write(base_rc_src) 148 149 export_path = 'export PATH="{0}:$PATH"\n'.format( 150 ":".join(python.parent.as_posix() for python in _iter_python(venv)) 151 ) 152 rcfile.write(export_path) 153 rcfile.flush() 154 self.args.extend(["--rcfile", rcfile.name]) 155 yield 156 157 158 class CmderEmulatedShell(Shell): 159 def fork(self, venv, cwd, args): 160 if cwd: 161 os.environ["CMDER_START"] = cwd 162 super(CmderEmulatedShell, self).fork(venv, cwd, args) 163 164 165 class CmderCommandPrompt(CmderEmulatedShell): 166 def fork(self, venv, cwd, args): 167 rc = os.path.expandvars("%CMDER_ROOT%\\vendor\\init.bat") 168 if os.path.exists(rc): 169 self.args.extend(["/k", rc]) 170 super(CmderCommandPrompt, self).fork(venv, cwd, args) 171 172 173 class CmderPowershell(Shell): 174 def fork(self, venv, cwd, args): 175 rc = os.path.expandvars("%CMDER_ROOT%\\vendor\\profile.ps1") 176 if os.path.exists(rc): 177 self.args.extend( 178 [ 179 "-ExecutionPolicy", 180 "Bypass", 181 "-NoLogo", 182 "-NoProfile", 183 "-NoExit", 184 "-Command", 185 "Invoke-Expression '. ''{0}'''".format(rc), 186 ] 187 ) 188 super(CmderPowershell, self).fork(venv, cwd, args) 189 190 191 # Two dimensional dict. First is the shell type, second is the emulator type. 192 # Example: SHELL_LOOKUP['powershell']['cmder'] => CmderPowershell. 193 SHELL_LOOKUP = collections.defaultdict( 194 lambda: collections.defaultdict(lambda: Shell), 195 { 196 "bash": collections.defaultdict(lambda: Bash), 197 "cmd": collections.defaultdict(lambda: Shell, {"cmder": CmderCommandPrompt}), 198 "powershell": collections.defaultdict( 199 lambda: Shell, {"cmder": CmderPowershell} 200 ), 201 "pwsh": collections.defaultdict(lambda: Shell, {"cmder": CmderPowershell}), 202 }, 203 ) 204 205 206 def _detect_emulator(): 207 if os.environ.get("CMDER_ROOT"): 208 return "cmder" 209 return "" 210 211 212 def choose_shell(): 213 emulator = PIPENV_EMULATOR.lower() or _detect_emulator() 214 type_, command = detect_info() 215 return SHELL_LOOKUP[type_][emulator](command) 216 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pipenv/shells.py b/pipenv/shells.py --- a/pipenv/shells.py +++ b/pipenv/shells.py @@ -134,6 +134,9 @@ class Bash(Shell): + def _format_path(self, python): + return python.parent.as_posix() + # The usual PATH injection technique does not work with Bash. # https://github.com/berdario/pew/issues/58#issuecomment-102182346 @contextlib.contextmanager @@ -146,15 +149,25 @@ base_rc_src = 'source "{0}"\n'.format(bashrc_path.as_posix()) rcfile.write(base_rc_src) - export_path = 'export PATH="{0}:$PATH"\n'.format( - ":".join(python.parent.as_posix() for python in _iter_python(venv)) - ) + export_path = 'export PATH="{0}:$PATH"\n'.format(":".join( + self._format_path(python) + for python in _iter_python(venv) + )) rcfile.write(export_path) rcfile.flush() self.args.extend(["--rcfile", rcfile.name]) yield +class MsysBash(Bash): + def _format_path(self, python): + s = super(MsysBash, self)._format_path(python) + if not python.drive: + return s + # Convert "C:/something" to "/c/something". + return '/{drive}{path}'.format(drive=s[0].lower(), path=s[2:]) + + class CmderEmulatedShell(Shell): def fork(self, venv, cwd, args): if cwd: @@ -193,23 +206,37 @@ SHELL_LOOKUP = collections.defaultdict( lambda: collections.defaultdict(lambda: Shell), { - "bash": collections.defaultdict(lambda: Bash), - "cmd": collections.defaultdict(lambda: Shell, {"cmder": CmderCommandPrompt}), + "bash": collections.defaultdict( + lambda: Bash, {"msys": MsysBash}, + ), + "cmd": collections.defaultdict( + lambda: Shell, {"cmder": CmderCommandPrompt}, + ), "powershell": collections.defaultdict( - lambda: Shell, {"cmder": CmderPowershell} + lambda: Shell, {"cmder": CmderPowershell}, + ), + "pwsh": collections.defaultdict( + lambda: Shell, {"cmder": CmderPowershell}, ), - "pwsh": collections.defaultdict(lambda: Shell, {"cmder": CmderPowershell}), }, ) def _detect_emulator(): + keys = [] if os.environ.get("CMDER_ROOT"): - return "cmder" - return "" + keys.append("cmder") + if os.environ.get("MSYSTEM"): + keys.append("msys") + return ",".join(keys) def choose_shell(): emulator = PIPENV_EMULATOR.lower() or _detect_emulator() type_, command = detect_info() - return SHELL_LOOKUP[type_][emulator](command) + shell_types = SHELL_LOOKUP[type_] + for key in emulator.split(","): + key = key.strip().lower() + if key in shell_types: + return shell_types[key](command) + return shell_types[""](command)
{"golden_diff": "diff --git a/pipenv/shells.py b/pipenv/shells.py\n--- a/pipenv/shells.py\n+++ b/pipenv/shells.py\n@@ -134,6 +134,9 @@\n \n \n class Bash(Shell):\n+ def _format_path(self, python):\n+ return python.parent.as_posix()\n+\n # The usual PATH injection technique does not work with Bash.\n # https://github.com/berdario/pew/issues/58#issuecomment-102182346\n @contextlib.contextmanager\n@@ -146,15 +149,25 @@\n base_rc_src = 'source \"{0}\"\\n'.format(bashrc_path.as_posix())\n rcfile.write(base_rc_src)\n \n- export_path = 'export PATH=\"{0}:$PATH\"\\n'.format(\n- \":\".join(python.parent.as_posix() for python in _iter_python(venv))\n- )\n+ export_path = 'export PATH=\"{0}:$PATH\"\\n'.format(\":\".join(\n+ self._format_path(python)\n+ for python in _iter_python(venv)\n+ ))\n rcfile.write(export_path)\n rcfile.flush()\n self.args.extend([\"--rcfile\", rcfile.name])\n yield\n \n \n+class MsysBash(Bash):\n+ def _format_path(self, python):\n+ s = super(MsysBash, self)._format_path(python)\n+ if not python.drive:\n+ return s\n+ # Convert \"C:/something\" to \"/c/something\".\n+ return '/{drive}{path}'.format(drive=s[0].lower(), path=s[2:])\n+\n+\n class CmderEmulatedShell(Shell):\n def fork(self, venv, cwd, args):\n if cwd:\n@@ -193,23 +206,37 @@\n SHELL_LOOKUP = collections.defaultdict(\n lambda: collections.defaultdict(lambda: Shell),\n {\n- \"bash\": collections.defaultdict(lambda: Bash),\n- \"cmd\": collections.defaultdict(lambda: Shell, {\"cmder\": CmderCommandPrompt}),\n+ \"bash\": collections.defaultdict(\n+ lambda: Bash, {\"msys\": MsysBash},\n+ ),\n+ \"cmd\": collections.defaultdict(\n+ lambda: Shell, {\"cmder\": CmderCommandPrompt},\n+ ),\n \"powershell\": collections.defaultdict(\n- lambda: Shell, {\"cmder\": CmderPowershell}\n+ lambda: Shell, {\"cmder\": CmderPowershell},\n+ ),\n+ \"pwsh\": collections.defaultdict(\n+ lambda: Shell, {\"cmder\": CmderPowershell},\n ),\n- \"pwsh\": collections.defaultdict(lambda: Shell, {\"cmder\": CmderPowershell}),\n },\n )\n \n \n def _detect_emulator():\n+ keys = []\n if os.environ.get(\"CMDER_ROOT\"):\n- return \"cmder\"\n- return \"\"\n+ keys.append(\"cmder\")\n+ if os.environ.get(\"MSYSTEM\"):\n+ keys.append(\"msys\")\n+ return \",\".join(keys)\n \n \n def choose_shell():\n emulator = PIPENV_EMULATOR.lower() or _detect_emulator()\n type_, command = detect_info()\n- return SHELL_LOOKUP[type_][emulator](command)\n+ shell_types = SHELL_LOOKUP[type_]\n+ for key in emulator.split(\",\"):\n+ key = key.strip().lower()\n+ if key in shell_types:\n+ return shell_types[key](command)\n+ return shell_types[\"\"](command)\n", "issue": "pipenv shell does not work when running Git for Windows' bash on Windows\n##### Issue description\r\n\r\nRunning `pipenv shell` on windows when running bash in Git for Windows does not activate the virtual env successfully. 
It appears the actual activate script with the correct values is not being used.\r\n\r\n##### Expected result\r\n\r\nI would successfully be in the virtualenv\r\n\r\n##### Actual result\r\n\r\nThe `VIRTUAL_ENV` and `PATH` variables are in the `C:` notation instead of `/c/` notation which would have worked.\r\n\r\n```\r\ndeclare -x VIRTUAL_ENV=\"C:\\\\Users\\\\user\\\\.virtualenvs\\\\project-OJ2s3Ey8\"\r\ndeclare -x PATH=\"C:/Users/user/.virtualenvs/project-OJ2s3Ey8/Scripts:/c/Users/user/bin:/mingw64/bin:/usr/local/bin:/usr/bin:/usr/bin:/mingw64/bin:/usr/bin:/c/Users/andy/bin:/c/Python37/Scripts:/c/Python37\" etc...\r\n```\r\n\r\nSourcing the virtualenv activate variable does work, even though Git for Windows uses `msys` and not `cygwin`. Did not test cygwin\r\n\r\n##### Steps to replicate\r\n\r\n1. `pipenv install sphinx`\r\n1. `echo $$`\r\n1. `pipenv shell`\r\n1. `echo $$`\r\n1. `pip freeze`\r\n1. `echo $PATH`\r\n\r\nShows that the pip environment is the original python environment, not the virtualenv. The different pid verifies that a new shell is indeed being run, but the virtualenv is not activating\r\n\r\nAlso tried\r\n\r\n1. `PIPENV_EMULATOR=bash pipenv shell`\r\n\r\nIdentical results\r\n\r\n<details><summary>$ pipenv --support</summary>\r\n\r\nPipenv version: `'2018.7.1'`\r\n\r\nPipenv location: `'c:\\\\python37\\\\lib\\\\site-packages\\\\pipenv'`\r\n\r\nPython location: `'c:\\\\python37\\\\python.exe'`\r\n\r\nOther Python installations in `PATH`:\r\n\r\n - `3.7`: `C:\\Python37\\python.exe`\r\n\r\n - `3.7.0`: `C:\\Python37\\python.exe`\r\n - `3.7.0`: `C:\\Windows\\py.exe`\r\n\r\nPEP 508 Information:\r\n\r\n```\r\n{'implementation_name': 'cpython',\r\n 'implementation_version': '3.7.0',\r\n 'os_name': 'nt',\r\n 'platform_machine': 'AMD64',\r\n 'platform_python_implementation': 'CPython',\r\n 'platform_release': '10',\r\n 'platform_system': 'Windows',\r\n 'platform_version': '10.0.17134',\r\n 'python_full_version': '3.7.0',\r\n 'python_version': '3.7',\r\n 'sys_platform': 'win32'}\r\n```\n", "before_files": [{"content": "import collections\nimport contextlib\nimport os\nimport signal\nimport subprocess\nimport sys\n\nfrom ._compat import get_terminal_size, Path\nfrom .environments import PIPENV_SHELL_EXPLICIT, PIPENV_SHELL, PIPENV_EMULATOR\nfrom .utils import temp_environ\nfrom .vendor import shellingham\n\n\nShellDetectionFailure = shellingham.ShellDetectionFailure\n\n\ndef _build_info(value):\n return (os.path.splitext(os.path.basename(value))[0], value)\n\n\ndef detect_info():\n if PIPENV_SHELL_EXPLICIT:\n return _build_info(PIPENV_SHELL_EXPLICIT)\n try:\n return shellingham.detect_shell()\n except (shellingham.ShellDetectionFailure, TypeError):\n if PIPENV_SHELL:\n return _build_info(PIPENV_SHELL)\n raise ShellDetectionFailure\n\n\ndef _get_activate_script(venv):\n \"\"\"Returns the string to activate a virtualenv.\n\n This is POSIX-only at the moment since the compat (pexpect-based) shell\n does not work elsewhere anyway.\n \"\"\"\n # Suffix and source command for other shells.\n # Support for fish shell.\n if PIPENV_SHELL and \"fish\" in PIPENV_SHELL:\n suffix = \".fish\"\n command = \"source\"\n # Support for csh shell.\n elif PIPENV_SHELL and \"csh\" in PIPENV_SHELL:\n suffix = \".csh\"\n command = \"source\"\n else:\n suffix = \"\"\n command = \".\"\n # Escape any spaces located within the virtualenv path to allow\n # for proper activation.\n venv_location = str(venv).replace(\" \", r\"\\ \")\n # The leading space can make history cleaner in some shells.\n return \" {2} 
{0}/bin/activate{1}\".format(venv_location, suffix, command)\n\n\ndef _handover(cmd, args):\n args = [cmd] + args\n if os.name != \"nt\":\n os.execvp(cmd, args)\n else:\n sys.exit(subprocess.call(args, shell=True, universal_newlines=True))\n\n\nclass Shell(object):\n def __init__(self, cmd):\n self.cmd = cmd\n self.args = []\n\n def __repr__(self):\n return '{type}(cmd={cmd!r})'.format(\n type=type(self).__name__,\n cmd=self.cmd,\n )\n\n @contextlib.contextmanager\n def inject_path(self, venv):\n with temp_environ():\n os.environ[\"PATH\"] = \"{0}{1}{2}\".format(\n os.pathsep.join(str(p.parent) for p in _iter_python(venv)),\n os.pathsep,\n os.environ[\"PATH\"],\n )\n yield\n\n def fork(self, venv, cwd, args):\n # FIXME: This isn't necessarily the correct prompt. We should read the\n # actual prompt by peeking into the activation script.\n name = os.path.basename(venv)\n os.environ[\"VIRTUAL_ENV\"] = str(venv)\n if \"PROMPT\" in os.environ:\n os.environ[\"PROMPT\"] = \"({0}) {1}\".format(name, os.environ[\"PROMPT\"])\n if \"PS1\" in os.environ:\n os.environ[\"PS1\"] = \"({0}) {1}\".format(name, os.environ[\"PS1\"])\n with self.inject_path(venv):\n os.chdir(cwd)\n _handover(self.cmd, self.args + list(args))\n\n def fork_compat(self, venv, cwd, args):\n from .vendor import pexpect\n\n # Grab current terminal dimensions to replace the hardcoded default\n # dimensions of pexpect.\n dims = get_terminal_size()\n with temp_environ():\n c = pexpect.spawn(self.cmd, [\"-i\"], dimensions=(dims.lines, dims.columns))\n c.sendline(_get_activate_script(venv))\n if args:\n c.sendline(\" \".join(args))\n\n # Handler for terminal resizing events\n # Must be defined here to have the shell process in its context, since\n # we can't pass it as an argument\n def sigwinch_passthrough(sig, data):\n dims = get_terminal_size()\n c.setwinsize(dims.lines, dims.columns)\n\n signal.signal(signal.SIGWINCH, sigwinch_passthrough)\n\n # Interact with the new shell.\n c.interact(escape_character=None)\n c.close()\n sys.exit(c.exitstatus)\n\n\nPOSSIBLE_ENV_PYTHON = [Path(\"bin\", \"python\"), Path(\"Scripts\", \"python.exe\")]\n\n\ndef _iter_python(venv):\n for path in POSSIBLE_ENV_PYTHON:\n full_path = Path(venv, path)\n if full_path.is_file():\n yield full_path\n\n\nclass Bash(Shell):\n # The usual PATH injection technique does not work with Bash.\n # https://github.com/berdario/pew/issues/58#issuecomment-102182346\n @contextlib.contextmanager\n def inject_path(self, venv):\n from ._compat import NamedTemporaryFile\n\n bashrc_path = Path.home().joinpath(\".bashrc\")\n with NamedTemporaryFile(\"w+\") as rcfile:\n if bashrc_path.is_file():\n base_rc_src = 'source \"{0}\"\\n'.format(bashrc_path.as_posix())\n rcfile.write(base_rc_src)\n\n export_path = 'export PATH=\"{0}:$PATH\"\\n'.format(\n \":\".join(python.parent.as_posix() for python in _iter_python(venv))\n )\n rcfile.write(export_path)\n rcfile.flush()\n self.args.extend([\"--rcfile\", rcfile.name])\n yield\n\n\nclass CmderEmulatedShell(Shell):\n def fork(self, venv, cwd, args):\n if cwd:\n os.environ[\"CMDER_START\"] = cwd\n super(CmderEmulatedShell, self).fork(venv, cwd, args)\n\n\nclass CmderCommandPrompt(CmderEmulatedShell):\n def fork(self, venv, cwd, args):\n rc = os.path.expandvars(\"%CMDER_ROOT%\\\\vendor\\\\init.bat\")\n if os.path.exists(rc):\n self.args.extend([\"/k\", rc])\n super(CmderCommandPrompt, self).fork(venv, cwd, args)\n\n\nclass CmderPowershell(Shell):\n def fork(self, venv, cwd, args):\n rc = 
os.path.expandvars(\"%CMDER_ROOT%\\\\vendor\\\\profile.ps1\")\n if os.path.exists(rc):\n self.args.extend(\n [\n \"-ExecutionPolicy\",\n \"Bypass\",\n \"-NoLogo\",\n \"-NoProfile\",\n \"-NoExit\",\n \"-Command\",\n \"Invoke-Expression '. ''{0}'''\".format(rc),\n ]\n )\n super(CmderPowershell, self).fork(venv, cwd, args)\n\n\n# Two dimensional dict. First is the shell type, second is the emulator type.\n# Example: SHELL_LOOKUP['powershell']['cmder'] => CmderPowershell.\nSHELL_LOOKUP = collections.defaultdict(\n lambda: collections.defaultdict(lambda: Shell),\n {\n \"bash\": collections.defaultdict(lambda: Bash),\n \"cmd\": collections.defaultdict(lambda: Shell, {\"cmder\": CmderCommandPrompt}),\n \"powershell\": collections.defaultdict(\n lambda: Shell, {\"cmder\": CmderPowershell}\n ),\n \"pwsh\": collections.defaultdict(lambda: Shell, {\"cmder\": CmderPowershell}),\n },\n)\n\n\ndef _detect_emulator():\n if os.environ.get(\"CMDER_ROOT\"):\n return \"cmder\"\n return \"\"\n\n\ndef choose_shell():\n emulator = PIPENV_EMULATOR.lower() or _detect_emulator()\n type_, command = detect_info()\n return SHELL_LOOKUP[type_][emulator](command)\n", "path": "pipenv/shells.py"}], "after_files": [{"content": "import collections\nimport contextlib\nimport os\nimport signal\nimport subprocess\nimport sys\n\nfrom ._compat import get_terminal_size, Path\nfrom .environments import PIPENV_SHELL_EXPLICIT, PIPENV_SHELL, PIPENV_EMULATOR\nfrom .utils import temp_environ\nfrom .vendor import shellingham\n\n\nShellDetectionFailure = shellingham.ShellDetectionFailure\n\n\ndef _build_info(value):\n return (os.path.splitext(os.path.basename(value))[0], value)\n\n\ndef detect_info():\n if PIPENV_SHELL_EXPLICIT:\n return _build_info(PIPENV_SHELL_EXPLICIT)\n try:\n return shellingham.detect_shell()\n except (shellingham.ShellDetectionFailure, TypeError):\n if PIPENV_SHELL:\n return _build_info(PIPENV_SHELL)\n raise ShellDetectionFailure\n\n\ndef _get_activate_script(venv):\n \"\"\"Returns the string to activate a virtualenv.\n\n This is POSIX-only at the moment since the compat (pexpect-based) shell\n does not work elsewhere anyway.\n \"\"\"\n # Suffix and source command for other shells.\n # Support for fish shell.\n if PIPENV_SHELL and \"fish\" in PIPENV_SHELL:\n suffix = \".fish\"\n command = \"source\"\n # Support for csh shell.\n elif PIPENV_SHELL and \"csh\" in PIPENV_SHELL:\n suffix = \".csh\"\n command = \"source\"\n else:\n suffix = \"\"\n command = \".\"\n # Escape any spaces located within the virtualenv path to allow\n # for proper activation.\n venv_location = str(venv).replace(\" \", r\"\\ \")\n # The leading space can make history cleaner in some shells.\n return \" {2} {0}/bin/activate{1}\".format(venv_location, suffix, command)\n\n\ndef _handover(cmd, args):\n args = [cmd] + args\n if os.name != \"nt\":\n os.execvp(cmd, args)\n else:\n sys.exit(subprocess.call(args, shell=True, universal_newlines=True))\n\n\nclass Shell(object):\n def __init__(self, cmd):\n self.cmd = cmd\n self.args = []\n\n def __repr__(self):\n return '{type}(cmd={cmd!r})'.format(\n type=type(self).__name__,\n cmd=self.cmd,\n )\n\n @contextlib.contextmanager\n def inject_path(self, venv):\n with temp_environ():\n os.environ[\"PATH\"] = \"{0}{1}{2}\".format(\n os.pathsep.join(str(p.parent) for p in _iter_python(venv)),\n os.pathsep,\n os.environ[\"PATH\"],\n )\n yield\n\n def fork(self, venv, cwd, args):\n # FIXME: This isn't necessarily the correct prompt. 
We should read the\n # actual prompt by peeking into the activation script.\n name = os.path.basename(venv)\n os.environ[\"VIRTUAL_ENV\"] = str(venv)\n if \"PROMPT\" in os.environ:\n os.environ[\"PROMPT\"] = \"({0}) {1}\".format(name, os.environ[\"PROMPT\"])\n if \"PS1\" in os.environ:\n os.environ[\"PS1\"] = \"({0}) {1}\".format(name, os.environ[\"PS1\"])\n with self.inject_path(venv):\n os.chdir(cwd)\n _handover(self.cmd, self.args + list(args))\n\n def fork_compat(self, venv, cwd, args):\n from .vendor import pexpect\n\n # Grab current terminal dimensions to replace the hardcoded default\n # dimensions of pexpect.\n dims = get_terminal_size()\n with temp_environ():\n c = pexpect.spawn(self.cmd, [\"-i\"], dimensions=(dims.lines, dims.columns))\n c.sendline(_get_activate_script(venv))\n if args:\n c.sendline(\" \".join(args))\n\n # Handler for terminal resizing events\n # Must be defined here to have the shell process in its context, since\n # we can't pass it as an argument\n def sigwinch_passthrough(sig, data):\n dims = get_terminal_size()\n c.setwinsize(dims.lines, dims.columns)\n\n signal.signal(signal.SIGWINCH, sigwinch_passthrough)\n\n # Interact with the new shell.\n c.interact(escape_character=None)\n c.close()\n sys.exit(c.exitstatus)\n\n\nPOSSIBLE_ENV_PYTHON = [Path(\"bin\", \"python\"), Path(\"Scripts\", \"python.exe\")]\n\n\ndef _iter_python(venv):\n for path in POSSIBLE_ENV_PYTHON:\n full_path = Path(venv, path)\n if full_path.is_file():\n yield full_path\n\n\nclass Bash(Shell):\n def _format_path(self, python):\n return python.parent.as_posix()\n\n # The usual PATH injection technique does not work with Bash.\n # https://github.com/berdario/pew/issues/58#issuecomment-102182346\n @contextlib.contextmanager\n def inject_path(self, venv):\n from ._compat import NamedTemporaryFile\n\n bashrc_path = Path.home().joinpath(\".bashrc\")\n with NamedTemporaryFile(\"w+\") as rcfile:\n if bashrc_path.is_file():\n base_rc_src = 'source \"{0}\"\\n'.format(bashrc_path.as_posix())\n rcfile.write(base_rc_src)\n\n export_path = 'export PATH=\"{0}:$PATH\"\\n'.format(\":\".join(\n self._format_path(python)\n for python in _iter_python(venv)\n ))\n rcfile.write(export_path)\n rcfile.flush()\n self.args.extend([\"--rcfile\", rcfile.name])\n yield\n\n\nclass MsysBash(Bash):\n def _format_path(self, python):\n s = super(MsysBash, self)._format_path(python)\n if not python.drive:\n return s\n # Convert \"C:/something\" to \"/c/something\".\n return '/{drive}{path}'.format(drive=s[0].lower(), path=s[2:])\n\n\nclass CmderEmulatedShell(Shell):\n def fork(self, venv, cwd, args):\n if cwd:\n os.environ[\"CMDER_START\"] = cwd\n super(CmderEmulatedShell, self).fork(venv, cwd, args)\n\n\nclass CmderCommandPrompt(CmderEmulatedShell):\n def fork(self, venv, cwd, args):\n rc = os.path.expandvars(\"%CMDER_ROOT%\\\\vendor\\\\init.bat\")\n if os.path.exists(rc):\n self.args.extend([\"/k\", rc])\n super(CmderCommandPrompt, self).fork(venv, cwd, args)\n\n\nclass CmderPowershell(Shell):\n def fork(self, venv, cwd, args):\n rc = os.path.expandvars(\"%CMDER_ROOT%\\\\vendor\\\\profile.ps1\")\n if os.path.exists(rc):\n self.args.extend(\n [\n \"-ExecutionPolicy\",\n \"Bypass\",\n \"-NoLogo\",\n \"-NoProfile\",\n \"-NoExit\",\n \"-Command\",\n \"Invoke-Expression '. ''{0}'''\".format(rc),\n ]\n )\n super(CmderPowershell, self).fork(venv, cwd, args)\n\n\n# Two dimensional dict. 
First is the shell type, second is the emulator type.\n# Example: SHELL_LOOKUP['powershell']['cmder'] => CmderPowershell.\nSHELL_LOOKUP = collections.defaultdict(\n lambda: collections.defaultdict(lambda: Shell),\n {\n \"bash\": collections.defaultdict(\n lambda: Bash, {\"msys\": MsysBash},\n ),\n \"cmd\": collections.defaultdict(\n lambda: Shell, {\"cmder\": CmderCommandPrompt},\n ),\n \"powershell\": collections.defaultdict(\n lambda: Shell, {\"cmder\": CmderPowershell},\n ),\n \"pwsh\": collections.defaultdict(\n lambda: Shell, {\"cmder\": CmderPowershell},\n ),\n },\n)\n\n\ndef _detect_emulator():\n keys = []\n if os.environ.get(\"CMDER_ROOT\"):\n keys.append(\"cmder\")\n if os.environ.get(\"MSYSTEM\"):\n keys.append(\"msys\")\n return \",\".join(keys)\n\n\ndef choose_shell():\n emulator = PIPENV_EMULATOR.lower() or _detect_emulator()\n type_, command = detect_info()\n shell_types = SHELL_LOOKUP[type_]\n for key in emulator.split(\",\"):\n key = key.strip().lower()\n if key in shell_types:\n return shell_types[key](command)\n return shell_types[\"\"](command)\n", "path": "pipenv/shells.py"}]}
num_tokens: 3,130
num_tokens_diff: 797
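The golden diff in the pipenv record above reduces to a single string transformation plus an environment check: a Windows drive path such as C:/Users/... is rewritten into the /c/Users/... form that an MSYS bash resolves, and MSYS itself is detected through the MSYSTEM environment variable. The following is a standalone sketch of just that conversion, not the pipenv code itself; it uses pathlib.PureWindowsPath so the example runs on any platform.

```python
import os
from pathlib import PureWindowsPath


def to_msys_path(path):
    """Rewrite "C:/something" style paths as "/c/something" (MSYS form).

    Mirrors the conversion in the golden diff above: drive-less paths are
    returned unchanged; otherwise the drive letter is lowercased and the
    "X:" prefix becomes "/x".
    """
    p = PureWindowsPath(path)
    s = p.as_posix()
    if not p.drive:
        return s
    return "/{drive}{rest}".format(drive=s[0].lower(), rest=s[2:])


def running_under_msys():
    # The diff keys MSYS detection off the MSYSTEM environment variable.
    return bool(os.environ.get("MSYSTEM"))


if __name__ == "__main__":
    print(to_msys_path(r"C:\Users\user\.virtualenvs\project\Scripts"))
    # -> /c/Users/user/.virtualenvs/project/Scripts
    print(to_msys_path("relative/dir"))  # unchanged: relative/dir
```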
problem_id: gh_patches_debug_20195
source: rasdani/github-patches
task_type: git_diff
in_source_id: kivy__python-for-android-1723
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Psycopg2 error after the apk installation. ![erro psycopg](https://user-images.githubusercontent.com/39860703/52805033-d8002480-306c-11e9-931b-87bc567e70f0.PNG) I got this error while debugging the android apk. I associate this to Buildozer because I specified into the buildozer.spec requirements the psycopg2 library. It means that is not working. How do I fix it? I know that is a recipe for psycopg2 here: https://github.com/kivy/python-for-android/blob/master/pythonforandroid/recipes/psycopg2/__init__.py How can I add this recipe to my project, to buildozer compile it successfully ? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pythonforandroid/recipes/psycopg2/__init__.py` Content: ``` 1 from pythonforandroid.recipe import PythonRecipe 2 from pythonforandroid.toolchain import current_directory, shprint 3 import sh 4 5 6 class Psycopg2Recipe(PythonRecipe): 7 """ 8 Requires `libpq-dev` system dependency e.g. for `pg_config` binary. 9 """ 10 version = 'latest' 11 url = 'http://initd.org/psycopg/tarballs/psycopg2-{version}.tar.gz' 12 depends = ['libpq'] 13 site_packages_name = 'psycopg2' 14 call_hostpython_via_targetpython = False 15 16 def prebuild_arch(self, arch): 17 libdir = self.ctx.get_libs_dir(arch.arch) 18 with current_directory(self.get_build_dir(arch.arch)): 19 # pg_config_helper will return the system installed libpq, but we 20 # need the one we just cross-compiled 21 shprint(sh.sed, '-i', 22 "s|pg_config_helper.query(.libdir.)|'{}'|".format(libdir), 23 'setup.py') 24 25 def get_recipe_env(self, arch): 26 env = super(Psycopg2Recipe, self).get_recipe_env(arch) 27 env['LDFLAGS'] = "{} -L{}".format(env['LDFLAGS'], self.ctx.get_libs_dir(arch.arch)) 28 env['EXTRA_CFLAGS'] = "--host linux-armv" 29 return env 30 31 def install_python_package(self, arch, name=None, env=None, is_dir=True): 32 '''Automate the installation of a Python package (or a cython 33 package where the cython components are pre-built).''' 34 if env is None: 35 env = self.get_recipe_env(arch) 36 37 with current_directory(self.get_build_dir(arch.arch)): 38 hostpython = sh.Command(self.ctx.hostpython) 39 40 shprint(hostpython, 'setup.py', 'build_ext', '--static-libpq', 41 _env=env) 42 shprint(hostpython, 'setup.py', 'install', '-O2', 43 '--root={}'.format(self.ctx.get_python_install_dir()), 44 '--install-lib=lib/python2.7/site-packages', _env=env) 45 46 47 recipe = Psycopg2Recipe() 48 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pythonforandroid/recipes/psycopg2/__init__.py b/pythonforandroid/recipes/psycopg2/__init__.py --- a/pythonforandroid/recipes/psycopg2/__init__.py +++ b/pythonforandroid/recipes/psycopg2/__init__.py @@ -6,6 +6,9 @@ class Psycopg2Recipe(PythonRecipe): """ Requires `libpq-dev` system dependency e.g. for `pg_config` binary. + If you get `nl_langinfo` symbol runtime error, make sure you're running on + `ANDROID_API` (`ndk-api`) >= 26, see: + https://github.com/kivy/python-for-android/issues/1711#issuecomment-465747557 """ version = 'latest' url = 'http://initd.org/psycopg/tarballs/psycopg2-{version}.tar.gz' @@ -41,7 +44,7 @@ _env=env) shprint(hostpython, 'setup.py', 'install', '-O2', '--root={}'.format(self.ctx.get_python_install_dir()), - '--install-lib=lib/python2.7/site-packages', _env=env) + '--install-lib=.', _env=env) recipe = Psycopg2Recipe()
{"golden_diff": "diff --git a/pythonforandroid/recipes/psycopg2/__init__.py b/pythonforandroid/recipes/psycopg2/__init__.py\n--- a/pythonforandroid/recipes/psycopg2/__init__.py\n+++ b/pythonforandroid/recipes/psycopg2/__init__.py\n@@ -6,6 +6,9 @@\n class Psycopg2Recipe(PythonRecipe):\n \"\"\"\n Requires `libpq-dev` system dependency e.g. for `pg_config` binary.\n+ If you get `nl_langinfo` symbol runtime error, make sure you're running on\n+ `ANDROID_API` (`ndk-api`) >= 26, see:\n+ https://github.com/kivy/python-for-android/issues/1711#issuecomment-465747557\n \"\"\"\n version = 'latest'\n url = 'http://initd.org/psycopg/tarballs/psycopg2-{version}.tar.gz'\n@@ -41,7 +44,7 @@\n _env=env)\n shprint(hostpython, 'setup.py', 'install', '-O2',\n '--root={}'.format(self.ctx.get_python_install_dir()),\n- '--install-lib=lib/python2.7/site-packages', _env=env)\n+ '--install-lib=.', _env=env)\n \n \n recipe = Psycopg2Recipe()\n", "issue": "Psycopg2 error after the apk installation.\n![erro psycopg](https://user-images.githubusercontent.com/39860703/52805033-d8002480-306c-11e9-931b-87bc567e70f0.PNG)\r\n\r\nI got this error while debugging the android apk. I associate this to Buildozer because I specified into the buildozer.spec requirements the psycopg2 library. It means that is not working.\r\n\r\nHow do I fix it? I know that is a recipe for psycopg2 here: https://github.com/kivy/python-for-android/blob/master/pythonforandroid/recipes/psycopg2/__init__.py\r\n\r\nHow can I add this recipe to my project, to buildozer compile it successfully ?\n", "before_files": [{"content": "from pythonforandroid.recipe import PythonRecipe\nfrom pythonforandroid.toolchain import current_directory, shprint\nimport sh\n\n\nclass Psycopg2Recipe(PythonRecipe):\n \"\"\"\n Requires `libpq-dev` system dependency e.g. 
for `pg_config` binary.\n \"\"\"\n version = 'latest'\n url = 'http://initd.org/psycopg/tarballs/psycopg2-{version}.tar.gz'\n depends = ['libpq']\n site_packages_name = 'psycopg2'\n call_hostpython_via_targetpython = False\n\n def prebuild_arch(self, arch):\n libdir = self.ctx.get_libs_dir(arch.arch)\n with current_directory(self.get_build_dir(arch.arch)):\n # pg_config_helper will return the system installed libpq, but we\n # need the one we just cross-compiled\n shprint(sh.sed, '-i',\n \"s|pg_config_helper.query(.libdir.)|'{}'|\".format(libdir),\n 'setup.py')\n\n def get_recipe_env(self, arch):\n env = super(Psycopg2Recipe, self).get_recipe_env(arch)\n env['LDFLAGS'] = \"{} -L{}\".format(env['LDFLAGS'], self.ctx.get_libs_dir(arch.arch))\n env['EXTRA_CFLAGS'] = \"--host linux-armv\"\n return env\n\n def install_python_package(self, arch, name=None, env=None, is_dir=True):\n '''Automate the installation of a Python package (or a cython\n package where the cython components are pre-built).'''\n if env is None:\n env = self.get_recipe_env(arch)\n\n with current_directory(self.get_build_dir(arch.arch)):\n hostpython = sh.Command(self.ctx.hostpython)\n\n shprint(hostpython, 'setup.py', 'build_ext', '--static-libpq',\n _env=env)\n shprint(hostpython, 'setup.py', 'install', '-O2',\n '--root={}'.format(self.ctx.get_python_install_dir()),\n '--install-lib=lib/python2.7/site-packages', _env=env)\n\n\nrecipe = Psycopg2Recipe()\n", "path": "pythonforandroid/recipes/psycopg2/__init__.py"}], "after_files": [{"content": "from pythonforandroid.recipe import PythonRecipe\nfrom pythonforandroid.toolchain import current_directory, shprint\nimport sh\n\n\nclass Psycopg2Recipe(PythonRecipe):\n \"\"\"\n Requires `libpq-dev` system dependency e.g. for `pg_config` binary.\n If you get `nl_langinfo` symbol runtime error, make sure you're running on\n `ANDROID_API` (`ndk-api`) >= 26, see:\n https://github.com/kivy/python-for-android/issues/1711#issuecomment-465747557\n \"\"\"\n version = 'latest'\n url = 'http://initd.org/psycopg/tarballs/psycopg2-{version}.tar.gz'\n depends = ['libpq']\n site_packages_name = 'psycopg2'\n call_hostpython_via_targetpython = False\n\n def prebuild_arch(self, arch):\n libdir = self.ctx.get_libs_dir(arch.arch)\n with current_directory(self.get_build_dir(arch.arch)):\n # pg_config_helper will return the system installed libpq, but we\n # need the one we just cross-compiled\n shprint(sh.sed, '-i',\n \"s|pg_config_helper.query(.libdir.)|'{}'|\".format(libdir),\n 'setup.py')\n\n def get_recipe_env(self, arch):\n env = super(Psycopg2Recipe, self).get_recipe_env(arch)\n env['LDFLAGS'] = \"{} -L{}\".format(env['LDFLAGS'], self.ctx.get_libs_dir(arch.arch))\n env['EXTRA_CFLAGS'] = \"--host linux-armv\"\n return env\n\n def install_python_package(self, arch, name=None, env=None, is_dir=True):\n '''Automate the installation of a Python package (or a cython\n package where the cython components are pre-built).'''\n if env is None:\n env = self.get_recipe_env(arch)\n\n with current_directory(self.get_build_dir(arch.arch)):\n hostpython = sh.Command(self.ctx.hostpython)\n\n shprint(hostpython, 'setup.py', 'build_ext', '--static-libpq',\n _env=env)\n shprint(hostpython, 'setup.py', 'install', '-O2',\n '--root={}'.format(self.ctx.get_python_install_dir()),\n '--install-lib=.', _env=env)\n\n\nrecipe = Psycopg2Recipe()\n", "path": "pythonforandroid/recipes/psycopg2/__init__.py"}]}
num_tokens: 992
num_tokens_diff: 294
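The functional change in the psycopg2 golden diff above is the install target: the recipe stops hard-coding lib/python2.7/site-packages and passes --install-lib=. instead, so the destination no longer encodes a specific interpreter version. The short snippet below illustrates why a version-specific path is fragile: the site-packages directory name differs per interpreter, so a path baked in for Python 2.7 is simply not where a Python 3 interpreter looks. The snippet is not part of the recipe; it only prints the version-specific pieces on whatever interpreter runs it.

```python
import sys
import sysconfig

# The directory component that the old recipe hard-coded as "python2.7":
print("python{}.{}".format(*sys.version_info[:2]))  # e.g. python3.10

# Where this interpreter actually looks for installed packages:
print(sysconfig.get_path("purelib"))
```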
problem_id: gh_patches_debug_7498
source: rasdani/github-patches
task_type: git_diff
in_source_id: beeware__toga-1751
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- iOS app not showing content with Rubicon ObjC 0.4.4 ### Describe the bug When running an iOS app with Toga 0.3.0dev39 and Rubicon 0.4.4, the app isn't successfully started, and the main app content is never displayed. When the app runs, you'll see the following in the log: ``` 2023-01-24 12:14:13.871494+0800 Hello World[94057:4239245] Running app module: helloworld 2023-01-24 12:14:14.399629+0800 Hello World[94057:4239245] /Users/rkm/Library/Developer/CoreSimulator/Devices/84FC86CA-1D89-46EF-9349-29DDCF840143/data/Containers/Bundle/Application/7038F3CE-2212-4C60-9067-1978A80DEC8D/Hello World.app/app_packages/toga_iOS/app.py:95: DeprecationWarning: There is no current event loop 2023-01-24 12:14:14.399738+0800 Hello World[94057:4239245] self.loop = asyncio.get_event_loop() ``` This is a warning, not an error; the app will continue to run. ### Steps to reproduce 1. Run `examples/tutorial0` on iOS 2. See error The app won't crash; but the app window will remain black. ### Expected behavior The app should run and window content should be displayed. ### Screenshots _No response_ ### Environment - Operating System: iOS - Python version: All - Software versions: - Briefcase: All - Toga: <=0.3.0.dev39 - Rubicon-objc 0.4.4 ### Logs N/A ### Additional context The error has been caused because Toga-iOS 0.3.0.dev39 [included a shim](https://github.com/beeware/toga/blob/v0.3.0.dev39/src/iOS/src/toga_iOS/app.py#L112) that reproduced the implementation of `run_forever_cooperatively()`. This was done when the iOS implementation was originally created, with the expectation that this shim would be replaced with the actual call once Rubicon 0.3 was released. This didn't happen, but the old shim continued to work as it matched the implementation in Rubicon. However, Rubicon 0.4.4 altered the implementation of `run_forever_cooperatively()`. As a result, the shim in Toga-iOS 0.3.0.dev39 no longer does everything it needs to in order to start the app. The issue has already been [fixed in the main branch](https://github.com/beeware/toga/blob/main/iOS/src/toga_iOS/app.py#L117) - the shim has been replaced with the actual call to `run_forever_cooperatively()`. Two workarounds exist: 1. Use the `main` branch of Toga in your app. 2. Block the use of rubicon-objc 0.4.4. If you add `rubicon-objc!=0.4.4` to the requires list in your iOS configuration, this will prevent toga-iOS from using the new version. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `iOS/setup.py` Content: ``` 1 #!/usr/bin/env python 2 import re 3 4 from setuptools import setup 5 6 # Version handline needs to be programatic because 7 # we can't import toga_iOS to compute the version; 8 # and to support versioned subpackage dependencies 9 with open("src/toga_iOS/__init__.py", encoding="utf8") as version_file: 10 version_match = re.search( 11 r"^__version__ = ['\"]([^'\"]*)['\"]", version_file.read(), re.M 12 ) 13 if version_match: 14 version = version_match.group(1) 15 else: 16 raise RuntimeError("Unable to find version string.") 17 18 setup( 19 version=version, 20 install_requires=[ 21 "rubicon-objc>=0.4.4", 22 f"toga-core=={version}", 23 ], 24 ) 25 ``` Path: `cocoa/setup.py` Content: ``` 1 #!/usr/bin/env python 2 import re 3 4 from setuptools import setup 5 6 # Version handline needs to be programatic because 7 # we can't import toga_cocoa to compute the version; 8 # and to support versioned subpackage dependencies 9 with open("src/toga_cocoa/__init__.py", encoding="utf8") as version_file: 10 version_match = re.search( 11 r"^__version__ = ['\"]([^'\"]*)['\"]", version_file.read(), re.M 12 ) 13 if version_match: 14 version = version_match.group(1) 15 else: 16 raise RuntimeError("Unable to find version string.") 17 18 setup( 19 version=version, 20 install_requires=[ 21 "rubicon-objc>=0.4.4", 22 f"toga-core=={version}", 23 ], 24 ) 25 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/cocoa/setup.py b/cocoa/setup.py --- a/cocoa/setup.py +++ b/cocoa/setup.py @@ -18,7 +18,7 @@ setup( version=version, install_requires=[ - "rubicon-objc>=0.4.4", - f"toga-core=={version}", + "rubicon-objc >= 0.4.5rc1, < 0.5.0", + f"toga-core == {version}", ], ) diff --git a/iOS/setup.py b/iOS/setup.py --- a/iOS/setup.py +++ b/iOS/setup.py @@ -18,7 +18,7 @@ setup( version=version, install_requires=[ - "rubicon-objc>=0.4.4", - f"toga-core=={version}", + "rubicon-objc >= 0.4.5rc1, < 0.5.0", + f"toga-core == {version}", ], )
{"golden_diff": "diff --git a/cocoa/setup.py b/cocoa/setup.py\n--- a/cocoa/setup.py\n+++ b/cocoa/setup.py\n@@ -18,7 +18,7 @@\n setup(\n version=version,\n install_requires=[\n- \"rubicon-objc>=0.4.4\",\n- f\"toga-core=={version}\",\n+ \"rubicon-objc >= 0.4.5rc1, < 0.5.0\",\n+ f\"toga-core == {version}\",\n ],\n )\ndiff --git a/iOS/setup.py b/iOS/setup.py\n--- a/iOS/setup.py\n+++ b/iOS/setup.py\n@@ -18,7 +18,7 @@\n setup(\n version=version,\n install_requires=[\n- \"rubicon-objc>=0.4.4\",\n- f\"toga-core=={version}\",\n+ \"rubicon-objc >= 0.4.5rc1, < 0.5.0\",\n+ f\"toga-core == {version}\",\n ],\n )\n", "issue": "iOS app not showing content with Rubicon ObjC 0.4.4\n### Describe the bug\r\n\r\nWhen running an iOS app with Toga 0.3.0dev39 and Rubicon 0.4.4, the app isn't successfully started, and the main app content is never displayed. When the app runs, you'll see the following in the log:\r\n \r\n```\r\n2023-01-24 12:14:13.871494+0800 Hello World[94057:4239245] Running app module: helloworld\r\n2023-01-24 12:14:14.399629+0800 Hello World[94057:4239245] /Users/rkm/Library/Developer/CoreSimulator/Devices/84FC86CA-1D89-46EF-9349-29DDCF840143/data/Containers/Bundle/Application/7038F3CE-2212-4C60-9067-1978A80DEC8D/Hello World.app/app_packages/toga_iOS/app.py:95: DeprecationWarning: There is no current event loop\r\n2023-01-24 12:14:14.399738+0800 Hello World[94057:4239245] self.loop = asyncio.get_event_loop()\r\n```\r\n\r\nThis is a warning, not an error; the app will continue to run.\r\n\r\n### Steps to reproduce\r\n\r\n1. Run `examples/tutorial0` on iOS\r\n2. See error\r\n\r\nThe app won't crash; but the app window will remain black.\r\n\r\n### Expected behavior\r\n\r\nThe app should run and window content should be displayed.\r\n\r\n### Screenshots\r\n\r\n_No response_\r\n\r\n### Environment\r\n\r\n- Operating System: iOS\r\n- Python version: All\r\n- Software versions:\r\n - Briefcase: All\r\n - Toga: <=0.3.0.dev39\r\n - Rubicon-objc 0.4.4\r\n\r\n\r\n### Logs\r\n\r\nN/A\r\n\r\n### Additional context\r\n\r\nThe error has been caused because Toga-iOS 0.3.0.dev39 [included a shim](https://github.com/beeware/toga/blob/v0.3.0.dev39/src/iOS/src/toga_iOS/app.py#L112) that reproduced the implementation of `run_forever_cooperatively()`. This was done when the iOS implementation was originally created, with the expectation that this shim would be replaced with the actual call once Rubicon 0.3 was released. This didn't happen, but the old shim continued to work as it matched the implementation in Rubicon.\r\n\r\nHowever, Rubicon 0.4.4 altered the implementation of `run_forever_cooperatively()`. As a result, the shim in Toga-iOS 0.3.0.dev39 no longer does everything it needs to in order to start the app. \r\n\r\nThe issue has already been [fixed in the main branch](https://github.com/beeware/toga/blob/main/iOS/src/toga_iOS/app.py#L117) - the shim has been replaced with the actual call to `run_forever_cooperatively()`.\r\n\r\nTwo workarounds exist:\r\n1. Use the `main` branch of Toga in your app.\r\n2. Block the use of rubicon-objc 0.4.4. 
If you add `rubicon-objc!=0.4.4` to the requires list in your iOS configuration, this will prevent toga-iOS from using the new version.\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\nimport re\n\nfrom setuptools import setup\n\n# Version handline needs to be programatic because\n# we can't import toga_iOS to compute the version;\n# and to support versioned subpackage dependencies\nwith open(\"src/toga_iOS/__init__.py\", encoding=\"utf8\") as version_file:\n version_match = re.search(\n r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\", version_file.read(), re.M\n )\n if version_match:\n version = version_match.group(1)\n else:\n raise RuntimeError(\"Unable to find version string.\")\n\nsetup(\n version=version,\n install_requires=[\n \"rubicon-objc>=0.4.4\",\n f\"toga-core=={version}\",\n ],\n)\n", "path": "iOS/setup.py"}, {"content": "#!/usr/bin/env python\nimport re\n\nfrom setuptools import setup\n\n# Version handline needs to be programatic because\n# we can't import toga_cocoa to compute the version;\n# and to support versioned subpackage dependencies\nwith open(\"src/toga_cocoa/__init__.py\", encoding=\"utf8\") as version_file:\n version_match = re.search(\n r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\", version_file.read(), re.M\n )\n if version_match:\n version = version_match.group(1)\n else:\n raise RuntimeError(\"Unable to find version string.\")\n\nsetup(\n version=version,\n install_requires=[\n \"rubicon-objc>=0.4.4\",\n f\"toga-core=={version}\",\n ],\n)\n", "path": "cocoa/setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\nimport re\n\nfrom setuptools import setup\n\n# Version handline needs to be programatic because\n# we can't import toga_iOS to compute the version;\n# and to support versioned subpackage dependencies\nwith open(\"src/toga_iOS/__init__.py\", encoding=\"utf8\") as version_file:\n version_match = re.search(\n r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\", version_file.read(), re.M\n )\n if version_match:\n version = version_match.group(1)\n else:\n raise RuntimeError(\"Unable to find version string.\")\n\nsetup(\n version=version,\n install_requires=[\n \"rubicon-objc >= 0.4.5rc1, < 0.5.0\",\n f\"toga-core == {version}\",\n ],\n)\n", "path": "iOS/setup.py"}, {"content": "#!/usr/bin/env python\nimport re\n\nfrom setuptools import setup\n\n# Version handline needs to be programatic because\n# we can't import toga_cocoa to compute the version;\n# and to support versioned subpackage dependencies\nwith open(\"src/toga_cocoa/__init__.py\", encoding=\"utf8\") as version_file:\n version_match = re.search(\n r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\", version_file.read(), re.M\n )\n if version_match:\n version = version_match.group(1)\n else:\n raise RuntimeError(\"Unable to find version string.\")\n\nsetup(\n version=version,\n install_requires=[\n \"rubicon-objc >= 0.4.5rc1, < 0.5.0\",\n f\"toga-core == {version}\",\n ],\n)\n", "path": "cocoa/setup.py"}]}
1,508
230
gh_patches_debug_10540
rasdani/github-patches
git_diff
scikit-hep__pyhf-1670
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Remove `pyhf.simplemodels.hepdata_like` from public API for v0.7.0 ### Summary [`pyhf.simplemodels.hepdata_like`](https://pyhf.readthedocs.io/en/v0.6.3/_generated/pyhf.simplemodels.hepdata_like.html#pyhf.simplemodels.hepdata_like) has been scheduled for removal from the public API in release `v0.7.0`. As `v0.7.0` will be the next release (and hopefully soon) this should get removed now. ### Additional Information `pyhf.simplemodels.hepdata_like` has been deprecated since `v0.6.2`. ### Code of Conduct - [X] I agree to follow the Code of Conduct --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/pyhf/simplemodels.py` Content: ``` 1 from warnings import warn 2 3 from pyhf import Model 4 5 __all__ = ["correlated_background", "uncorrelated_background"] 6 7 8 def __dir__(): 9 return __all__ 10 11 12 def correlated_background(signal, bkg, bkg_up, bkg_down, batch_size=None): 13 r""" 14 Construct a simple single channel :class:`~pyhf.pdf.Model` with a 15 :class:`~pyhf.modifiers.histosys` modifier representing a background 16 with a fully correlated bin-by-bin uncertainty. 17 18 Args: 19 signal (:obj:`list`): The data in the signal sample. 20 bkg (:obj:`list`): The data in the background sample. 21 bkg_up (:obj:`list`): The background sample under an upward variation 22 corresponding to :math:`\alpha=+1`. 23 bkg_down (:obj:`list`): The background sample under a downward variation 24 corresponding to :math:`\alpha=-1`. 25 batch_size (:obj:`None` or :obj:`int`): Number of simultaneous (batched) Models to compute. 26 27 Returns: 28 ~pyhf.pdf.Model: The statistical model adhering to the :obj:`model.json` schema. 29 30 Example: 31 >>> import pyhf 32 >>> pyhf.set_backend("numpy") 33 >>> model = pyhf.simplemodels.correlated_background( 34 ... signal=[12.0, 11.0], 35 ... bkg=[50.0, 52.0], 36 ... bkg_up=[45.0, 57.0], 37 ... bkg_down=[55.0, 47.0], 38 ... ) 39 >>> model.schema 40 'model.json' 41 >>> model.config.channels 42 ['single_channel'] 43 >>> model.config.samples 44 ['background', 'signal'] 45 >>> model.config.parameters 46 ['correlated_bkg_uncertainty', 'mu'] 47 >>> model.expected_data(model.config.suggested_init()) 48 array([62., 63., 0.]) 49 50 """ 51 spec = { 52 "channels": [ 53 { 54 "name": "single_channel", 55 "samples": [ 56 { 57 "name": "signal", 58 "data": signal, 59 "modifiers": [ 60 {"name": "mu", "type": "normfactor", "data": None} 61 ], 62 }, 63 { 64 "name": "background", 65 "data": bkg, 66 "modifiers": [ 67 { 68 "name": "correlated_bkg_uncertainty", 69 "type": "histosys", 70 "data": {"hi_data": bkg_up, "lo_data": bkg_down}, 71 } 72 ], 73 }, 74 ], 75 } 76 ] 77 } 78 return Model(spec, batch_size=batch_size) 79 80 81 def uncorrelated_background(signal, bkg, bkg_uncertainty, batch_size=None): 82 """ 83 Construct a simple single channel :class:`~pyhf.pdf.Model` with a 84 :class:`~pyhf.modifiers.shapesys` modifier representing an uncorrelated 85 background uncertainty. 86 87 Example: 88 >>> import pyhf 89 >>> pyhf.set_backend("numpy") 90 >>> model = pyhf.simplemodels.uncorrelated_background( 91 ... signal=[12.0, 11.0], bkg=[50.0, 52.0], bkg_uncertainty=[3.0, 7.0] 92 ... 
) 93 >>> model.schema 94 'model.json' 95 >>> model.config.channels 96 ['singlechannel'] 97 >>> model.config.samples 98 ['background', 'signal'] 99 >>> model.config.parameters 100 ['mu', 'uncorr_bkguncrt'] 101 >>> model.expected_data(model.config.suggested_init()) 102 array([ 62. , 63. , 277.77777778, 55.18367347]) 103 104 Args: 105 signal (:obj:`list`): The data in the signal sample 106 bkg (:obj:`list`): The data in the background sample 107 bkg_uncertainty (:obj:`list`): The statistical uncertainty on the background sample counts 108 batch_size (:obj:`None` or :obj:`int`): Number of simultaneous (batched) Models to compute 109 110 Returns: 111 ~pyhf.pdf.Model: The statistical model adhering to the :obj:`model.json` schema 112 113 """ 114 spec = { 115 'channels': [ 116 { 117 'name': 'singlechannel', 118 'samples': [ 119 { 120 'name': 'signal', 121 'data': signal, 122 'modifiers': [ 123 {'name': 'mu', 'type': 'normfactor', 'data': None} 124 ], 125 }, 126 { 127 'name': 'background', 128 'data': bkg, 129 'modifiers': [ 130 { 131 'name': 'uncorr_bkguncrt', 132 'type': 'shapesys', 133 'data': bkg_uncertainty, 134 } 135 ], 136 }, 137 ], 138 } 139 ] 140 } 141 return Model(spec, batch_size=batch_size) 142 143 144 # Deprecated APIs 145 def _deprecated_api_warning( 146 deprecated_api, new_api, deprecated_release, remove_release 147 ): 148 warn( 149 f"{deprecated_api} is deprecated in favor of {new_api} as of pyhf v{deprecated_release} and will be removed in release {remove_release}." 150 + f" Please use {new_api}.", 151 DeprecationWarning, 152 stacklevel=3, # Raise to user level 153 ) 154 155 156 def hepdata_like(signal_data, bkg_data, bkg_uncerts, batch_size=None): 157 """ 158 .. note:: Deprecated API: Use :func:`~pyhf.simplemodels.uncorrelated_background` 159 instead. 160 161 .. warning:: :func:`~pyhf.simplemodels.hepdata_like` will be removed in 162 ``pyhf`` ``v0.7.0``. 163 """ 164 _deprecated_api_warning( 165 "pyhf.simplemodels.hepdata_like", 166 "pyhf.simplemodels.uncorrelated_background", 167 "0.6.2", 168 "0.7.0", 169 ) 170 return uncorrelated_background(signal_data, bkg_data, bkg_uncerts, batch_size) 171 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/pyhf/simplemodels.py b/src/pyhf/simplemodels.py --- a/src/pyhf/simplemodels.py +++ b/src/pyhf/simplemodels.py @@ -151,20 +151,3 @@ DeprecationWarning, stacklevel=3, # Raise to user level ) - - -def hepdata_like(signal_data, bkg_data, bkg_uncerts, batch_size=None): - """ - .. note:: Deprecated API: Use :func:`~pyhf.simplemodels.uncorrelated_background` - instead. - - .. warning:: :func:`~pyhf.simplemodels.hepdata_like` will be removed in - ``pyhf`` ``v0.7.0``. - """ - _deprecated_api_warning( - "pyhf.simplemodels.hepdata_like", - "pyhf.simplemodels.uncorrelated_background", - "0.6.2", - "0.7.0", - ) - return uncorrelated_background(signal_data, bkg_data, bkg_uncerts, batch_size)
{"golden_diff": "diff --git a/src/pyhf/simplemodels.py b/src/pyhf/simplemodels.py\n--- a/src/pyhf/simplemodels.py\n+++ b/src/pyhf/simplemodels.py\n@@ -151,20 +151,3 @@\n DeprecationWarning,\n stacklevel=3, # Raise to user level\n )\n-\n-\n-def hepdata_like(signal_data, bkg_data, bkg_uncerts, batch_size=None):\n- \"\"\"\n- .. note:: Deprecated API: Use :func:`~pyhf.simplemodels.uncorrelated_background`\n- instead.\n-\n- .. warning:: :func:`~pyhf.simplemodels.hepdata_like` will be removed in\n- ``pyhf`` ``v0.7.0``.\n- \"\"\"\n- _deprecated_api_warning(\n- \"pyhf.simplemodels.hepdata_like\",\n- \"pyhf.simplemodels.uncorrelated_background\",\n- \"0.6.2\",\n- \"0.7.0\",\n- )\n- return uncorrelated_background(signal_data, bkg_data, bkg_uncerts, batch_size)\n", "issue": "Remove `pyhf.simplemodels.hepdata_like` from public API for v0.7.0\n### Summary\n\n[`pyhf.simplemodels.hepdata_like`](https://pyhf.readthedocs.io/en/v0.6.3/_generated/pyhf.simplemodels.hepdata_like.html#pyhf.simplemodels.hepdata_like) has been scheduled for removal from the public API in release `v0.7.0`. As `v0.7.0` will be the next release (and hopefully soon) this should get removed now.\n\n### Additional Information\n\n`pyhf.simplemodels.hepdata_like` has been deprecated since `v0.6.2`.\n\n### Code of Conduct\n\n- [X] I agree to follow the Code of Conduct\n", "before_files": [{"content": "from warnings import warn\n\nfrom pyhf import Model\n\n__all__ = [\"correlated_background\", \"uncorrelated_background\"]\n\n\ndef __dir__():\n return __all__\n\n\ndef correlated_background(signal, bkg, bkg_up, bkg_down, batch_size=None):\n r\"\"\"\n Construct a simple single channel :class:`~pyhf.pdf.Model` with a\n :class:`~pyhf.modifiers.histosys` modifier representing a background\n with a fully correlated bin-by-bin uncertainty.\n\n Args:\n signal (:obj:`list`): The data in the signal sample.\n bkg (:obj:`list`): The data in the background sample.\n bkg_up (:obj:`list`): The background sample under an upward variation\n corresponding to :math:`\\alpha=+1`.\n bkg_down (:obj:`list`): The background sample under a downward variation\n corresponding to :math:`\\alpha=-1`.\n batch_size (:obj:`None` or :obj:`int`): Number of simultaneous (batched) Models to compute.\n\n Returns:\n ~pyhf.pdf.Model: The statistical model adhering to the :obj:`model.json` schema.\n\n Example:\n >>> import pyhf\n >>> pyhf.set_backend(\"numpy\")\n >>> model = pyhf.simplemodels.correlated_background(\n ... signal=[12.0, 11.0],\n ... bkg=[50.0, 52.0],\n ... bkg_up=[45.0, 57.0],\n ... bkg_down=[55.0, 47.0],\n ... 
)\n >>> model.schema\n 'model.json'\n >>> model.config.channels\n ['single_channel']\n >>> model.config.samples\n ['background', 'signal']\n >>> model.config.parameters\n ['correlated_bkg_uncertainty', 'mu']\n >>> model.expected_data(model.config.suggested_init())\n array([62., 63., 0.])\n\n \"\"\"\n spec = {\n \"channels\": [\n {\n \"name\": \"single_channel\",\n \"samples\": [\n {\n \"name\": \"signal\",\n \"data\": signal,\n \"modifiers\": [\n {\"name\": \"mu\", \"type\": \"normfactor\", \"data\": None}\n ],\n },\n {\n \"name\": \"background\",\n \"data\": bkg,\n \"modifiers\": [\n {\n \"name\": \"correlated_bkg_uncertainty\",\n \"type\": \"histosys\",\n \"data\": {\"hi_data\": bkg_up, \"lo_data\": bkg_down},\n }\n ],\n },\n ],\n }\n ]\n }\n return Model(spec, batch_size=batch_size)\n\n\ndef uncorrelated_background(signal, bkg, bkg_uncertainty, batch_size=None):\n \"\"\"\n Construct a simple single channel :class:`~pyhf.pdf.Model` with a\n :class:`~pyhf.modifiers.shapesys` modifier representing an uncorrelated\n background uncertainty.\n\n Example:\n >>> import pyhf\n >>> pyhf.set_backend(\"numpy\")\n >>> model = pyhf.simplemodels.uncorrelated_background(\n ... signal=[12.0, 11.0], bkg=[50.0, 52.0], bkg_uncertainty=[3.0, 7.0]\n ... )\n >>> model.schema\n 'model.json'\n >>> model.config.channels\n ['singlechannel']\n >>> model.config.samples\n ['background', 'signal']\n >>> model.config.parameters\n ['mu', 'uncorr_bkguncrt']\n >>> model.expected_data(model.config.suggested_init())\n array([ 62. , 63. , 277.77777778, 55.18367347])\n\n Args:\n signal (:obj:`list`): The data in the signal sample\n bkg (:obj:`list`): The data in the background sample\n bkg_uncertainty (:obj:`list`): The statistical uncertainty on the background sample counts\n batch_size (:obj:`None` or :obj:`int`): Number of simultaneous (batched) Models to compute\n\n Returns:\n ~pyhf.pdf.Model: The statistical model adhering to the :obj:`model.json` schema\n\n \"\"\"\n spec = {\n 'channels': [\n {\n 'name': 'singlechannel',\n 'samples': [\n {\n 'name': 'signal',\n 'data': signal,\n 'modifiers': [\n {'name': 'mu', 'type': 'normfactor', 'data': None}\n ],\n },\n {\n 'name': 'background',\n 'data': bkg,\n 'modifiers': [\n {\n 'name': 'uncorr_bkguncrt',\n 'type': 'shapesys',\n 'data': bkg_uncertainty,\n }\n ],\n },\n ],\n }\n ]\n }\n return Model(spec, batch_size=batch_size)\n\n\n# Deprecated APIs\ndef _deprecated_api_warning(\n deprecated_api, new_api, deprecated_release, remove_release\n):\n warn(\n f\"{deprecated_api} is deprecated in favor of {new_api} as of pyhf v{deprecated_release} and will be removed in release {remove_release}.\"\n + f\" Please use {new_api}.\",\n DeprecationWarning,\n stacklevel=3, # Raise to user level\n )\n\n\ndef hepdata_like(signal_data, bkg_data, bkg_uncerts, batch_size=None):\n \"\"\"\n .. note:: Deprecated API: Use :func:`~pyhf.simplemodels.uncorrelated_background`\n instead.\n\n .. 
warning:: :func:`~pyhf.simplemodels.hepdata_like` will be removed in\n ``pyhf`` ``v0.7.0``.\n \"\"\"\n _deprecated_api_warning(\n \"pyhf.simplemodels.hepdata_like\",\n \"pyhf.simplemodels.uncorrelated_background\",\n \"0.6.2\",\n \"0.7.0\",\n )\n return uncorrelated_background(signal_data, bkg_data, bkg_uncerts, batch_size)\n", "path": "src/pyhf/simplemodels.py"}], "after_files": [{"content": "from warnings import warn\n\nfrom pyhf import Model\n\n__all__ = [\"correlated_background\", \"uncorrelated_background\"]\n\n\ndef __dir__():\n return __all__\n\n\ndef correlated_background(signal, bkg, bkg_up, bkg_down, batch_size=None):\n r\"\"\"\n Construct a simple single channel :class:`~pyhf.pdf.Model` with a\n :class:`~pyhf.modifiers.histosys` modifier representing a background\n with a fully correlated bin-by-bin uncertainty.\n\n Args:\n signal (:obj:`list`): The data in the signal sample.\n bkg (:obj:`list`): The data in the background sample.\n bkg_up (:obj:`list`): The background sample under an upward variation\n corresponding to :math:`\\alpha=+1`.\n bkg_down (:obj:`list`): The background sample under a downward variation\n corresponding to :math:`\\alpha=-1`.\n batch_size (:obj:`None` or :obj:`int`): Number of simultaneous (batched) Models to compute.\n\n Returns:\n ~pyhf.pdf.Model: The statistical model adhering to the :obj:`model.json` schema.\n\n Example:\n >>> import pyhf\n >>> pyhf.set_backend(\"numpy\")\n >>> model = pyhf.simplemodels.correlated_background(\n ... signal=[12.0, 11.0],\n ... bkg=[50.0, 52.0],\n ... bkg_up=[45.0, 57.0],\n ... bkg_down=[55.0, 47.0],\n ... )\n >>> model.schema\n 'model.json'\n >>> model.config.channels\n ['single_channel']\n >>> model.config.samples\n ['background', 'signal']\n >>> model.config.parameters\n ['correlated_bkg_uncertainty', 'mu']\n >>> model.expected_data(model.config.suggested_init())\n array([62., 63., 0.])\n\n \"\"\"\n spec = {\n \"channels\": [\n {\n \"name\": \"single_channel\",\n \"samples\": [\n {\n \"name\": \"signal\",\n \"data\": signal,\n \"modifiers\": [\n {\"name\": \"mu\", \"type\": \"normfactor\", \"data\": None}\n ],\n },\n {\n \"name\": \"background\",\n \"data\": bkg,\n \"modifiers\": [\n {\n \"name\": \"correlated_bkg_uncertainty\",\n \"type\": \"histosys\",\n \"data\": {\"hi_data\": bkg_up, \"lo_data\": bkg_down},\n }\n ],\n },\n ],\n }\n ]\n }\n return Model(spec, batch_size=batch_size)\n\n\ndef uncorrelated_background(signal, bkg, bkg_uncertainty, batch_size=None):\n \"\"\"\n Construct a simple single channel :class:`~pyhf.pdf.Model` with a\n :class:`~pyhf.modifiers.shapesys` modifier representing an uncorrelated\n background uncertainty.\n\n Example:\n >>> import pyhf\n >>> pyhf.set_backend(\"numpy\")\n >>> model = pyhf.simplemodels.uncorrelated_background(\n ... signal=[12.0, 11.0], bkg=[50.0, 52.0], bkg_uncertainty=[3.0, 7.0]\n ... )\n >>> model.schema\n 'model.json'\n >>> model.config.channels\n ['singlechannel']\n >>> model.config.samples\n ['background', 'signal']\n >>> model.config.parameters\n ['mu', 'uncorr_bkguncrt']\n >>> model.expected_data(model.config.suggested_init())\n array([ 62. , 63. 
, 277.77777778, 55.18367347])\n\n Args:\n signal (:obj:`list`): The data in the signal sample\n bkg (:obj:`list`): The data in the background sample\n bkg_uncertainty (:obj:`list`): The statistical uncertainty on the background sample counts\n batch_size (:obj:`None` or :obj:`int`): Number of simultaneous (batched) Models to compute\n\n Returns:\n ~pyhf.pdf.Model: The statistical model adhering to the :obj:`model.json` schema\n\n \"\"\"\n spec = {\n 'channels': [\n {\n 'name': 'singlechannel',\n 'samples': [\n {\n 'name': 'signal',\n 'data': signal,\n 'modifiers': [\n {'name': 'mu', 'type': 'normfactor', 'data': None}\n ],\n },\n {\n 'name': 'background',\n 'data': bkg,\n 'modifiers': [\n {\n 'name': 'uncorr_bkguncrt',\n 'type': 'shapesys',\n 'data': bkg_uncertainty,\n }\n ],\n },\n ],\n }\n ]\n }\n return Model(spec, batch_size=batch_size)\n\n\n# Deprecated APIs\ndef _deprecated_api_warning(\n deprecated_api, new_api, deprecated_release, remove_release\n):\n warn(\n f\"{deprecated_api} is deprecated in favor of {new_api} as of pyhf v{deprecated_release} and will be removed in release {remove_release}.\"\n + f\" Please use {new_api}.\",\n DeprecationWarning,\n stacklevel=3, # Raise to user level\n )\n", "path": "src/pyhf/simplemodels.py"}]}
2163
237
gh_patches_debug_5408
rasdani/github-patches
git_diff
Mailu__Mailu-2982
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Use official clamav docker image for Mailu clamav image With https://github.com/Cisco-Talos/clamav having official docker support https://hub.docker.com/r/clamav/clamav it might be worth considering referring or preferring that container in the future? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `optional/clamav/start.py` Content: ``` 1 #!/usr/bin/env python3 2 3 import os 4 import logging as logger 5 import sys 6 from socrate import system 7 8 system.set_env(log_filters=r'SelfCheck: Database status OK\.$') 9 10 # Bootstrap the database if clamav is running for the first time 11 if not os.path.isfile("/data/main.cvd"): 12 logger.info("Starting primary virus DB download") 13 os.system("freshclam") 14 15 # Run the update daemon 16 logger.info("Starting the update daemon") 17 os.system("freshclam -d -c 6") 18 19 # Run clamav 20 logger.info("Starting clamav") 21 os.system("clamd") 22 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/optional/clamav/start.py b/optional/clamav/start.py deleted file mode 100755 --- a/optional/clamav/start.py +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env python3 - -import os -import logging as logger -import sys -from socrate import system - -system.set_env(log_filters=r'SelfCheck: Database status OK\.$') - -# Bootstrap the database if clamav is running for the first time -if not os.path.isfile("/data/main.cvd"): - logger.info("Starting primary virus DB download") - os.system("freshclam") - -# Run the update daemon -logger.info("Starting the update daemon") -os.system("freshclam -d -c 6") - -# Run clamav -logger.info("Starting clamav") -os.system("clamd")
{"golden_diff": "diff --git a/optional/clamav/start.py b/optional/clamav/start.py\ndeleted file mode 100755\n--- a/optional/clamav/start.py\n+++ /dev/null\n@@ -1,21 +0,0 @@\n-#!/usr/bin/env python3\n-\n-import os\n-import logging as logger\n-import sys\n-from socrate import system\n-\n-system.set_env(log_filters=r'SelfCheck: Database status OK\\.$')\n-\n-# Bootstrap the database if clamav is running for the first time\n-if not os.path.isfile(\"/data/main.cvd\"):\n- logger.info(\"Starting primary virus DB download\")\n- os.system(\"freshclam\")\n-\n-# Run the update daemon\n-logger.info(\"Starting the update daemon\")\n-os.system(\"freshclam -d -c 6\")\n-\n-# Run clamav\n-logger.info(\"Starting clamav\")\n-os.system(\"clamd\")\n", "issue": "Use official clamav docker image for Mailu clamav image\nWith https://github.com/Cisco-Talos/clamav having official docker support https://hub.docker.com/r/clamav/clamav it might be worth considering referring or preferring that container in the future?\n", "before_files": [{"content": "#!/usr/bin/env python3\n\nimport os\nimport logging as logger\nimport sys\nfrom socrate import system\n\nsystem.set_env(log_filters=r'SelfCheck: Database status OK\\.$')\n\n# Bootstrap the database if clamav is running for the first time\nif not os.path.isfile(\"/data/main.cvd\"):\n logger.info(\"Starting primary virus DB download\")\n os.system(\"freshclam\")\n\n# Run the update daemon\nlogger.info(\"Starting the update daemon\")\nos.system(\"freshclam -d -c 6\")\n\n# Run clamav\nlogger.info(\"Starting clamav\")\nos.system(\"clamd\")\n", "path": "optional/clamav/start.py"}], "after_files": [{"content": null, "path": "optional/clamav/start.py"}]}
486
199
gh_patches_debug_7255
rasdani/github-patches
git_diff
kubeflow__pipelines-6703
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Upgrade cloudpickle to > 2.0.0 Hello, One of the features of pipelines is Step Caching (https://www.kubeflow.org/docs/components/pipelines/caching/) to avoid running the costly computations again and again. The key for caching is: ``` message CacheKey { map<string, ArtifactNameList> inputArtifactNames = 1; map<string, Value> inputParameters = 2; map<string, RuntimeArtifact> outputArtifactsSpec = 3; map<string, string> outputParametersSpec=4; ContainerSpec containerSpec=5; } ``` When using the option `use_code_pickling` from https://github.com/kubeflow/pipelines/blob/74c7773ca40decfd0d4ed40dc93a6af591bbc190/sdk/python/kfp/components/_python_op.py#L516 the pickle of the function gets embedded in the `ContainerSpec` (and hence becomes part of the key). So far, all good. However, the pickle is generated with `cloudpickle` which leads to non deterministic pickles every time you run the pipeline. As you can imagine, this makes caching feature useless because it will invalidate the cache every time it is run. This non determinism was removed from `cloudpickle` with the following commit: https://github.com/cloudpipe/cloudpickle/pull/428 and released as part of `2.0.0` release: https://github.com/cloudpipe/cloudpickle/releases/tag/v2.0.0 Currently, `kfp` has bounded cloudpickle to less than v2.0.0 here: https://github.com/kubeflow/pipelines/blob/74c7773ca40decfd0d4ed40dc93a6af591bbc190/sdk/python/setup.py#L37 Would it be possible to make a new `kfp` release with upgraded cloudpickle? Without this cloudpickle version, step caching is currently impossible to use (or at the mercy of dictionary insertion order of cloudpickle). Thanks! --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `sdk/python/setup.py` Content: ``` 1 # Copyright 2018 The Kubeflow Authors 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import os 16 import re 17 18 from setuptools import setup 19 20 NAME = 'kfp' 21 #VERSION = .... Change the version in kfp/__init__.py 22 23 # NOTICE, after any updates to the following, ./requirements.in should be updated 24 # accordingly. 25 REQUIRES = [ 26 'absl-py>=0.9,<=0.11', 27 'PyYAML>=5.3,<6', 28 # `Blob.from_string` was introduced in google-cloud-storage 1.20.0 29 # https://github.com/googleapis/python-storage/blob/master/CHANGELOG.md#1200 30 'google-cloud-storage>=1.20.0,<2', 31 'kubernetes>=8.0.0,<19', 32 # google-api-python-client v2 doesn't work for private dicovery by default: 33 # https://github.com/googleapis/google-api-python-client/issues/1225#issuecomment-791058235 34 'google-api-python-client>=1.7.8,<2', 35 'google-auth>=1.6.1,<2', 36 'requests-toolbelt>=0.8.0,<1', 37 'cloudpickle>=1.3.0,<2', 38 # Update the upper version whenever a new major version of the 39 # kfp-server-api package is released. 
40 # Update the lower version when kfp sdk depends on new apis/fields in 41 # kfp-server-api. 42 # Note, please also update ./requirements.in 43 'kfp-server-api>=1.1.2,<2.0.0', 44 'jsonschema>=3.0.1,<4', 45 'tabulate>=0.8.6,<1', 46 'click>=7.1.2,<9', 47 'Deprecated>=1.2.7,<2', 48 'strip-hints>=0.1.8,<1', 49 'docstring-parser>=0.7.3,<1', 50 'kfp-pipeline-spec>=0.1.10,<0.2.0', 51 'fire>=0.3.1,<1', 52 'protobuf>=3.13.0,<4', 53 'uritemplate>=3.0.1,<4', 54 'pydantic>=1.8.2,<2', 55 # Standard library backports 56 'dataclasses;python_version<"3.7"', 57 'typing-extensions>=3.7.4,<4;python_version<"3.9"', 58 ] 59 60 TESTS_REQUIRE = [ 61 'frozendict', 62 ] 63 64 65 def find_version(*file_path_parts): 66 here = os.path.abspath(os.path.dirname(__file__)) 67 with open(os.path.join(here, *file_path_parts), 'r') as fp: 68 version_file_text = fp.read() 69 70 version_match = re.search( 71 r"^__version__ = ['\"]([^'\"]*)['\"]", 72 version_file_text, 73 re.M, 74 ) 75 if version_match: 76 return version_match.group(1) 77 78 raise RuntimeError('Unable to find version string.') 79 80 81 setup( 82 name=NAME, 83 version=find_version('kfp', '__init__.py'), 84 description='KubeFlow Pipelines SDK', 85 author='The Kubeflow Authors', 86 url="https://github.com/kubeflow/pipelines", 87 project_urls={ 88 "Documentation": "https://kubeflow-pipelines.readthedocs.io/en/stable/", 89 "Bug Tracker": "https://github.com/kubeflow/pipelines/issues", 90 "Source": "https://github.com/kubeflow/pipelines/tree/master/sdk", 91 "Changelog": "https://github.com/kubeflow/pipelines/blob/master/sdk/RELEASE.md", 92 }, 93 install_requires=REQUIRES, 94 tests_require=TESTS_REQUIRE, 95 packages=[ 96 'kfp', 97 'kfp.auth', 98 'kfp.cli', 99 'kfp.cli.diagnose_me', 100 'kfp.compiler', 101 'kfp.components', 102 'kfp.components.structures', 103 'kfp.containers', 104 'kfp.dsl', 105 'kfp.dsl.extensions', 106 'kfp.notebook', 107 'kfp.v2', 108 'kfp.v2.compiler', 109 'kfp.v2.components', 110 'kfp.v2.components.types', 111 'kfp.v2.components.experimental', 112 'kfp.v2.dsl', 113 'kfp.v2.google.client', 114 'kfp.v2.google.experimental', 115 ], 116 classifiers=[ 117 'Intended Audience :: Developers', 118 'Intended Audience :: Education', 119 'Intended Audience :: Science/Research', 120 'License :: OSI Approved :: Apache Software License', 121 'Programming Language :: Python :: 3', 122 'Programming Language :: Python :: 3.6', 123 'Programming Language :: Python :: 3.7', 124 'Programming Language :: Python :: 3.8', 125 'Programming Language :: Python :: 3.9', 126 'Topic :: Scientific/Engineering', 127 'Topic :: Scientific/Engineering :: Artificial Intelligence', 128 'Topic :: Software Development', 129 'Topic :: Software Development :: Libraries', 130 'Topic :: Software Development :: Libraries :: Python Modules', 131 ], 132 python_requires='>=3.6.1', 133 include_package_data=True, 134 entry_points={ 135 'console_scripts': [ 136 'dsl-compile = kfp.compiler.main:main', 137 'dsl-compile-v2 = kfp.v2.compiler.main:main', 138 'kfp=kfp.__main__:main' 139 ] 140 }) 141 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/sdk/python/setup.py b/sdk/python/setup.py --- a/sdk/python/setup.py +++ b/sdk/python/setup.py @@ -34,7 +34,7 @@ 'google-api-python-client>=1.7.8,<2', 'google-auth>=1.6.1,<2', 'requests-toolbelt>=0.8.0,<1', - 'cloudpickle>=1.3.0,<2', + 'cloudpickle>=2.0.0,<3', # Update the upper version whenever a new major version of the # kfp-server-api package is released. # Update the lower version when kfp sdk depends on new apis/fields in
{"golden_diff": "diff --git a/sdk/python/setup.py b/sdk/python/setup.py\n--- a/sdk/python/setup.py\n+++ b/sdk/python/setup.py\n@@ -34,7 +34,7 @@\n 'google-api-python-client>=1.7.8,<2',\n 'google-auth>=1.6.1,<2',\n 'requests-toolbelt>=0.8.0,<1',\n- 'cloudpickle>=1.3.0,<2',\n+ 'cloudpickle>=2.0.0,<3',\n # Update the upper version whenever a new major version of the\n # kfp-server-api package is released.\n # Update the lower version when kfp sdk depends on new apis/fields in\n", "issue": "Upgrade cloudpickle to > 2.0.0\nHello,\r\n\r\nOne of the features of pipelines is Step Caching (https://www.kubeflow.org/docs/components/pipelines/caching/) to avoid running the costly computations again and again. \r\n\r\nThe key for caching is:\r\n\r\n```\r\nmessage CacheKey {\r\n map<string, ArtifactNameList> inputArtifactNames = 1;\r\n map<string, Value> inputParameters = 2;\r\n map<string, RuntimeArtifact> outputArtifactsSpec = 3;\r\n map<string, string> outputParametersSpec=4;\r\n ContainerSpec containerSpec=5;\r\n}\r\n```\r\n\r\nWhen using the option `use_code_pickling` from https://github.com/kubeflow/pipelines/blob/74c7773ca40decfd0d4ed40dc93a6af591bbc190/sdk/python/kfp/components/_python_op.py#L516\r\n\r\nthe pickle of the function gets embedded in the `ContainerSpec` (and hence becomes part of the key). \r\n\r\nSo far, all good. \r\n\r\nHowever, the pickle is generated with `cloudpickle` which leads to non deterministic pickles every time you run the pipeline. As you can imagine, this makes caching feature useless because it will invalidate the cache every time it is run.\r\n\r\nThis non determinism was removed from `cloudpickle` with the following commit:\r\nhttps://github.com/cloudpipe/cloudpickle/pull/428 and released as part of `2.0.0` release:\r\nhttps://github.com/cloudpipe/cloudpickle/releases/tag/v2.0.0\r\n\r\nCurrently, `kfp` has bounded cloudpickle to less than v2.0.0 here:\r\nhttps://github.com/kubeflow/pipelines/blob/74c7773ca40decfd0d4ed40dc93a6af591bbc190/sdk/python/setup.py#L37\r\n\r\n\r\nWould it be possible to make a new `kfp` release with upgraded cloudpickle? Without this cloudpickle version, step caching is currently impossible to use (or at the mercy of dictionary insertion order of cloudpickle). \r\n\r\nThanks!\n", "before_files": [{"content": "# Copyright 2018 The Kubeflow Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport re\n\nfrom setuptools import setup\n\nNAME = 'kfp'\n#VERSION = .... 
Change the version in kfp/__init__.py\n\n# NOTICE, after any updates to the following, ./requirements.in should be updated\n# accordingly.\nREQUIRES = [\n 'absl-py>=0.9,<=0.11',\n 'PyYAML>=5.3,<6',\n # `Blob.from_string` was introduced in google-cloud-storage 1.20.0\n # https://github.com/googleapis/python-storage/blob/master/CHANGELOG.md#1200\n 'google-cloud-storage>=1.20.0,<2',\n 'kubernetes>=8.0.0,<19',\n # google-api-python-client v2 doesn't work for private dicovery by default:\n # https://github.com/googleapis/google-api-python-client/issues/1225#issuecomment-791058235\n 'google-api-python-client>=1.7.8,<2',\n 'google-auth>=1.6.1,<2',\n 'requests-toolbelt>=0.8.0,<1',\n 'cloudpickle>=1.3.0,<2',\n # Update the upper version whenever a new major version of the\n # kfp-server-api package is released.\n # Update the lower version when kfp sdk depends on new apis/fields in\n # kfp-server-api.\n # Note, please also update ./requirements.in\n 'kfp-server-api>=1.1.2,<2.0.0',\n 'jsonschema>=3.0.1,<4',\n 'tabulate>=0.8.6,<1',\n 'click>=7.1.2,<9',\n 'Deprecated>=1.2.7,<2',\n 'strip-hints>=0.1.8,<1',\n 'docstring-parser>=0.7.3,<1',\n 'kfp-pipeline-spec>=0.1.10,<0.2.0',\n 'fire>=0.3.1,<1',\n 'protobuf>=3.13.0,<4',\n 'uritemplate>=3.0.1,<4',\n 'pydantic>=1.8.2,<2',\n # Standard library backports\n 'dataclasses;python_version<\"3.7\"',\n 'typing-extensions>=3.7.4,<4;python_version<\"3.9\"',\n]\n\nTESTS_REQUIRE = [\n 'frozendict',\n]\n\n\ndef find_version(*file_path_parts):\n here = os.path.abspath(os.path.dirname(__file__))\n with open(os.path.join(here, *file_path_parts), 'r') as fp:\n version_file_text = fp.read()\n\n version_match = re.search(\n r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n version_file_text,\n re.M,\n )\n if version_match:\n return version_match.group(1)\n\n raise RuntimeError('Unable to find version string.')\n\n\nsetup(\n name=NAME,\n version=find_version('kfp', '__init__.py'),\n description='KubeFlow Pipelines SDK',\n author='The Kubeflow Authors',\n url=\"https://github.com/kubeflow/pipelines\",\n project_urls={\n \"Documentation\": \"https://kubeflow-pipelines.readthedocs.io/en/stable/\",\n \"Bug Tracker\": \"https://github.com/kubeflow/pipelines/issues\",\n \"Source\": \"https://github.com/kubeflow/pipelines/tree/master/sdk\",\n \"Changelog\": \"https://github.com/kubeflow/pipelines/blob/master/sdk/RELEASE.md\",\n },\n install_requires=REQUIRES,\n tests_require=TESTS_REQUIRE,\n packages=[\n 'kfp',\n 'kfp.auth',\n 'kfp.cli',\n 'kfp.cli.diagnose_me',\n 'kfp.compiler',\n 'kfp.components',\n 'kfp.components.structures',\n 'kfp.containers',\n 'kfp.dsl',\n 'kfp.dsl.extensions',\n 'kfp.notebook',\n 'kfp.v2',\n 'kfp.v2.compiler',\n 'kfp.v2.components',\n 'kfp.v2.components.types',\n 'kfp.v2.components.experimental',\n 'kfp.v2.dsl',\n 'kfp.v2.google.client',\n 'kfp.v2.google.experimental',\n ],\n classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n python_requires='>=3.6.1',\n 
include_package_data=True,\n entry_points={\n 'console_scripts': [\n 'dsl-compile = kfp.compiler.main:main',\n 'dsl-compile-v2 = kfp.v2.compiler.main:main',\n 'kfp=kfp.__main__:main'\n ]\n })\n", "path": "sdk/python/setup.py"}], "after_files": [{"content": "# Copyright 2018 The Kubeflow Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport re\n\nfrom setuptools import setup\n\nNAME = 'kfp'\n#VERSION = .... Change the version in kfp/__init__.py\n\n# NOTICE, after any updates to the following, ./requirements.in should be updated\n# accordingly.\nREQUIRES = [\n 'absl-py>=0.9,<=0.11',\n 'PyYAML>=5.3,<6',\n # `Blob.from_string` was introduced in google-cloud-storage 1.20.0\n # https://github.com/googleapis/python-storage/blob/master/CHANGELOG.md#1200\n 'google-cloud-storage>=1.20.0,<2',\n 'kubernetes>=8.0.0,<19',\n # google-api-python-client v2 doesn't work for private dicovery by default:\n # https://github.com/googleapis/google-api-python-client/issues/1225#issuecomment-791058235\n 'google-api-python-client>=1.7.8,<2',\n 'google-auth>=1.6.1,<2',\n 'requests-toolbelt>=0.8.0,<1',\n 'cloudpickle>=2.0.0,<3',\n # Update the upper version whenever a new major version of the\n # kfp-server-api package is released.\n # Update the lower version when kfp sdk depends on new apis/fields in\n # kfp-server-api.\n # Note, please also update ./requirements.in\n 'kfp-server-api>=1.1.2,<2.0.0',\n 'jsonschema>=3.0.1,<4',\n 'tabulate>=0.8.6,<1',\n 'click>=7.1.2,<9',\n 'Deprecated>=1.2.7,<2',\n 'strip-hints>=0.1.8,<1',\n 'docstring-parser>=0.7.3,<1',\n 'kfp-pipeline-spec>=0.1.10,<0.2.0',\n 'fire>=0.3.1,<1',\n 'protobuf>=3.13.0,<4',\n 'uritemplate>=3.0.1,<4',\n 'pydantic>=1.8.2,<2',\n # Standard library backports\n 'dataclasses;python_version<\"3.7\"',\n 'typing-extensions>=3.7.4,<4;python_version<\"3.9\"',\n]\n\nTESTS_REQUIRE = [\n 'frozendict',\n]\n\n\ndef find_version(*file_path_parts):\n here = os.path.abspath(os.path.dirname(__file__))\n with open(os.path.join(here, *file_path_parts), 'r') as fp:\n version_file_text = fp.read()\n\n version_match = re.search(\n r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\",\n version_file_text,\n re.M,\n )\n if version_match:\n return version_match.group(1)\n\n raise RuntimeError('Unable to find version string.')\n\n\nsetup(\n name=NAME,\n version=find_version('kfp', '__init__.py'),\n description='KubeFlow Pipelines SDK',\n author='The Kubeflow Authors',\n url=\"https://github.com/kubeflow/pipelines\",\n project_urls={\n \"Documentation\": \"https://kubeflow-pipelines.readthedocs.io/en/stable/\",\n \"Bug Tracker\": \"https://github.com/kubeflow/pipelines/issues\",\n \"Source\": \"https://github.com/kubeflow/pipelines/tree/master/sdk\",\n \"Changelog\": \"https://github.com/kubeflow/pipelines/blob/master/sdk/RELEASE.md\",\n },\n install_requires=REQUIRES,\n tests_require=TESTS_REQUIRE,\n packages=[\n 'kfp',\n 'kfp.auth',\n 'kfp.cli',\n 'kfp.cli.diagnose_me',\n 'kfp.compiler',\n 'kfp.components',\n 'kfp.components.structures',\n 'kfp.containers',\n 
'kfp.dsl',\n 'kfp.dsl.extensions',\n 'kfp.notebook',\n 'kfp.v2',\n 'kfp.v2.compiler',\n 'kfp.v2.components',\n 'kfp.v2.components.types',\n 'kfp.v2.components.experimental',\n 'kfp.v2.dsl',\n 'kfp.v2.google.client',\n 'kfp.v2.google.experimental',\n ],\n classifiers=[\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n python_requires='>=3.6.1',\n include_package_data=True,\n entry_points={\n 'console_scripts': [\n 'dsl-compile = kfp.compiler.main:main',\n 'dsl-compile-v2 = kfp.v2.compiler.main:main',\n 'kfp=kfp.__main__:main'\n ]\n })\n", "path": "sdk/python/setup.py"}]}
2345
152
gh_patches_debug_32883
rasdani/github-patches
git_diff
marshmallow-code__webargs-555
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- FalconParser should ideally support falcon's native media decoding Falcon has a native media handling mechanism which can decode an incoming request body based on the `Content-Type` header and adding the dictionary of resulting key-value pairs as a cached property `req.media`. I've written my own FalconParser subclass that (very naively) uses this, but it seems like something that might be worth supporting out of the box. ```python def parse_json(self, req, name, field): """ Pull a JSON body value from the request. uses falcon's native req.media """ json_data = self._cache.get("json_data") if json_data is None: self._cache["json_data"] = json_data = req.media return core.get_value(json_data, name, field, allow_many_nested=True) ``` This could probably be improved upon; since the `media` property is already cached on the request object, we could just access `req.media` directly without caching on the parser. (Not sure if this impacts other things that might use that cache, though; I haven't dug deep enough to fully understand that implication.) Also, since `media` was added in 1.3, if webargs still wanted to support older versions of falcon we could add a check for it and fall back to the existing behavior. Maybe something like: ```python def parse_json(self, req, name, field): """Pull a JSON body value from the request. .. note:: The request stream will be read and left at EOF. """ json_data = req.media if hasattr(req, 'media') else self._cache.get("json_data") if json_data is None: self._cache["json_data"] = json_data = parse_json_body(req) return core.get_value(json_data, name, field, allow_many_nested=True) ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/webargs/falconparser.py` Content: ``` 1 """Falcon request argument parsing module. 2 """ 3 import falcon 4 from falcon.util.uri import parse_query_string 5 6 from webargs import core 7 from webargs.multidictproxy import MultiDictProxy 8 9 HTTP_422 = "422 Unprocessable Entity" 10 11 # Mapping of int status codes to string status 12 status_map = {422: HTTP_422} 13 14 15 # Collect all exceptions from falcon.status_codes 16 def _find_exceptions(): 17 for name in filter(lambda n: n.startswith("HTTP"), dir(falcon.status_codes)): 18 status = getattr(falcon.status_codes, name) 19 status_code = int(status.split(" ")[0]) 20 status_map[status_code] = status 21 22 23 _find_exceptions() 24 del _find_exceptions 25 26 27 def is_json_request(req): 28 content_type = req.get_header("Content-Type") 29 return content_type and core.is_json(content_type) 30 31 32 # NOTE: Adapted from falcon.request.Request._parse_form_urlencoded 33 def parse_form_body(req): 34 if ( 35 req.content_type is not None 36 and "application/x-www-form-urlencoded" in req.content_type 37 ): 38 body = req.stream.read(req.content_length or 0) 39 try: 40 body = body.decode("ascii") 41 except UnicodeDecodeError: 42 body = None 43 req.log_error( 44 "Non-ASCII characters found in form body " 45 "with Content-Type of " 46 "application/x-www-form-urlencoded. Body " 47 "will be ignored." 
48 ) 49 50 if body: 51 return parse_query_string(body, keep_blank=req.options.keep_blank_qs_values) 52 53 return core.missing 54 55 56 class HTTPError(falcon.HTTPError): 57 """HTTPError that stores a dictionary of validation error messages.""" 58 59 def __init__(self, status, errors, *args, **kwargs): 60 self.errors = errors 61 super().__init__(status, *args, **kwargs) 62 63 def to_dict(self, *args, **kwargs): 64 """Override `falcon.HTTPError` to include error messages in responses.""" 65 ret = super().to_dict(*args, **kwargs) 66 if self.errors is not None: 67 ret["errors"] = self.errors 68 return ret 69 70 71 class FalconParser(core.Parser): 72 """Falcon request argument parser.""" 73 74 # Note on the use of MultiDictProxy throughout: 75 # Falcon parses query strings and form values into ordinary dicts, but with 76 # the values listified where appropriate 77 # it is still therefore necessary in these cases to wrap them in 78 # MultiDictProxy because we need to use the schema to determine when single 79 # values should be wrapped in lists due to the type of the destination 80 # field 81 82 def load_querystring(self, req, schema): 83 """Return query params from the request as a MultiDictProxy.""" 84 return MultiDictProxy(req.params, schema) 85 86 def load_form(self, req, schema): 87 """Return form values from the request as a MultiDictProxy 88 89 .. note:: 90 91 The request stream will be read and left at EOF. 92 """ 93 form = parse_form_body(req) 94 if form is core.missing: 95 return form 96 return MultiDictProxy(form, schema) 97 98 def _raw_load_json(self, req): 99 """Return a json payload from the request for the core parser's load_json 100 101 Checks the input mimetype and may return 'missing' if the mimetype is 102 non-json, even if the request body is parseable as json.""" 103 if not is_json_request(req) or req.content_length in (None, 0): 104 return core.missing 105 body = req.stream.read(req.content_length) 106 if body: 107 return core.parse_json(body) 108 return core.missing 109 110 def load_headers(self, req, schema): 111 """Return headers from the request.""" 112 # Falcon only exposes headers as a dict (not multidict) 113 return req.headers 114 115 def load_cookies(self, req, schema): 116 """Return cookies from the request.""" 117 # Cookies are expressed in Falcon as a dict, but the possibility of 118 # multiple values for a cookie is preserved internally -- if desired in 119 # the future, webargs could add a MultiDict type for Cookies here built 120 # from (req, schema), but Falcon does not provide one out of the box 121 return req.cookies 122 123 def get_request_from_view_args(self, view, args, kwargs): 124 """Get request from a resource method's arguments. Assumes that 125 request is the second argument. 
126 """ 127 req = args[1] 128 if not isinstance(req, falcon.Request): 129 raise TypeError("Argument is not a falcon.Request") 130 return req 131 132 def load_files(self, req, schema): 133 raise NotImplementedError( 134 f"Parsing files not yet supported by {self.__class__.__name__}" 135 ) 136 137 def handle_error(self, error, req, schema, *, error_status_code, error_headers): 138 """Handles errors during parsing.""" 139 status = status_map.get(error_status_code or self.DEFAULT_VALIDATION_STATUS) 140 if status is None: 141 raise LookupError(f"Status code {error_status_code} not supported") 142 raise HTTPError(status, errors=error.messages, headers=error_headers) 143 144 def _handle_invalid_json_error(self, error, req, *args, **kwargs): 145 status = status_map[400] 146 messages = {"json": ["Invalid JSON body."]} 147 raise HTTPError(status, errors=messages) 148 149 150 parser = FalconParser() 151 use_args = parser.use_args 152 use_kwargs = parser.use_kwargs 153 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/webargs/falconparser.py b/src/webargs/falconparser.py --- a/src/webargs/falconparser.py +++ b/src/webargs/falconparser.py @@ -3,6 +3,8 @@ import falcon from falcon.util.uri import parse_query_string +import marshmallow as ma + from webargs import core from webargs.multidictproxy import MultiDictProxy @@ -69,7 +71,21 @@ class FalconParser(core.Parser): - """Falcon request argument parser.""" + """Falcon request argument parser. + + Defaults to using the `media` location. See :py:meth:`~FalconParser.load_media` for + details on the media location.""" + + # by default, Falcon will use the 'media' location to load data + # + # this effectively looks the same as loading JSON data by default, but if + # you add a handler for a different media type to Falcon, webargs will + # automatically pick up on that capability + DEFAULT_LOCATION = "media" + DEFAULT_UNKNOWN_BY_LOCATION = dict( + media=ma.RAISE, **core.Parser.DEFAULT_UNKNOWN_BY_LOCATION + ) + __location_map__ = dict(media="load_media", **core.Parser.__location_map__) # Note on the use of MultiDictProxy throughout: # Falcon parses query strings and form values into ordinary dicts, but with @@ -95,6 +111,25 @@ return form return MultiDictProxy(form, schema) + def load_media(self, req, schema): + """Return data unpacked and parsed by one of Falcon's media handlers. + By default, Falcon only handles JSON payloads. + + To configure additional media handlers, see the + `Falcon documentation on media types`__. + + .. _FalconMedia: https://falcon.readthedocs.io/en/stable/api/media.html + __ FalconMedia_ + + .. note:: + + The request stream will be read and left at EOF. + """ + # if there is no body, return missing instead of erroring + if req.content_length in (None, 0): + return core.missing + return req.media + def _raw_load_json(self, req): """Return a json payload from the request for the core parser's load_json
{"golden_diff": "diff --git a/src/webargs/falconparser.py b/src/webargs/falconparser.py\n--- a/src/webargs/falconparser.py\n+++ b/src/webargs/falconparser.py\n@@ -3,6 +3,8 @@\n import falcon\n from falcon.util.uri import parse_query_string\n \n+import marshmallow as ma\n+\n from webargs import core\n from webargs.multidictproxy import MultiDictProxy\n \n@@ -69,7 +71,21 @@\n \n \n class FalconParser(core.Parser):\n- \"\"\"Falcon request argument parser.\"\"\"\n+ \"\"\"Falcon request argument parser.\n+\n+ Defaults to using the `media` location. See :py:meth:`~FalconParser.load_media` for\n+ details on the media location.\"\"\"\n+\n+ # by default, Falcon will use the 'media' location to load data\n+ #\n+ # this effectively looks the same as loading JSON data by default, but if\n+ # you add a handler for a different media type to Falcon, webargs will\n+ # automatically pick up on that capability\n+ DEFAULT_LOCATION = \"media\"\n+ DEFAULT_UNKNOWN_BY_LOCATION = dict(\n+ media=ma.RAISE, **core.Parser.DEFAULT_UNKNOWN_BY_LOCATION\n+ )\n+ __location_map__ = dict(media=\"load_media\", **core.Parser.__location_map__)\n \n # Note on the use of MultiDictProxy throughout:\n # Falcon parses query strings and form values into ordinary dicts, but with\n@@ -95,6 +111,25 @@\n return form\n return MultiDictProxy(form, schema)\n \n+ def load_media(self, req, schema):\n+ \"\"\"Return data unpacked and parsed by one of Falcon's media handlers.\n+ By default, Falcon only handles JSON payloads.\n+\n+ To configure additional media handlers, see the\n+ `Falcon documentation on media types`__.\n+\n+ .. _FalconMedia: https://falcon.readthedocs.io/en/stable/api/media.html\n+ __ FalconMedia_\n+\n+ .. note::\n+\n+ The request stream will be read and left at EOF.\n+ \"\"\"\n+ # if there is no body, return missing instead of erroring\n+ if req.content_length in (None, 0):\n+ return core.missing\n+ return req.media\n+\n def _raw_load_json(self, req):\n \"\"\"Return a json payload from the request for the core parser's load_json\n", "issue": "FalconParser should ideally support falcon's native media decoding\nFalcon has a native media handling mechanism which can decode an incoming request body based on the `Content-Type` header and adding the dictionary of resulting key-value pairs as a cached property `req.media`. I've written my own FalconParser subclass that (very naively) uses this, but it seems like something that might be worth supporting out of the box.\r\n\r\n```python\r\n def parse_json(self, req, name, field):\r\n \"\"\"\r\n Pull a JSON body value from the request.\r\n uses falcon's native req.media\r\n \"\"\"\r\n json_data = self._cache.get(\"json_data\")\r\n if json_data is None:\r\n self._cache[\"json_data\"] = json_data = req.media\r\n return core.get_value(json_data, name, field, allow_many_nested=True)\r\n```\r\n\r\nThis could probably be improved upon; since the `media` property is already cached on the request object, we could just access `req.media` directly without caching on the parser. (Not sure if this impacts other things that might use that cache, though; I haven't dug deep enough to fully understand that implication.) Also, since `media` was added in 1.3, if webargs still wanted to support older versions of falcon we could add a check for it and fall back to the existing behavior.\r\n\r\nMaybe something like:\r\n\r\n```python\r\n def parse_json(self, req, name, field):\r\n \"\"\"Pull a JSON body value from the request.\r\n .. 
note::\r\n The request stream will be read and left at EOF.\r\n \"\"\"\r\n json_data = req.media if hasattr(req, 'media') else self._cache.get(\"json_data\")\r\n if json_data is None:\r\n self._cache[\"json_data\"] = json_data = parse_json_body(req)\r\n return core.get_value(json_data, name, field, allow_many_nested=True)\r\n```\n", "before_files": [{"content": "\"\"\"Falcon request argument parsing module.\n\"\"\"\nimport falcon\nfrom falcon.util.uri import parse_query_string\n\nfrom webargs import core\nfrom webargs.multidictproxy import MultiDictProxy\n\nHTTP_422 = \"422 Unprocessable Entity\"\n\n# Mapping of int status codes to string status\nstatus_map = {422: HTTP_422}\n\n\n# Collect all exceptions from falcon.status_codes\ndef _find_exceptions():\n for name in filter(lambda n: n.startswith(\"HTTP\"), dir(falcon.status_codes)):\n status = getattr(falcon.status_codes, name)\n status_code = int(status.split(\" \")[0])\n status_map[status_code] = status\n\n\n_find_exceptions()\ndel _find_exceptions\n\n\ndef is_json_request(req):\n content_type = req.get_header(\"Content-Type\")\n return content_type and core.is_json(content_type)\n\n\n# NOTE: Adapted from falcon.request.Request._parse_form_urlencoded\ndef parse_form_body(req):\n if (\n req.content_type is not None\n and \"application/x-www-form-urlencoded\" in req.content_type\n ):\n body = req.stream.read(req.content_length or 0)\n try:\n body = body.decode(\"ascii\")\n except UnicodeDecodeError:\n body = None\n req.log_error(\n \"Non-ASCII characters found in form body \"\n \"with Content-Type of \"\n \"application/x-www-form-urlencoded. Body \"\n \"will be ignored.\"\n )\n\n if body:\n return parse_query_string(body, keep_blank=req.options.keep_blank_qs_values)\n\n return core.missing\n\n\nclass HTTPError(falcon.HTTPError):\n \"\"\"HTTPError that stores a dictionary of validation error messages.\"\"\"\n\n def __init__(self, status, errors, *args, **kwargs):\n self.errors = errors\n super().__init__(status, *args, **kwargs)\n\n def to_dict(self, *args, **kwargs):\n \"\"\"Override `falcon.HTTPError` to include error messages in responses.\"\"\"\n ret = super().to_dict(*args, **kwargs)\n if self.errors is not None:\n ret[\"errors\"] = self.errors\n return ret\n\n\nclass FalconParser(core.Parser):\n \"\"\"Falcon request argument parser.\"\"\"\n\n # Note on the use of MultiDictProxy throughout:\n # Falcon parses query strings and form values into ordinary dicts, but with\n # the values listified where appropriate\n # it is still therefore necessary in these cases to wrap them in\n # MultiDictProxy because we need to use the schema to determine when single\n # values should be wrapped in lists due to the type of the destination\n # field\n\n def load_querystring(self, req, schema):\n \"\"\"Return query params from the request as a MultiDictProxy.\"\"\"\n return MultiDictProxy(req.params, schema)\n\n def load_form(self, req, schema):\n \"\"\"Return form values from the request as a MultiDictProxy\n\n .. 
note::\n\n The request stream will be read and left at EOF.\n \"\"\"\n form = parse_form_body(req)\n if form is core.missing:\n return form\n return MultiDictProxy(form, schema)\n\n def _raw_load_json(self, req):\n \"\"\"Return a json payload from the request for the core parser's load_json\n\n Checks the input mimetype and may return 'missing' if the mimetype is\n non-json, even if the request body is parseable as json.\"\"\"\n if not is_json_request(req) or req.content_length in (None, 0):\n return core.missing\n body = req.stream.read(req.content_length)\n if body:\n return core.parse_json(body)\n return core.missing\n\n def load_headers(self, req, schema):\n \"\"\"Return headers from the request.\"\"\"\n # Falcon only exposes headers as a dict (not multidict)\n return req.headers\n\n def load_cookies(self, req, schema):\n \"\"\"Return cookies from the request.\"\"\"\n # Cookies are expressed in Falcon as a dict, but the possibility of\n # multiple values for a cookie is preserved internally -- if desired in\n # the future, webargs could add a MultiDict type for Cookies here built\n # from (req, schema), but Falcon does not provide one out of the box\n return req.cookies\n\n def get_request_from_view_args(self, view, args, kwargs):\n \"\"\"Get request from a resource method's arguments. Assumes that\n request is the second argument.\n \"\"\"\n req = args[1]\n if not isinstance(req, falcon.Request):\n raise TypeError(\"Argument is not a falcon.Request\")\n return req\n\n def load_files(self, req, schema):\n raise NotImplementedError(\n f\"Parsing files not yet supported by {self.__class__.__name__}\"\n )\n\n def handle_error(self, error, req, schema, *, error_status_code, error_headers):\n \"\"\"Handles errors during parsing.\"\"\"\n status = status_map.get(error_status_code or self.DEFAULT_VALIDATION_STATUS)\n if status is None:\n raise LookupError(f\"Status code {error_status_code} not supported\")\n raise HTTPError(status, errors=error.messages, headers=error_headers)\n\n def _handle_invalid_json_error(self, error, req, *args, **kwargs):\n status = status_map[400]\n messages = {\"json\": [\"Invalid JSON body.\"]}\n raise HTTPError(status, errors=messages)\n\n\nparser = FalconParser()\nuse_args = parser.use_args\nuse_kwargs = parser.use_kwargs\n", "path": "src/webargs/falconparser.py"}], "after_files": [{"content": "\"\"\"Falcon request argument parsing module.\n\"\"\"\nimport falcon\nfrom falcon.util.uri import parse_query_string\n\nimport marshmallow as ma\n\nfrom webargs import core\nfrom webargs.multidictproxy import MultiDictProxy\n\nHTTP_422 = \"422 Unprocessable Entity\"\n\n# Mapping of int status codes to string status\nstatus_map = {422: HTTP_422}\n\n\n# Collect all exceptions from falcon.status_codes\ndef _find_exceptions():\n for name in filter(lambda n: n.startswith(\"HTTP\"), dir(falcon.status_codes)):\n status = getattr(falcon.status_codes, name)\n status_code = int(status.split(\" \")[0])\n status_map[status_code] = status\n\n\n_find_exceptions()\ndel _find_exceptions\n\n\ndef is_json_request(req):\n content_type = req.get_header(\"Content-Type\")\n return content_type and core.is_json(content_type)\n\n\n# NOTE: Adapted from falcon.request.Request._parse_form_urlencoded\ndef parse_form_body(req):\n if (\n req.content_type is not None\n and \"application/x-www-form-urlencoded\" in req.content_type\n ):\n body = req.stream.read(req.content_length or 0)\n try:\n body = body.decode(\"ascii\")\n except UnicodeDecodeError:\n body = None\n req.log_error(\n \"Non-ASCII 
characters found in form body \"\n \"with Content-Type of \"\n \"application/x-www-form-urlencoded. Body \"\n \"will be ignored.\"\n )\n\n if body:\n return parse_query_string(body, keep_blank=req.options.keep_blank_qs_values)\n\n return core.missing\n\n\nclass HTTPError(falcon.HTTPError):\n \"\"\"HTTPError that stores a dictionary of validation error messages.\"\"\"\n\n def __init__(self, status, errors, *args, **kwargs):\n self.errors = errors\n super().__init__(status, *args, **kwargs)\n\n def to_dict(self, *args, **kwargs):\n \"\"\"Override `falcon.HTTPError` to include error messages in responses.\"\"\"\n ret = super().to_dict(*args, **kwargs)\n if self.errors is not None:\n ret[\"errors\"] = self.errors\n return ret\n\n\nclass FalconParser(core.Parser):\n \"\"\"Falcon request argument parser.\n\n Defaults to using the `media` location. See :py:meth:`~FalconParser.load_media` for\n details on the media location.\"\"\"\n\n # by default, Falcon will use the 'media' location to load data\n #\n # this effectively looks the same as loading JSON data by default, but if\n # you add a handler for a different media type to Falcon, webargs will\n # automatically pick up on that capability\n DEFAULT_LOCATION = \"media\"\n DEFAULT_UNKNOWN_BY_LOCATION = dict(\n media=ma.RAISE, **core.Parser.DEFAULT_UNKNOWN_BY_LOCATION\n )\n __location_map__ = dict(media=\"load_media\", **core.Parser.__location_map__)\n\n # Note on the use of MultiDictProxy throughout:\n # Falcon parses query strings and form values into ordinary dicts, but with\n # the values listified where appropriate\n # it is still therefore necessary in these cases to wrap them in\n # MultiDictProxy because we need to use the schema to determine when single\n # values should be wrapped in lists due to the type of the destination\n # field\n\n def load_querystring(self, req, schema):\n \"\"\"Return query params from the request as a MultiDictProxy.\"\"\"\n return MultiDictProxy(req.params, schema)\n\n def load_form(self, req, schema):\n \"\"\"Return form values from the request as a MultiDictProxy\n\n .. note::\n\n The request stream will be read and left at EOF.\n \"\"\"\n form = parse_form_body(req)\n if form is core.missing:\n return form\n return MultiDictProxy(form, schema)\n\n def load_media(self, req, schema):\n \"\"\"Return data unpacked and parsed by one of Falcon's media handlers.\n By default, Falcon only handles JSON payloads.\n\n To configure additional media handlers, see the\n `Falcon documentation on media types`__.\n\n .. _FalconMedia: https://falcon.readthedocs.io/en/stable/api/media.html\n __ FalconMedia_\n\n .. 
note::\n\n The request stream will be read and left at EOF.\n \"\"\"\n # if there is no body, return missing instead of erroring\n if req.content_length in (None, 0):\n return core.missing\n return req.media\n\n def _raw_load_json(self, req):\n \"\"\"Return a json payload from the request for the core parser's load_json\n\n Checks the input mimetype and may return 'missing' if the mimetype is\n non-json, even if the request body is parseable as json.\"\"\"\n if not is_json_request(req) or req.content_length in (None, 0):\n return core.missing\n body = req.stream.read(req.content_length)\n if body:\n return core.parse_json(body)\n return core.missing\n\n def load_headers(self, req, schema):\n \"\"\"Return headers from the request.\"\"\"\n # Falcon only exposes headers as a dict (not multidict)\n return req.headers\n\n def load_cookies(self, req, schema):\n \"\"\"Return cookies from the request.\"\"\"\n # Cookies are expressed in Falcon as a dict, but the possibility of\n # multiple values for a cookie is preserved internally -- if desired in\n # the future, webargs could add a MultiDict type for Cookies here built\n # from (req, schema), but Falcon does not provide one out of the box\n return req.cookies\n\n def get_request_from_view_args(self, view, args, kwargs):\n \"\"\"Get request from a resource method's arguments. Assumes that\n request is the second argument.\n \"\"\"\n req = args[1]\n if not isinstance(req, falcon.Request):\n raise TypeError(\"Argument is not a falcon.Request\")\n return req\n\n def load_files(self, req, schema):\n raise NotImplementedError(\n f\"Parsing files not yet supported by {self.__class__.__name__}\"\n )\n\n def handle_error(self, error, req, schema, *, error_status_code, error_headers):\n \"\"\"Handles errors during parsing.\"\"\"\n status = status_map.get(error_status_code or self.DEFAULT_VALIDATION_STATUS)\n if status is None:\n raise LookupError(f\"Status code {error_status_code} not supported\")\n raise HTTPError(status, errors=error.messages, headers=error_headers)\n\n def _handle_invalid_json_error(self, error, req, *args, **kwargs):\n status = status_map[400]\n messages = {\"json\": [\"Invalid JSON body.\"]}\n raise HTTPError(status, errors=messages)\n\n\nparser = FalconParser()\nuse_args = parser.use_args\nuse_kwargs = parser.use_kwargs\n", "path": "src/webargs/falconparser.py"}]}
2,196
531
gh_patches_debug_17978
rasdani/github-patches
git_diff
cython__cython-4952
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Make the cythonize command line more useful A couple of more common build options, such as `--numpy`, `--include_path`, `--libraries`, `--cflags`, `--ldflags` should be added to `cythonize` command line so that it can be used easily for the majority of building tasks. Setting up a `setup.py` script for every `pyx` file I need to write is really, really tedious, which put me off from cython for years before I finally tried it for the first time (and that was in IPython notebook with cython magic, so I still don't know how to write `setup.py`). I am sure that many beginners to cython are just scared of the complicated build process like me, and never have the chance to actually try it. Please make it more accessible. The `%%cython` magic in IPython has much better sane defaults and useful command line options than the `cythonize` script. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `Cython/Build/Cythonize.py` Content: ``` 1 #!/usr/bin/env python 2 3 from __future__ import absolute_import 4 5 import os 6 import shutil 7 import tempfile 8 from distutils.core import setup 9 10 from .Dependencies import cythonize, extended_iglob 11 from ..Utils import is_package_dir 12 from ..Compiler import Options 13 14 try: 15 import multiprocessing 16 parallel_compiles = int(multiprocessing.cpu_count() * 1.5) 17 except ImportError: 18 multiprocessing = None 19 parallel_compiles = 0 20 21 22 class _FakePool(object): 23 def map_async(self, func, args): 24 try: 25 from itertools import imap 26 except ImportError: 27 imap=map 28 for _ in imap(func, args): 29 pass 30 31 def close(self): 32 pass 33 34 def terminate(self): 35 pass 36 37 def join(self): 38 pass 39 40 41 def find_package_base(path): 42 base_dir, package_path = os.path.split(path) 43 while is_package_dir(base_dir): 44 base_dir, parent = os.path.split(base_dir) 45 package_path = '%s/%s' % (parent, package_path) 46 return base_dir, package_path 47 48 49 def cython_compile(path_pattern, options): 50 pool = None 51 all_paths = map(os.path.abspath, extended_iglob(path_pattern)) 52 try: 53 for path in all_paths: 54 if options.build_inplace: 55 base_dir = path 56 while not os.path.isdir(base_dir) or is_package_dir(base_dir): 57 base_dir = os.path.dirname(base_dir) 58 else: 59 base_dir = None 60 61 if os.path.isdir(path): 62 # recursively compiling a package 63 paths = [os.path.join(path, '**', '*.{py,pyx}')] 64 else: 65 # assume it's a file(-like thing) 66 paths = [path] 67 68 ext_modules = cythonize( 69 paths, 70 nthreads=options.parallel, 71 exclude_failures=options.keep_going, 72 exclude=options.excludes, 73 compiler_directives=options.directives, 74 compile_time_env=options.compile_time_env, 75 force=options.force, 76 quiet=options.quiet, 77 depfile=options.depfile, 78 **options.options) 79 80 if ext_modules and options.build: 81 if len(ext_modules) > 1 and options.parallel > 1: 82 if pool is None: 83 try: 84 pool = multiprocessing.Pool(options.parallel) 85 except OSError: 86 pool = _FakePool() 87 pool.map_async(run_distutils, [ 88 (base_dir, [ext]) for ext in ext_modules]) 89 else: 90 run_distutils((base_dir, ext_modules)) 91 except: 92 if pool is not None: 93 pool.terminate() 94 raise 95 else: 96 if pool is not None: 97 pool.close() 98 pool.join() 99 100 101 def run_distutils(args): 102 base_dir, ext_modules = args 103 script_args = 
['build_ext', '-i'] 104 cwd = os.getcwd() 105 temp_dir = None 106 try: 107 if base_dir: 108 os.chdir(base_dir) 109 temp_dir = tempfile.mkdtemp(dir=base_dir) 110 script_args.extend(['--build-temp', temp_dir]) 111 setup( 112 script_name='setup.py', 113 script_args=script_args, 114 ext_modules=ext_modules, 115 ) 116 finally: 117 if base_dir: 118 os.chdir(cwd) 119 if temp_dir and os.path.isdir(temp_dir): 120 shutil.rmtree(temp_dir) 121 122 123 def create_args_parser(): 124 from argparse import ArgumentParser 125 from ..Compiler.CmdLine import ParseDirectivesAction, ParseOptionsAction, ParseCompileTimeEnvAction 126 127 parser = ArgumentParser() 128 129 parser.add_argument('-X', '--directive', metavar='NAME=VALUE,...', 130 dest='directives', default={}, type=str, 131 action=ParseDirectivesAction, 132 help='set a compiler directive') 133 parser.add_argument('-E', '--compile-time-env', metavar='NAME=VALUE,...', 134 dest='compile_time_env', default={}, type=str, 135 action=ParseCompileTimeEnvAction, 136 help='set a compile time environment variable') 137 parser.add_argument('-s', '--option', metavar='NAME=VALUE', 138 dest='options', default={}, type=str, 139 action=ParseOptionsAction, 140 help='set a cythonize option') 141 parser.add_argument('-2', dest='language_level', action='store_const', const=2, default=None, 142 help='use Python 2 syntax mode by default') 143 parser.add_argument('-3', dest='language_level', action='store_const', const=3, 144 help='use Python 3 syntax mode by default') 145 parser.add_argument('--3str', dest='language_level', action='store_const', const='3str', 146 help='use Python 3 syntax mode by default') 147 parser.add_argument('-a', '--annotate', action='store_const', const='default', dest='annotate', 148 help='Produce a colorized HTML version of the source.') 149 parser.add_argument('--annotate-fullc', action='store_const', const='fullc', dest='annotate', 150 help='Produce a colorized HTML version of the source ' 151 'which includes entire generated C/C++-code.') 152 parser.add_argument('-x', '--exclude', metavar='PATTERN', dest='excludes', 153 action='append', default=[], 154 help='exclude certain file patterns from the compilation') 155 156 parser.add_argument('-b', '--build', dest='build', action='store_true', default=None, 157 help='build extension modules using distutils') 158 parser.add_argument('-i', '--inplace', dest='build_inplace', action='store_true', default=None, 159 help='build extension modules in place using distutils (implies -b)') 160 parser.add_argument('-j', '--parallel', dest='parallel', metavar='N', 161 type=int, default=parallel_compiles, 162 help=('run builds in N parallel jobs (default: %d)' % 163 parallel_compiles or 1)) 164 parser.add_argument('-f', '--force', dest='force', action='store_true', default=None, 165 help='force recompilation') 166 parser.add_argument('-q', '--quiet', dest='quiet', action='store_true', default=None, 167 help='be less verbose during compilation') 168 169 parser.add_argument('--lenient', dest='lenient', action='store_true', default=None, 170 help='increase Python compatibility by ignoring some compile time errors') 171 parser.add_argument('-k', '--keep-going', dest='keep_going', action='store_true', default=None, 172 help='compile as much as possible, ignore compilation failures') 173 parser.add_argument('--no-docstrings', dest='no_docstrings', action='store_true', default=None, 174 help='strip docstrings') 175 parser.add_argument('-M', '--depfile', action='store_true', help='produce depfiles for the sources') 176 
parser.add_argument('sources', nargs='*') 177 return parser 178 179 180 def parse_args_raw(parser, args): 181 options, unknown = parser.parse_known_args(args) 182 sources = options.sources 183 # if positional arguments were interspersed 184 # some of them are in unknown 185 for option in unknown: 186 if option.startswith('-'): 187 parser.error("unknown option "+option) 188 else: 189 sources.append(option) 190 del options.sources 191 return (options, sources) 192 193 194 def parse_args(args): 195 parser = create_args_parser() 196 options, args = parse_args_raw(parser, args) 197 198 if not args: 199 parser.error("no source files provided") 200 if options.build_inplace: 201 options.build = True 202 if multiprocessing is None: 203 options.parallel = 0 204 if options.language_level: 205 assert options.language_level in (2, 3, '3str') 206 options.options['language_level'] = options.language_level 207 208 if options.lenient: 209 # increase Python compatibility by ignoring compile time errors 210 Options.error_on_unknown_names = False 211 Options.error_on_uninitialized = False 212 213 if options.annotate: 214 Options.annotate = options.annotate 215 216 if options.no_docstrings: 217 Options.docstrings = False 218 219 return options, args 220 221 222 def main(args=None): 223 options, paths = parse_args(args) 224 225 for path in paths: 226 cython_compile(path, options) 227 228 229 if __name__ == '__main__': 230 main() 231 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/Cython/Build/Cythonize.py b/Cython/Build/Cythonize.py --- a/Cython/Build/Cythonize.py +++ b/Cython/Build/Cythonize.py @@ -121,10 +121,18 @@ def create_args_parser(): - from argparse import ArgumentParser + from argparse import ArgumentParser, RawDescriptionHelpFormatter from ..Compiler.CmdLine import ParseDirectivesAction, ParseOptionsAction, ParseCompileTimeEnvAction - parser = ArgumentParser() + parser = ArgumentParser( + formatter_class=RawDescriptionHelpFormatter, + epilog="""\ +Environment variables: + CYTHON_FORCE_REGEN: if set to 1, forces cythonize to regenerate the output files regardless + of modification times and changes. + Environment variables accepted by setuptools are supported to configure the C compiler and build: + https://setuptools.pypa.io/en/latest/userguide/ext_modules.html#compiler-and-linker-options""" + ) parser.add_argument('-X', '--directive', metavar='NAME=VALUE,...', dest='directives', default={}, type=str,
{"golden_diff": "diff --git a/Cython/Build/Cythonize.py b/Cython/Build/Cythonize.py\n--- a/Cython/Build/Cythonize.py\n+++ b/Cython/Build/Cythonize.py\n@@ -121,10 +121,18 @@\n \n \n def create_args_parser():\n- from argparse import ArgumentParser\n+ from argparse import ArgumentParser, RawDescriptionHelpFormatter\n from ..Compiler.CmdLine import ParseDirectivesAction, ParseOptionsAction, ParseCompileTimeEnvAction\n \n- parser = ArgumentParser()\n+ parser = ArgumentParser(\n+ formatter_class=RawDescriptionHelpFormatter,\n+ epilog=\"\"\"\\\n+Environment variables:\n+ CYTHON_FORCE_REGEN: if set to 1, forces cythonize to regenerate the output files regardless\n+ of modification times and changes.\n+ Environment variables accepted by setuptools are supported to configure the C compiler and build:\n+ https://setuptools.pypa.io/en/latest/userguide/ext_modules.html#compiler-and-linker-options\"\"\"\n+ )\n \n parser.add_argument('-X', '--directive', metavar='NAME=VALUE,...',\n dest='directives', default={}, type=str,\n", "issue": "Make the cythonize command line more useful\nA couple of more common build options, such as `--numpy`, `--include_path`, `--libraries`, `--cflags`, `--ldflags` should be added to `cythonize` command line so that it can be used easily for the majority of building tasks. \r\n\r\nSetting up a `setup.py` script for every `pyx` file I need to write is really, really tedious, which put me off from cython for years before I finally tried it for the first time (and that was in IPython notebook with cython magic, so I still don't know how to write `setup.py`). I am sure that many beginners to cython are just scared of the complicated build process like me, and never have the chance to actually try it. Please make it more accessible.\r\n\r\nThe `%%cython` magic in IPython has much better sane defaults and useful command line options than the `cythonize` script.\n", "before_files": [{"content": "#!/usr/bin/env python\n\nfrom __future__ import absolute_import\n\nimport os\nimport shutil\nimport tempfile\nfrom distutils.core import setup\n\nfrom .Dependencies import cythonize, extended_iglob\nfrom ..Utils import is_package_dir\nfrom ..Compiler import Options\n\ntry:\n import multiprocessing\n parallel_compiles = int(multiprocessing.cpu_count() * 1.5)\nexcept ImportError:\n multiprocessing = None\n parallel_compiles = 0\n\n\nclass _FakePool(object):\n def map_async(self, func, args):\n try:\n from itertools import imap\n except ImportError:\n imap=map\n for _ in imap(func, args):\n pass\n\n def close(self):\n pass\n\n def terminate(self):\n pass\n\n def join(self):\n pass\n\n\ndef find_package_base(path):\n base_dir, package_path = os.path.split(path)\n while is_package_dir(base_dir):\n base_dir, parent = os.path.split(base_dir)\n package_path = '%s/%s' % (parent, package_path)\n return base_dir, package_path\n\n\ndef cython_compile(path_pattern, options):\n pool = None\n all_paths = map(os.path.abspath, extended_iglob(path_pattern))\n try:\n for path in all_paths:\n if options.build_inplace:\n base_dir = path\n while not os.path.isdir(base_dir) or is_package_dir(base_dir):\n base_dir = os.path.dirname(base_dir)\n else:\n base_dir = None\n\n if os.path.isdir(path):\n # recursively compiling a package\n paths = [os.path.join(path, '**', '*.{py,pyx}')]\n else:\n # assume it's a file(-like thing)\n paths = [path]\n\n ext_modules = cythonize(\n paths,\n nthreads=options.parallel,\n exclude_failures=options.keep_going,\n exclude=options.excludes,\n compiler_directives=options.directives,\n 
compile_time_env=options.compile_time_env,\n force=options.force,\n quiet=options.quiet,\n depfile=options.depfile,\n **options.options)\n\n if ext_modules and options.build:\n if len(ext_modules) > 1 and options.parallel > 1:\n if pool is None:\n try:\n pool = multiprocessing.Pool(options.parallel)\n except OSError:\n pool = _FakePool()\n pool.map_async(run_distutils, [\n (base_dir, [ext]) for ext in ext_modules])\n else:\n run_distutils((base_dir, ext_modules))\n except:\n if pool is not None:\n pool.terminate()\n raise\n else:\n if pool is not None:\n pool.close()\n pool.join()\n\n\ndef run_distutils(args):\n base_dir, ext_modules = args\n script_args = ['build_ext', '-i']\n cwd = os.getcwd()\n temp_dir = None\n try:\n if base_dir:\n os.chdir(base_dir)\n temp_dir = tempfile.mkdtemp(dir=base_dir)\n script_args.extend(['--build-temp', temp_dir])\n setup(\n script_name='setup.py',\n script_args=script_args,\n ext_modules=ext_modules,\n )\n finally:\n if base_dir:\n os.chdir(cwd)\n if temp_dir and os.path.isdir(temp_dir):\n shutil.rmtree(temp_dir)\n\n\ndef create_args_parser():\n from argparse import ArgumentParser\n from ..Compiler.CmdLine import ParseDirectivesAction, ParseOptionsAction, ParseCompileTimeEnvAction\n\n parser = ArgumentParser()\n\n parser.add_argument('-X', '--directive', metavar='NAME=VALUE,...',\n dest='directives', default={}, type=str,\n action=ParseDirectivesAction,\n help='set a compiler directive')\n parser.add_argument('-E', '--compile-time-env', metavar='NAME=VALUE,...',\n dest='compile_time_env', default={}, type=str,\n action=ParseCompileTimeEnvAction,\n help='set a compile time environment variable')\n parser.add_argument('-s', '--option', metavar='NAME=VALUE',\n dest='options', default={}, type=str,\n action=ParseOptionsAction,\n help='set a cythonize option')\n parser.add_argument('-2', dest='language_level', action='store_const', const=2, default=None,\n help='use Python 2 syntax mode by default')\n parser.add_argument('-3', dest='language_level', action='store_const', const=3,\n help='use Python 3 syntax mode by default')\n parser.add_argument('--3str', dest='language_level', action='store_const', const='3str',\n help='use Python 3 syntax mode by default')\n parser.add_argument('-a', '--annotate', action='store_const', const='default', dest='annotate',\n help='Produce a colorized HTML version of the source.')\n parser.add_argument('--annotate-fullc', action='store_const', const='fullc', dest='annotate',\n help='Produce a colorized HTML version of the source '\n 'which includes entire generated C/C++-code.')\n parser.add_argument('-x', '--exclude', metavar='PATTERN', dest='excludes',\n action='append', default=[],\n help='exclude certain file patterns from the compilation')\n\n parser.add_argument('-b', '--build', dest='build', action='store_true', default=None,\n help='build extension modules using distutils')\n parser.add_argument('-i', '--inplace', dest='build_inplace', action='store_true', default=None,\n help='build extension modules in place using distutils (implies -b)')\n parser.add_argument('-j', '--parallel', dest='parallel', metavar='N',\n type=int, default=parallel_compiles,\n help=('run builds in N parallel jobs (default: %d)' %\n parallel_compiles or 1))\n parser.add_argument('-f', '--force', dest='force', action='store_true', default=None,\n help='force recompilation')\n parser.add_argument('-q', '--quiet', dest='quiet', action='store_true', default=None,\n help='be less verbose during compilation')\n\n parser.add_argument('--lenient', 
dest='lenient', action='store_true', default=None,\n help='increase Python compatibility by ignoring some compile time errors')\n parser.add_argument('-k', '--keep-going', dest='keep_going', action='store_true', default=None,\n help='compile as much as possible, ignore compilation failures')\n parser.add_argument('--no-docstrings', dest='no_docstrings', action='store_true', default=None,\n help='strip docstrings')\n parser.add_argument('-M', '--depfile', action='store_true', help='produce depfiles for the sources')\n parser.add_argument('sources', nargs='*')\n return parser\n\n\ndef parse_args_raw(parser, args):\n options, unknown = parser.parse_known_args(args)\n sources = options.sources\n # if positional arguments were interspersed\n # some of them are in unknown\n for option in unknown:\n if option.startswith('-'):\n parser.error(\"unknown option \"+option)\n else:\n sources.append(option)\n del options.sources\n return (options, sources)\n\n\ndef parse_args(args):\n parser = create_args_parser()\n options, args = parse_args_raw(parser, args)\n\n if not args:\n parser.error(\"no source files provided\")\n if options.build_inplace:\n options.build = True\n if multiprocessing is None:\n options.parallel = 0\n if options.language_level:\n assert options.language_level in (2, 3, '3str')\n options.options['language_level'] = options.language_level\n\n if options.lenient:\n # increase Python compatibility by ignoring compile time errors\n Options.error_on_unknown_names = False\n Options.error_on_uninitialized = False\n\n if options.annotate:\n Options.annotate = options.annotate\n\n if options.no_docstrings:\n Options.docstrings = False\n\n return options, args\n\n\ndef main(args=None):\n options, paths = parse_args(args)\n\n for path in paths:\n cython_compile(path, options)\n\n\nif __name__ == '__main__':\n main()\n", "path": "Cython/Build/Cythonize.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nfrom __future__ import absolute_import\n\nimport os\nimport shutil\nimport tempfile\nfrom distutils.core import setup\n\nfrom .Dependencies import cythonize, extended_iglob\nfrom ..Utils import is_package_dir\nfrom ..Compiler import Options\n\ntry:\n import multiprocessing\n parallel_compiles = int(multiprocessing.cpu_count() * 1.5)\nexcept ImportError:\n multiprocessing = None\n parallel_compiles = 0\n\n\nclass _FakePool(object):\n def map_async(self, func, args):\n try:\n from itertools import imap\n except ImportError:\n imap=map\n for _ in imap(func, args):\n pass\n\n def close(self):\n pass\n\n def terminate(self):\n pass\n\n def join(self):\n pass\n\n\ndef find_package_base(path):\n base_dir, package_path = os.path.split(path)\n while is_package_dir(base_dir):\n base_dir, parent = os.path.split(base_dir)\n package_path = '%s/%s' % (parent, package_path)\n return base_dir, package_path\n\n\ndef cython_compile(path_pattern, options):\n pool = None\n all_paths = map(os.path.abspath, extended_iglob(path_pattern))\n try:\n for path in all_paths:\n if options.build_inplace:\n base_dir = path\n while not os.path.isdir(base_dir) or is_package_dir(base_dir):\n base_dir = os.path.dirname(base_dir)\n else:\n base_dir = None\n\n if os.path.isdir(path):\n # recursively compiling a package\n paths = [os.path.join(path, '**', '*.{py,pyx}')]\n else:\n # assume it's a file(-like thing)\n paths = [path]\n\n ext_modules = cythonize(\n paths,\n nthreads=options.parallel,\n exclude_failures=options.keep_going,\n exclude=options.excludes,\n compiler_directives=options.directives,\n 
compile_time_env=options.compile_time_env,\n force=options.force,\n quiet=options.quiet,\n depfile=options.depfile,\n **options.options)\n\n if ext_modules and options.build:\n if len(ext_modules) > 1 and options.parallel > 1:\n if pool is None:\n try:\n pool = multiprocessing.Pool(options.parallel)\n except OSError:\n pool = _FakePool()\n pool.map_async(run_distutils, [\n (base_dir, [ext]) for ext in ext_modules])\n else:\n run_distutils((base_dir, ext_modules))\n except:\n if pool is not None:\n pool.terminate()\n raise\n else:\n if pool is not None:\n pool.close()\n pool.join()\n\n\ndef run_distutils(args):\n base_dir, ext_modules = args\n script_args = ['build_ext', '-i']\n cwd = os.getcwd()\n temp_dir = None\n try:\n if base_dir:\n os.chdir(base_dir)\n temp_dir = tempfile.mkdtemp(dir=base_dir)\n script_args.extend(['--build-temp', temp_dir])\n setup(\n script_name='setup.py',\n script_args=script_args,\n ext_modules=ext_modules,\n )\n finally:\n if base_dir:\n os.chdir(cwd)\n if temp_dir and os.path.isdir(temp_dir):\n shutil.rmtree(temp_dir)\n\n\ndef create_args_parser():\n from argparse import ArgumentParser, RawDescriptionHelpFormatter\n from ..Compiler.CmdLine import ParseDirectivesAction, ParseOptionsAction, ParseCompileTimeEnvAction\n\n parser = ArgumentParser(\n formatter_class=RawDescriptionHelpFormatter,\n epilog=\"\"\"\\\nEnvironment variables:\n CYTHON_FORCE_REGEN: if set to 1, forces cythonize to regenerate the output files regardless\n of modification times and changes.\n Environment variables accepted by setuptools are supported to configure the C compiler and build:\n https://setuptools.pypa.io/en/latest/userguide/ext_modules.html#compiler-and-linker-options\"\"\"\n )\n\n parser.add_argument('-X', '--directive', metavar='NAME=VALUE,...',\n dest='directives', default={}, type=str,\n action=ParseDirectivesAction,\n help='set a compiler directive')\n parser.add_argument('-E', '--compile-time-env', metavar='NAME=VALUE,...',\n dest='compile_time_env', default={}, type=str,\n action=ParseCompileTimeEnvAction,\n help='set a compile time environment variable')\n parser.add_argument('-s', '--option', metavar='NAME=VALUE',\n dest='options', default={}, type=str,\n action=ParseOptionsAction,\n help='set a cythonize option')\n parser.add_argument('-2', dest='language_level', action='store_const', const=2, default=None,\n help='use Python 2 syntax mode by default')\n parser.add_argument('-3', dest='language_level', action='store_const', const=3,\n help='use Python 3 syntax mode by default')\n parser.add_argument('--3str', dest='language_level', action='store_const', const='3str',\n help='use Python 3 syntax mode by default')\n parser.add_argument('-a', '--annotate', action='store_const', const='default', dest='annotate',\n help='Produce a colorized HTML version of the source.')\n parser.add_argument('--annotate-fullc', action='store_const', const='fullc', dest='annotate',\n help='Produce a colorized HTML version of the source '\n 'which includes entire generated C/C++-code.')\n parser.add_argument('-x', '--exclude', metavar='PATTERN', dest='excludes',\n action='append', default=[],\n help='exclude certain file patterns from the compilation')\n\n parser.add_argument('-b', '--build', dest='build', action='store_true', default=None,\n help='build extension modules using distutils')\n parser.add_argument('-i', '--inplace', dest='build_inplace', action='store_true', default=None,\n help='build extension modules in place using distutils (implies -b)')\n parser.add_argument('-j', 
'--parallel', dest='parallel', metavar='N',\n type=int, default=parallel_compiles,\n help=('run builds in N parallel jobs (default: %d)' %\n parallel_compiles or 1))\n parser.add_argument('-f', '--force', dest='force', action='store_true', default=None,\n help='force recompilation')\n parser.add_argument('-q', '--quiet', dest='quiet', action='store_true', default=None,\n help='be less verbose during compilation')\n\n parser.add_argument('--lenient', dest='lenient', action='store_true', default=None,\n help='increase Python compatibility by ignoring some compile time errors')\n parser.add_argument('-k', '--keep-going', dest='keep_going', action='store_true', default=None,\n help='compile as much as possible, ignore compilation failures')\n parser.add_argument('--no-docstrings', dest='no_docstrings', action='store_true', default=None,\n help='strip docstrings')\n parser.add_argument('-M', '--depfile', action='store_true', help='produce depfiles for the sources')\n parser.add_argument('sources', nargs='*')\n return parser\n\n\ndef parse_args_raw(parser, args):\n options, unknown = parser.parse_known_args(args)\n sources = options.sources\n # if positional arguments were interspersed\n # some of them are in unknown\n for option in unknown:\n if option.startswith('-'):\n parser.error(\"unknown option \"+option)\n else:\n sources.append(option)\n del options.sources\n return (options, sources)\n\n\ndef parse_args(args):\n parser = create_args_parser()\n options, args = parse_args_raw(parser, args)\n\n if not args:\n parser.error(\"no source files provided\")\n if options.build_inplace:\n options.build = True\n if multiprocessing is None:\n options.parallel = 0\n if options.language_level:\n assert options.language_level in (2, 3, '3str')\n options.options['language_level'] = options.language_level\n\n if options.lenient:\n # increase Python compatibility by ignoring compile time errors\n Options.error_on_unknown_names = False\n Options.error_on_uninitialized = False\n\n if options.annotate:\n Options.annotate = options.annotate\n\n if options.no_docstrings:\n Options.docstrings = False\n\n return options, args\n\n\ndef main(args=None):\n options, paths = parse_args(args)\n\n for path in paths:\n cython_compile(path, options)\n\n\nif __name__ == '__main__':\n main()\n", "path": "Cython/Build/Cythonize.py"}]}
2,811
249
gh_patches_debug_27959
rasdani/github-patches
git_diff
pwndbg__pwndbg-2009
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- pattern_create & run features It is annoying to have to have multiple versions of gdb to complete some of my work. I don't understand why this feature hasn't been brought over yet like gdb-peda has implemented. Reversing takes long enough, this would make our lives a little bit easier. I would like to add the pattern_create feature into pwndbg. As well as run, so that I can quickly create our cyclic values and then run our output (run < payload-100.txt) so we can check the registers in under 10 seconds without restarting the program. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `pwndbg/commands/cyclic.py` Content: ``` 1 from __future__ import annotations 2 3 import argparse 4 import string 5 6 import gdb 7 from pwnlib.util.cyclic import cyclic 8 from pwnlib.util.cyclic import cyclic_find 9 10 import pwndbg.commands 11 import pwndbg.gdblib.arch 12 from pwndbg.color import message 13 14 parser = argparse.ArgumentParser(description="Cyclic pattern creator/finder.") 15 16 parser.add_argument( 17 "-a", 18 "--alphabet", 19 metavar="charset", 20 default=string.ascii_lowercase, 21 type=str.encode, 22 help="The alphabet to use in the cyclic pattern", 23 ) 24 25 parser.add_argument( 26 "-n", 27 "--length", 28 metavar="length", 29 type=int, 30 help="Size of the unique subsequences (defaults to the pointer size for the current arch)", 31 ) 32 33 group = parser.add_mutually_exclusive_group(required=False) 34 group.add_argument( 35 "-l", 36 "-o", 37 "--offset", 38 "--lookup", 39 dest="lookup", 40 metavar="lookup_value", 41 type=str, 42 help="Do a lookup instead of printing the sequence (accepts constant values as well as expressions)", 43 ) 44 45 group.add_argument( 46 "count", 47 type=int, 48 nargs="?", 49 default=100, 50 help="Number of characters to print from the sequence (default: print the entire sequence)", 51 ) 52 53 54 @pwndbg.commands.ArgparsedCommand(parser, command_name="cyclic") 55 def cyclic_cmd(alphabet, length, lookup, count=100) -> None: 56 if length: 57 # Convert from gdb.Value 58 length = int(length) 59 else: 60 length = pwndbg.gdblib.arch.ptrsize 61 62 if lookup: 63 lookup = pwndbg.commands.fix(lookup, sloppy=True) 64 65 if isinstance(lookup, (gdb.Value, int)): 66 lookup = int(lookup).to_bytes(length, pwndbg.gdblib.arch.endian) 67 elif isinstance(lookup, str): 68 lookup = bytes(lookup, "utf-8") 69 70 if len(lookup) != length: 71 print( 72 message.error( 73 f"Lookup pattern must be {length} bytes (use `-n <length>` to lookup pattern of different length)" 74 ) 75 ) 76 return 77 78 hexstr = "0x" + lookup.hex() 79 print( 80 message.notice( 81 f"Finding cyclic pattern of {length} bytes: {str(lookup)} (hex: {hexstr})" 82 ) 83 ) 84 85 if any(c not in alphabet for c in lookup): 86 print(message.error("Pattern contains characters not present in the alphabet")) 87 return 88 89 offset = cyclic_find(lookup, alphabet, length) 90 91 if offset == -1: 92 print(message.error("Given lookup pattern does not exist in the sequence")) 93 else: 94 print(message.success(f"Found at offset {offset}")) 95 else: 96 sequence = cyclic(int(count), alphabet, length) 97 print(sequence.decode()) 98 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/pwndbg/commands/cyclic.py b/pwndbg/commands/cyclic.py --- a/pwndbg/commands/cyclic.py +++ b/pwndbg/commands/cyclic.py @@ -30,6 +30,7 @@ help="Size of the unique subsequences (defaults to the pointer size for the current arch)", ) + group = parser.add_mutually_exclusive_group(required=False) group.add_argument( "-l", @@ -50,9 +51,17 @@ help="Number of characters to print from the sequence (default: print the entire sequence)", ) +parser.add_argument( + "filename", + type=str, + help="Name (path) of the file to save the cyclic pattern to", + default="", + nargs="?", +) + @pwndbg.commands.ArgparsedCommand(parser, command_name="cyclic") -def cyclic_cmd(alphabet, length, lookup, count=100) -> None: +def cyclic_cmd(alphabet, length, lookup, count=100, filename="") -> None: if length: # Convert from gdb.Value length = int(length) @@ -93,5 +102,12 @@ else: print(message.success(f"Found at offset {offset}")) else: - sequence = cyclic(int(count), alphabet, length) - print(sequence.decode()) + count = int(count) + sequence = cyclic(count, alphabet, length) + + if not filename: + print(sequence.decode()) + else: + with open(filename, "wb") as f: + f.write(sequence) + print(f"Written a cyclic sequence of length {count} to file {filename}")
{"golden_diff": "diff --git a/pwndbg/commands/cyclic.py b/pwndbg/commands/cyclic.py\n--- a/pwndbg/commands/cyclic.py\n+++ b/pwndbg/commands/cyclic.py\n@@ -30,6 +30,7 @@\n help=\"Size of the unique subsequences (defaults to the pointer size for the current arch)\",\n )\n \n+\n group = parser.add_mutually_exclusive_group(required=False)\n group.add_argument(\n \"-l\",\n@@ -50,9 +51,17 @@\n help=\"Number of characters to print from the sequence (default: print the entire sequence)\",\n )\n \n+parser.add_argument(\n+ \"filename\",\n+ type=str,\n+ help=\"Name (path) of the file to save the cyclic pattern to\",\n+ default=\"\",\n+ nargs=\"?\",\n+)\n+\n \n @pwndbg.commands.ArgparsedCommand(parser, command_name=\"cyclic\")\n-def cyclic_cmd(alphabet, length, lookup, count=100) -> None:\n+def cyclic_cmd(alphabet, length, lookup, count=100, filename=\"\") -> None:\n if length:\n # Convert from gdb.Value\n length = int(length)\n@@ -93,5 +102,12 @@\n else:\n print(message.success(f\"Found at offset {offset}\"))\n else:\n- sequence = cyclic(int(count), alphabet, length)\n- print(sequence.decode())\n+ count = int(count)\n+ sequence = cyclic(count, alphabet, length)\n+\n+ if not filename:\n+ print(sequence.decode())\n+ else:\n+ with open(filename, \"wb\") as f:\n+ f.write(sequence)\n+ print(f\"Written a cyclic sequence of length {count} to file {filename}\")\n", "issue": "pattern_create & run features\nIt is annoying to have to have multiple versions of gdb to complete some of my work. I don't understand why this feature hasn't been brought over yet like gdb-peda has implemented. Reversing takes long enough, this would make our lives a little bit easier.\r\n\r\nI would like to add the pattern_create feature into pwndbg. As well as run, so that I can quickly create our cyclic values and then run our output (run < payload-100.txt) so we can check the registers in under 10 seconds without restarting the program. 
\n", "before_files": [{"content": "from __future__ import annotations\n\nimport argparse\nimport string\n\nimport gdb\nfrom pwnlib.util.cyclic import cyclic\nfrom pwnlib.util.cyclic import cyclic_find\n\nimport pwndbg.commands\nimport pwndbg.gdblib.arch\nfrom pwndbg.color import message\n\nparser = argparse.ArgumentParser(description=\"Cyclic pattern creator/finder.\")\n\nparser.add_argument(\n \"-a\",\n \"--alphabet\",\n metavar=\"charset\",\n default=string.ascii_lowercase,\n type=str.encode,\n help=\"The alphabet to use in the cyclic pattern\",\n)\n\nparser.add_argument(\n \"-n\",\n \"--length\",\n metavar=\"length\",\n type=int,\n help=\"Size of the unique subsequences (defaults to the pointer size for the current arch)\",\n)\n\ngroup = parser.add_mutually_exclusive_group(required=False)\ngroup.add_argument(\n \"-l\",\n \"-o\",\n \"--offset\",\n \"--lookup\",\n dest=\"lookup\",\n metavar=\"lookup_value\",\n type=str,\n help=\"Do a lookup instead of printing the sequence (accepts constant values as well as expressions)\",\n)\n\ngroup.add_argument(\n \"count\",\n type=int,\n nargs=\"?\",\n default=100,\n help=\"Number of characters to print from the sequence (default: print the entire sequence)\",\n)\n\n\[email protected](parser, command_name=\"cyclic\")\ndef cyclic_cmd(alphabet, length, lookup, count=100) -> None:\n if length:\n # Convert from gdb.Value\n length = int(length)\n else:\n length = pwndbg.gdblib.arch.ptrsize\n\n if lookup:\n lookup = pwndbg.commands.fix(lookup, sloppy=True)\n\n if isinstance(lookup, (gdb.Value, int)):\n lookup = int(lookup).to_bytes(length, pwndbg.gdblib.arch.endian)\n elif isinstance(lookup, str):\n lookup = bytes(lookup, \"utf-8\")\n\n if len(lookup) != length:\n print(\n message.error(\n f\"Lookup pattern must be {length} bytes (use `-n <length>` to lookup pattern of different length)\"\n )\n )\n return\n\n hexstr = \"0x\" + lookup.hex()\n print(\n message.notice(\n f\"Finding cyclic pattern of {length} bytes: {str(lookup)} (hex: {hexstr})\"\n )\n )\n\n if any(c not in alphabet for c in lookup):\n print(message.error(\"Pattern contains characters not present in the alphabet\"))\n return\n\n offset = cyclic_find(lookup, alphabet, length)\n\n if offset == -1:\n print(message.error(\"Given lookup pattern does not exist in the sequence\"))\n else:\n print(message.success(f\"Found at offset {offset}\"))\n else:\n sequence = cyclic(int(count), alphabet, length)\n print(sequence.decode())\n", "path": "pwndbg/commands/cyclic.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport argparse\nimport string\n\nimport gdb\nfrom pwnlib.util.cyclic import cyclic\nfrom pwnlib.util.cyclic import cyclic_find\n\nimport pwndbg.commands\nimport pwndbg.gdblib.arch\nfrom pwndbg.color import message\n\nparser = argparse.ArgumentParser(description=\"Cyclic pattern creator/finder.\")\n\nparser.add_argument(\n \"-a\",\n \"--alphabet\",\n metavar=\"charset\",\n default=string.ascii_lowercase,\n type=str.encode,\n help=\"The alphabet to use in the cyclic pattern\",\n)\n\nparser.add_argument(\n \"-n\",\n \"--length\",\n metavar=\"length\",\n type=int,\n help=\"Size of the unique subsequences (defaults to the pointer size for the current arch)\",\n)\n\n\ngroup = parser.add_mutually_exclusive_group(required=False)\ngroup.add_argument(\n \"-l\",\n \"-o\",\n \"--offset\",\n \"--lookup\",\n dest=\"lookup\",\n metavar=\"lookup_value\",\n type=str,\n help=\"Do a lookup instead of printing the sequence (accepts constant values as well as 
expressions)\",\n)\n\ngroup.add_argument(\n \"count\",\n type=int,\n nargs=\"?\",\n default=100,\n help=\"Number of characters to print from the sequence (default: print the entire sequence)\",\n)\n\nparser.add_argument(\n \"filename\",\n type=str,\n help=\"Name (path) of the file to save the cyclic pattern to\",\n default=\"\",\n nargs=\"?\",\n)\n\n\[email protected](parser, command_name=\"cyclic\")\ndef cyclic_cmd(alphabet, length, lookup, count=100, filename=\"\") -> None:\n if length:\n # Convert from gdb.Value\n length = int(length)\n else:\n length = pwndbg.gdblib.arch.ptrsize\n\n if lookup:\n lookup = pwndbg.commands.fix(lookup, sloppy=True)\n\n if isinstance(lookup, (gdb.Value, int)):\n lookup = int(lookup).to_bytes(length, pwndbg.gdblib.arch.endian)\n elif isinstance(lookup, str):\n lookup = bytes(lookup, \"utf-8\")\n\n if len(lookup) != length:\n print(\n message.error(\n f\"Lookup pattern must be {length} bytes (use `-n <length>` to lookup pattern of different length)\"\n )\n )\n return\n\n hexstr = \"0x\" + lookup.hex()\n print(\n message.notice(\n f\"Finding cyclic pattern of {length} bytes: {str(lookup)} (hex: {hexstr})\"\n )\n )\n\n if any(c not in alphabet for c in lookup):\n print(message.error(\"Pattern contains characters not present in the alphabet\"))\n return\n\n offset = cyclic_find(lookup, alphabet, length)\n\n if offset == -1:\n print(message.error(\"Given lookup pattern does not exist in the sequence\"))\n else:\n print(message.success(f\"Found at offset {offset}\"))\n else:\n count = int(count)\n sequence = cyclic(count, alphabet, length)\n\n if not filename:\n print(sequence.decode())\n else:\n with open(filename, \"wb\") as f:\n f.write(sequence)\n print(f\"Written a cyclic sequence of length {count} to file {filename}\")\n", "path": "pwndbg/commands/cyclic.py"}]}
1,190
378
gh_patches_debug_22972
rasdani/github-patches
git_diff
mars-project__mars-3021
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [BUG] Duplicate operands execution <!-- Thank you for your contribution! Please review https://github.com/mars-project/mars/blob/master/CONTRIBUTING.rst before opening an issue. --> **Describe the bug** The case is `mars.core.entity.tests.test_utils.test_recursive_tile`. It generates a chunk graph with duplicate operands (the op key are same, the execution output are same). I added some code to check the operands in stage.py: ``` python class TaskStageProcessor: def __init__( self, stage_id: str, task: Task, chunk_graph: ChunkGraph, subtask_graph: SubtaskGraph, bands: List[BandType], tile_context: Dict[TileableType, TileableType], scheduling_api: SchedulingAPI, meta_api: MetaAPI, ): self.stage_id = stage_id self.task = task self.chunk_graph = chunk_graph self.subtask_graph = subtask_graph self._bands = bands self._tile_context = tile_context # Check for duplicate operand. from collections import Counter unique_keys = Counter(c.key for c in chunk_graph) if len(unique_keys) < len(chunk_graph): import pprint pprint.pprint([c for c in chunk_graph]) pprint.pprint([k for k, v in unique_keys.items() if v > 1]) subtask_graph.view() ``` The output is: ```python [Chunk <op=TensorRand, key=560bdcf1ae4978a999cbe3fd620c65d7_0>, Chunk <op=TensorRand, key=5aed4a149f5084ce95b9212afb61f24d_0>, Chunk <op=TensorRand, key=6fa461793a5d0feef9905c945653653f_0>, Chunk <op=TensorRand, key=688bd5f2d8fe39ce3e1a8af91b53b65c_0>, Chunk <op=TensorSlice, key=a9cd6ffd308ed11c3fccd2753f9a4aba_0>, Chunk <op=PSRSShuffle, stage=reduce, key=94c1f8fa627bcd5960ad795f7c692671_0>, Chunk <op=TensorShuffleProxy, key=6e7794b968faabcbebdfb289789c1ab9_0>, Chunk <op=PSRSShuffle, stage=map, key=0ebf73717ef2479456a6351c6209a90e_0>, Chunk <op=PSRSShuffle, stage=map, key=eb053dc8499bc7cdc12d8382f199e096_0>, Chunk <op=PSRSSortRegularSample, key=125e3a7f78bcb6c939668f03a3b94787_0>, Chunk <op=PSRSConcatPivot, key=58ce89bb01132fd0fd757246ef654c40_0>, Chunk <op=PSRSSortRegularSample, key=98e601719051e980b4d9f3f20b20e437_1>, Chunk <op=PSRSSortRegularSample, key=125e3a7f78bcb6c939668f03a3b94787_1>, Chunk <op=PSRSSortRegularSample, key=98e601719051e980b4d9f3f20b20e437_0>, Chunk <op=TensorSlice, key=bdbeadf9202694d9c8f86b9fc8fc2db2_0>, Chunk <op=PSRSShuffle, stage=reduce, key=22974984dc0b50cff4d077e0413202d2_0>, Chunk <op=PSRSShuffle, stage=reduce, key=6c78a27cd13360463638ae139958297e_0>, Chunk <op=TensorShuffleProxy, key=fd341cca1ae125e359e9a2274928605f_0>, Chunk <op=PSRSShuffle, stage=map, key=7d9f9ea924cf86e2aba771257f46db95_0>, Chunk <op=PSRSShuffle, stage=map, key=45359c01dfab34dd6d552687e3e9cf86_0>, Chunk <op=PSRSSortRegularSample, key=cac04cc478ecb467c0f1a7bca4bf809c_0>, Chunk <op=PSRSConcatPivot, key=ee1f386a09653687d7f3802aaf6d8cd3_0>, Chunk <op=PSRSSortRegularSample, key=321f2eda3a690282eb2d75ea0aa2614e_1>, Chunk <op=PSRSSortRegularSample, key=cac04cc478ecb467c0f1a7bca4bf809c_1>, Chunk <op=PSRSSortRegularSample, key=321f2eda3a690282eb2d75ea0aa2614e_0>, Chunk <op=PSRSShuffle, stage=reduce, key=02d268f3091f3c95271acc314e0ef0fd_0>, Chunk <op=PSRSShuffle, stage=reduce, key=94c1f8fa627bcd5960ad795f7c692671_0>, Chunk <op=TensorShuffleProxy, key=6e7794b968faabcbebdfb289789c1ab9_0>, Chunk <op=PSRSShuffle, stage=map, key=0ebf73717ef2479456a6351c6209a90e_0>, Chunk <op=PSRSShuffle, stage=map, key=eb053dc8499bc7cdc12d8382f199e096_0>, Chunk <op=PSRSSortRegularSample, key=125e3a7f78bcb6c939668f03a3b94787_0>, Chunk <op=PSRSConcatPivot, 
key=58ce89bb01132fd0fd757246ef654c40_0>, Chunk <op=PSRSSortRegularSample, key=98e601719051e980b4d9f3f20b20e437_1>, Chunk <op=PSRSSortRegularSample, key=125e3a7f78bcb6c939668f03a3b94787_1>, Chunk <op=PSRSSortRegularSample, key=98e601719051e980b4d9f3f20b20e437_0>, Chunk <op=PSRSShuffle, stage=reduce, key=22974984dc0b50cff4d077e0413202d2_0>] ['94c1f8fa627bcd5960ad795f7c692671_0', '6e7794b968faabcbebdfb289789c1ab9_0', '0ebf73717ef2479456a6351c6209a90e_0', 'eb053dc8499bc7cdc12d8382f199e096_0', '125e3a7f78bcb6c939668f03a3b94787_0', '58ce89bb01132fd0fd757246ef654c40_0', '98e601719051e980b4d9f3f20b20e437_1', '125e3a7f78bcb6c939668f03a3b94787_1', '98e601719051e980b4d9f3f20b20e437_0', '22974984dc0b50cff4d077e0413202d2_0'] ``` Dump graph: [default.pdf](https://github.com/mars-project/mars/files/8645133/default.pdf) I added check output code to the `PSRSShuffle._execute_map`: ```python for i in range(op.n_partition): key = (out.key, (i,)) value = tuple(reduce_outputs[i].ravel()) if key in ctx: existing_value = ctx[key] np.testing.assert_array_equal(existing_value, value) raise Exception(f"The key already exists: {key}") ctx[out.key, (i,)] = tuple(reduce_outputs[i].ravel()) ``` It raises an exception and the `assert_array_equal` passes. ```python for i in range(op.n_partition): key = (out.key, (i,)) value = tuple(reduce_outputs[i].ravel()) if key in ctx: existing_value = ctx[key] np.testing.assert_array_equal(existing_value, value) > raise Exception(f"The key already exists: {key}") E Exception: The key already exists: ('eb053dc8499bc7cdc12d8382f199e096_0', (0,)) mars/tensor/base/psrs.py:753: Exception ``` **To Reproduce** To help us reproducing this bug, please provide information below: 1. Your Python version 3.7.7 2. The version of Mars you use Latest master 3. Versions of crucial packages, such as numpy, scipy and pandas 4. Full stack of the error. 5. Minimized code to reproduce the error. **Expected behavior** A clear and concise description of what you expected to happen. **Additional context** Add any other context about the problem here. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mars/core/entity/utils.py` Content: ``` 1 # Copyright 1999-2021 Alibaba Group Holding Ltd. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 
14 15 from typing import List, Union, Generator 16 17 from ...typing import TileableType, ChunkType 18 from ...utils import has_unknown_shape, calc_nsplits 19 20 21 def refresh_tileable_shape(tileable): 22 if has_unknown_shape(tileable): 23 # update shape 24 nsplits = calc_nsplits({c.index: c.shape for c in tileable.chunks}) 25 shape = tuple(sum(ns) for ns in nsplits) 26 tileable._nsplits = nsplits 27 tileable._shape = shape 28 29 30 def tile(tileable, *tileables: TileableType): 31 from ..graph import ( 32 TileableGraph, 33 TileableGraphBuilder, 34 ChunkGraphBuilder, 35 TileContext, 36 ) 37 38 raw_tileables = target_tileables = [tileable] + list(tileables) 39 target_tileables = [t.data if hasattr(t, "data") else t for t in target_tileables] 40 41 tileable_graph = TileableGraph(target_tileables) 42 tileable_graph_builder = TileableGraphBuilder(tileable_graph) 43 next(tileable_graph_builder.build()) 44 45 # tile 46 tile_context = TileContext() 47 chunk_graph_builder = ChunkGraphBuilder( 48 tileable_graph, fuse_enabled=False, tile_context=tile_context 49 ) 50 next(chunk_graph_builder.build()) 51 52 if len(tileables) == 0: 53 return type(tileable)(tile_context[target_tileables[0]]) 54 else: 55 return [ 56 type(raw_t)(tile_context[t]) 57 for raw_t, t in zip(raw_tileables, target_tileables) 58 ] 59 60 61 def recursive_tile( 62 tileable: TileableType, *tileables: TileableType 63 ) -> Generator[ 64 List[ChunkType], List[ChunkType], Union[TileableType, List[TileableType]] 65 ]: 66 from .tileables import handler 67 68 return_list = len(tileables) > 0 69 if not return_list and isinstance(tileable, (list, tuple)): 70 return_list = True 71 raw = tileable 72 tileable = raw[0] 73 tileables = raw[1:] 74 75 to_tile = [tileable] + list(tileables) 76 q = [t for t in to_tile if t.is_coarse()] 77 while q: 78 t = q[-1] 79 cs = [c for c in t.inputs if c.is_coarse()] 80 if cs: 81 q.extend(cs) 82 continue 83 for obj in handler.tile(t.op.outputs): 84 to_update_inputs = [] 85 chunks = [] 86 for inp in t.op.inputs: 87 chunks.extend(inp.chunks) 88 if has_unknown_shape(inp): 89 to_update_inputs.append(inp) 90 if obj is None: 91 yield chunks + to_update_inputs 92 else: 93 yield obj + to_update_inputs 94 q.pop() 95 96 if not return_list: 97 return tileable 98 else: 99 return [tileable] + list(tileables) 100 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mars/core/entity/utils.py b/mars/core/entity/utils.py --- a/mars/core/entity/utils.py +++ b/mars/core/entity/utils.py @@ -76,21 +76,24 @@ q = [t for t in to_tile if t.is_coarse()] while q: t = q[-1] - cs = [c for c in t.inputs if c.is_coarse()] - if cs: - q.extend(cs) - continue - for obj in handler.tile(t.op.outputs): - to_update_inputs = [] - chunks = [] - for inp in t.op.inputs: - chunks.extend(inp.chunks) - if has_unknown_shape(inp): - to_update_inputs.append(inp) - if obj is None: - yield chunks + to_update_inputs - else: - yield obj + to_update_inputs + if t.is_coarse(): + # t may be put into q repeatedly, + # so we check if it's tiled or not + cs = [c for c in t.inputs if c.is_coarse()] + if cs: + q.extend(cs) + continue + for obj in handler.tile(t.op.outputs): + to_update_inputs = [] + chunks = [] + for inp in t.op.inputs: + chunks.extend(inp.chunks) + if has_unknown_shape(inp): + to_update_inputs.append(inp) + if obj is None: + yield chunks + to_update_inputs + else: + yield obj + to_update_inputs q.pop() if not return_list:
{"golden_diff": "diff --git a/mars/core/entity/utils.py b/mars/core/entity/utils.py\n--- a/mars/core/entity/utils.py\n+++ b/mars/core/entity/utils.py\n@@ -76,21 +76,24 @@\n q = [t for t in to_tile if t.is_coarse()]\n while q:\n t = q[-1]\n- cs = [c for c in t.inputs if c.is_coarse()]\n- if cs:\n- q.extend(cs)\n- continue\n- for obj in handler.tile(t.op.outputs):\n- to_update_inputs = []\n- chunks = []\n- for inp in t.op.inputs:\n- chunks.extend(inp.chunks)\n- if has_unknown_shape(inp):\n- to_update_inputs.append(inp)\n- if obj is None:\n- yield chunks + to_update_inputs\n- else:\n- yield obj + to_update_inputs\n+ if t.is_coarse():\n+ # t may be put into q repeatedly,\n+ # so we check if it's tiled or not\n+ cs = [c for c in t.inputs if c.is_coarse()]\n+ if cs:\n+ q.extend(cs)\n+ continue\n+ for obj in handler.tile(t.op.outputs):\n+ to_update_inputs = []\n+ chunks = []\n+ for inp in t.op.inputs:\n+ chunks.extend(inp.chunks)\n+ if has_unknown_shape(inp):\n+ to_update_inputs.append(inp)\n+ if obj is None:\n+ yield chunks + to_update_inputs\n+ else:\n+ yield obj + to_update_inputs\n q.pop()\n \n if not return_list:\n", "issue": "[BUG] Duplicate operands execution\n<!--\r\nThank you for your contribution!\r\n\r\nPlease review https://github.com/mars-project/mars/blob/master/CONTRIBUTING.rst before opening an issue.\r\n-->\r\n\r\n**Describe the bug**\r\n\r\nThe case is `mars.core.entity.tests.test_utils.test_recursive_tile`. It generates a chunk graph with duplicate operands (the op key are same, the execution output are same).\r\n\r\nI added some code to check the operands in stage.py:\r\n\r\n``` python\r\nclass TaskStageProcessor:\r\n def __init__(\r\n self,\r\n stage_id: str,\r\n task: Task,\r\n chunk_graph: ChunkGraph,\r\n subtask_graph: SubtaskGraph,\r\n bands: List[BandType],\r\n tile_context: Dict[TileableType, TileableType],\r\n scheduling_api: SchedulingAPI,\r\n meta_api: MetaAPI,\r\n ):\r\n self.stage_id = stage_id\r\n self.task = task\r\n self.chunk_graph = chunk_graph\r\n self.subtask_graph = subtask_graph\r\n self._bands = bands\r\n self._tile_context = tile_context\r\n\r\n # Check for duplicate operand.\r\n from collections import Counter\r\n unique_keys = Counter(c.key for c in chunk_graph)\r\n if len(unique_keys) < len(chunk_graph):\r\n import pprint\r\n pprint.pprint([c for c in chunk_graph])\r\n pprint.pprint([k for k, v in unique_keys.items() if v > 1])\r\n subtask_graph.view()\r\n```\r\n\r\nThe output is:\r\n\r\n```python\r\n[Chunk <op=TensorRand, key=560bdcf1ae4978a999cbe3fd620c65d7_0>,\r\n Chunk <op=TensorRand, key=5aed4a149f5084ce95b9212afb61f24d_0>,\r\n Chunk <op=TensorRand, key=6fa461793a5d0feef9905c945653653f_0>,\r\n Chunk <op=TensorRand, key=688bd5f2d8fe39ce3e1a8af91b53b65c_0>,\r\n Chunk <op=TensorSlice, key=a9cd6ffd308ed11c3fccd2753f9a4aba_0>,\r\n Chunk <op=PSRSShuffle, stage=reduce, key=94c1f8fa627bcd5960ad795f7c692671_0>,\r\n Chunk <op=TensorShuffleProxy, key=6e7794b968faabcbebdfb289789c1ab9_0>,\r\n Chunk <op=PSRSShuffle, stage=map, key=0ebf73717ef2479456a6351c6209a90e_0>,\r\n Chunk <op=PSRSShuffle, stage=map, key=eb053dc8499bc7cdc12d8382f199e096_0>,\r\n Chunk <op=PSRSSortRegularSample, key=125e3a7f78bcb6c939668f03a3b94787_0>,\r\n Chunk <op=PSRSConcatPivot, key=58ce89bb01132fd0fd757246ef654c40_0>,\r\n Chunk <op=PSRSSortRegularSample, key=98e601719051e980b4d9f3f20b20e437_1>,\r\n Chunk <op=PSRSSortRegularSample, key=125e3a7f78bcb6c939668f03a3b94787_1>,\r\n Chunk <op=PSRSSortRegularSample, key=98e601719051e980b4d9f3f20b20e437_0>,\r\n Chunk <op=TensorSlice, 
key=bdbeadf9202694d9c8f86b9fc8fc2db2_0>,\r\n Chunk <op=PSRSShuffle, stage=reduce, key=22974984dc0b50cff4d077e0413202d2_0>,\r\n Chunk <op=PSRSShuffle, stage=reduce, key=6c78a27cd13360463638ae139958297e_0>,\r\n Chunk <op=TensorShuffleProxy, key=fd341cca1ae125e359e9a2274928605f_0>,\r\n Chunk <op=PSRSShuffle, stage=map, key=7d9f9ea924cf86e2aba771257f46db95_0>,\r\n Chunk <op=PSRSShuffle, stage=map, key=45359c01dfab34dd6d552687e3e9cf86_0>,\r\n Chunk <op=PSRSSortRegularSample, key=cac04cc478ecb467c0f1a7bca4bf809c_0>,\r\n Chunk <op=PSRSConcatPivot, key=ee1f386a09653687d7f3802aaf6d8cd3_0>,\r\n Chunk <op=PSRSSortRegularSample, key=321f2eda3a690282eb2d75ea0aa2614e_1>,\r\n Chunk <op=PSRSSortRegularSample, key=cac04cc478ecb467c0f1a7bca4bf809c_1>,\r\n Chunk <op=PSRSSortRegularSample, key=321f2eda3a690282eb2d75ea0aa2614e_0>,\r\n Chunk <op=PSRSShuffle, stage=reduce, key=02d268f3091f3c95271acc314e0ef0fd_0>,\r\n Chunk <op=PSRSShuffle, stage=reduce, key=94c1f8fa627bcd5960ad795f7c692671_0>,\r\n Chunk <op=TensorShuffleProxy, key=6e7794b968faabcbebdfb289789c1ab9_0>,\r\n Chunk <op=PSRSShuffle, stage=map, key=0ebf73717ef2479456a6351c6209a90e_0>,\r\n Chunk <op=PSRSShuffle, stage=map, key=eb053dc8499bc7cdc12d8382f199e096_0>,\r\n Chunk <op=PSRSSortRegularSample, key=125e3a7f78bcb6c939668f03a3b94787_0>,\r\n Chunk <op=PSRSConcatPivot, key=58ce89bb01132fd0fd757246ef654c40_0>,\r\n Chunk <op=PSRSSortRegularSample, key=98e601719051e980b4d9f3f20b20e437_1>,\r\n Chunk <op=PSRSSortRegularSample, key=125e3a7f78bcb6c939668f03a3b94787_1>,\r\n Chunk <op=PSRSSortRegularSample, key=98e601719051e980b4d9f3f20b20e437_0>,\r\n Chunk <op=PSRSShuffle, stage=reduce, key=22974984dc0b50cff4d077e0413202d2_0>]\r\n['94c1f8fa627bcd5960ad795f7c692671_0',\r\n '6e7794b968faabcbebdfb289789c1ab9_0',\r\n '0ebf73717ef2479456a6351c6209a90e_0',\r\n 'eb053dc8499bc7cdc12d8382f199e096_0',\r\n '125e3a7f78bcb6c939668f03a3b94787_0',\r\n '58ce89bb01132fd0fd757246ef654c40_0',\r\n '98e601719051e980b4d9f3f20b20e437_1',\r\n '125e3a7f78bcb6c939668f03a3b94787_1',\r\n '98e601719051e980b4d9f3f20b20e437_0',\r\n '22974984dc0b50cff4d077e0413202d2_0']\r\n```\r\n\r\nDump graph: [default.pdf](https://github.com/mars-project/mars/files/8645133/default.pdf)\r\n\r\nI added check output code to the `PSRSShuffle._execute_map`:\r\n```python\r\nfor i in range(op.n_partition):\r\n key = (out.key, (i,))\r\n value = tuple(reduce_outputs[i].ravel())\r\n if key in ctx:\r\n existing_value = ctx[key]\r\n np.testing.assert_array_equal(existing_value, value)\r\n raise Exception(f\"The key already exists: {key}\")\r\n ctx[out.key, (i,)] = tuple(reduce_outputs[i].ravel())\r\n```\r\n\r\nIt raises an exception and the `assert_array_equal` passes.\r\n\r\n```python\r\n for i in range(op.n_partition):\r\n key = (out.key, (i,))\r\n value = tuple(reduce_outputs[i].ravel())\r\n if key in ctx:\r\n existing_value = ctx[key]\r\n np.testing.assert_array_equal(existing_value, value)\r\n> raise Exception(f\"The key already exists: {key}\")\r\nE Exception: The key already exists: ('eb053dc8499bc7cdc12d8382f199e096_0', (0,))\r\n\r\nmars/tensor/base/psrs.py:753: Exception\r\n```\r\n\r\n**To Reproduce**\r\nTo help us reproducing this bug, please provide information below:\r\n1. Your Python version 3.7.7\r\n2. The version of Mars you use Latest master\r\n3. Versions of crucial packages, such as numpy, scipy and pandas\r\n4. Full stack of the error.\r\n5. 
Minimized code to reproduce the error.\r\n\r\n**Expected behavior**\r\nA clear and concise description of what you expected to happen.\r\n\r\n**Additional context**\r\nAdd any other context about the problem here.\r\n\n", "before_files": [{"content": "# Copyright 1999-2021 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import List, Union, Generator\n\nfrom ...typing import TileableType, ChunkType\nfrom ...utils import has_unknown_shape, calc_nsplits\n\n\ndef refresh_tileable_shape(tileable):\n if has_unknown_shape(tileable):\n # update shape\n nsplits = calc_nsplits({c.index: c.shape for c in tileable.chunks})\n shape = tuple(sum(ns) for ns in nsplits)\n tileable._nsplits = nsplits\n tileable._shape = shape\n\n\ndef tile(tileable, *tileables: TileableType):\n from ..graph import (\n TileableGraph,\n TileableGraphBuilder,\n ChunkGraphBuilder,\n TileContext,\n )\n\n raw_tileables = target_tileables = [tileable] + list(tileables)\n target_tileables = [t.data if hasattr(t, \"data\") else t for t in target_tileables]\n\n tileable_graph = TileableGraph(target_tileables)\n tileable_graph_builder = TileableGraphBuilder(tileable_graph)\n next(tileable_graph_builder.build())\n\n # tile\n tile_context = TileContext()\n chunk_graph_builder = ChunkGraphBuilder(\n tileable_graph, fuse_enabled=False, tile_context=tile_context\n )\n next(chunk_graph_builder.build())\n\n if len(tileables) == 0:\n return type(tileable)(tile_context[target_tileables[0]])\n else:\n return [\n type(raw_t)(tile_context[t])\n for raw_t, t in zip(raw_tileables, target_tileables)\n ]\n\n\ndef recursive_tile(\n tileable: TileableType, *tileables: TileableType\n) -> Generator[\n List[ChunkType], List[ChunkType], Union[TileableType, List[TileableType]]\n]:\n from .tileables import handler\n\n return_list = len(tileables) > 0\n if not return_list and isinstance(tileable, (list, tuple)):\n return_list = True\n raw = tileable\n tileable = raw[0]\n tileables = raw[1:]\n\n to_tile = [tileable] + list(tileables)\n q = [t for t in to_tile if t.is_coarse()]\n while q:\n t = q[-1]\n cs = [c for c in t.inputs if c.is_coarse()]\n if cs:\n q.extend(cs)\n continue\n for obj in handler.tile(t.op.outputs):\n to_update_inputs = []\n chunks = []\n for inp in t.op.inputs:\n chunks.extend(inp.chunks)\n if has_unknown_shape(inp):\n to_update_inputs.append(inp)\n if obj is None:\n yield chunks + to_update_inputs\n else:\n yield obj + to_update_inputs\n q.pop()\n\n if not return_list:\n return tileable\n else:\n return [tileable] + list(tileables)\n", "path": "mars/core/entity/utils.py"}], "after_files": [{"content": "# Copyright 1999-2021 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed 
on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import List, Union, Generator\n\nfrom ...typing import TileableType, ChunkType\nfrom ...utils import has_unknown_shape, calc_nsplits\n\n\ndef refresh_tileable_shape(tileable):\n if has_unknown_shape(tileable):\n # update shape\n nsplits = calc_nsplits({c.index: c.shape for c in tileable.chunks})\n shape = tuple(sum(ns) for ns in nsplits)\n tileable._nsplits = nsplits\n tileable._shape = shape\n\n\ndef tile(tileable, *tileables: TileableType):\n from ..graph import (\n TileableGraph,\n TileableGraphBuilder,\n ChunkGraphBuilder,\n TileContext,\n )\n\n raw_tileables = target_tileables = [tileable] + list(tileables)\n target_tileables = [t.data if hasattr(t, \"data\") else t for t in target_tileables]\n\n tileable_graph = TileableGraph(target_tileables)\n tileable_graph_builder = TileableGraphBuilder(tileable_graph)\n next(tileable_graph_builder.build())\n\n # tile\n tile_context = TileContext()\n chunk_graph_builder = ChunkGraphBuilder(\n tileable_graph, fuse_enabled=False, tile_context=tile_context\n )\n next(chunk_graph_builder.build())\n\n if len(tileables) == 0:\n return type(tileable)(tile_context[target_tileables[0]])\n else:\n return [\n type(raw_t)(tile_context[t])\n for raw_t, t in zip(raw_tileables, target_tileables)\n ]\n\n\ndef recursive_tile(\n tileable: TileableType, *tileables: TileableType\n) -> Generator[\n List[ChunkType], List[ChunkType], Union[TileableType, List[TileableType]]\n]:\n from .tileables import handler\n\n return_list = len(tileables) > 0\n if not return_list and isinstance(tileable, (list, tuple)):\n return_list = True\n raw = tileable\n tileable = raw[0]\n tileables = raw[1:]\n\n to_tile = [tileable] + list(tileables)\n q = [t for t in to_tile if t.is_coarse()]\n while q:\n t = q[-1]\n if t.is_coarse():\n # t may be put into q repeatedly,\n # so we check if it's tiled or not\n cs = [c for c in t.inputs if c.is_coarse()]\n if cs:\n q.extend(cs)\n continue\n for obj in handler.tile(t.op.outputs):\n to_update_inputs = []\n chunks = []\n for inp in t.op.inputs:\n chunks.extend(inp.chunks)\n if has_unknown_shape(inp):\n to_update_inputs.append(inp)\n if obj is None:\n yield chunks + to_update_inputs\n else:\n yield obj + to_update_inputs\n q.pop()\n\n if not return_list:\n return tileable\n else:\n return [tileable] + list(tileables)\n", "path": "mars/core/entity/utils.py"}]}
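The patch recorded above guards `recursive_tile` against processing the same coarse tileable twice when it appears on the work stack more than once. A small, hypothetical sketch of that stack-deduplication pattern (illustrative names only, not the actual Mars code) is:

```python
# Hypothetical sketch of the pattern used in the patch above: a work stack may
# hold the same node twice, so re-check whether it still needs processing
# before handling it; popping an already-processed duplicate is then harmless.
def drain(stack, is_done, dependencies, process):
    while stack:
        node = stack[-1]
        if not is_done(node):
            pending = [d for d in dependencies(node) if not is_done(d)]
            if pending:
                stack.extend(pending)  # revisit node after its deps are done
                continue
            process(node)  # expected to mark node as done
        stack.pop()  # duplicates fall through here without reprocessing
```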
3,860
354
gh_patches_debug_40208
rasdani/github-patches
git_diff
rlworkgroup__garage-1583
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- RaySampler does not use all workers In release 2019.10, [RaySampler](https://github.com/rlworkgroup/garage/blob/release-2019.10/src/garage/sampler/ray_sampler.py) sees only one of the workers as idle for all iterations after the first iteration. This can be seen with [examples/tf/trpo_swimmer_ray_sampler.py](https://github.com/rlworkgroup/garage/blob/release-2019.10/examples/tf/trpo_swimmer_ray_sampler.py). To me it looks like `self._idle_worker_ids` is not correctly updated at the end of `obtain_samples()` [here](https://github.com/rlworkgroup/garage/blob/d6c993526c23c289ebc94635a78186d9bf197f32/src/garage/sampler/ray_sampler.py#L130). Only those workers which have returned with a result are put back in the idle pool. This is a problem because results from some workers are not processed at the end and these workers don't go back into the idle pool. A quick fix (?) is to reset the list of idle workers ids to all workers ids at the start of `obtain samples()`. This does result in all the workers being used for all iterations. Since the agent parameters are updated in the next iteration, the samples from the previously not ready workers must be valid. However, I am not totally sure. @avnishn @ryanjulian Appreciate it if the fix can be back-ported to 2019.10! --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/garage/sampler/ray_sampler.py` Content: ``` 1 """This is an implementation of an on policy batch sampler. 2 3 Uses a data parallel design. 4 Included is a sampler that deploys sampler workers. 5 6 The sampler workers must implement some type of set agent parameters 7 function, and a rollout function 8 """ 9 from collections import defaultdict 10 import pickle 11 12 import numpy as np 13 import psutil 14 import ray 15 16 from garage.experiment import deterministic 17 from garage.misc.prog_bar_counter import ProgBarCounter 18 from garage.sampler.base import BaseSampler 19 20 21 class RaySampler(BaseSampler): 22 """Collects Policy Rollouts in a data parallel fashion. 23 24 Args: 25 algo (garage.np.algo.RLAlgorithm): A garage algo object 26 env (gym.Env): A gym/akro env object 27 seed (int): Random seed. 28 should_render (bool): If the sampler render the trajectories. 29 num_processors (int): Number of processors to be used. 30 sampler_worker_cls (garage.sampler.ray_sampler.SamplerWorker): 31 If none, uses the default SamplerWorker class 32 33 """ 34 35 def __init__(self, 36 algo, 37 env, 38 seed, 39 should_render=False, 40 num_processors=None, 41 sampler_worker_cls=None): 42 super().__init__(algo, env) 43 self._sampler_worker = ray.remote(SamplerWorker if sampler_worker_cls 44 is None else sampler_worker_cls) 45 self._seed = seed 46 deterministic.set_seed(self._seed) 47 self._max_path_length = self.algo.max_path_length 48 self._should_render = should_render 49 if not ray.is_initialized(): 50 ray.init(log_to_driver=False) 51 self._num_workers = (num_processors if num_processors else 52 psutil.cpu_count(logical=False)) 53 self._all_workers = defaultdict(None) 54 self._idle_worker_ids = list(range(self._num_workers)) 55 self._active_worker_ids = [] 56 57 def start_worker(self): 58 """Initialize a new ray worker.""" 59 # Pickle the environment once, instead of once per worker. 60 env_pkl = pickle.dumps(self.env) 61 # We need to pickle the agent so that we can e.g. 
set up the TF Session 62 # in the worker *before* unpicling it. 63 agent_pkl = pickle.dumps(self.algo.policy) 64 for worker_id in range(self._num_workers): 65 self._all_workers[worker_id] = self._sampler_worker.remote( 66 worker_id, env_pkl, agent_pkl, self._seed, 67 self._max_path_length, self._should_render) 68 69 # pylint: disable=arguments-differ 70 def obtain_samples(self, itr, num_samples): 71 """Sample the policy for new trajectories. 72 73 Args: 74 itr (int): Iteration number. 75 num_samples (int): Number of steps the the sampler should collect. 76 77 Returns: 78 list[dict]: Sample paths, each path with key 79 * observations: (numpy.ndarray) 80 * actions: (numpy.ndarray) 81 * rewards: (numpy.ndarray) 82 * agent_infos: (dict) 83 * env_infos: (dict) 84 85 """ 86 _active_workers = [] 87 self._active_worker_ids = [] 88 pbar = ProgBarCounter(num_samples) 89 completed_samples = 0 90 traj = [] 91 updating_workers = [] 92 93 # update the policy params of each worker before sampling 94 # for the current iteration 95 curr_policy_params = self.algo.policy.get_param_values() 96 params_id = ray.put(curr_policy_params) 97 while self._idle_worker_ids: 98 worker_id = self._idle_worker_ids.pop() 99 worker = self._all_workers[worker_id] 100 updating_workers.append(worker.set_agent.remote(params_id)) 101 102 while completed_samples < num_samples: 103 # if there are workers still being updated, check 104 # which ones are still updating and take the workers that 105 # are done updating, and start collecting trajectories on 106 # those workers. 107 if updating_workers: 108 updated, updating_workers = ray.wait(updating_workers, 109 num_returns=1, 110 timeout=0.1) 111 upd = [ray.get(up) for up in updated] 112 self._idle_worker_ids.extend(upd) 113 114 # if there are idle workers, use them to collect trajectories 115 # mark the newly busy workers as active 116 while self._idle_worker_ids: 117 idle_worker_id = self._idle_worker_ids.pop() 118 self._active_worker_ids.append(idle_worker_id) 119 worker = self._all_workers[idle_worker_id] 120 _active_workers.append(worker.rollout.remote()) 121 122 # check which workers are done/not done collecting a sample 123 # if any are done, send them to process the collected trajectory 124 # if they are not, keep checking if they are done 125 ready, not_ready = ray.wait(_active_workers, 126 num_returns=1, 127 timeout=0.001) 128 _active_workers = not_ready 129 for result in ready: 130 trajectory, num_returned_samples = self._process_trajectory( 131 result) 132 completed_samples += num_returned_samples 133 pbar.inc(num_returned_samples) 134 traj.append(trajectory) 135 pbar.stop() 136 return traj 137 138 def shutdown_worker(self): 139 """Shuts down the worker.""" 140 ray.shutdown() 141 142 def _process_trajectory(self, result): 143 """Collect trajectory from ray object store. 144 145 Converts that trajectory to garage friendly format. 146 147 Args: 148 result (obj): Ray object id of ready to be collected trajectory. 
149 150 Returns: 151 dict: One trajectory, with keys 152 * observations: (numpy.ndarray) 153 * actions: (numpy.ndarray) 154 * rewards: (numpy.ndarray) 155 * agent_infos: (dict) 156 * env_infos: (dict) 157 int: Number of returned samples in the trajectory 158 159 """ 160 trajectory = ray.get(result) 161 ready_worker_id = trajectory[0] 162 self._active_worker_ids.remove(ready_worker_id) 163 self._idle_worker_ids.append(ready_worker_id) 164 trajectory = dict(observations=np.asarray(trajectory[1]), 165 actions=np.asarray(trajectory[2]), 166 rewards=np.asarray(trajectory[3]), 167 agent_infos=trajectory[4], 168 env_infos=trajectory[5]) 169 num_returned_samples = len(trajectory['observations']) 170 return trajectory, num_returned_samples 171 172 173 class SamplerWorker: 174 """Constructs a single sampler worker. 175 176 The worker can have its parameters updated, and sampler its policy for 177 trajectories or rollouts. 178 179 Args: 180 worker_id (int): the id of the sampler_worker 181 env_pkl (bytes): A pickled gym or akro env object 182 agent_pkl (bytes): A pickled agent 183 seed (int): Random seed. 184 max_path_length (int): max trajectory length 185 should_render (bool): if true, renders trajectories after 186 sampling them 187 188 """ 189 190 def __init__(self, 191 worker_id, 192 env_pkl, 193 agent_pkl, 194 seed, 195 max_path_length, 196 should_render=False): 197 self.worker_id = worker_id 198 self._env = pickle.loads(env_pkl) 199 self.agent = pickle.loads(agent_pkl) 200 self._seed = seed + self.worker_id 201 deterministic.set_seed(self._seed) 202 self._max_path_length = max_path_length 203 self._should_render = should_render 204 self.agent_updates = 0 205 206 def set_agent(self, flattened_params): 207 """Set the agent params. 208 209 Args: 210 flattened_params (list[np.ndarray]): model parameters 211 212 Returns: 213 int: Worker id of this sampler worker. 214 215 """ 216 self.agent.set_param_values(flattened_params) 217 self.agent_updates += 1 218 return self.worker_id 219 220 def rollout(self): 221 """Sample a single rollout from the agent/policy. 222 223 The following value for the following keys will be a 2D array, 224 with the first dimension corresponding to the time dimension. 
225 226 - observations 227 - actions 228 - rewards 229 - next_observations 230 - terminals 231 The next two elements will be lists of dictionaries, with 232 the index into the list being the index into the time 233 - agent_infos 234 - env_infos 235 236 Returns: 237 int: ID of this work 238 numpy.ndarray: observations 239 numpy.ndarray: actions 240 numpy.ndarray: rewards 241 dict[list]: agent info 242 dict[list]: environment info 243 244 """ 245 observations = [] 246 actions = [] 247 rewards = [] 248 agent_infos = defaultdict(list) 249 env_infos = defaultdict(list) 250 o = self._env.reset() 251 self.agent.reset() 252 next_o = None 253 path_length = 0 254 while path_length < self._max_path_length: 255 a, agent_info = self.agent.get_action(o) 256 next_o, r, d, env_info = self._env.step(a) 257 observations.append(o) 258 rewards.append(r) 259 actions.append(a) 260 for k, v in agent_info.items(): 261 agent_infos[k].append(v) 262 for k, v in env_info.items(): 263 env_infos[k].append(v) 264 path_length += 1 265 if d: 266 break 267 o = next_o 268 for k, v in agent_infos.items(): 269 agent_infos[k] = np.asarray(v) 270 for k, v in env_infos.items(): 271 env_infos[k] = np.asarray(v) 272 return self.worker_id,\ 273 np.array(observations),\ 274 np.array(actions),\ 275 np.array(rewards),\ 276 dict(agent_infos),\ 277 dict(env_infos) 278 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/garage/sampler/ray_sampler.py b/src/garage/sampler/ray_sampler.py --- a/src/garage/sampler/ray_sampler.py +++ b/src/garage/sampler/ray_sampler.py @@ -89,7 +89,7 @@ completed_samples = 0 traj = [] updating_workers = [] - + samples_to_be_collected = 0 # update the policy params of each worker before sampling # for the current iteration curr_policy_params = self.algo.policy.get_param_values() @@ -98,7 +98,6 @@ worker_id = self._idle_worker_ids.pop() worker = self._all_workers[worker_id] updating_workers.append(worker.set_agent.remote(params_id)) - while completed_samples < num_samples: # if there are workers still being updated, check # which ones are still updating and take the workers that @@ -113,12 +112,20 @@ # if there are idle workers, use them to collect trajectories # mark the newly busy workers as active - while self._idle_worker_ids: + workers_to_use = int( + np.clip( + np.ceil( + (num_samples - completed_samples - + samples_to_be_collected) / self._max_path_length) - + len(self._active_worker_ids), 0, len(self._all_workers))) + workers_started = 0 + while self._idle_worker_ids and workers_started < workers_to_use: idle_worker_id = self._idle_worker_ids.pop() + workers_started += 1 self._active_worker_ids.append(idle_worker_id) + samples_to_be_collected += self._max_path_length worker = self._all_workers[idle_worker_id] _active_workers.append(worker.rollout.remote()) - # check which workers are done/not done collecting a sample # if any are done, send them to process the collected trajectory # if they are not, keep checking if they are done @@ -129,9 +136,12 @@ for result in ready: trajectory, num_returned_samples = self._process_trajectory( result) + samples_to_be_collected -= self._max_path_length completed_samples += num_returned_samples pbar.inc(num_returned_samples) traj.append(trajectory) + self._idle_worker_ids = list(range(self._num_workers)) + self._active_worker_ids = [] pbar.stop() return traj
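The diff above does two things: it launches only as many rollouts as are still needed to reach `num_samples`, and it returns every worker to the idle pool before the next iteration. A minimal, hypothetical sketch of that scheduling logic (illustrative names, not the actual garage attributes) looks like this:

```python
import math

# Hypothetical, simplified version of the two ideas in the patch above; the
# names below are illustrative, not the real RaySampler attributes.
def workers_to_start(num_samples, completed, to_be_collected,
                     num_active, num_workers, max_path_length):
    # Launch only as many rollouts as are still needed to reach num_samples.
    needed = num_samples - completed - to_be_collected
    want = math.ceil(needed / max_path_length) - num_active
    return int(min(max(want, 0), num_workers))

def reset_worker_pools(num_workers):
    # Return every worker to the idle pool before the next iteration, even
    # those whose in-flight rollouts were never processed this round.
    return list(range(num_workers)), []
```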
{"golden_diff": "diff --git a/src/garage/sampler/ray_sampler.py b/src/garage/sampler/ray_sampler.py\n--- a/src/garage/sampler/ray_sampler.py\n+++ b/src/garage/sampler/ray_sampler.py\n@@ -89,7 +89,7 @@\n completed_samples = 0\n traj = []\n updating_workers = []\n-\n+ samples_to_be_collected = 0\n # update the policy params of each worker before sampling\n # for the current iteration\n curr_policy_params = self.algo.policy.get_param_values()\n@@ -98,7 +98,6 @@\n worker_id = self._idle_worker_ids.pop()\n worker = self._all_workers[worker_id]\n updating_workers.append(worker.set_agent.remote(params_id))\n-\n while completed_samples < num_samples:\n # if there are workers still being updated, check\n # which ones are still updating and take the workers that\n@@ -113,12 +112,20 @@\n \n # if there are idle workers, use them to collect trajectories\n # mark the newly busy workers as active\n- while self._idle_worker_ids:\n+ workers_to_use = int(\n+ np.clip(\n+ np.ceil(\n+ (num_samples - completed_samples -\n+ samples_to_be_collected) / self._max_path_length) -\n+ len(self._active_worker_ids), 0, len(self._all_workers)))\n+ workers_started = 0\n+ while self._idle_worker_ids and workers_started < workers_to_use:\n idle_worker_id = self._idle_worker_ids.pop()\n+ workers_started += 1\n self._active_worker_ids.append(idle_worker_id)\n+ samples_to_be_collected += self._max_path_length\n worker = self._all_workers[idle_worker_id]\n _active_workers.append(worker.rollout.remote())\n-\n # check which workers are done/not done collecting a sample\n # if any are done, send them to process the collected trajectory\n # if they are not, keep checking if they are done\n@@ -129,9 +136,12 @@\n for result in ready:\n trajectory, num_returned_samples = self._process_trajectory(\n result)\n+ samples_to_be_collected -= self._max_path_length\n completed_samples += num_returned_samples\n pbar.inc(num_returned_samples)\n traj.append(trajectory)\n+ self._idle_worker_ids = list(range(self._num_workers))\n+ self._active_worker_ids = []\n pbar.stop()\n return traj\n", "issue": "RaySampler does not use all workers\nIn release 2019.10, [RaySampler](https://github.com/rlworkgroup/garage/blob/release-2019.10/src/garage/sampler/ray_sampler.py) sees only one of the workers as idle for all iterations after the first iteration. This can be seen with [examples/tf/trpo_swimmer_ray_sampler.py](https://github.com/rlworkgroup/garage/blob/release-2019.10/examples/tf/trpo_swimmer_ray_sampler.py). \r\n\r\nTo me it looks like `self._idle_worker_ids` is not correctly updated at the end of `obtain_samples()` [here](https://github.com/rlworkgroup/garage/blob/d6c993526c23c289ebc94635a78186d9bf197f32/src/garage/sampler/ray_sampler.py#L130). Only those workers which have returned with a result are put back in the idle pool. This is a problem because results from some workers are not processed at the end and these workers don't go back into the idle pool. \r\n\r\nA quick fix (?) is to reset the list of idle workers ids to all workers ids at the start of `obtain samples()`. This does result in all the workers being used for all iterations. Since the agent parameters are updated in the next iteration, the samples from the previously not ready workers must be valid. However, I am not totally sure. 
\r\n\r\n@avnishn @ryanjulian Appreciate it if the fix can be back-ported to 2019.10!\n", "before_files": [{"content": "\"\"\"This is an implementation of an on policy batch sampler.\n\nUses a data parallel design.\nIncluded is a sampler that deploys sampler workers.\n\nThe sampler workers must implement some type of set agent parameters\nfunction, and a rollout function\n\"\"\"\nfrom collections import defaultdict\nimport pickle\n\nimport numpy as np\nimport psutil\nimport ray\n\nfrom garage.experiment import deterministic\nfrom garage.misc.prog_bar_counter import ProgBarCounter\nfrom garage.sampler.base import BaseSampler\n\n\nclass RaySampler(BaseSampler):\n \"\"\"Collects Policy Rollouts in a data parallel fashion.\n\n Args:\n algo (garage.np.algo.RLAlgorithm): A garage algo object\n env (gym.Env): A gym/akro env object\n seed (int): Random seed.\n should_render (bool): If the sampler render the trajectories.\n num_processors (int): Number of processors to be used.\n sampler_worker_cls (garage.sampler.ray_sampler.SamplerWorker):\n If none, uses the default SamplerWorker class\n\n \"\"\"\n\n def __init__(self,\n algo,\n env,\n seed,\n should_render=False,\n num_processors=None,\n sampler_worker_cls=None):\n super().__init__(algo, env)\n self._sampler_worker = ray.remote(SamplerWorker if sampler_worker_cls\n is None else sampler_worker_cls)\n self._seed = seed\n deterministic.set_seed(self._seed)\n self._max_path_length = self.algo.max_path_length\n self._should_render = should_render\n if not ray.is_initialized():\n ray.init(log_to_driver=False)\n self._num_workers = (num_processors if num_processors else\n psutil.cpu_count(logical=False))\n self._all_workers = defaultdict(None)\n self._idle_worker_ids = list(range(self._num_workers))\n self._active_worker_ids = []\n\n def start_worker(self):\n \"\"\"Initialize a new ray worker.\"\"\"\n # Pickle the environment once, instead of once per worker.\n env_pkl = pickle.dumps(self.env)\n # We need to pickle the agent so that we can e.g. 
set up the TF Session\n # in the worker *before* unpicling it.\n agent_pkl = pickle.dumps(self.algo.policy)\n for worker_id in range(self._num_workers):\n self._all_workers[worker_id] = self._sampler_worker.remote(\n worker_id, env_pkl, agent_pkl, self._seed,\n self._max_path_length, self._should_render)\n\n # pylint: disable=arguments-differ\n def obtain_samples(self, itr, num_samples):\n \"\"\"Sample the policy for new trajectories.\n\n Args:\n itr (int): Iteration number.\n num_samples (int): Number of steps the the sampler should collect.\n\n Returns:\n list[dict]: Sample paths, each path with key\n * observations: (numpy.ndarray)\n * actions: (numpy.ndarray)\n * rewards: (numpy.ndarray)\n * agent_infos: (dict)\n * env_infos: (dict)\n\n \"\"\"\n _active_workers = []\n self._active_worker_ids = []\n pbar = ProgBarCounter(num_samples)\n completed_samples = 0\n traj = []\n updating_workers = []\n\n # update the policy params of each worker before sampling\n # for the current iteration\n curr_policy_params = self.algo.policy.get_param_values()\n params_id = ray.put(curr_policy_params)\n while self._idle_worker_ids:\n worker_id = self._idle_worker_ids.pop()\n worker = self._all_workers[worker_id]\n updating_workers.append(worker.set_agent.remote(params_id))\n\n while completed_samples < num_samples:\n # if there are workers still being updated, check\n # which ones are still updating and take the workers that\n # are done updating, and start collecting trajectories on\n # those workers.\n if updating_workers:\n updated, updating_workers = ray.wait(updating_workers,\n num_returns=1,\n timeout=0.1)\n upd = [ray.get(up) for up in updated]\n self._idle_worker_ids.extend(upd)\n\n # if there are idle workers, use them to collect trajectories\n # mark the newly busy workers as active\n while self._idle_worker_ids:\n idle_worker_id = self._idle_worker_ids.pop()\n self._active_worker_ids.append(idle_worker_id)\n worker = self._all_workers[idle_worker_id]\n _active_workers.append(worker.rollout.remote())\n\n # check which workers are done/not done collecting a sample\n # if any are done, send them to process the collected trajectory\n # if they are not, keep checking if they are done\n ready, not_ready = ray.wait(_active_workers,\n num_returns=1,\n timeout=0.001)\n _active_workers = not_ready\n for result in ready:\n trajectory, num_returned_samples = self._process_trajectory(\n result)\n completed_samples += num_returned_samples\n pbar.inc(num_returned_samples)\n traj.append(trajectory)\n pbar.stop()\n return traj\n\n def shutdown_worker(self):\n \"\"\"Shuts down the worker.\"\"\"\n ray.shutdown()\n\n def _process_trajectory(self, result):\n \"\"\"Collect trajectory from ray object store.\n\n Converts that trajectory to garage friendly format.\n\n Args:\n result (obj): Ray object id of ready to be collected trajectory.\n\n Returns:\n dict: One trajectory, with keys\n * observations: (numpy.ndarray)\n * actions: (numpy.ndarray)\n * rewards: (numpy.ndarray)\n * agent_infos: (dict)\n * env_infos: (dict)\n int: Number of returned samples in the trajectory\n\n \"\"\"\n trajectory = ray.get(result)\n ready_worker_id = trajectory[0]\n self._active_worker_ids.remove(ready_worker_id)\n self._idle_worker_ids.append(ready_worker_id)\n trajectory = dict(observations=np.asarray(trajectory[1]),\n actions=np.asarray(trajectory[2]),\n rewards=np.asarray(trajectory[3]),\n agent_infos=trajectory[4],\n env_infos=trajectory[5])\n num_returned_samples = len(trajectory['observations'])\n return trajectory, 
num_returned_samples\n\n\nclass SamplerWorker:\n \"\"\"Constructs a single sampler worker.\n\n The worker can have its parameters updated, and sampler its policy for\n trajectories or rollouts.\n\n Args:\n worker_id (int): the id of the sampler_worker\n env_pkl (bytes): A pickled gym or akro env object\n agent_pkl (bytes): A pickled agent\n seed (int): Random seed.\n max_path_length (int): max trajectory length\n should_render (bool): if true, renders trajectories after\n sampling them\n\n \"\"\"\n\n def __init__(self,\n worker_id,\n env_pkl,\n agent_pkl,\n seed,\n max_path_length,\n should_render=False):\n self.worker_id = worker_id\n self._env = pickle.loads(env_pkl)\n self.agent = pickle.loads(agent_pkl)\n self._seed = seed + self.worker_id\n deterministic.set_seed(self._seed)\n self._max_path_length = max_path_length\n self._should_render = should_render\n self.agent_updates = 0\n\n def set_agent(self, flattened_params):\n \"\"\"Set the agent params.\n\n Args:\n flattened_params (list[np.ndarray]): model parameters\n\n Returns:\n int: Worker id of this sampler worker.\n\n \"\"\"\n self.agent.set_param_values(flattened_params)\n self.agent_updates += 1\n return self.worker_id\n\n def rollout(self):\n \"\"\"Sample a single rollout from the agent/policy.\n\n The following value for the following keys will be a 2D array,\n with the first dimension corresponding to the time dimension.\n\n - observations\n - actions\n - rewards\n - next_observations\n - terminals\n The next two elements will be lists of dictionaries, with\n the index into the list being the index into the time\n - agent_infos\n - env_infos\n\n Returns:\n int: ID of this work\n numpy.ndarray: observations\n numpy.ndarray: actions\n numpy.ndarray: rewards\n dict[list]: agent info\n dict[list]: environment info\n\n \"\"\"\n observations = []\n actions = []\n rewards = []\n agent_infos = defaultdict(list)\n env_infos = defaultdict(list)\n o = self._env.reset()\n self.agent.reset()\n next_o = None\n path_length = 0\n while path_length < self._max_path_length:\n a, agent_info = self.agent.get_action(o)\n next_o, r, d, env_info = self._env.step(a)\n observations.append(o)\n rewards.append(r)\n actions.append(a)\n for k, v in agent_info.items():\n agent_infos[k].append(v)\n for k, v in env_info.items():\n env_infos[k].append(v)\n path_length += 1\n if d:\n break\n o = next_o\n for k, v in agent_infos.items():\n agent_infos[k] = np.asarray(v)\n for k, v in env_infos.items():\n env_infos[k] = np.asarray(v)\n return self.worker_id,\\\n np.array(observations),\\\n np.array(actions),\\\n np.array(rewards),\\\n dict(agent_infos),\\\n dict(env_infos)\n", "path": "src/garage/sampler/ray_sampler.py"}], "after_files": [{"content": "\"\"\"This is an implementation of an on policy batch sampler.\n\nUses a data parallel design.\nIncluded is a sampler that deploys sampler workers.\n\nThe sampler workers must implement some type of set agent parameters\nfunction, and a rollout function\n\"\"\"\nfrom collections import defaultdict\nimport pickle\n\nimport numpy as np\nimport psutil\nimport ray\n\nfrom garage.experiment import deterministic\nfrom garage.misc.prog_bar_counter import ProgBarCounter\nfrom garage.sampler.base import BaseSampler\n\n\nclass RaySampler(BaseSampler):\n \"\"\"Collects Policy Rollouts in a data parallel fashion.\n\n Args:\n algo (garage.np.algo.RLAlgorithm): A garage algo object\n env (gym.Env): A gym/akro env object\n seed (int): Random seed.\n should_render (bool): If the sampler render the trajectories.\n num_processors 
(int): Number of processors to be used.\n sampler_worker_cls (garage.sampler.ray_sampler.SamplerWorker):\n If none, uses the default SamplerWorker class\n\n \"\"\"\n\n def __init__(self,\n algo,\n env,\n seed,\n should_render=False,\n num_processors=None,\n sampler_worker_cls=None):\n super().__init__(algo, env)\n self._sampler_worker = ray.remote(SamplerWorker if sampler_worker_cls\n is None else sampler_worker_cls)\n self._seed = seed\n deterministic.set_seed(self._seed)\n self._max_path_length = self.algo.max_path_length\n self._should_render = should_render\n if not ray.is_initialized():\n ray.init(log_to_driver=False)\n self._num_workers = (num_processors if num_processors else\n psutil.cpu_count(logical=False))\n self._all_workers = defaultdict(None)\n self._idle_worker_ids = list(range(self._num_workers))\n self._active_worker_ids = []\n\n def start_worker(self):\n \"\"\"Initialize a new ray worker.\"\"\"\n # Pickle the environment once, instead of once per worker.\n env_pkl = pickle.dumps(self.env)\n # We need to pickle the agent so that we can e.g. set up the TF Session\n # in the worker *before* unpicling it.\n agent_pkl = pickle.dumps(self.algo.policy)\n for worker_id in range(self._num_workers):\n self._all_workers[worker_id] = self._sampler_worker.remote(\n worker_id, env_pkl, agent_pkl, self._seed,\n self._max_path_length, self._should_render)\n\n # pylint: disable=arguments-differ\n def obtain_samples(self, itr, num_samples):\n \"\"\"Sample the policy for new trajectories.\n\n Args:\n itr (int): Iteration number.\n num_samples (int): Number of steps the the sampler should collect.\n\n Returns:\n list[dict]: Sample paths, each path with key\n * observations: (numpy.ndarray)\n * actions: (numpy.ndarray)\n * rewards: (numpy.ndarray)\n * agent_infos: (dict)\n * env_infos: (dict)\n\n \"\"\"\n _active_workers = []\n self._active_worker_ids = []\n pbar = ProgBarCounter(num_samples)\n completed_samples = 0\n traj = []\n updating_workers = []\n samples_to_be_collected = 0\n # update the policy params of each worker before sampling\n # for the current iteration\n curr_policy_params = self.algo.policy.get_param_values()\n params_id = ray.put(curr_policy_params)\n while self._idle_worker_ids:\n worker_id = self._idle_worker_ids.pop()\n worker = self._all_workers[worker_id]\n updating_workers.append(worker.set_agent.remote(params_id))\n while completed_samples < num_samples:\n # if there are workers still being updated, check\n # which ones are still updating and take the workers that\n # are done updating, and start collecting trajectories on\n # those workers.\n if updating_workers:\n updated, updating_workers = ray.wait(updating_workers,\n num_returns=1,\n timeout=0.1)\n upd = [ray.get(up) for up in updated]\n self._idle_worker_ids.extend(upd)\n\n # if there are idle workers, use them to collect trajectories\n # mark the newly busy workers as active\n workers_to_use = int(\n np.clip(\n np.ceil(\n (num_samples - completed_samples -\n samples_to_be_collected) / self._max_path_length) -\n len(self._active_worker_ids), 0, len(self._all_workers)))\n workers_started = 0\n while self._idle_worker_ids and workers_started < workers_to_use:\n idle_worker_id = self._idle_worker_ids.pop()\n workers_started += 1\n self._active_worker_ids.append(idle_worker_id)\n samples_to_be_collected += self._max_path_length\n worker = self._all_workers[idle_worker_id]\n _active_workers.append(worker.rollout.remote())\n # check which workers are done/not done collecting a sample\n # if any are done, send them to 
process the collected trajectory\n # if they are not, keep checking if they are done\n ready, not_ready = ray.wait(_active_workers,\n num_returns=1,\n timeout=0.001)\n _active_workers = not_ready\n for result in ready:\n trajectory, num_returned_samples = self._process_trajectory(\n result)\n samples_to_be_collected -= self._max_path_length\n completed_samples += num_returned_samples\n pbar.inc(num_returned_samples)\n traj.append(trajectory)\n self._idle_worker_ids = list(range(self._num_workers))\n self._active_worker_ids = []\n pbar.stop()\n return traj\n\n def shutdown_worker(self):\n \"\"\"Shuts down the worker.\"\"\"\n ray.shutdown()\n\n def _process_trajectory(self, result):\n \"\"\"Collect trajectory from ray object store.\n\n Converts that trajectory to garage friendly format.\n\n Args:\n result (obj): Ray object id of ready to be collected trajectory.\n\n Returns:\n dict: One trajectory, with keys\n * observations: (numpy.ndarray)\n * actions: (numpy.ndarray)\n * rewards: (numpy.ndarray)\n * agent_infos: (dict)\n * env_infos: (dict)\n int: Number of returned samples in the trajectory\n\n \"\"\"\n trajectory = ray.get(result)\n ready_worker_id = trajectory[0]\n self._active_worker_ids.remove(ready_worker_id)\n self._idle_worker_ids.append(ready_worker_id)\n trajectory = dict(observations=np.asarray(trajectory[1]),\n actions=np.asarray(trajectory[2]),\n rewards=np.asarray(trajectory[3]),\n agent_infos=trajectory[4],\n env_infos=trajectory[5])\n num_returned_samples = len(trajectory['observations'])\n return trajectory, num_returned_samples\n\n\nclass SamplerWorker:\n \"\"\"Constructs a single sampler worker.\n\n The worker can have its parameters updated, and sampler its policy for\n trajectories or rollouts.\n\n Args:\n worker_id (int): the id of the sampler_worker\n env_pkl (bytes): A pickled gym or akro env object\n agent_pkl (bytes): A pickled agent\n seed (int): Random seed.\n max_path_length (int): max trajectory length\n should_render (bool): if true, renders trajectories after\n sampling them\n\n \"\"\"\n\n def __init__(self,\n worker_id,\n env_pkl,\n agent_pkl,\n seed,\n max_path_length,\n should_render=False):\n self.worker_id = worker_id\n self._env = pickle.loads(env_pkl)\n self.agent = pickle.loads(agent_pkl)\n self._seed = seed + self.worker_id\n deterministic.set_seed(self._seed)\n self._max_path_length = max_path_length\n self._should_render = should_render\n self.agent_updates = 0\n\n def set_agent(self, flattened_params):\n \"\"\"Set the agent params.\n\n Args:\n flattened_params (list[np.ndarray]): model parameters\n\n Returns:\n int: Worker id of this sampler worker.\n\n \"\"\"\n self.agent.set_param_values(flattened_params)\n self.agent_updates += 1\n return self.worker_id\n\n def rollout(self):\n \"\"\"Sample a single rollout from the agent/policy.\n\n The following value for the following keys will be a 2D array,\n with the first dimension corresponding to the time dimension.\n\n - observations\n - actions\n - rewards\n - next_observations\n - terminals\n The next two elements will be lists of dictionaries, with\n the index into the list being the index into the time\n - agent_infos\n - env_infos\n\n Returns:\n int: ID of this work\n numpy.ndarray: observations\n numpy.ndarray: actions\n numpy.ndarray: rewards\n dict[list]: agent info\n dict[list]: environment info\n\n \"\"\"\n observations = []\n actions = []\n rewards = []\n agent_infos = defaultdict(list)\n env_infos = defaultdict(list)\n o = self._env.reset()\n self.agent.reset()\n next_o = None\n 
path_length = 0\n while path_length < self._max_path_length:\n a, agent_info = self.agent.get_action(o)\n next_o, r, d, env_info = self._env.step(a)\n observations.append(o)\n rewards.append(r)\n actions.append(a)\n for k, v in agent_info.items():\n agent_infos[k].append(v)\n for k, v in env_info.items():\n env_infos[k].append(v)\n path_length += 1\n if d:\n break\n o = next_o\n for k, v in agent_infos.items():\n agent_infos[k] = np.asarray(v)\n for k, v in env_infos.items():\n env_infos[k] = np.asarray(v)\n return self.worker_id,\\\n np.array(observations),\\\n np.array(actions),\\\n np.array(rewards),\\\n dict(agent_infos),\\\n dict(env_infos)\n", "path": "src/garage/sampler/ray_sampler.py"}]}
3,391
557
gh_patches_debug_9800
rasdani/github-patches
git_diff
hpcaitech__ColossalAI-3898
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [DOC]: A bug about usage of booster api in the document ### 📚 The doc issue [zero_with_chunk](https://colossalai.org/docs/features/zero_with_chunk) and [booster_api](https://colossalai.org/docs/basics/booster_api) both used torch.optim.Adam as the type of optimizer in the code examples implemented with booster api. However, this will trigger an AssertionError when passing this kind of optimizer to booster.boost: ![20230605-161741](https://github.com/hpcaitech/ColossalAI/assets/56809903/789fda8d-40eb-489c-9026-56d3d872e1c1) [tensor] fix some unittests [tensor] fix some unittests --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `colossalai/booster/booster.py` Content: ``` 1 import warnings 2 from contextlib import contextmanager 3 from typing import Callable, Iterator, List, Optional, Tuple, Union 4 5 import torch 6 import torch.nn as nn 7 from torch.optim import Optimizer 8 from torch.optim.lr_scheduler import _LRScheduler as LRScheduler 9 from torch.utils.data import DataLoader 10 11 from colossalai.checkpoint_io import GeneralCheckpointIO 12 13 from .accelerator import Accelerator 14 from .mixed_precision import MixedPrecision, mixed_precision_factory 15 from .plugin import Plugin 16 17 __all__ = ['Booster'] 18 19 20 class Booster: 21 """ 22 Booster is a high-level API for training neural networks. It provides a unified interface for 23 training with different precision, accelerator, and plugin. 24 25 Examples: 26 ```python 27 colossalai.launch(...) 28 plugin = GeminiPlugin(stage=3, ...) 29 booster = Booster(precision='fp16', plugin=plugin) 30 31 model = GPT2() 32 optimizer = Adam(model.parameters()) 33 dataloader = Dataloader(Dataset) 34 lr_scheduler = LinearWarmupScheduler() 35 criterion = GPTLMLoss() 36 37 model, optimizer, lr_scheduler, dataloader = booster.boost(model, optimizer, lr_scheduler, dataloader) 38 39 for epoch in range(max_epochs): 40 for input_ids, attention_mask in dataloader: 41 outputs = model(input_ids, attention_mask) 42 loss = criterion(outputs.logits, input_ids) 43 booster.backward(loss, optimizer) 44 optimizer.step() 45 lr_scheduler.step() 46 optimizer.zero_grad() 47 ``` 48 49 Args: 50 device (str or torch.device): The device to run the training. Default: 'cuda'. 51 mixed_precision (str or MixedPrecision): The mixed precision to run the training. Default: None. 52 If the argument is a string, it can be 'fp16', 'fp16_apex', 'bf16', or 'fp8'. 53 'fp16' would use PyTorch AMP while `fp16_apex` would use Nvidia Apex. 54 plugin (Plugin): The plugin to run the training. Default: None. 55 """ 56 57 def __init__(self, 58 device: str = 'cuda', 59 mixed_precision: Union[MixedPrecision, str] = None, 60 plugin: Optional[Plugin] = None) -> None: 61 if plugin is not None: 62 assert isinstance( 63 plugin, Plugin), f'Expected the argument plugin to be an instance of Plugin, but got {type(plugin)}.' 
64 self.plugin = plugin 65 66 # set accelerator 67 if self.plugin and self.plugin.control_device(): 68 self.accelerator = None 69 warnings.warn('The plugin will control the accelerator, so the device argument will be ignored.') 70 else: 71 self.accelerator = Accelerator(device) 72 73 # set precision 74 if self.plugin and self.plugin.control_precision(): 75 warnings.warn('The plugin will control the precision, so the mixed_precision argument will be ignored.') 76 self.mixed_precision = None 77 elif mixed_precision is None: 78 self.mixed_precision = None 79 else: 80 # validate and set precision 81 if isinstance(mixed_precision, str): 82 # the user will take the default arguments for amp training 83 self.mixed_precision = mixed_precision_factory(mixed_precision) 84 elif isinstance(mixed_precision, MixedPrecision): 85 # the user can customize the arguments by passing the precision object 86 self.mixed_precision = mixed_precision 87 else: 88 raise ValueError( 89 f'Expected the argument mixed_precision to be a string or an instance of Precision, but got {type(mixed_precision)}.' 90 ) 91 92 if self.plugin is not None and self.plugin.control_checkpoint_io(): 93 self.checkpoint_io = self.plugin.get_checkpoint_io() 94 else: 95 self.checkpoint_io = GeneralCheckpointIO() 96 97 def boost( 98 self, 99 model: nn.Module, 100 optimizer: Optimizer, 101 criterion: Callable = None, 102 dataloader: DataLoader = None, 103 lr_scheduler: LRScheduler = None, 104 ) -> List[Union[nn.Module, Optimizer, LRScheduler, DataLoader]]: 105 """ 106 Boost the model, optimizer, criterion, lr_scheduler, and dataloader. 107 108 Args: 109 model (nn.Module): The model to be boosted. 110 optimizer (Optimizer): The optimizer to be boosted. 111 criterion (Callable): The criterion to be boosted. 112 dataloader (DataLoader): The dataloader to be boosted. 113 lr_scheduler (LRScheduler): The lr_scheduler to be boosted. 114 """ 115 # TODO(FrankLeeeee): consider multi-model and multi-optimizer case 116 # TODO(FrankLeeeee): consider multi-dataloader case 117 # transform model for mixed precision 118 if self.plugin: 119 model, optimizer, criterion, dataloader, lr_scheduler = self.plugin.configure( 120 model, optimizer, criterion, dataloader, lr_scheduler) 121 122 if self.plugin and not self.plugin.control_device(): 123 # transform model for accelerator 124 model = self.accelerator.configure(model) 125 126 if self.mixed_precision and (self.plugin is None or self.plugin and not self.plugin.control_precision()): 127 # transform model for mixed precision 128 # when mixed_precision is specified and the plugin is not given or does not control the precision 129 model, optimizer, criterion = self.mixed_precision.configure(model, optimizer, criterion) 130 131 return model, optimizer, criterion, dataloader, lr_scheduler 132 133 def backward(self, loss: torch.Tensor, optimizer: Optimizer) -> None: 134 """Backward pass. 135 136 Args: 137 loss (torch.Tensor): The loss to be backpropagated. 138 optimizer (Optimizer): The optimizer to be updated. 
139 """ 140 # TODO: implement this method with plugin 141 optimizer.backward(loss) 142 143 def execute_pipeline(self, 144 data_iter: Iterator, 145 model: nn.Module, 146 criterion: Callable[[torch.Tensor], torch.Tensor], 147 optimizer: Optimizer, 148 return_loss: bool = True, 149 return_outputs: bool = False) -> Tuple[Optional[torch.Tensor], ...]: 150 # TODO: implement this method 151 # run pipeline forward backward pass 152 # return loss or outputs if needed 153 pass 154 155 def no_sync(self, model: nn.Module) -> contextmanager: 156 """Context manager to disable gradient synchronization across DP process groups. 157 158 Args: 159 model (nn.Module): The model to be disabled gradient synchronization. 160 161 Returns: 162 contextmanager: Context to disable gradient synchronization. 163 """ 164 assert self.plugin is not None, f'no_sync is only enabled when a plugin is provided and the plugin supports no_sync.' 165 assert self.plugin.support_no_sync, f'The plugin {self.plugin.__class__.__name__} does not support no_sync.' 166 return self.plugin.no_sync(model) 167 168 def load_model(self, model: nn.Module, checkpoint: str, strict: bool = True): 169 """Load model from checkpoint. 170 171 Args: 172 model (nn.Module): A model boosted by Booster. 173 checkpoint (str): Path to the checkpoint. It must be a local path. 174 It should be a directory path if the checkpoint is sharded. Otherwise, it should be a file path. 175 strict (bool, optional): whether to strictly enforce that the keys 176 in :attr:`state_dict` match the keys returned by this module's 177 :meth:`~torch.nn.Module.state_dict` function. Defaults to True. 178 """ 179 self.checkpoint_io.load_model(model, checkpoint, strict) 180 181 def save_model(self, 182 model: nn.Module, 183 checkpoint: str, 184 prefix: str = None, 185 shard: bool = False, 186 size_per_shard: int = 1024): 187 """Save model to checkpoint. 188 189 Args: 190 model (nn.Module): A model boosted by Booster. 191 checkpoint (str): Path to the checkpoint. It must be a local path. 192 It is a file path if ``shard=False``. Otherwise, it is a directory path. 193 prefix (str, optional): A prefix added to parameter and buffer 194 names to compose the keys in state_dict. Defaults to None. 195 shard (bool, optional): Whether to save checkpoint a sharded way. 196 If true, the checkpoint will be a folder. Otherwise, it will be a single file. Defaults to False. 197 size_per_shard (int, optional): Maximum size of checkpoint shard file in MB. This is useful only when ``shard=True``. Defaults to 1024. 198 """ 199 self.checkpoint_io.save_model(model, checkpoint=checkpoint, shard=shard, size_per_shard=size_per_shard) 200 201 def load_optimizer(self, optimizer: Optimizer, checkpoint: str): 202 """Load optimizer from checkpoint. 203 204 Args: 205 optimizer (Optimizer): An optimizer boosted by Booster. 206 checkpoint (str): Path to the checkpoint. It must be a local path. 207 It should be a directory path if the checkpoint is sharded. Otherwise, it should be a file path. 208 """ 209 self.checkpoint_io.load_optimizer(optimizer, checkpoint) 210 211 def save_optimizer(self, optimizer: Optimizer, checkpoint: str, shard: bool = False, size_per_shard: int = 1024): 212 """Save optimizer to checkpoint. 213 Warning: Saving sharded optimizer checkpoint is not supported yet. 214 215 Args: 216 optimizer (Optimizer): An optimizer boosted by Booster. 217 checkpoint (str): Path to the checkpoint. It must be a local path. 218 It is a file path if ``shard=False``. Otherwise, it is a directory path. 
219 shard (bool, optional): Whether to save checkpoint a sharded way. 220 If true, the checkpoint will be a folder. Otherwise, it will be a single file. Defaults to False. 221 size_per_shard (int, optional): Maximum size of checkpoint shard file in MB. This is useful only when ``shard=True``. Defaults to 1024. 222 """ 223 self.checkpoint_io.save_optimizer(optimizer, checkpoint, shard, size_per_shard) 224 225 def save_lr_scheduler(self, lr_scheduler: LRScheduler, checkpoint: str): 226 """Save lr scheduler to checkpoint. 227 228 Args: 229 lr_scheduler (LRScheduler): A lr scheduler boosted by Booster. 230 checkpoint (str): Path to the checkpoint. It must be a local file path. 231 """ 232 self.checkpoint_io.save_lr_scheduler(lr_scheduler, checkpoint) 233 234 def load_lr_scheduler(self, lr_scheduler: LRScheduler, checkpoint: str): 235 """Load lr scheduler from checkpoint. 236 237 Args: 238 lr_scheduler (LRScheduler): A lr scheduler boosted by Booster. 239 checkpoint (str): Path to the checkpoint. It must be a local file path. 240 """ 241 self.checkpoint_io.load_lr_scheduler(lr_scheduler, checkpoint) 242 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/colossalai/booster/booster.py b/colossalai/booster/booster.py --- a/colossalai/booster/booster.py +++ b/colossalai/booster/booster.py @@ -25,11 +25,11 @@ Examples: ```python colossalai.launch(...) - plugin = GeminiPlugin(stage=3, ...) + plugin = GeminiPlugin(...) booster = Booster(precision='fp16', plugin=plugin) model = GPT2() - optimizer = Adam(model.parameters()) + optimizer = HybridAdam(model.parameters()) dataloader = Dataloader(Dataset) lr_scheduler = LinearWarmupScheduler() criterion = GPTLMLoss()
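The corrected example pairs `GeminiPlugin` with `HybridAdam` rather than `torch.optim.Adam`, which is what triggers the assertion reported in the issue. A minimal sketch of that pairing is given below; it is only an illustration, assuming a ColossalAI 0.3-era API (the launch signature and plugin defaults vary across releases), a CUDA device, and a launch via `torchrun`. The tiny model, dataset and loss are placeholders.

```python
import colossalai
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset
from colossalai.booster import Booster
from colossalai.booster.plugin import GeminiPlugin
from colossalai.nn.optimizer import HybridAdam

# Gemini expects a ColossalAI optimizer such as HybridAdam, not torch.optim.Adam.
colossalai.launch_from_torch(config={})

model = nn.Linear(8, 2)
optimizer = HybridAdam(model.parameters(), lr=1e-3)
criterion = nn.CrossEntropyLoss()
dataset = TensorDataset(torch.randn(32, 8), torch.randint(0, 2, (32,)))
dataloader = DataLoader(dataset, batch_size=4)

booster = Booster(plugin=GeminiPlugin())
model, optimizer, criterion, dataloader, _ = booster.boost(
    model, optimizer, criterion, dataloader)

for inputs, labels in dataloader:
    inputs = inputs.cuda().half()  # GeminiPlugin defaults to fp16 parameters
    labels = labels.cuda()
    loss = criterion(model(inputs), labels)
    booster.backward(loss, optimizer)
    optimizer.step()
    optimizer.zero_grad()
```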
{"golden_diff": "diff --git a/colossalai/booster/booster.py b/colossalai/booster/booster.py\n--- a/colossalai/booster/booster.py\n+++ b/colossalai/booster/booster.py\n@@ -25,11 +25,11 @@\n Examples:\n ```python\n colossalai.launch(...)\n- plugin = GeminiPlugin(stage=3, ...)\n+ plugin = GeminiPlugin(...)\n booster = Booster(precision='fp16', plugin=plugin)\n \n model = GPT2()\n- optimizer = Adam(model.parameters())\n+ optimizer = HybridAdam(model.parameters())\n dataloader = Dataloader(Dataset)\n lr_scheduler = LinearWarmupScheduler()\n criterion = GPTLMLoss()\n", "issue": "[DOC]: A bug about usage of booster api in the document\n### \ud83d\udcda The doc issue\n\n[zero_with_chunk](https://colossalai.org/docs/features/zero_with_chunk) and [booster_api](https://colossalai.org/docs/basics/booster_api) both used torch.optim.Adam as the type of optimizer in the code examples implemented with booster api. However, this will trigger an AssertionError when passing this kind of optimizer to booster.boost:\r\n\r\n![20230605-161741](https://github.com/hpcaitech/ColossalAI/assets/56809903/789fda8d-40eb-489c-9026-56d3d872e1c1)\r\n\n[tensor] fix some unittests\n\n[tensor] fix some unittests\n\n", "before_files": [{"content": "import warnings\nfrom contextlib import contextmanager\nfrom typing import Callable, Iterator, List, Optional, Tuple, Union\n\nimport torch\nimport torch.nn as nn\nfrom torch.optim import Optimizer\nfrom torch.optim.lr_scheduler import _LRScheduler as LRScheduler\nfrom torch.utils.data import DataLoader\n\nfrom colossalai.checkpoint_io import GeneralCheckpointIO\n\nfrom .accelerator import Accelerator\nfrom .mixed_precision import MixedPrecision, mixed_precision_factory\nfrom .plugin import Plugin\n\n__all__ = ['Booster']\n\n\nclass Booster:\n \"\"\"\n Booster is a high-level API for training neural networks. It provides a unified interface for\n training with different precision, accelerator, and plugin.\n\n Examples:\n ```python\n colossalai.launch(...)\n plugin = GeminiPlugin(stage=3, ...)\n booster = Booster(precision='fp16', plugin=plugin)\n\n model = GPT2()\n optimizer = Adam(model.parameters())\n dataloader = Dataloader(Dataset)\n lr_scheduler = LinearWarmupScheduler()\n criterion = GPTLMLoss()\n\n model, optimizer, lr_scheduler, dataloader = booster.boost(model, optimizer, lr_scheduler, dataloader)\n\n for epoch in range(max_epochs):\n for input_ids, attention_mask in dataloader:\n outputs = model(input_ids, attention_mask)\n loss = criterion(outputs.logits, input_ids)\n booster.backward(loss, optimizer)\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad()\n ```\n\n Args:\n device (str or torch.device): The device to run the training. Default: 'cuda'.\n mixed_precision (str or MixedPrecision): The mixed precision to run the training. Default: None.\n If the argument is a string, it can be 'fp16', 'fp16_apex', 'bf16', or 'fp8'.\n 'fp16' would use PyTorch AMP while `fp16_apex` would use Nvidia Apex.\n plugin (Plugin): The plugin to run the training. 
Default: None.\n \"\"\"\n\n def __init__(self,\n device: str = 'cuda',\n mixed_precision: Union[MixedPrecision, str] = None,\n plugin: Optional[Plugin] = None) -> None:\n if plugin is not None:\n assert isinstance(\n plugin, Plugin), f'Expected the argument plugin to be an instance of Plugin, but got {type(plugin)}.'\n self.plugin = plugin\n\n # set accelerator\n if self.plugin and self.plugin.control_device():\n self.accelerator = None\n warnings.warn('The plugin will control the accelerator, so the device argument will be ignored.')\n else:\n self.accelerator = Accelerator(device)\n\n # set precision\n if self.plugin and self.plugin.control_precision():\n warnings.warn('The plugin will control the precision, so the mixed_precision argument will be ignored.')\n self.mixed_precision = None\n elif mixed_precision is None:\n self.mixed_precision = None\n else:\n # validate and set precision\n if isinstance(mixed_precision, str):\n # the user will take the default arguments for amp training\n self.mixed_precision = mixed_precision_factory(mixed_precision)\n elif isinstance(mixed_precision, MixedPrecision):\n # the user can customize the arguments by passing the precision object\n self.mixed_precision = mixed_precision\n else:\n raise ValueError(\n f'Expected the argument mixed_precision to be a string or an instance of Precision, but got {type(mixed_precision)}.'\n )\n\n if self.plugin is not None and self.plugin.control_checkpoint_io():\n self.checkpoint_io = self.plugin.get_checkpoint_io()\n else:\n self.checkpoint_io = GeneralCheckpointIO()\n\n def boost(\n self,\n model: nn.Module,\n optimizer: Optimizer,\n criterion: Callable = None,\n dataloader: DataLoader = None,\n lr_scheduler: LRScheduler = None,\n ) -> List[Union[nn.Module, Optimizer, LRScheduler, DataLoader]]:\n \"\"\"\n Boost the model, optimizer, criterion, lr_scheduler, and dataloader.\n\n Args:\n model (nn.Module): The model to be boosted.\n optimizer (Optimizer): The optimizer to be boosted.\n criterion (Callable): The criterion to be boosted.\n dataloader (DataLoader): The dataloader to be boosted.\n lr_scheduler (LRScheduler): The lr_scheduler to be boosted.\n \"\"\"\n # TODO(FrankLeeeee): consider multi-model and multi-optimizer case\n # TODO(FrankLeeeee): consider multi-dataloader case\n # transform model for mixed precision\n if self.plugin:\n model, optimizer, criterion, dataloader, lr_scheduler = self.plugin.configure(\n model, optimizer, criterion, dataloader, lr_scheduler)\n\n if self.plugin and not self.plugin.control_device():\n # transform model for accelerator\n model = self.accelerator.configure(model)\n\n if self.mixed_precision and (self.plugin is None or self.plugin and not self.plugin.control_precision()):\n # transform model for mixed precision\n # when mixed_precision is specified and the plugin is not given or does not control the precision\n model, optimizer, criterion = self.mixed_precision.configure(model, optimizer, criterion)\n\n return model, optimizer, criterion, dataloader, lr_scheduler\n\n def backward(self, loss: torch.Tensor, optimizer: Optimizer) -> None:\n \"\"\"Backward pass.\n\n Args:\n loss (torch.Tensor): The loss to be backpropagated.\n optimizer (Optimizer): The optimizer to be updated.\n \"\"\"\n # TODO: implement this method with plugin\n optimizer.backward(loss)\n\n def execute_pipeline(self,\n data_iter: Iterator,\n model: nn.Module,\n criterion: Callable[[torch.Tensor], torch.Tensor],\n optimizer: Optimizer,\n return_loss: bool = True,\n return_outputs: bool = False) -> 
Tuple[Optional[torch.Tensor], ...]:\n # TODO: implement this method\n # run pipeline forward backward pass\n # return loss or outputs if needed\n pass\n\n def no_sync(self, model: nn.Module) -> contextmanager:\n \"\"\"Context manager to disable gradient synchronization across DP process groups.\n\n Args:\n model (nn.Module): The model to be disabled gradient synchronization.\n\n Returns:\n contextmanager: Context to disable gradient synchronization.\n \"\"\"\n assert self.plugin is not None, f'no_sync is only enabled when a plugin is provided and the plugin supports no_sync.'\n assert self.plugin.support_no_sync, f'The plugin {self.plugin.__class__.__name__} does not support no_sync.'\n return self.plugin.no_sync(model)\n\n def load_model(self, model: nn.Module, checkpoint: str, strict: bool = True):\n \"\"\"Load model from checkpoint.\n\n Args:\n model (nn.Module): A model boosted by Booster.\n checkpoint (str): Path to the checkpoint. It must be a local path.\n It should be a directory path if the checkpoint is sharded. Otherwise, it should be a file path.\n strict (bool, optional): whether to strictly enforce that the keys\n in :attr:`state_dict` match the keys returned by this module's\n :meth:`~torch.nn.Module.state_dict` function. Defaults to True.\n \"\"\"\n self.checkpoint_io.load_model(model, checkpoint, strict)\n\n def save_model(self,\n model: nn.Module,\n checkpoint: str,\n prefix: str = None,\n shard: bool = False,\n size_per_shard: int = 1024):\n \"\"\"Save model to checkpoint.\n\n Args:\n model (nn.Module): A model boosted by Booster.\n checkpoint (str): Path to the checkpoint. It must be a local path.\n It is a file path if ``shard=False``. Otherwise, it is a directory path.\n prefix (str, optional): A prefix added to parameter and buffer\n names to compose the keys in state_dict. Defaults to None.\n shard (bool, optional): Whether to save checkpoint a sharded way.\n If true, the checkpoint will be a folder. Otherwise, it will be a single file. Defaults to False.\n size_per_shard (int, optional): Maximum size of checkpoint shard file in MB. This is useful only when ``shard=True``. Defaults to 1024.\n \"\"\"\n self.checkpoint_io.save_model(model, checkpoint=checkpoint, shard=shard, size_per_shard=size_per_shard)\n\n def load_optimizer(self, optimizer: Optimizer, checkpoint: str):\n \"\"\"Load optimizer from checkpoint.\n\n Args:\n optimizer (Optimizer): An optimizer boosted by Booster.\n checkpoint (str): Path to the checkpoint. It must be a local path.\n It should be a directory path if the checkpoint is sharded. Otherwise, it should be a file path.\n \"\"\"\n self.checkpoint_io.load_optimizer(optimizer, checkpoint)\n\n def save_optimizer(self, optimizer: Optimizer, checkpoint: str, shard: bool = False, size_per_shard: int = 1024):\n \"\"\"Save optimizer to checkpoint.\n Warning: Saving sharded optimizer checkpoint is not supported yet.\n\n Args:\n optimizer (Optimizer): An optimizer boosted by Booster.\n checkpoint (str): Path to the checkpoint. It must be a local path.\n It is a file path if ``shard=False``. Otherwise, it is a directory path.\n shard (bool, optional): Whether to save checkpoint a sharded way.\n If true, the checkpoint will be a folder. Otherwise, it will be a single file. Defaults to False.\n size_per_shard (int, optional): Maximum size of checkpoint shard file in MB. This is useful only when ``shard=True``. 
Defaults to 1024.\n \"\"\"\n self.checkpoint_io.save_optimizer(optimizer, checkpoint, shard, size_per_shard)\n\n def save_lr_scheduler(self, lr_scheduler: LRScheduler, checkpoint: str):\n \"\"\"Save lr scheduler to checkpoint.\n\n Args:\n lr_scheduler (LRScheduler): A lr scheduler boosted by Booster.\n checkpoint (str): Path to the checkpoint. It must be a local file path.\n \"\"\"\n self.checkpoint_io.save_lr_scheduler(lr_scheduler, checkpoint)\n\n def load_lr_scheduler(self, lr_scheduler: LRScheduler, checkpoint: str):\n \"\"\"Load lr scheduler from checkpoint.\n\n Args:\n lr_scheduler (LRScheduler): A lr scheduler boosted by Booster.\n checkpoint (str): Path to the checkpoint. It must be a local file path.\n \"\"\"\n self.checkpoint_io.load_lr_scheduler(lr_scheduler, checkpoint)\n", "path": "colossalai/booster/booster.py"}], "after_files": [{"content": "import warnings\nfrom contextlib import contextmanager\nfrom typing import Callable, Iterator, List, Optional, Tuple, Union\n\nimport torch\nimport torch.nn as nn\nfrom torch.optim import Optimizer\nfrom torch.optim.lr_scheduler import _LRScheduler as LRScheduler\nfrom torch.utils.data import DataLoader\n\nfrom colossalai.checkpoint_io import GeneralCheckpointIO\n\nfrom .accelerator import Accelerator\nfrom .mixed_precision import MixedPrecision, mixed_precision_factory\nfrom .plugin import Plugin\n\n__all__ = ['Booster']\n\n\nclass Booster:\n \"\"\"\n Booster is a high-level API for training neural networks. It provides a unified interface for\n training with different precision, accelerator, and plugin.\n\n Examples:\n ```python\n colossalai.launch(...)\n plugin = GeminiPlugin(...)\n booster = Booster(precision='fp16', plugin=plugin)\n\n model = GPT2()\n optimizer = HybridAdam(model.parameters())\n dataloader = Dataloader(Dataset)\n lr_scheduler = LinearWarmupScheduler()\n criterion = GPTLMLoss()\n\n model, optimizer, lr_scheduler, dataloader = booster.boost(model, optimizer, lr_scheduler, dataloader)\n\n for epoch in range(max_epochs):\n for input_ids, attention_mask in dataloader:\n outputs = model(input_ids, attention_mask)\n loss = criterion(outputs.logits, input_ids)\n booster.backward(loss, optimizer)\n optimizer.step()\n lr_scheduler.step()\n optimizer.zero_grad()\n ```\n\n Args:\n device (str or torch.device): The device to run the training. Default: 'cuda'.\n mixed_precision (str or MixedPrecision): The mixed precision to run the training. Default: None.\n If the argument is a string, it can be 'fp16', 'fp16_apex', 'bf16', or 'fp8'.\n 'fp16' would use PyTorch AMP while `fp16_apex` would use Nvidia Apex.\n plugin (Plugin): The plugin to run the training. 
Default: None.\n \"\"\"\n\n def __init__(self,\n device: str = 'cuda',\n mixed_precision: Union[MixedPrecision, str] = None,\n plugin: Optional[Plugin] = None) -> None:\n if plugin is not None:\n assert isinstance(\n plugin, Plugin), f'Expected the argument plugin to be an instance of Plugin, but got {type(plugin)}.'\n self.plugin = plugin\n\n # set accelerator\n if self.plugin and self.plugin.control_device():\n self.accelerator = None\n warnings.warn('The plugin will control the accelerator, so the device argument will be ignored.')\n else:\n self.accelerator = Accelerator(device)\n\n # set precision\n if self.plugin and self.plugin.control_precision():\n warnings.warn('The plugin will control the precision, so the mixed_precision argument will be ignored.')\n self.mixed_precision = None\n elif mixed_precision is None:\n self.mixed_precision = None\n else:\n # validate and set precision\n if isinstance(mixed_precision, str):\n # the user will take the default arguments for amp training\n self.mixed_precision = mixed_precision_factory(mixed_precision)\n elif isinstance(mixed_precision, MixedPrecision):\n # the user can customize the arguments by passing the precision object\n self.mixed_precision = mixed_precision\n else:\n raise ValueError(\n f'Expected the argument mixed_precision to be a string or an instance of Precision, but got {type(mixed_precision)}.'\n )\n\n if self.plugin is not None and self.plugin.control_checkpoint_io():\n self.checkpoint_io = self.plugin.get_checkpoint_io()\n else:\n self.checkpoint_io = GeneralCheckpointIO()\n\n def boost(\n self,\n model: nn.Module,\n optimizer: Optimizer,\n criterion: Callable = None,\n dataloader: DataLoader = None,\n lr_scheduler: LRScheduler = None,\n ) -> List[Union[nn.Module, Optimizer, LRScheduler, DataLoader]]:\n \"\"\"\n Boost the model, optimizer, criterion, lr_scheduler, and dataloader.\n\n Args:\n model (nn.Module): The model to be boosted.\n optimizer (Optimizer): The optimizer to be boosted.\n criterion (Callable): The criterion to be boosted.\n dataloader (DataLoader): The dataloader to be boosted.\n lr_scheduler (LRScheduler): The lr_scheduler to be boosted.\n \"\"\"\n # TODO(FrankLeeeee): consider multi-model and multi-optimizer case\n # TODO(FrankLeeeee): consider multi-dataloader case\n # transform model for mixed precision\n if self.plugin:\n model, optimizer, criterion, dataloader, lr_scheduler = self.plugin.configure(\n model, optimizer, criterion, dataloader, lr_scheduler)\n\n if self.plugin and not self.plugin.control_device():\n # transform model for accelerator\n model = self.accelerator.configure(model)\n\n if self.mixed_precision and (self.plugin is None or self.plugin and not self.plugin.control_precision()):\n # transform model for mixed precision\n # when mixed_precision is specified and the plugin is not given or does not control the precision\n model, optimizer, criterion = self.mixed_precision.configure(model, optimizer, criterion)\n\n return model, optimizer, criterion, dataloader, lr_scheduler\n\n def backward(self, loss: torch.Tensor, optimizer: Optimizer) -> None:\n \"\"\"Backward pass.\n\n Args:\n loss (torch.Tensor): The loss to be backpropagated.\n optimizer (Optimizer): The optimizer to be updated.\n \"\"\"\n # TODO: implement this method with plugin\n optimizer.backward(loss)\n\n def execute_pipeline(self,\n data_iter: Iterator,\n model: nn.Module,\n criterion: Callable[[torch.Tensor], torch.Tensor],\n optimizer: Optimizer,\n return_loss: bool = True,\n return_outputs: bool = False) -> 
Tuple[Optional[torch.Tensor], ...]:\n # TODO: implement this method\n # run pipeline forward backward pass\n # return loss or outputs if needed\n pass\n\n def no_sync(self, model: nn.Module) -> contextmanager:\n \"\"\"Context manager to disable gradient synchronization across DP process groups.\n\n Args:\n model (nn.Module): The model to be disabled gradient synchronization.\n\n Returns:\n contextmanager: Context to disable gradient synchronization.\n \"\"\"\n assert self.plugin is not None, f'no_sync is only enabled when a plugin is provided and the plugin supports no_sync.'\n assert self.plugin.support_no_sync, f'The plugin {self.plugin.__class__.__name__} does not support no_sync.'\n return self.plugin.no_sync(model)\n\n def load_model(self, model: nn.Module, checkpoint: str, strict: bool = True):\n \"\"\"Load model from checkpoint.\n\n Args:\n model (nn.Module): A model boosted by Booster.\n checkpoint (str): Path to the checkpoint. It must be a local path.\n It should be a directory path if the checkpoint is sharded. Otherwise, it should be a file path.\n strict (bool, optional): whether to strictly enforce that the keys\n in :attr:`state_dict` match the keys returned by this module's\n :meth:`~torch.nn.Module.state_dict` function. Defaults to True.\n \"\"\"\n self.checkpoint_io.load_model(model, checkpoint, strict)\n\n def save_model(self,\n model: nn.Module,\n checkpoint: str,\n prefix: str = None,\n shard: bool = False,\n size_per_shard: int = 1024):\n \"\"\"Save model to checkpoint.\n\n Args:\n model (nn.Module): A model boosted by Booster.\n checkpoint (str): Path to the checkpoint. It must be a local path.\n It is a file path if ``shard=False``. Otherwise, it is a directory path.\n prefix (str, optional): A prefix added to parameter and buffer\n names to compose the keys in state_dict. Defaults to None.\n shard (bool, optional): Whether to save checkpoint a sharded way.\n If true, the checkpoint will be a folder. Otherwise, it will be a single file. Defaults to False.\n size_per_shard (int, optional): Maximum size of checkpoint shard file in MB. This is useful only when ``shard=True``. Defaults to 1024.\n \"\"\"\n self.checkpoint_io.save_model(model, checkpoint=checkpoint, shard=shard, size_per_shard=size_per_shard)\n\n def load_optimizer(self, optimizer: Optimizer, checkpoint: str):\n \"\"\"Load optimizer from checkpoint.\n\n Args:\n optimizer (Optimizer): An optimizer boosted by Booster.\n checkpoint (str): Path to the checkpoint. It must be a local path.\n It should be a directory path if the checkpoint is sharded. Otherwise, it should be a file path.\n \"\"\"\n self.checkpoint_io.load_optimizer(optimizer, checkpoint)\n\n def save_optimizer(self, optimizer: Optimizer, checkpoint: str, shard: bool = False, size_per_shard: int = 1024):\n \"\"\"Save optimizer to checkpoint.\n Warning: Saving sharded optimizer checkpoint is not supported yet.\n\n Args:\n optimizer (Optimizer): An optimizer boosted by Booster.\n checkpoint (str): Path to the checkpoint. It must be a local path.\n It is a file path if ``shard=False``. Otherwise, it is a directory path.\n shard (bool, optional): Whether to save checkpoint a sharded way.\n If true, the checkpoint will be a folder. Otherwise, it will be a single file. Defaults to False.\n size_per_shard (int, optional): Maximum size of checkpoint shard file in MB. This is useful only when ``shard=True``. 
Defaults to 1024.\n \"\"\"\n self.checkpoint_io.save_optimizer(optimizer, checkpoint, shard, size_per_shard)\n\n def save_lr_scheduler(self, lr_scheduler: LRScheduler, checkpoint: str):\n \"\"\"Save lr scheduler to checkpoint.\n\n Args:\n lr_scheduler (LRScheduler): A lr scheduler boosted by Booster.\n checkpoint (str): Path to the checkpoint. It must be a local file path.\n \"\"\"\n self.checkpoint_io.save_lr_scheduler(lr_scheduler, checkpoint)\n\n def load_lr_scheduler(self, lr_scheduler: LRScheduler, checkpoint: str):\n \"\"\"Load lr scheduler from checkpoint.\n\n Args:\n lr_scheduler (LRScheduler): A lr scheduler boosted by Booster.\n checkpoint (str): Path to the checkpoint. It must be a local file path.\n \"\"\"\n self.checkpoint_io.load_lr_scheduler(lr_scheduler, checkpoint)\n", "path": "colossalai/booster/booster.py"}]}
3341
168
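The ColossalAI record above corrects the `Booster` docstring so the Gemini example uses `HybridAdam` instead of `torch.optim.Adam`. A minimal sketch of the corrected usage follows; the launch call, plugin import path, and toy model are assumptions for a recent ColossalAI release, not quoted from the record.

```python
# Hedged sketch of the corrected Booster usage; import paths and the launch
# arguments are assumed, not taken from the record itself.
import torch
import colossalai
from colossalai.booster import Booster
from colossalai.booster.plugin import GeminiPlugin
from colossalai.nn.optimizer import HybridAdam

colossalai.launch_from_torch(config={})                # assumes a torchrun-style launch
plugin = GeminiPlugin()
booster = Booster(plugin=plugin)

model = torch.nn.Linear(8, 2)                          # toy stand-in for GPT2()
optimizer = HybridAdam(model.parameters(), lr=1e-3)    # Gemini expects a hybrid optimizer, not torch.optim.Adam
model, optimizer, *_ = booster.boost(model, optimizer) # boost returns (model, optimizer, criterion, dataloader, lr_scheduler)
```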
gh_patches_debug_10745
rasdani/github-patches
git_diff
secdev__scapy-1682
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- VXLAN Binding to IPv6 or IPv4 doesn't work #### Brief description When doing a VXLAN()/IP or VXLAN()/IPv6 the binding does not work properly. Both IP layers are interpreted as Ethernet Layers. #### Environment - Scapy version: `2.4.0` - Python version: `3.5.2` - Operating System: `Ubuntu 16.04` #### How to reproduce ```python >>> issue = Ether()/IP()/UDP()/VXLAN(flags=4, NextProtocol=1)/IP() >>> issue.show2() ###[ Ethernet ]### dst= ff:ff:ff:ff:ff:ff src= 00:00:00:00:00:00 type= 0x800 ###[ IP ]### version= 4 ihl= 5 tos= 0x0 len= 56 id= 1 flags= frag= 0 ttl= 64 proto= udp chksum= 0x7cb2 src= 127.0.0.1 dst= 127.0.0.1 \options\ ###[ UDP ]### sport= 4789 dport= 4789 len= 36 chksum= 0xd838 ###[ VXLAN ]### flags= NextProtocol reserved0= 0 NextProtocol= IPv4 vni= 0x0 reserved2= 0x0 ###[ Ethernet ]### dst= 45:00:00:14:00:01 src= 00:00:40:00:7c:e7 type= 0x7f00 ###[ Raw ]### load= '\x00\x01\x7f\x00\x00\x01' ``` #### Actual result ```python ###[ Ethernet ]### dst= ff:ff:ff:ff:ff:ff src= 00:00:00:00:00:00 type= 0x800 ###[ IP ]### version= 4 ihl= 5 tos= 0x0 len= 56 id= 1 flags= frag= 0 ttl= 64 proto= udp chksum= 0x7cb2 src= 127.0.0.1 dst= 127.0.0.1 \options\ ###[ UDP ]### sport= 4789 dport= 4789 len= 36 chksum= 0xd838 ###[ VXLAN ]### flags= NextProtocol reserved0= 0 NextProtocol= IPv4 vni= 0x0 reserved2= 0x0 ###[ Ethernet ]### dst= 45:00:00:14:00:01 src= 00:00:40:00:7c:e7 type= 0x7f00 ###[ Raw ]### load= '\x00\x01\x7f\x00\x00\x01' ``` #### Expected result ```python ###[ Ethernet ]### dst= ff:ff:ff:ff:ff:ff src= 00:00:00:00:00:00 type= 0x800 ###[ IP ]### version= 4 ihl= 5 tos= 0x0 len= 56 id= 1 flags= frag= 0 ttl= 64 proto= udp chksum= 0x7cb2 src= 127.0.0.1 dst= 127.0.0.1 \options\ ###[ UDP ]### sport= 4789 dport= 4789 len= 36 chksum= 0xd838 ###[ VXLAN ]### flags= NextProtocol reserved0= 0 NextProtocol= IPv4 vni= 0x0 reserved2= 0x0 ###[ IP ]### version= 4 ihl= 5 tos= 0x0 len= 20 id= 1 flags= frag= 0 ttl= 64 proto= hopopt chksum= 0x7ce7 src= 127.0.0.1 dst= 127.0.0.1 \options\ ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. 
--- BEGIN FILES --- Path: `scapy/layers/vxlan.py` Content: ``` 1 # This file is part of Scapy 2 # See http://www.secdev.org/projects/scapy for more information 3 # Copyright (C) Philippe Biondi <[email protected]> 4 # This program is published under a GPLv2 license 5 6 """ 7 Virtual eXtensible Local Area Network (VXLAN) 8 - RFC 7348 - 9 10 A Framework for Overlaying Virtualized Layer 2 Networks over Layer 3 Networks 11 http://tools.ietf.org/html/rfc7348 12 https://www.ietf.org/id/draft-ietf-nvo3-vxlan-gpe-02.txt 13 14 VXLAN Group Policy Option: 15 http://tools.ietf.org/html/draft-smith-vxlan-group-policy-00 16 """ 17 18 from scapy.packet import Packet, bind_layers 19 from scapy.layers.l2 import Ether 20 from scapy.layers.inet import IP, UDP 21 from scapy.layers.inet6 import IPv6 22 from scapy.fields import FlagsField, XByteField, ThreeBytesField, \ 23 ConditionalField, ShortField, ByteEnumField, X3BytesField 24 25 _GP_FLAGS = ["R", "R", "R", "A", "R", "R", "D", "R"] 26 27 28 class VXLAN(Packet): 29 name = "VXLAN" 30 31 fields_desc = [ 32 FlagsField("flags", 0x8, 8, 33 ['OAM', 'R', 'NextProtocol', 'Instance', 34 'V1', 'V2', 'R', 'G']), 35 ConditionalField( 36 ShortField("reserved0", 0), 37 lambda pkt: pkt.flags.NextProtocol, 38 ), 39 ConditionalField( 40 ByteEnumField('NextProtocol', 0, 41 {0: 'NotDefined', 42 1: 'IPv4', 43 2: 'IPv6', 44 3: 'Ethernet', 45 4: 'NSH'}), 46 lambda pkt: pkt.flags.NextProtocol, 47 ), 48 ConditionalField( 49 ThreeBytesField("reserved1", 0), 50 lambda pkt: (not pkt.flags.G) and (not pkt.flags.NextProtocol), 51 ), 52 ConditionalField( 53 FlagsField("gpflags", 0, 8, _GP_FLAGS), 54 lambda pkt: pkt.flags.G, 55 ), 56 ConditionalField( 57 ShortField("gpid", 0), 58 lambda pkt: pkt.flags.G, 59 ), 60 X3BytesField("vni", 0), 61 XByteField("reserved2", 0), 62 ] 63 64 # Use default linux implementation port 65 overload_fields = { 66 UDP: {'dport': 8472}, 67 } 68 69 def mysummary(self): 70 if self.flags.G: 71 return self.sprintf("VXLAN (vni=%VXLAN.vni% gpid=%VXLAN.gpid%)") 72 else: 73 return self.sprintf("VXLAN (vni=%VXLAN.vni%)") 74 75 76 bind_layers(UDP, VXLAN, dport=4789) # RFC standard vxlan port 77 bind_layers(UDP, VXLAN, dport=4790) # RFC standard vxlan-gpe port 78 bind_layers(UDP, VXLAN, dport=6633) # New IANA assigned port for use with NSH 79 bind_layers(UDP, VXLAN, dport=8472) # Linux implementation port 80 bind_layers(UDP, VXLAN, dport=48879) # Cisco ACI 81 bind_layers(UDP, VXLAN, sport=4789) 82 bind_layers(UDP, VXLAN, sport=4790) 83 bind_layers(UDP, VXLAN, sport=6633) 84 bind_layers(UDP, VXLAN, sport=8472) 85 # By default, set both ports to the RFC standard 86 bind_layers(UDP, VXLAN, sport=4789, dport=4789) 87 88 bind_layers(VXLAN, Ether) 89 bind_layers(VXLAN, IP, NextProtocol=1) 90 bind_layers(VXLAN, IPv6, NextProtocol=2) 91 bind_layers(VXLAN, Ether, flags=4, NextProtocol=0) 92 bind_layers(VXLAN, IP, flags=4, NextProtocol=1) 93 bind_layers(VXLAN, IPv6, flags=4, NextProtocol=2) 94 bind_layers(VXLAN, Ether, flags=4, NextProtocol=3) 95 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scapy/layers/vxlan.py b/scapy/layers/vxlan.py --- a/scapy/layers/vxlan.py +++ b/scapy/layers/vxlan.py @@ -85,9 +85,10 @@ # By default, set both ports to the RFC standard bind_layers(UDP, VXLAN, sport=4789, dport=4789) -bind_layers(VXLAN, Ether) +bind_layers(VXLAN, Ether, NextProtocol=0) bind_layers(VXLAN, IP, NextProtocol=1) bind_layers(VXLAN, IPv6, NextProtocol=2) +bind_layers(VXLAN, Ether, NextProtocol=3) bind_layers(VXLAN, Ether, flags=4, NextProtocol=0) bind_layers(VXLAN, IP, flags=4, NextProtocol=1) bind_layers(VXLAN, IPv6, flags=4, NextProtocol=2)
{"golden_diff": "diff --git a/scapy/layers/vxlan.py b/scapy/layers/vxlan.py\n--- a/scapy/layers/vxlan.py\n+++ b/scapy/layers/vxlan.py\n@@ -85,9 +85,10 @@\n # By default, set both ports to the RFC standard\n bind_layers(UDP, VXLAN, sport=4789, dport=4789)\n \n-bind_layers(VXLAN, Ether)\n+bind_layers(VXLAN, Ether, NextProtocol=0)\n bind_layers(VXLAN, IP, NextProtocol=1)\n bind_layers(VXLAN, IPv6, NextProtocol=2)\n+bind_layers(VXLAN, Ether, NextProtocol=3)\n bind_layers(VXLAN, Ether, flags=4, NextProtocol=0)\n bind_layers(VXLAN, IP, flags=4, NextProtocol=1)\n bind_layers(VXLAN, IPv6, flags=4, NextProtocol=2)\n", "issue": "VXLAN Binding to IPv6 or IPv4 doesn't work\n#### Brief description\r\nWhen doing a VXLAN()/IP or VXLAN()/IPv6 the binding does not work properly. Both IP layers are interpreted as Ethernet Layers.\r\n\r\n#### Environment\r\n\r\n- Scapy version: `2.4.0`\r\n- Python version: `3.5.2`\r\n- Operating System: `Ubuntu 16.04`\r\n\r\n#### How to reproduce\r\n```python\r\n>>> issue = Ether()/IP()/UDP()/VXLAN(flags=4, NextProtocol=1)/IP()\r\n>>> issue.show2()\r\n###[ Ethernet ]### \r\n dst= ff:ff:ff:ff:ff:ff\r\n src= 00:00:00:00:00:00\r\n type= 0x800\r\n###[ IP ]### \r\n version= 4\r\n ihl= 5\r\n tos= 0x0\r\n len= 56\r\n id= 1\r\n flags= \r\n frag= 0\r\n ttl= 64\r\n proto= udp\r\n chksum= 0x7cb2\r\n src= 127.0.0.1\r\n dst= 127.0.0.1\r\n \\options\\\r\n###[ UDP ]### \r\n sport= 4789\r\n dport= 4789\r\n len= 36\r\n chksum= 0xd838\r\n###[ VXLAN ]### \r\n flags= NextProtocol\r\n reserved0= 0\r\n NextProtocol= IPv4\r\n vni= 0x0\r\n reserved2= 0x0\r\n###[ Ethernet ]### \r\n dst= 45:00:00:14:00:01\r\n src= 00:00:40:00:7c:e7\r\n type= 0x7f00\r\n###[ Raw ]### \r\n load= '\\x00\\x01\\x7f\\x00\\x00\\x01'\r\n```\r\n\r\n#### Actual result\r\n```python\r\n###[ Ethernet ]### \r\n dst= ff:ff:ff:ff:ff:ff\r\n src= 00:00:00:00:00:00\r\n type= 0x800\r\n###[ IP ]### \r\n version= 4\r\n ihl= 5\r\n tos= 0x0\r\n len= 56\r\n id= 1\r\n flags= \r\n frag= 0\r\n ttl= 64\r\n proto= udp\r\n chksum= 0x7cb2\r\n src= 127.0.0.1\r\n dst= 127.0.0.1\r\n \\options\\\r\n###[ UDP ]### \r\n sport= 4789\r\n dport= 4789\r\n len= 36\r\n chksum= 0xd838\r\n###[ VXLAN ]### \r\n flags= NextProtocol\r\n reserved0= 0\r\n NextProtocol= IPv4\r\n vni= 0x0\r\n reserved2= 0x0\r\n###[ Ethernet ]### \r\n dst= 45:00:00:14:00:01\r\n src= 00:00:40:00:7c:e7\r\n type= 0x7f00\r\n###[ Raw ]### \r\n load= '\\x00\\x01\\x7f\\x00\\x00\\x01'\r\n```\r\n\r\n#### Expected result\r\n```python\r\n###[ Ethernet ]### \r\n dst= ff:ff:ff:ff:ff:ff\r\n src= 00:00:00:00:00:00\r\n type= 0x800\r\n###[ IP ]### \r\n version= 4\r\n ihl= 5\r\n tos= 0x0\r\n len= 56\r\n id= 1\r\n flags= \r\n frag= 0\r\n ttl= 64\r\n proto= udp\r\n chksum= 0x7cb2\r\n src= 127.0.0.1\r\n dst= 127.0.0.1\r\n \\options\\\r\n###[ UDP ]### \r\n sport= 4789\r\n dport= 4789\r\n len= 36\r\n chksum= 0xd838\r\n###[ VXLAN ]### \r\n flags= NextProtocol\r\n reserved0= 0\r\n NextProtocol= IPv4\r\n vni= 0x0\r\n reserved2= 0x0\r\n###[ IP ]### \r\n version= 4\r\n ihl= 5\r\n tos= 0x0\r\n len= 20\r\n id= 1\r\n flags= \r\n frag= 0\r\n ttl= 64\r\n proto= hopopt\r\n chksum= 0x7ce7\r\n src= 127.0.0.1\r\n dst= 127.0.0.1\r\n \\options\\\r\n```\r\n\n", "before_files": [{"content": "# This file is part of Scapy\n# See http://www.secdev.org/projects/scapy for more information\n# Copyright (C) Philippe Biondi <[email protected]>\n# This program is published under a GPLv2 license\n\n\"\"\"\nVirtual eXtensible Local Area Network (VXLAN)\n- RFC 7348 -\n\nA Framework for Overlaying Virtualized Layer 2 Networks over Layer 3 
Networks\nhttp://tools.ietf.org/html/rfc7348\nhttps://www.ietf.org/id/draft-ietf-nvo3-vxlan-gpe-02.txt\n\nVXLAN Group Policy Option:\nhttp://tools.ietf.org/html/draft-smith-vxlan-group-policy-00\n\"\"\"\n\nfrom scapy.packet import Packet, bind_layers\nfrom scapy.layers.l2 import Ether\nfrom scapy.layers.inet import IP, UDP\nfrom scapy.layers.inet6 import IPv6\nfrom scapy.fields import FlagsField, XByteField, ThreeBytesField, \\\n ConditionalField, ShortField, ByteEnumField, X3BytesField\n\n_GP_FLAGS = [\"R\", \"R\", \"R\", \"A\", \"R\", \"R\", \"D\", \"R\"]\n\n\nclass VXLAN(Packet):\n name = \"VXLAN\"\n\n fields_desc = [\n FlagsField(\"flags\", 0x8, 8,\n ['OAM', 'R', 'NextProtocol', 'Instance',\n 'V1', 'V2', 'R', 'G']),\n ConditionalField(\n ShortField(\"reserved0\", 0),\n lambda pkt: pkt.flags.NextProtocol,\n ),\n ConditionalField(\n ByteEnumField('NextProtocol', 0,\n {0: 'NotDefined',\n 1: 'IPv4',\n 2: 'IPv6',\n 3: 'Ethernet',\n 4: 'NSH'}),\n lambda pkt: pkt.flags.NextProtocol,\n ),\n ConditionalField(\n ThreeBytesField(\"reserved1\", 0),\n lambda pkt: (not pkt.flags.G) and (not pkt.flags.NextProtocol),\n ),\n ConditionalField(\n FlagsField(\"gpflags\", 0, 8, _GP_FLAGS),\n lambda pkt: pkt.flags.G,\n ),\n ConditionalField(\n ShortField(\"gpid\", 0),\n lambda pkt: pkt.flags.G,\n ),\n X3BytesField(\"vni\", 0),\n XByteField(\"reserved2\", 0),\n ]\n\n # Use default linux implementation port\n overload_fields = {\n UDP: {'dport': 8472},\n }\n\n def mysummary(self):\n if self.flags.G:\n return self.sprintf(\"VXLAN (vni=%VXLAN.vni% gpid=%VXLAN.gpid%)\")\n else:\n return self.sprintf(\"VXLAN (vni=%VXLAN.vni%)\")\n\n\nbind_layers(UDP, VXLAN, dport=4789) # RFC standard vxlan port\nbind_layers(UDP, VXLAN, dport=4790) # RFC standard vxlan-gpe port\nbind_layers(UDP, VXLAN, dport=6633) # New IANA assigned port for use with NSH\nbind_layers(UDP, VXLAN, dport=8472) # Linux implementation port\nbind_layers(UDP, VXLAN, dport=48879) # Cisco ACI\nbind_layers(UDP, VXLAN, sport=4789)\nbind_layers(UDP, VXLAN, sport=4790)\nbind_layers(UDP, VXLAN, sport=6633)\nbind_layers(UDP, VXLAN, sport=8472)\n# By default, set both ports to the RFC standard\nbind_layers(UDP, VXLAN, sport=4789, dport=4789)\n\nbind_layers(VXLAN, Ether)\nbind_layers(VXLAN, IP, NextProtocol=1)\nbind_layers(VXLAN, IPv6, NextProtocol=2)\nbind_layers(VXLAN, Ether, flags=4, NextProtocol=0)\nbind_layers(VXLAN, IP, flags=4, NextProtocol=1)\nbind_layers(VXLAN, IPv6, flags=4, NextProtocol=2)\nbind_layers(VXLAN, Ether, flags=4, NextProtocol=3)\n", "path": "scapy/layers/vxlan.py"}], "after_files": [{"content": "# This file is part of Scapy\n# See http://www.secdev.org/projects/scapy for more information\n# Copyright (C) Philippe Biondi <[email protected]>\n# This program is published under a GPLv2 license\n\n\"\"\"\nVirtual eXtensible Local Area Network (VXLAN)\n- RFC 7348 -\n\nA Framework for Overlaying Virtualized Layer 2 Networks over Layer 3 Networks\nhttp://tools.ietf.org/html/rfc7348\nhttps://www.ietf.org/id/draft-ietf-nvo3-vxlan-gpe-02.txt\n\nVXLAN Group Policy Option:\nhttp://tools.ietf.org/html/draft-smith-vxlan-group-policy-00\n\"\"\"\n\nfrom scapy.packet import Packet, bind_layers\nfrom scapy.layers.l2 import Ether\nfrom scapy.layers.inet import IP, UDP\nfrom scapy.layers.inet6 import IPv6\nfrom scapy.fields import FlagsField, XByteField, ThreeBytesField, \\\n ConditionalField, ShortField, ByteEnumField, X3BytesField\n\n_GP_FLAGS = [\"R\", \"R\", \"R\", \"A\", \"R\", \"R\", \"D\", \"R\"]\n\n\nclass VXLAN(Packet):\n name = \"VXLAN\"\n\n 
fields_desc = [\n FlagsField(\"flags\", 0x8, 8,\n ['OAM', 'R', 'NextProtocol', 'Instance',\n 'V1', 'V2', 'R', 'G']),\n ConditionalField(\n ShortField(\"reserved0\", 0),\n lambda pkt: pkt.flags.NextProtocol,\n ),\n ConditionalField(\n ByteEnumField('NextProtocol', 0,\n {0: 'NotDefined',\n 1: 'IPv4',\n 2: 'IPv6',\n 3: 'Ethernet',\n 4: 'NSH'}),\n lambda pkt: pkt.flags.NextProtocol,\n ),\n ConditionalField(\n ThreeBytesField(\"reserved1\", 0),\n lambda pkt: (not pkt.flags.G) and (not pkt.flags.NextProtocol),\n ),\n ConditionalField(\n FlagsField(\"gpflags\", 0, 8, _GP_FLAGS),\n lambda pkt: pkt.flags.G,\n ),\n ConditionalField(\n ShortField(\"gpid\", 0),\n lambda pkt: pkt.flags.G,\n ),\n X3BytesField(\"vni\", 0),\n XByteField(\"reserved2\", 0),\n ]\n\n # Use default linux implementation port\n overload_fields = {\n UDP: {'dport': 8472},\n }\n\n def mysummary(self):\n if self.flags.G:\n return self.sprintf(\"VXLAN (vni=%VXLAN.vni% gpid=%VXLAN.gpid%)\")\n else:\n return self.sprintf(\"VXLAN (vni=%VXLAN.vni%)\")\n\n\nbind_layers(UDP, VXLAN, dport=4789) # RFC standard vxlan port\nbind_layers(UDP, VXLAN, dport=4790) # RFC standard vxlan-gpe port\nbind_layers(UDP, VXLAN, dport=6633) # New IANA assigned port for use with NSH\nbind_layers(UDP, VXLAN, dport=8472) # Linux implementation port\nbind_layers(UDP, VXLAN, dport=48879) # Cisco ACI\nbind_layers(UDP, VXLAN, sport=4789)\nbind_layers(UDP, VXLAN, sport=4790)\nbind_layers(UDP, VXLAN, sport=6633)\nbind_layers(UDP, VXLAN, sport=8472)\n# By default, set both ports to the RFC standard\nbind_layers(UDP, VXLAN, sport=4789, dport=4789)\n\nbind_layers(VXLAN, Ether, NextProtocol=0)\nbind_layers(VXLAN, IP, NextProtocol=1)\nbind_layers(VXLAN, IPv6, NextProtocol=2)\nbind_layers(VXLAN, Ether, NextProtocol=3)\nbind_layers(VXLAN, Ether, flags=4, NextProtocol=0)\nbind_layers(VXLAN, IP, flags=4, NextProtocol=1)\nbind_layers(VXLAN, IPv6, flags=4, NextProtocol=2)\nbind_layers(VXLAN, Ether, flags=4, NextProtocol=3)\n", "path": "scapy/layers/vxlan.py"}]}
2567
208
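The Scapy record above narrows the VXLAN payload bindings so `NextProtocol` actually selects the inner layer. A quick sketch of the behaviour the patch restores, assuming a Scapy build that includes the patched `vxlan.py`:

```python
# Sketch only: with the patched bindings, the payload after
# VXLAN(flags=4, NextProtocol=1) dissects as IP instead of Ether.
from scapy.layers.l2 import Ether
from scapy.layers.inet import IP, UDP
from scapy.layers.vxlan import VXLAN

pkt = Ether() / IP() / UDP() / VXLAN(flags=4, NextProtocol=1) / IP()
rebuilt = Ether(bytes(pkt))                 # rebuild from raw bytes to force dissection
assert isinstance(rebuilt[VXLAN].payload, IP)
rebuilt.show2()                             # inner layer now prints as ###[ IP ]###
```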
gh_patches_debug_25728
rasdani/github-patches
git_diff
ydataai__ydata-profiling-1000
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- High correlation warning printed multiple times I get the same warning "High correlation" with the same other column four times in the report. Looks like a bug where the warning is accidentally generated multiple times or not de-duplicated properly. Is it easy to spot the issue or reproduce? Or should I try to extract a standalone test case? This is with pandas 1.3.0 and pandas-profiling 3.0.0. <img width="572" alt="Screenshot 2021-09-05 at 18 54 44" src="https://user-images.githubusercontent.com/852409/132135015-45c0a273-763a-430e-b12f-d340e79b3ea7.png"> --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/pandas_profiling/model/alerts.py` Content: ``` 1 """Logic for alerting the user on possibly problematic patterns in the data (e.g. high number of zeros , constant 2 values, high correlations).""" 3 from enum import Enum, auto, unique 4 from typing import Any, Dict, List, Optional, Set 5 6 import numpy as np 7 import pandas as pd 8 9 from pandas_profiling.config import Settings 10 from pandas_profiling.model.correlations import perform_check_correlation 11 12 13 @unique 14 class AlertType(Enum): 15 """Alert types""" 16 17 CONSTANT = auto() 18 """This variable has a constant value.""" 19 20 ZEROS = auto() 21 """This variable contains zeros.""" 22 23 HIGH_CORRELATION = auto() 24 """This variable is highly correlated.""" 25 26 HIGH_CARDINALITY = auto() 27 """This variable has a high cardinality.""" 28 29 UNSUPPORTED = auto() 30 """This variable is unsupported.""" 31 32 DUPLICATES = auto() 33 """This variable contains duplicates.""" 34 35 SKEWED = auto() 36 """This variable is highly skewed.""" 37 38 MISSING = auto() 39 """This variable contains missing values.""" 40 41 INFINITE = auto() 42 """This variable contains infinite values.""" 43 44 TYPE_DATE = auto() 45 """This variable is likely a datetime, but treated as categorical.""" 46 47 UNIQUE = auto() 48 """This variable has unique values.""" 49 50 CONSTANT_LENGTH = auto() 51 """This variable has a constant length""" 52 53 REJECTED = auto() 54 """Variables are rejected if we do not want to consider them for further analysis.""" 55 56 UNIFORM = auto() 57 """The variable is uniformly distributed""" 58 59 EMPTY = auto() 60 """The DataFrame is empty""" 61 62 63 class Alert: 64 """An alert object (type, values, column).""" 65 66 _anchor_id: Optional[str] = None 67 68 def __init__( 69 self, 70 alert_type: AlertType, 71 values: Optional[Dict] = None, 72 column_name: Optional[str] = None, 73 fields: Optional[Set] = None, 74 ): 75 if values is None: 76 values = {} 77 if fields is None: 78 fields = set() 79 80 self.fields = fields 81 self.alert_type = alert_type 82 self.values = values 83 self.column_name = column_name 84 85 @property 86 def anchor_id(self) -> Optional[str]: 87 if self._anchor_id is None: 88 self._anchor_id = str(hash(self.column_name)) 89 return self._anchor_id 90 91 def fmt(self) -> str: 92 # TODO: render in template 93 name = self.alert_type.name.replace("_", " ") 94 if name == "HIGH CORRELATION": 95 num = len(self.values["fields"]) 96 title = ", ".join(self.values["fields"]) 97 name = f'<abbr title="This variable has a high correlation with {num} fields: {title}">HIGH CORRELATION</abbr>' 98 return name 99 100 def __repr__(self): 101 alert_type = self.alert_type.name 102 column = self.column_name 103 return f"[{alert_type}] 
alert on column {column}" 104 105 106 def check_table_alerts(table: dict) -> List[Alert]: 107 """Checks the overall dataset for alerts. 108 109 Args: 110 table: Overall dataset statistics. 111 112 Returns: 113 A list of alerts. 114 """ 115 alerts = [] 116 if alert_value(table.get("n_duplicates", np.nan)): 117 alerts.append( 118 Alert( 119 alert_type=AlertType.DUPLICATES, 120 values=table, 121 fields={"n_duplicates"}, 122 ) 123 ) 124 if table["n"] == 0: 125 alerts.append( 126 Alert( 127 alert_type=AlertType.EMPTY, 128 values=table, 129 fields={"n"}, 130 ) 131 ) 132 return alerts 133 134 135 def numeric_alerts(config: Settings, summary: dict) -> List[Alert]: 136 alerts = [] 137 138 # Skewness 139 if skewness_alert(summary["skewness"], config.vars.num.skewness_threshold): 140 alerts.append( 141 Alert( 142 alert_type=AlertType.SKEWED, 143 fields={"skewness"}, 144 ) 145 ) 146 147 # Infinite values 148 if alert_value(summary["p_infinite"]): 149 alerts.append( 150 Alert( 151 alert_type=AlertType.INFINITE, 152 fields={"p_infinite", "n_infinite"}, 153 ) 154 ) 155 156 # Zeros 157 if alert_value(summary["p_zeros"]): 158 alerts.append( 159 Alert( 160 alert_type=AlertType.ZEROS, 161 fields={"n_zeros", "p_zeros"}, 162 ) 163 ) 164 165 if ( 166 "chi_squared" in summary 167 and summary["chi_squared"]["pvalue"] > config.vars.num.chi_squared_threshold 168 ): 169 alerts.append(Alert(alert_type=AlertType.UNIFORM)) 170 171 return alerts 172 173 174 def categorical_alerts(config: Settings, summary: dict) -> List[Alert]: 175 alerts = [] 176 177 # High cardinality 178 if summary.get("n_distinct", np.nan) > config.vars.cat.cardinality_threshold: 179 alerts.append( 180 Alert( 181 alert_type=AlertType.HIGH_CARDINALITY, 182 fields={"n_distinct"}, 183 ) 184 ) 185 186 if ( 187 "chi_squared" in summary 188 and summary["chi_squared"]["pvalue"] > config.vars.cat.chi_squared_threshold 189 ): 190 alerts.append(Alert(alert_type=AlertType.UNIFORM)) 191 192 if summary.get("date_warning"): 193 alerts.append(Alert(alert_type=AlertType.TYPE_DATE)) 194 195 # Constant length 196 if "composition" in summary and summary["min_length"] == summary["max_length"]: 197 alerts.append( 198 Alert( 199 alert_type=AlertType.CONSTANT_LENGTH, 200 fields={"composition_min_length", "composition_max_length"}, 201 ) 202 ) 203 204 return alerts 205 206 207 def generic_alerts(summary: dict) -> List[Alert]: 208 alerts = [] 209 210 # Missing 211 if alert_value(summary["p_missing"]): 212 alerts.append( 213 Alert( 214 alert_type=AlertType.MISSING, 215 fields={"p_missing", "n_missing"}, 216 ) 217 ) 218 219 return alerts 220 221 222 def supported_alerts(summary: dict) -> List[Alert]: 223 alerts = [] 224 225 if summary.get("n_distinct", np.nan) == summary["n"]: 226 alerts.append( 227 Alert( 228 alert_type=AlertType.UNIQUE, 229 fields={"n_distinct", "p_distinct", "n_unique", "p_unique"}, 230 ) 231 ) 232 if summary.get("n_distinct", np.nan) == 1: 233 summary["mode"] = summary["value_counts_without_nan"].index[0] 234 alerts.append( 235 Alert( 236 alert_type=AlertType.CONSTANT, 237 fields={"n_distinct"}, 238 ) 239 ) 240 alerts.append( 241 Alert( 242 alert_type=AlertType.REJECTED, 243 fields=set(), 244 ) 245 ) 246 return alerts 247 248 249 def unsupported_alerts(summary: Dict[str, Any]) -> List[Alert]: 250 alerts = [ 251 Alert( 252 alert_type=AlertType.UNSUPPORTED, 253 fields=set(), 254 ), 255 Alert( 256 alert_type=AlertType.REJECTED, 257 fields=set(), 258 ), 259 ] 260 return alerts 261 262 263 def check_variable_alerts(config: Settings, col: str, description: 
dict) -> List[Alert]: 264 """Checks individual variables for alerts. 265 266 Args: 267 col: The column name that is checked. 268 description: The series description. 269 270 Returns: 271 A list of alerts. 272 """ 273 alerts = [] 274 275 alerts += generic_alerts(description) 276 277 if description["type"] == "Unsupported": 278 alerts += unsupported_alerts(description) 279 else: 280 alerts += supported_alerts(description) 281 282 if description["type"] == "Categorical": 283 alerts += categorical_alerts(config, description) 284 if description["type"] == "Numeric": 285 alerts += numeric_alerts(config, description) 286 287 for idx in range(len(alerts)): 288 alerts[idx].column_name = col 289 alerts[idx].values = description 290 return alerts 291 292 293 def check_correlation_alerts(config: Settings, correlations: dict) -> List[Alert]: 294 alerts = [] 295 296 for corr, matrix in correlations.items(): 297 if config.correlations[corr].warn_high_correlations: 298 threshold = config.correlations[corr].threshold 299 correlated_mapping = perform_check_correlation(matrix, threshold) 300 if len(correlated_mapping) > 0: 301 for k, v in correlated_mapping.items(): 302 alerts.append( 303 Alert( 304 column_name=k, 305 alert_type=AlertType.HIGH_CORRELATION, 306 values={"corr": corr, "fields": v}, 307 ) 308 ) 309 return alerts 310 311 312 def get_alerts( 313 config: Settings, table_stats: dict, series_description: dict, correlations: dict 314 ) -> List[Alert]: 315 alerts = check_table_alerts(table_stats) 316 for col, description in series_description.items(): 317 alerts += check_variable_alerts(config, col, description) 318 alerts += check_correlation_alerts(config, correlations) 319 alerts.sort(key=lambda alert: str(alert.alert_type)) 320 return alerts 321 322 323 def alert_value(value: float) -> bool: 324 return not np.isnan(value) and value > 0.01 325 326 327 def skewness_alert(v: float, threshold: int) -> bool: 328 return not np.isnan(v) and (v < (-1 * threshold) or v > threshold) 329 330 331 def type_date_alert(series: pd.Series) -> bool: 332 from dateutil.parser import ParserError, parse 333 334 try: 335 series.apply(parse) 336 except ParserError: 337 return False 338 else: 339 return True 340 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/pandas_profiling/model/alerts.py b/src/pandas_profiling/model/alerts.py --- a/src/pandas_profiling/model/alerts.py +++ b/src/pandas_profiling/model/alerts.py @@ -293,19 +293,24 @@ def check_correlation_alerts(config: Settings, correlations: dict) -> List[Alert]: alerts = [] + correlations_consolidated = {} for corr, matrix in correlations.items(): if config.correlations[corr].warn_high_correlations: threshold = config.correlations[corr].threshold correlated_mapping = perform_check_correlation(matrix, threshold) - if len(correlated_mapping) > 0: - for k, v in correlated_mapping.items(): - alerts.append( - Alert( - column_name=k, - alert_type=AlertType.HIGH_CORRELATION, - values={"corr": corr, "fields": v}, - ) - ) + for col, fields in correlated_mapping.items(): + set(fields).update(set(correlated_mapping.get(col, []))) + correlations_consolidated[col] = fields + + if len(correlations_consolidated) > 0: + for col, fields in correlations_consolidated.items(): + alerts.append( + Alert( + column_name=col, + alert_type=AlertType.HIGH_CORRELATION, + values={"corr": 'Overall', "fields": fields}, + ) + ) return alerts
{"golden_diff": "diff --git a/src/pandas_profiling/model/alerts.py b/src/pandas_profiling/model/alerts.py\n--- a/src/pandas_profiling/model/alerts.py\n+++ b/src/pandas_profiling/model/alerts.py\n@@ -293,19 +293,24 @@\n def check_correlation_alerts(config: Settings, correlations: dict) -> List[Alert]:\n alerts = []\n \n+ correlations_consolidated = {}\n for corr, matrix in correlations.items():\n if config.correlations[corr].warn_high_correlations:\n threshold = config.correlations[corr].threshold\n correlated_mapping = perform_check_correlation(matrix, threshold)\n- if len(correlated_mapping) > 0:\n- for k, v in correlated_mapping.items():\n- alerts.append(\n- Alert(\n- column_name=k,\n- alert_type=AlertType.HIGH_CORRELATION,\n- values={\"corr\": corr, \"fields\": v},\n- )\n- )\n+ for col, fields in correlated_mapping.items():\n+ set(fields).update(set(correlated_mapping.get(col, [])))\n+ correlations_consolidated[col] = fields\n+\n+ if len(correlations_consolidated) > 0:\n+ for col, fields in correlations_consolidated.items():\n+ alerts.append(\n+ Alert(\n+ column_name=col,\n+ alert_type=AlertType.HIGH_CORRELATION,\n+ values={\"corr\": 'Overall', \"fields\": fields},\n+ )\n+ )\n return alerts\n", "issue": "High correlation warning printed multiple times\nI get the same warning \"High correlation\" with the same other column four times in the report.\r\n\r\nLooks like a bug where the warning is accidentally generated multiple times or not de-duplicated properly.\r\n\r\nIs it easy to spot the issue or reproduce? Or should I try to extract a standalone test case?\r\n\r\nThis is with pandas 1.3.0 and pandas-profiling 3.0.0.\r\n\r\n<img width=\"572\" alt=\"Screenshot 2021-09-05 at 18 54 44\" src=\"https://user-images.githubusercontent.com/852409/132135015-45c0a273-763a-430e-b12f-d340e79b3ea7.png\">\r\n\n", "before_files": [{"content": "\"\"\"Logic for alerting the user on possibly problematic patterns in the data (e.g. 
high number of zeros , constant\nvalues, high correlations).\"\"\"\nfrom enum import Enum, auto, unique\nfrom typing import Any, Dict, List, Optional, Set\n\nimport numpy as np\nimport pandas as pd\n\nfrom pandas_profiling.config import Settings\nfrom pandas_profiling.model.correlations import perform_check_correlation\n\n\n@unique\nclass AlertType(Enum):\n \"\"\"Alert types\"\"\"\n\n CONSTANT = auto()\n \"\"\"This variable has a constant value.\"\"\"\n\n ZEROS = auto()\n \"\"\"This variable contains zeros.\"\"\"\n\n HIGH_CORRELATION = auto()\n \"\"\"This variable is highly correlated.\"\"\"\n\n HIGH_CARDINALITY = auto()\n \"\"\"This variable has a high cardinality.\"\"\"\n\n UNSUPPORTED = auto()\n \"\"\"This variable is unsupported.\"\"\"\n\n DUPLICATES = auto()\n \"\"\"This variable contains duplicates.\"\"\"\n\n SKEWED = auto()\n \"\"\"This variable is highly skewed.\"\"\"\n\n MISSING = auto()\n \"\"\"This variable contains missing values.\"\"\"\n\n INFINITE = auto()\n \"\"\"This variable contains infinite values.\"\"\"\n\n TYPE_DATE = auto()\n \"\"\"This variable is likely a datetime, but treated as categorical.\"\"\"\n\n UNIQUE = auto()\n \"\"\"This variable has unique values.\"\"\"\n\n CONSTANT_LENGTH = auto()\n \"\"\"This variable has a constant length\"\"\"\n\n REJECTED = auto()\n \"\"\"Variables are rejected if we do not want to consider them for further analysis.\"\"\"\n\n UNIFORM = auto()\n \"\"\"The variable is uniformly distributed\"\"\"\n\n EMPTY = auto()\n \"\"\"The DataFrame is empty\"\"\"\n\n\nclass Alert:\n \"\"\"An alert object (type, values, column).\"\"\"\n\n _anchor_id: Optional[str] = None\n\n def __init__(\n self,\n alert_type: AlertType,\n values: Optional[Dict] = None,\n column_name: Optional[str] = None,\n fields: Optional[Set] = None,\n ):\n if values is None:\n values = {}\n if fields is None:\n fields = set()\n\n self.fields = fields\n self.alert_type = alert_type\n self.values = values\n self.column_name = column_name\n\n @property\n def anchor_id(self) -> Optional[str]:\n if self._anchor_id is None:\n self._anchor_id = str(hash(self.column_name))\n return self._anchor_id\n\n def fmt(self) -> str:\n # TODO: render in template\n name = self.alert_type.name.replace(\"_\", \" \")\n if name == \"HIGH CORRELATION\":\n num = len(self.values[\"fields\"])\n title = \", \".join(self.values[\"fields\"])\n name = f'<abbr title=\"This variable has a high correlation with {num} fields: {title}\">HIGH CORRELATION</abbr>'\n return name\n\n def __repr__(self):\n alert_type = self.alert_type.name\n column = self.column_name\n return f\"[{alert_type}] alert on column {column}\"\n\n\ndef check_table_alerts(table: dict) -> List[Alert]:\n \"\"\"Checks the overall dataset for alerts.\n\n Args:\n table: Overall dataset statistics.\n\n Returns:\n A list of alerts.\n \"\"\"\n alerts = []\n if alert_value(table.get(\"n_duplicates\", np.nan)):\n alerts.append(\n Alert(\n alert_type=AlertType.DUPLICATES,\n values=table,\n fields={\"n_duplicates\"},\n )\n )\n if table[\"n\"] == 0:\n alerts.append(\n Alert(\n alert_type=AlertType.EMPTY,\n values=table,\n fields={\"n\"},\n )\n )\n return alerts\n\n\ndef numeric_alerts(config: Settings, summary: dict) -> List[Alert]:\n alerts = []\n\n # Skewness\n if skewness_alert(summary[\"skewness\"], config.vars.num.skewness_threshold):\n alerts.append(\n Alert(\n alert_type=AlertType.SKEWED,\n fields={\"skewness\"},\n )\n )\n\n # Infinite values\n if alert_value(summary[\"p_infinite\"]):\n alerts.append(\n Alert(\n alert_type=AlertType.INFINITE,\n 
fields={\"p_infinite\", \"n_infinite\"},\n )\n )\n\n # Zeros\n if alert_value(summary[\"p_zeros\"]):\n alerts.append(\n Alert(\n alert_type=AlertType.ZEROS,\n fields={\"n_zeros\", \"p_zeros\"},\n )\n )\n\n if (\n \"chi_squared\" in summary\n and summary[\"chi_squared\"][\"pvalue\"] > config.vars.num.chi_squared_threshold\n ):\n alerts.append(Alert(alert_type=AlertType.UNIFORM))\n\n return alerts\n\n\ndef categorical_alerts(config: Settings, summary: dict) -> List[Alert]:\n alerts = []\n\n # High cardinality\n if summary.get(\"n_distinct\", np.nan) > config.vars.cat.cardinality_threshold:\n alerts.append(\n Alert(\n alert_type=AlertType.HIGH_CARDINALITY,\n fields={\"n_distinct\"},\n )\n )\n\n if (\n \"chi_squared\" in summary\n and summary[\"chi_squared\"][\"pvalue\"] > config.vars.cat.chi_squared_threshold\n ):\n alerts.append(Alert(alert_type=AlertType.UNIFORM))\n\n if summary.get(\"date_warning\"):\n alerts.append(Alert(alert_type=AlertType.TYPE_DATE))\n\n # Constant length\n if \"composition\" in summary and summary[\"min_length\"] == summary[\"max_length\"]:\n alerts.append(\n Alert(\n alert_type=AlertType.CONSTANT_LENGTH,\n fields={\"composition_min_length\", \"composition_max_length\"},\n )\n )\n\n return alerts\n\n\ndef generic_alerts(summary: dict) -> List[Alert]:\n alerts = []\n\n # Missing\n if alert_value(summary[\"p_missing\"]):\n alerts.append(\n Alert(\n alert_type=AlertType.MISSING,\n fields={\"p_missing\", \"n_missing\"},\n )\n )\n\n return alerts\n\n\ndef supported_alerts(summary: dict) -> List[Alert]:\n alerts = []\n\n if summary.get(\"n_distinct\", np.nan) == summary[\"n\"]:\n alerts.append(\n Alert(\n alert_type=AlertType.UNIQUE,\n fields={\"n_distinct\", \"p_distinct\", \"n_unique\", \"p_unique\"},\n )\n )\n if summary.get(\"n_distinct\", np.nan) == 1:\n summary[\"mode\"] = summary[\"value_counts_without_nan\"].index[0]\n alerts.append(\n Alert(\n alert_type=AlertType.CONSTANT,\n fields={\"n_distinct\"},\n )\n )\n alerts.append(\n Alert(\n alert_type=AlertType.REJECTED,\n fields=set(),\n )\n )\n return alerts\n\n\ndef unsupported_alerts(summary: Dict[str, Any]) -> List[Alert]:\n alerts = [\n Alert(\n alert_type=AlertType.UNSUPPORTED,\n fields=set(),\n ),\n Alert(\n alert_type=AlertType.REJECTED,\n fields=set(),\n ),\n ]\n return alerts\n\n\ndef check_variable_alerts(config: Settings, col: str, description: dict) -> List[Alert]:\n \"\"\"Checks individual variables for alerts.\n\n Args:\n col: The column name that is checked.\n description: The series description.\n\n Returns:\n A list of alerts.\n \"\"\"\n alerts = []\n\n alerts += generic_alerts(description)\n\n if description[\"type\"] == \"Unsupported\":\n alerts += unsupported_alerts(description)\n else:\n alerts += supported_alerts(description)\n\n if description[\"type\"] == \"Categorical\":\n alerts += categorical_alerts(config, description)\n if description[\"type\"] == \"Numeric\":\n alerts += numeric_alerts(config, description)\n\n for idx in range(len(alerts)):\n alerts[idx].column_name = col\n alerts[idx].values = description\n return alerts\n\n\ndef check_correlation_alerts(config: Settings, correlations: dict) -> List[Alert]:\n alerts = []\n\n for corr, matrix in correlations.items():\n if config.correlations[corr].warn_high_correlations:\n threshold = config.correlations[corr].threshold\n correlated_mapping = perform_check_correlation(matrix, threshold)\n if len(correlated_mapping) > 0:\n for k, v in correlated_mapping.items():\n alerts.append(\n Alert(\n column_name=k,\n 
alert_type=AlertType.HIGH_CORRELATION,\n values={\"corr\": corr, \"fields\": v},\n )\n )\n return alerts\n\n\ndef get_alerts(\n config: Settings, table_stats: dict, series_description: dict, correlations: dict\n) -> List[Alert]:\n alerts = check_table_alerts(table_stats)\n for col, description in series_description.items():\n alerts += check_variable_alerts(config, col, description)\n alerts += check_correlation_alerts(config, correlations)\n alerts.sort(key=lambda alert: str(alert.alert_type))\n return alerts\n\n\ndef alert_value(value: float) -> bool:\n return not np.isnan(value) and value > 0.01\n\n\ndef skewness_alert(v: float, threshold: int) -> bool:\n return not np.isnan(v) and (v < (-1 * threshold) or v > threshold)\n\n\ndef type_date_alert(series: pd.Series) -> bool:\n from dateutil.parser import ParserError, parse\n\n try:\n series.apply(parse)\n except ParserError:\n return False\n else:\n return True\n", "path": "src/pandas_profiling/model/alerts.py"}], "after_files": [{"content": "\"\"\"Logic for alerting the user on possibly problematic patterns in the data (e.g. high number of zeros , constant\nvalues, high correlations).\"\"\"\nfrom enum import Enum, auto, unique\nfrom typing import Any, Dict, List, Optional, Set\n\nimport numpy as np\nimport pandas as pd\n\nfrom pandas_profiling.config import Settings\nfrom pandas_profiling.model.correlations import perform_check_correlation\n\n\n@unique\nclass AlertType(Enum):\n \"\"\"Alert types\"\"\"\n\n CONSTANT = auto()\n \"\"\"This variable has a constant value.\"\"\"\n\n ZEROS = auto()\n \"\"\"This variable contains zeros.\"\"\"\n\n HIGH_CORRELATION = auto()\n \"\"\"This variable is highly correlated.\"\"\"\n\n HIGH_CARDINALITY = auto()\n \"\"\"This variable has a high cardinality.\"\"\"\n\n UNSUPPORTED = auto()\n \"\"\"This variable is unsupported.\"\"\"\n\n DUPLICATES = auto()\n \"\"\"This variable contains duplicates.\"\"\"\n\n SKEWED = auto()\n \"\"\"This variable is highly skewed.\"\"\"\n\n MISSING = auto()\n \"\"\"This variable contains missing values.\"\"\"\n\n INFINITE = auto()\n \"\"\"This variable contains infinite values.\"\"\"\n\n TYPE_DATE = auto()\n \"\"\"This variable is likely a datetime, but treated as categorical.\"\"\"\n\n UNIQUE = auto()\n \"\"\"This variable has unique values.\"\"\"\n\n CONSTANT_LENGTH = auto()\n \"\"\"This variable has a constant length\"\"\"\n\n REJECTED = auto()\n \"\"\"Variables are rejected if we do not want to consider them for further analysis.\"\"\"\n\n UNIFORM = auto()\n \"\"\"The variable is uniformly distributed\"\"\"\n\n EMPTY = auto()\n \"\"\"The DataFrame is empty\"\"\"\n\n\nclass Alert:\n \"\"\"An alert object (type, values, column).\"\"\"\n\n _anchor_id: Optional[str] = None\n\n def __init__(\n self,\n alert_type: AlertType,\n values: Optional[Dict] = None,\n column_name: Optional[str] = None,\n fields: Optional[Set] = None,\n ):\n if values is None:\n values = {}\n if fields is None:\n fields = set()\n\n self.fields = fields\n self.alert_type = alert_type\n self.values = values\n self.column_name = column_name\n\n @property\n def anchor_id(self) -> Optional[str]:\n if self._anchor_id is None:\n self._anchor_id = str(hash(self.column_name))\n return self._anchor_id\n\n def fmt(self) -> str:\n # TODO: render in template\n name = self.alert_type.name.replace(\"_\", \" \")\n if name == \"HIGH CORRELATION\":\n num = len(self.values[\"fields\"])\n title = \", \".join(self.values[\"fields\"])\n name = f'<abbr title=\"This variable has a high correlation with {num} fields: 
{title}\">HIGH CORRELATION</abbr>'\n return name\n\n def __repr__(self):\n alert_type = self.alert_type.name\n column = self.column_name\n return f\"[{alert_type}] alert on column {column}\"\n\n\ndef check_table_alerts(table: dict) -> List[Alert]:\n \"\"\"Checks the overall dataset for alerts.\n\n Args:\n table: Overall dataset statistics.\n\n Returns:\n A list of alerts.\n \"\"\"\n alerts = []\n if alert_value(table.get(\"n_duplicates\", np.nan)):\n alerts.append(\n Alert(\n alert_type=AlertType.DUPLICATES,\n values=table,\n fields={\"n_duplicates\"},\n )\n )\n if table[\"n\"] == 0:\n alerts.append(\n Alert(\n alert_type=AlertType.EMPTY,\n values=table,\n fields={\"n\"},\n )\n )\n return alerts\n\n\ndef numeric_alerts(config: Settings, summary: dict) -> List[Alert]:\n alerts = []\n\n # Skewness\n if skewness_alert(summary[\"skewness\"], config.vars.num.skewness_threshold):\n alerts.append(\n Alert(\n alert_type=AlertType.SKEWED,\n fields={\"skewness\"},\n )\n )\n\n # Infinite values\n if alert_value(summary[\"p_infinite\"]):\n alerts.append(\n Alert(\n alert_type=AlertType.INFINITE,\n fields={\"p_infinite\", \"n_infinite\"},\n )\n )\n\n # Zeros\n if alert_value(summary[\"p_zeros\"]):\n alerts.append(\n Alert(\n alert_type=AlertType.ZEROS,\n fields={\"n_zeros\", \"p_zeros\"},\n )\n )\n\n if (\n \"chi_squared\" in summary\n and summary[\"chi_squared\"][\"pvalue\"] > config.vars.num.chi_squared_threshold\n ):\n alerts.append(Alert(alert_type=AlertType.UNIFORM))\n\n return alerts\n\n\ndef categorical_alerts(config: Settings, summary: dict) -> List[Alert]:\n alerts = []\n\n # High cardinality\n if summary.get(\"n_distinct\", np.nan) > config.vars.cat.cardinality_threshold:\n alerts.append(\n Alert(\n alert_type=AlertType.HIGH_CARDINALITY,\n fields={\"n_distinct\"},\n )\n )\n\n if (\n \"chi_squared\" in summary\n and summary[\"chi_squared\"][\"pvalue\"] > config.vars.cat.chi_squared_threshold\n ):\n alerts.append(Alert(alert_type=AlertType.UNIFORM))\n\n if summary.get(\"date_warning\"):\n alerts.append(Alert(alert_type=AlertType.TYPE_DATE))\n\n # Constant length\n if \"composition\" in summary and summary[\"min_length\"] == summary[\"max_length\"]:\n alerts.append(\n Alert(\n alert_type=AlertType.CONSTANT_LENGTH,\n fields={\"composition_min_length\", \"composition_max_length\"},\n )\n )\n\n return alerts\n\n\ndef generic_alerts(summary: dict) -> List[Alert]:\n alerts = []\n\n # Missing\n if alert_value(summary[\"p_missing\"]):\n alerts.append(\n Alert(\n alert_type=AlertType.MISSING,\n fields={\"p_missing\", \"n_missing\"},\n )\n )\n\n return alerts\n\n\ndef supported_alerts(summary: dict) -> List[Alert]:\n alerts = []\n\n if summary.get(\"n_distinct\", np.nan) == summary[\"n\"]:\n alerts.append(\n Alert(\n alert_type=AlertType.UNIQUE,\n fields={\"n_distinct\", \"p_distinct\", \"n_unique\", \"p_unique\"},\n )\n )\n if summary.get(\"n_distinct\", np.nan) == 1:\n summary[\"mode\"] = summary[\"value_counts_without_nan\"].index[0]\n alerts.append(\n Alert(\n alert_type=AlertType.CONSTANT,\n fields={\"n_distinct\"},\n )\n )\n alerts.append(\n Alert(\n alert_type=AlertType.REJECTED,\n fields=set(),\n )\n )\n return alerts\n\n\ndef unsupported_alerts(summary: Dict[str, Any]) -> List[Alert]:\n alerts = [\n Alert(\n alert_type=AlertType.UNSUPPORTED,\n fields=set(),\n ),\n Alert(\n alert_type=AlertType.REJECTED,\n fields=set(),\n ),\n ]\n return alerts\n\n\ndef check_variable_alerts(config: Settings, col: str, description: dict) -> List[Alert]:\n \"\"\"Checks individual variables for alerts.\n\n Args:\n 
col: The column name that is checked.\n description: The series description.\n\n Returns:\n A list of alerts.\n \"\"\"\n alerts = []\n\n alerts += generic_alerts(description)\n\n if description[\"type\"] == \"Unsupported\":\n alerts += unsupported_alerts(description)\n else:\n alerts += supported_alerts(description)\n\n if description[\"type\"] == \"Categorical\":\n alerts += categorical_alerts(config, description)\n if description[\"type\"] == \"Numeric\":\n alerts += numeric_alerts(config, description)\n\n for idx in range(len(alerts)):\n alerts[idx].column_name = col\n alerts[idx].values = description\n return alerts\n\n\ndef check_correlation_alerts(config: Settings, correlations: dict) -> List[Alert]:\n alerts = []\n\n correlations_consolidated = {}\n for corr, matrix in correlations.items():\n if config.correlations[corr].warn_high_correlations:\n threshold = config.correlations[corr].threshold\n correlated_mapping = perform_check_correlation(matrix, threshold)\n for col, fields in correlated_mapping.items():\n set(fields).update(set(correlated_mapping.get(col, [])))\n correlations_consolidated[col] = fields\n\n if len(correlations_consolidated) > 0:\n for col, fields in correlations_consolidated.items():\n alerts.append(\n Alert(\n column_name=col,\n alert_type=AlertType.HIGH_CORRELATION,\n values={\"corr\": 'Overall', \"fields\": fields},\n )\n )\n return alerts\n\n\ndef get_alerts(\n config: Settings, table_stats: dict, series_description: dict, correlations: dict\n) -> List[Alert]:\n alerts = check_table_alerts(table_stats)\n for col, description in series_description.items():\n alerts += check_variable_alerts(config, col, description)\n alerts += check_correlation_alerts(config, correlations)\n alerts.sort(key=lambda alert: str(alert.alert_type))\n return alerts\n\n\ndef alert_value(value: float) -> bool:\n return not np.isnan(value) and value > 0.01\n\n\ndef skewness_alert(v: float, threshold: int) -> bool:\n return not np.isnan(v) and (v < (-1 * threshold) or v > threshold)\n\n\ndef type_date_alert(series: pd.Series) -> bool:\n from dateutil.parser import ParserError, parse\n\n try:\n series.apply(parse)\n except ParserError:\n return False\n else:\n return True\n", "path": "src/pandas_profiling/model/alerts.py"}]}
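Aside on the record that ends above: the alerts module it patches is gated almost entirely by the two small threshold helpers at the bottom of the file (`alert_value`, `skewness_alert`). As a quick illustration of when they fire, here is a standalone sketch that simply copies those two helpers out of the record; the sample inputs are invented for illustration and are not part of the project code:

```python
# Copied from the alerts.py content shown above; sample inputs are invented.
import numpy as np

def alert_value(value: float) -> bool:
    # Fires for any non-NaN ratio above 1% (used for missing, zeros, infinite, duplicates).
    return not np.isnan(value) and value > 0.01

def skewness_alert(v: float, threshold: int) -> bool:
    # Fires when |skewness| exceeds the configured threshold.
    return not np.isnan(v) and (v < (-1 * threshold) or v > threshold)

print(alert_value(0.005))         # False: 0.5% is under the 1% cutoff
print(alert_value(float("nan")))  # False: NaN never raises an alert
print(skewness_alert(25.0, 20))   # True: skewness of 25 exceeds a threshold of 20
```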
3,412
329
gh_patches_debug_8720
rasdani/github-patches
git_diff
azavea__raster-vision-733
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Allow Map Tiles to be evaluated I am trying to make rastervision work for a tiled map, so I run it on each tile I have, for example: ![image](https://user-images.githubusercontent.com/5757359/55006587-a45ad780-4fe6-11e9-8f6c-86e2ffedc23d.png) When I do, I get the error: > rastervision_1 | Executing rastervision predict /app/packages/xView_Vehicle.zip /images/674915186.png output/xView_Vehicle_674915186.png.json > rastervision_1 | (node:7) UnhandledPromiseRejectionWarning: Error: Command failed: rastervision predict /app/packages/xView_Vehicle.zip /images/674915186.png output/xView_Vehicle_674915186.png.json > rastervision_1 | 2019-03-26 14:44:13.975033: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA > rastervision_1 | /usr/local/lib/python3.5/dist-packages/pluginbase.py:439: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`. > rastervision_1 | fromlist, level) > rastervision_1 | /usr/local/lib/python3.5/dist-packages/rasterio/__init__.py:217: NotGeoreferencedWarning: Dataset has no geotransform set. The identity matrix may be returned. > rastervision_1 | s = DatasetReader(path, driver=driver, **kwargs) > rastervision_1 | 2019-03-26 14:44:13:rastervision.task.task: INFO - Making predictions for scene > rastervision_1 | Traceback (most recent call last): > rastervision_1 | File "/usr/lib/python3.5/runpy.py", line 184, in _run_module_as_main > rastervision_1 | "__main__", mod_spec) > rastervision_1 | File "/usr/lib/python3.5/runpy.py", line 85, in _run_code > rastervision_1 | exec(code, run_globals) > rastervision_1 | File "/opt/src/rastervision/__main__.py", line 17, in <module> > rastervision_1 | rv.main() > rastervision_1 | File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 722, in __call__ > rastervision_1 | return self.main(*args, **kwargs) > rastervision_1 | File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 697, in main > rastervision_1 | rv = self.invoke(ctx) > rastervision_1 | File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 1066, in invoke > rastervision_1 | return _process_result(sub_ctx.command.invoke(sub_ctx)) > rastervision_1 | File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 895, in invoke > rastervision_1 | return ctx.invoke(self.callback, **ctx.params) > rastervision_1 | File "/usr/local/lib/python3.5/dist-packages/click/core.py", line 535, in invoke > rastervision_1 | return callback(*args, **kwargs) > rastervision_1 | File "/opt/src/rastervision/cli/main.py", line 240, in predict > rastervision_1 | predictor.predict(image_uri, output_uri, export_config) > rastervision_1 | File "/opt/src/rastervision/predictor.py", line 142, in predict > rastervision_1 | labels = self.task.predict_scene(scene, self.tmp_dir) > rastervision_1 | File "/opt/src/rastervision/task/task.py", line 192, in predict_scene > rastervision_1 | predict_batch(batch_chips, batch_windows) > rastervision_1 | File "/opt/src/rastervision/task/task.py", line 173, in predict_batch > rastervision_1 | np.array(predict_chips), predict_windows, tmp_dir) > rastervision_1 | File "/opt/src/rastervision/backend/tf_object_detection.py", line 736, in predict > rastervision_1 | self.session) > rastervision_1 | File 
"/opt/src/rastervision/backend/tf_object_detection.py", line 573, in compute_prediction > rastervision_1 | [boxes, scores, class_ids], feed_dict={image_tensor: image_nps}) > rastervision_1 | File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/client/session.py", line 877, in run > rastervision_1 | run_metadata_ptr) > rastervision_1 | File "/usr/local/lib/python3.5/dist-packages/tensorflow/python/client/session.py", line 1076, in _run > rastervision_1 | str(subfeed_t.get_shape()))) > rastervision_1 | ValueError: Cannot feed value of shape (2, 300, 300, 1) for Tensor 'image_tensor:0', which has shape '(?, ?, ?, 3)' > rastervision_1 | > rastervision_1 | at ChildProcess.exithandler (child_process.js:294:12) > rastervision_1 | at ChildProcess.emit (events.js:189:13) > rastervision_1 | at maybeClose (internal/child_process.js:970:16) > rastervision_1 | at Process.ChildProcess._handle.onexit (internal/child_process.js:259:5) > rastervision_1 | (node:7) UnhandledPromiseRejectionWarning: Unhandled promise rejection. This error originated either by throwing inside of an async function without a catch block, or by rejecting a promise which was not handled with .catch(). (rejection id: 1) > rastervision_1 | (node:7) [DEP0018] DeprecationWarning: Unhandled promise rejections are deprecated. In the future, promise rejections that are not handled will terminate the Node.js process with a non-zero exit code. I am successfull at running the same code on TIF files, and JPEG. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `rastervision/data/raster_source/raster_source_config.py` Content: ``` 1 from abc import abstractmethod 2 from copy import deepcopy 3 import logging 4 5 import rastervision as rv 6 from rastervision.core.config import (Config, ConfigBuilder, 7 BundledConfigMixin) 8 from rastervision.data import (RasterTransformerConfig, StatsTransformerConfig) 9 from rastervision.protos.raster_source_pb2 \ 10 import RasterSourceConfig as RasterSourceConfigMsg 11 12 log = logging.getLogger(__name__) 13 14 15 class RasterSourceConfig(BundledConfigMixin, Config): 16 deprecation_warnings = [] 17 18 def __init__(self, source_type, transformers=None, channel_order=None): 19 if transformers is None: 20 transformers = [] 21 22 self.source_type = source_type 23 self.transformers = transformers 24 self.channel_order = channel_order 25 26 def to_proto(self): 27 transformers = list(map(lambda c: c.to_proto(), self.transformers)) 28 msg = RasterSourceConfigMsg( 29 source_type=self.source_type, 30 channel_order=self.channel_order, 31 transformers=transformers) 32 return msg 33 34 def save_bundle_files(self, bundle_dir): 35 new_transformers = [] 36 files = [] 37 for transformer in self.transformers: 38 new_transformer, t_files = transformer.save_bundle_files( 39 bundle_dir) 40 new_transformers.append(new_transformer) 41 files.extend(t_files) 42 43 new_config = self.to_builder() \ 44 .with_transformers(new_transformers) \ 45 .build() 46 return (new_config, files) 47 48 def load_bundle_files(self, bundle_dir): 49 new_transformers = [] 50 for transformer in self.transformers: 51 new_transformer = transformer.load_bundle_files(bundle_dir) 52 new_transformers.append(new_transformer) 53 return self.to_builder() \ 54 .with_transformers(new_transformers) \ 55 .build() 56 57 @abstractmethod 58 def create_source(self, tmp_dir, crs_transformer, extent, class_map): 59 """Create the Raster Source for this configuration. 
60 """ 61 pass 62 63 def to_builder(self): 64 return rv._registry.get_config_builder(rv.RASTER_SOURCE, 65 self.source_type)(self) 66 67 @staticmethod 68 def check_deprecation(source_type): 69 # If source_type is deprecated and warning hasn't been shown yet, then warn. 70 if (source_type in rv.raster_source_deprecated_map and 71 source_type not in RasterSourceConfig.deprecation_warnings): 72 RasterSourceConfig.deprecation_warnings.append(source_type) 73 new_source_type = rv.raster_source_deprecated_map[source_type] 74 log.warn( 75 'RasterSource {} is deprecated. Please use {} instead.'.format( 76 source_type, new_source_type)) 77 78 def builder(source_type): 79 RasterSourceConfig.check_deprecation(source_type) 80 return rv._registry.get_config_builder(rv.RASTER_SOURCE, source_type)() 81 82 @staticmethod 83 def from_proto(msg): 84 """Creates a TaskConfig from the specificed protobuf message 85 """ 86 return rv._registry.get_config_builder(rv.RASTER_SOURCE, msg.source_type)() \ 87 .from_proto(msg) \ 88 .build() 89 90 @abstractmethod 91 def for_prediction(self, image_uri): 92 """Creates a new config with the image_uri.""" 93 pass 94 95 @abstractmethod 96 def create_local(self, tmp_dir): 97 """Returns a new config with a local copy of the image data 98 if this image is remote. 99 """ 100 pass 101 102 def create_transformers(self): 103 return list(map(lambda c: c.create_transformer(), self.transformers)) 104 105 def update_for_command(self, command_type, experiment_config, 106 context=None): 107 for transformer in self.transformers: 108 transformer.update_for_command(command_type, experiment_config, 109 context) 110 111 def report_io(self, command_type, io_def): 112 for transformer in self.transformers: 113 transformer.report_io(command_type, io_def) 114 115 116 class RasterSourceConfigBuilder(ConfigBuilder): 117 def from_proto(self, msg): 118 transformers = list( 119 map(lambda m: RasterTransformerConfig.from_proto(m), 120 msg.transformers)) 121 122 return self.with_channel_order(list(msg.channel_order)) \ 123 .with_transformers(transformers) 124 125 def with_channel_order(self, channel_order): 126 """Defines the channel order for this raster source. 127 128 This defines the subset of channel indices and their order to use when extracting 129 chips from raw imagery. 130 131 Args: 132 channel_order: list of channel indices 133 """ 134 b = deepcopy(self) 135 b.config['channel_order'] = channel_order 136 return b 137 138 def with_transformers(self, transformers): 139 """Transformers to be applied to the raster data. 140 141 Args: 142 transformers: A list of transformers to apply to the 143 raster data. 144 145 """ 146 b = deepcopy(self) 147 b.config['transformers'] = list(transformers) 148 return b 149 150 def with_transformer(self, transformer): 151 """A transformer to be applied to the raster data. 152 153 Args: 154 transformer: A transformer to apply to the raster 155 data. 156 157 """ 158 return self.with_transformers([transformer]) 159 160 def with_stats_transformer(self): 161 """Add a stats transformer to the raster source.""" 162 b = deepcopy(self) 163 transformers = b.config.get('transformers') 164 if transformers: 165 b.config['transformers'] = transformers.append( 166 StatsTransformerConfig()) 167 else: 168 b.config['transformers'] = [StatsTransformerConfig()] 169 return b 170 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/rastervision/data/raster_source/raster_source_config.py b/rastervision/data/raster_source/raster_source_config.py --- a/rastervision/data/raster_source/raster_source_config.py +++ b/rastervision/data/raster_source/raster_source_config.py @@ -119,7 +119,10 @@ map(lambda m: RasterTransformerConfig.from_proto(m), msg.transformers)) - return self.with_channel_order(list(msg.channel_order)) \ + channel_order = list(msg.channel_order) + if len(channel_order) == 0: + channel_order = None + return self.with_channel_order(channel_order) \ .with_transformers(transformers) def with_channel_order(self, channel_order):
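A note on the golden diff just above: protobuf repeated fields deserialize as empty containers rather than None, so an unspecified `channel_order` arrives as `[]`. The patch maps that empty list back to None so the builder treats it as "no channel order specified" instead of an explicit empty selection. The sketch below is illustrative only — the function name and inputs are invented, and only the `list(...)` / `len(...) == 0` normalization comes from the diff:

```python
# Illustrative sketch of the normalization applied in the golden diff above.
# `repeated_field` stands in for msg.channel_order on the protobuf message.
def normalize_channel_order(repeated_field):
    channel_order = list(repeated_field)
    if len(channel_order) == 0:
        # An unset repeated field comes back empty, not None; restore None so
        # downstream code keeps its "no explicit channel order" default.
        return None
    return channel_order

print(normalize_channel_order([]))         # None
print(normalize_channel_order([0, 1, 2]))  # [0, 1, 2]
```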
{"golden_diff": "diff --git a/rastervision/data/raster_source/raster_source_config.py b/rastervision/data/raster_source/raster_source_config.py\n--- a/rastervision/data/raster_source/raster_source_config.py\n+++ b/rastervision/data/raster_source/raster_source_config.py\n@@ -119,7 +119,10 @@\n map(lambda m: RasterTransformerConfig.from_proto(m),\n msg.transformers))\n \n- return self.with_channel_order(list(msg.channel_order)) \\\n+ channel_order = list(msg.channel_order)\n+ if len(channel_order) == 0:\n+ channel_order = None\n+ return self.with_channel_order(channel_order) \\\n .with_transformers(transformers)\n \n def with_channel_order(self, channel_order):\n", "issue": "Allow Map Tiles to be evaluated\nI am trying to make rastervision work for a tiled map, so I run it on each tile I have, for example:\r\n![image](https://user-images.githubusercontent.com/5757359/55006587-a45ad780-4fe6-11e9-8f6c-86e2ffedc23d.png)\r\n\r\nWhen I do, I get the error:\r\n\r\n> rastervision_1 | Executing rastervision predict /app/packages/xView_Vehicle.zip /images/674915186.png output/xView_Vehicle_674915186.png.json\r\n> rastervision_1 | (node:7) UnhandledPromiseRejectionWarning: Error: Command failed: rastervision predict /app/packages/xView_Vehicle.zip /images/674915186.png output/xView_Vehicle_674915186.png.json\r\n> rastervision_1 | 2019-03-26 14:44:13.975033: I tensorflow/core/platform/cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA\r\n> rastervision_1 | /usr/local/lib/python3.5/dist-packages/pluginbase.py:439: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.\r\n> rastervision_1 | fromlist, level)\r\n> rastervision_1 | /usr/local/lib/python3.5/dist-packages/rasterio/__init__.py:217: NotGeoreferencedWarning: Dataset has no geotransform set. 
The identity matrix may be returned.\r\n> rastervision_1 | s = DatasetReader(path, driver=driver, **kwargs)\r\n> rastervision_1 | 2019-03-26 14:44:13:rastervision.task.task: INFO - Making predictions for scene\r\n> rastervision_1 | Traceback (most recent call last):\r\n> rastervision_1 | File \"/usr/lib/python3.5/runpy.py\", line 184, in _run_module_as_main\r\n> rastervision_1 | \"__main__\", mod_spec)\r\n> rastervision_1 | File \"/usr/lib/python3.5/runpy.py\", line 85, in _run_code\r\n> rastervision_1 | exec(code, run_globals)\r\n> rastervision_1 | File \"/opt/src/rastervision/__main__.py\", line 17, in <module>\r\n> rastervision_1 | rv.main()\r\n> rastervision_1 | File \"/usr/local/lib/python3.5/dist-packages/click/core.py\", line 722, in __call__\r\n> rastervision_1 | return self.main(*args, **kwargs)\r\n> rastervision_1 | File \"/usr/local/lib/python3.5/dist-packages/click/core.py\", line 697, in main\r\n> rastervision_1 | rv = self.invoke(ctx)\r\n> rastervision_1 | File \"/usr/local/lib/python3.5/dist-packages/click/core.py\", line 1066, in invoke\r\n> rastervision_1 | return _process_result(sub_ctx.command.invoke(sub_ctx))\r\n> rastervision_1 | File \"/usr/local/lib/python3.5/dist-packages/click/core.py\", line 895, in invoke\r\n> rastervision_1 | return ctx.invoke(self.callback, **ctx.params)\r\n> rastervision_1 | File \"/usr/local/lib/python3.5/dist-packages/click/core.py\", line 535, in invoke\r\n> rastervision_1 | return callback(*args, **kwargs)\r\n> rastervision_1 | File \"/opt/src/rastervision/cli/main.py\", line 240, in predict\r\n> rastervision_1 | predictor.predict(image_uri, output_uri, export_config)\r\n> rastervision_1 | File \"/opt/src/rastervision/predictor.py\", line 142, in predict\r\n> rastervision_1 | labels = self.task.predict_scene(scene, self.tmp_dir)\r\n> rastervision_1 | File \"/opt/src/rastervision/task/task.py\", line 192, in predict_scene\r\n> rastervision_1 | predict_batch(batch_chips, batch_windows)\r\n> rastervision_1 | File \"/opt/src/rastervision/task/task.py\", line 173, in predict_batch\r\n> rastervision_1 | np.array(predict_chips), predict_windows, tmp_dir)\r\n> rastervision_1 | File \"/opt/src/rastervision/backend/tf_object_detection.py\", line 736, in predict\r\n> rastervision_1 | self.session)\r\n> rastervision_1 | File \"/opt/src/rastervision/backend/tf_object_detection.py\", line 573, in compute_prediction\r\n> rastervision_1 | [boxes, scores, class_ids], feed_dict={image_tensor: image_nps})\r\n> rastervision_1 | File \"/usr/local/lib/python3.5/dist-packages/tensorflow/python/client/session.py\", line 877, in run\r\n> rastervision_1 | run_metadata_ptr)\r\n> rastervision_1 | File \"/usr/local/lib/python3.5/dist-packages/tensorflow/python/client/session.py\", line 1076, in _run\r\n> rastervision_1 | str(subfeed_t.get_shape())))\r\n> rastervision_1 | ValueError: Cannot feed value of shape (2, 300, 300, 1) for Tensor 'image_tensor:0', which has shape '(?, ?, ?, 3)'\r\n> rastervision_1 | \r\n> rastervision_1 | at ChildProcess.exithandler (child_process.js:294:12)\r\n> rastervision_1 | at ChildProcess.emit (events.js:189:13)\r\n> rastervision_1 | at maybeClose (internal/child_process.js:970:16)\r\n> rastervision_1 | at Process.ChildProcess._handle.onexit (internal/child_process.js:259:5)\r\n> rastervision_1 | (node:7) UnhandledPromiseRejectionWarning: Unhandled promise rejection. This error originated either by throwing inside of an async function without a catch block, or by rejecting a promise which was not handled with .catch(). 
(rejection id: 1)\r\n> rastervision_1 | (node:7) [DEP0018] DeprecationWarning: Unhandled promise rejections are deprecated. In the future, promise rejections that are not handled will terminate the Node.js process with a non-zero exit code.\r\n\r\nI am successfull at running the same code on TIF files, and JPEG.\n", "before_files": [{"content": "from abc import abstractmethod\nfrom copy import deepcopy\nimport logging\n\nimport rastervision as rv\nfrom rastervision.core.config import (Config, ConfigBuilder,\n BundledConfigMixin)\nfrom rastervision.data import (RasterTransformerConfig, StatsTransformerConfig)\nfrom rastervision.protos.raster_source_pb2 \\\n import RasterSourceConfig as RasterSourceConfigMsg\n\nlog = logging.getLogger(__name__)\n\n\nclass RasterSourceConfig(BundledConfigMixin, Config):\n deprecation_warnings = []\n\n def __init__(self, source_type, transformers=None, channel_order=None):\n if transformers is None:\n transformers = []\n\n self.source_type = source_type\n self.transformers = transformers\n self.channel_order = channel_order\n\n def to_proto(self):\n transformers = list(map(lambda c: c.to_proto(), self.transformers))\n msg = RasterSourceConfigMsg(\n source_type=self.source_type,\n channel_order=self.channel_order,\n transformers=transformers)\n return msg\n\n def save_bundle_files(self, bundle_dir):\n new_transformers = []\n files = []\n for transformer in self.transformers:\n new_transformer, t_files = transformer.save_bundle_files(\n bundle_dir)\n new_transformers.append(new_transformer)\n files.extend(t_files)\n\n new_config = self.to_builder() \\\n .with_transformers(new_transformers) \\\n .build()\n return (new_config, files)\n\n def load_bundle_files(self, bundle_dir):\n new_transformers = []\n for transformer in self.transformers:\n new_transformer = transformer.load_bundle_files(bundle_dir)\n new_transformers.append(new_transformer)\n return self.to_builder() \\\n .with_transformers(new_transformers) \\\n .build()\n\n @abstractmethod\n def create_source(self, tmp_dir, crs_transformer, extent, class_map):\n \"\"\"Create the Raster Source for this configuration.\n \"\"\"\n pass\n\n def to_builder(self):\n return rv._registry.get_config_builder(rv.RASTER_SOURCE,\n self.source_type)(self)\n\n @staticmethod\n def check_deprecation(source_type):\n # If source_type is deprecated and warning hasn't been shown yet, then warn.\n if (source_type in rv.raster_source_deprecated_map and\n source_type not in RasterSourceConfig.deprecation_warnings):\n RasterSourceConfig.deprecation_warnings.append(source_type)\n new_source_type = rv.raster_source_deprecated_map[source_type]\n log.warn(\n 'RasterSource {} is deprecated. 
Please use {} instead.'.format(\n source_type, new_source_type))\n\n def builder(source_type):\n RasterSourceConfig.check_deprecation(source_type)\n return rv._registry.get_config_builder(rv.RASTER_SOURCE, source_type)()\n\n @staticmethod\n def from_proto(msg):\n \"\"\"Creates a TaskConfig from the specificed protobuf message\n \"\"\"\n return rv._registry.get_config_builder(rv.RASTER_SOURCE, msg.source_type)() \\\n .from_proto(msg) \\\n .build()\n\n @abstractmethod\n def for_prediction(self, image_uri):\n \"\"\"Creates a new config with the image_uri.\"\"\"\n pass\n\n @abstractmethod\n def create_local(self, tmp_dir):\n \"\"\"Returns a new config with a local copy of the image data\n if this image is remote.\n \"\"\"\n pass\n\n def create_transformers(self):\n return list(map(lambda c: c.create_transformer(), self.transformers))\n\n def update_for_command(self, command_type, experiment_config,\n context=None):\n for transformer in self.transformers:\n transformer.update_for_command(command_type, experiment_config,\n context)\n\n def report_io(self, command_type, io_def):\n for transformer in self.transformers:\n transformer.report_io(command_type, io_def)\n\n\nclass RasterSourceConfigBuilder(ConfigBuilder):\n def from_proto(self, msg):\n transformers = list(\n map(lambda m: RasterTransformerConfig.from_proto(m),\n msg.transformers))\n\n return self.with_channel_order(list(msg.channel_order)) \\\n .with_transformers(transformers)\n\n def with_channel_order(self, channel_order):\n \"\"\"Defines the channel order for this raster source.\n\n This defines the subset of channel indices and their order to use when extracting\n chips from raw imagery.\n\n Args:\n channel_order: list of channel indices\n \"\"\"\n b = deepcopy(self)\n b.config['channel_order'] = channel_order\n return b\n\n def with_transformers(self, transformers):\n \"\"\"Transformers to be applied to the raster data.\n\n Args:\n transformers: A list of transformers to apply to the\n raster data.\n\n \"\"\"\n b = deepcopy(self)\n b.config['transformers'] = list(transformers)\n return b\n\n def with_transformer(self, transformer):\n \"\"\"A transformer to be applied to the raster data.\n\n Args:\n transformer: A transformer to apply to the raster\n data.\n\n \"\"\"\n return self.with_transformers([transformer])\n\n def with_stats_transformer(self):\n \"\"\"Add a stats transformer to the raster source.\"\"\"\n b = deepcopy(self)\n transformers = b.config.get('transformers')\n if transformers:\n b.config['transformers'] = transformers.append(\n StatsTransformerConfig())\n else:\n b.config['transformers'] = [StatsTransformerConfig()]\n return b\n", "path": "rastervision/data/raster_source/raster_source_config.py"}], "after_files": [{"content": "from abc import abstractmethod\nfrom copy import deepcopy\nimport logging\n\nimport rastervision as rv\nfrom rastervision.core.config import (Config, ConfigBuilder,\n BundledConfigMixin)\nfrom rastervision.data import (RasterTransformerConfig, StatsTransformerConfig)\nfrom rastervision.protos.raster_source_pb2 \\\n import RasterSourceConfig as RasterSourceConfigMsg\n\nlog = logging.getLogger(__name__)\n\n\nclass RasterSourceConfig(BundledConfigMixin, Config):\n deprecation_warnings = []\n\n def __init__(self, source_type, transformers=None, channel_order=None):\n if transformers is None:\n transformers = []\n\n self.source_type = source_type\n self.transformers = transformers\n self.channel_order = channel_order\n\n def to_proto(self):\n transformers = list(map(lambda c: c.to_proto(), 
self.transformers))\n msg = RasterSourceConfigMsg(\n source_type=self.source_type,\n channel_order=self.channel_order,\n transformers=transformers)\n return msg\n\n def save_bundle_files(self, bundle_dir):\n new_transformers = []\n files = []\n for transformer in self.transformers:\n new_transformer, t_files = transformer.save_bundle_files(\n bundle_dir)\n new_transformers.append(new_transformer)\n files.extend(t_files)\n\n new_config = self.to_builder() \\\n .with_transformers(new_transformers) \\\n .build()\n return (new_config, files)\n\n def load_bundle_files(self, bundle_dir):\n new_transformers = []\n for transformer in self.transformers:\n new_transformer = transformer.load_bundle_files(bundle_dir)\n new_transformers.append(new_transformer)\n return self.to_builder() \\\n .with_transformers(new_transformers) \\\n .build()\n\n @abstractmethod\n def create_source(self, tmp_dir, crs_transformer, extent, class_map):\n \"\"\"Create the Raster Source for this configuration.\n \"\"\"\n pass\n\n def to_builder(self):\n return rv._registry.get_config_builder(rv.RASTER_SOURCE,\n self.source_type)(self)\n\n @staticmethod\n def check_deprecation(source_type):\n # If source_type is deprecated and warning hasn't been shown yet, then warn.\n if (source_type in rv.raster_source_deprecated_map and\n source_type not in RasterSourceConfig.deprecation_warnings):\n RasterSourceConfig.deprecation_warnings.append(source_type)\n new_source_type = rv.raster_source_deprecated_map[source_type]\n log.warn(\n 'RasterSource {} is deprecated. Please use {} instead.'.format(\n source_type, new_source_type))\n\n def builder(source_type):\n RasterSourceConfig.check_deprecation(source_type)\n return rv._registry.get_config_builder(rv.RASTER_SOURCE, source_type)()\n\n @staticmethod\n def from_proto(msg):\n \"\"\"Creates a TaskConfig from the specificed protobuf message\n \"\"\"\n return rv._registry.get_config_builder(rv.RASTER_SOURCE, msg.source_type)() \\\n .from_proto(msg) \\\n .build()\n\n @abstractmethod\n def for_prediction(self, image_uri):\n \"\"\"Creates a new config with the image_uri.\"\"\"\n pass\n\n @abstractmethod\n def create_local(self, tmp_dir):\n \"\"\"Returns a new config with a local copy of the image data\n if this image is remote.\n \"\"\"\n pass\n\n def create_transformers(self):\n return list(map(lambda c: c.create_transformer(), self.transformers))\n\n def update_for_command(self, command_type, experiment_config,\n context=None):\n for transformer in self.transformers:\n transformer.update_for_command(command_type, experiment_config,\n context)\n\n def report_io(self, command_type, io_def):\n for transformer in self.transformers:\n transformer.report_io(command_type, io_def)\n\n\nclass RasterSourceConfigBuilder(ConfigBuilder):\n def from_proto(self, msg):\n transformers = list(\n map(lambda m: RasterTransformerConfig.from_proto(m),\n msg.transformers))\n\n channel_order = list(msg.channel_order)\n if len(channel_order) == 0:\n channel_order = None\n return self.with_channel_order(channel_order) \\\n .with_transformers(transformers)\n\n def with_channel_order(self, channel_order):\n \"\"\"Defines the channel order for this raster source.\n\n This defines the subset of channel indices and their order to use when extracting\n chips from raw imagery.\n\n Args:\n channel_order: list of channel indices\n \"\"\"\n b = deepcopy(self)\n b.config['channel_order'] = channel_order\n return b\n\n def with_transformers(self, transformers):\n \"\"\"Transformers to be applied to the raster data.\n\n Args:\n 
transformers: A list of transformers to apply to the\n raster data.\n\n \"\"\"\n b = deepcopy(self)\n b.config['transformers'] = list(transformers)\n return b\n\n def with_transformer(self, transformer):\n \"\"\"A transformer to be applied to the raster data.\n\n Args:\n transformer: A transformer to apply to the raster\n data.\n\n \"\"\"\n return self.with_transformers([transformer])\n\n def with_stats_transformer(self):\n \"\"\"Add a stats transformer to the raster source.\"\"\"\n b = deepcopy(self)\n transformers = b.config.get('transformers')\n if transformers:\n b.config['transformers'] = transformers.append(\n StatsTransformerConfig())\n else:\n b.config['transformers'] = [StatsTransformerConfig()]\n return b\n", "path": "rastervision/data/raster_source/raster_source_config.py"}]}
3,508
169
gh_patches_debug_1078
rasdani/github-patches
git_diff
ipython__ipython-2186
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- oct2py v >= 0.3.1 doesn't need h5py anymore The octave magic docs/examples should update this information. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `IPython/extensions/octavemagic.py` Content: ``` 1 # -*- coding: utf-8 -*- 2 """ 3 =========== 4 octavemagic 5 =========== 6 7 Magics for interacting with Octave via oct2py. 8 9 .. note:: 10 11 The ``oct2py`` module needs to be installed separately, and in turn depends 12 on ``h5py``. Both can be obtained using ``easy_install`` or ``pip``. 13 14 Usage 15 ===== 16 17 ``%octave`` 18 19 {OCTAVE_DOC} 20 21 ``%octave_push`` 22 23 {OCTAVE_PUSH_DOC} 24 25 ``%octave_pull`` 26 27 {OCTAVE_PULL_DOC} 28 29 """ 30 31 #----------------------------------------------------------------------------- 32 # Copyright (C) 2012 The IPython Development Team 33 # 34 # Distributed under the terms of the BSD License. The full license is in 35 # the file COPYING, distributed as part of this software. 36 #----------------------------------------------------------------------------- 37 38 import tempfile 39 from glob import glob 40 from shutil import rmtree 41 42 import numpy as np 43 import oct2py 44 from xml.dom import minidom 45 46 from IPython.core.displaypub import publish_display_data 47 from IPython.core.magic import (Magics, magics_class, line_magic, 48 line_cell_magic) 49 from IPython.testing.skipdoctest import skip_doctest 50 from IPython.core.magic_arguments import ( 51 argument, magic_arguments, parse_argstring 52 ) 53 from IPython.utils.py3compat import unicode_to_str 54 55 class OctaveMagicError(oct2py.Oct2PyError): 56 pass 57 58 _mimetypes = {'png' : 'image/png', 59 'svg' : 'image/svg+xml', 60 'jpg' : 'image/jpeg', 61 'jpeg': 'image/jpeg'} 62 63 @magics_class 64 class OctaveMagics(Magics): 65 """A set of magics useful for interactive work with Octave via oct2py. 66 """ 67 def __init__(self, shell): 68 """ 69 Parameters 70 ---------- 71 shell : IPython shell 72 73 """ 74 super(OctaveMagics, self).__init__(shell) 75 self._oct = oct2py.Oct2Py() 76 self._plot_format = 'png' 77 78 # Allow publish_display_data to be overridden for 79 # testing purposes. 80 self._publish_display_data = publish_display_data 81 82 83 def _fix_gnuplot_svg_size(self, image, size=None): 84 """ 85 GnuPlot SVGs do not have height/width attributes. Set 86 these to be the same as the viewBox, so that the browser 87 scales the image correctly. 88 89 Parameters 90 ---------- 91 image : str 92 SVG data. 93 size : tuple of int 94 Image width, height. 95 96 """ 97 (svg,) = minidom.parseString(image).getElementsByTagName('svg') 98 viewbox = svg.getAttribute('viewBox').split(' ') 99 100 if size is not None: 101 width, height = size 102 else: 103 width, height = viewbox[2:] 104 105 svg.setAttribute('width', '%dpx' % width) 106 svg.setAttribute('height', '%dpx' % height) 107 return svg.toxml() 108 109 110 @skip_doctest 111 @line_magic 112 def octave_push(self, line): 113 ''' 114 Line-level magic that pushes a variable to Octave. 
115 116 `line` should be made up of whitespace separated variable names in the 117 IPython namespace:: 118 119 In [7]: import numpy as np 120 121 In [8]: X = np.arange(5) 122 123 In [9]: X.mean() 124 Out[9]: 2.0 125 126 In [10]: %octave_push X 127 128 In [11]: %octave mean(X) 129 Out[11]: 2.0 130 131 ''' 132 inputs = line.split(' ') 133 for input in inputs: 134 input = unicode_to_str(input) 135 self._oct.put(input, self.shell.user_ns[input]) 136 137 138 @skip_doctest 139 @line_magic 140 def octave_pull(self, line): 141 ''' 142 Line-level magic that pulls a variable from Octave. 143 144 In [18]: _ = %octave x = [1 2; 3 4]; y = 'hello' 145 146 In [19]: %octave_pull x y 147 148 In [20]: x 149 Out[20]: 150 array([[ 1., 2.], 151 [ 3., 4.]]) 152 153 In [21]: y 154 Out[21]: 'hello' 155 156 ''' 157 outputs = line.split(' ') 158 for output in outputs: 159 output = unicode_to_str(output) 160 self.shell.push({output: self._oct.get(output)}) 161 162 163 @skip_doctest 164 @magic_arguments() 165 @argument( 166 '-i', '--input', action='append', 167 help='Names of input variables to be pushed to Octave. Multiple names ' 168 'can be passed, separated by commas with no whitespace.' 169 ) 170 @argument( 171 '-o', '--output', action='append', 172 help='Names of variables to be pulled from Octave after executing cell ' 173 'body. Multiple names can be passed, separated by commas with no ' 174 'whitespace.' 175 ) 176 @argument( 177 '-s', '--size', action='store', 178 help='Pixel size of plots, "width,height". Default is "-s 400,250".' 179 ) 180 @argument( 181 '-f', '--format', action='store', 182 help='Plot format (png, svg or jpg).' 183 ) 184 185 @argument( 186 'code', 187 nargs='*', 188 ) 189 @line_cell_magic 190 def octave(self, line, cell=None): 191 ''' 192 Execute code in Octave, and pull some of the results back into the 193 Python namespace. 194 195 In [9]: %octave X = [1 2; 3 4]; mean(X) 196 Out[9]: array([[ 2., 3.]]) 197 198 As a cell, this will run a block of Octave code, without returning any 199 value:: 200 201 In [10]: %%octave 202 ....: p = [-2, -1, 0, 1, 2] 203 ....: polyout(p, 'x') 204 205 -2*x^4 - 1*x^3 + 0*x^2 + 1*x^1 + 2 206 207 In the notebook, plots are published as the output of the cell, e.g. 208 209 %octave plot([1 2 3], [4 5 6]) 210 211 will create a line plot. 
212 213 Objects can be passed back and forth between Octave and IPython via the 214 -i and -o flags in line:: 215 216 In [14]: Z = np.array([1, 4, 5, 10]) 217 218 In [15]: %octave -i Z mean(Z) 219 Out[15]: array([ 5.]) 220 221 222 In [16]: %octave -o W W = Z * mean(Z) 223 Out[16]: array([ 5., 20., 25., 50.]) 224 225 In [17]: W 226 Out[17]: array([ 5., 20., 25., 50.]) 227 228 The size and format of output plots can be specified:: 229 230 In [18]: %%octave -s 600,800 -f svg 231 ...: plot([1, 2, 3]); 232 233 ''' 234 args = parse_argstring(self.octave, line) 235 236 # arguments 'code' in line are prepended to the cell lines 237 if cell is None: 238 code = '' 239 return_output = True 240 line_mode = True 241 else: 242 code = cell 243 return_output = False 244 line_mode = False 245 246 code = ' '.join(args.code) + code 247 248 if args.input: 249 for input in ','.join(args.input).split(','): 250 input = unicode_to_str(input) 251 self._oct.put(input, self.shell.user_ns[input]) 252 253 # generate plots in a temporary directory 254 plot_dir = tempfile.mkdtemp() 255 if args.size is not None: 256 size = args.size 257 else: 258 size = '400,240' 259 260 if args.format is not None: 261 plot_format = args.format 262 else: 263 plot_format = 'png' 264 265 pre_call = ''' 266 global __ipy_figures = []; 267 page_screen_output(0); 268 269 function fig_create(src, event) 270 global __ipy_figures; 271 __ipy_figures(size(__ipy_figures) + 1) = src; 272 set(src, "visible", "off"); 273 end 274 275 set(0, 'DefaultFigureCreateFcn', @fig_create); 276 277 close all; 278 clear ans; 279 280 # ___<end_pre_call>___ # 281 ''' 282 283 post_call = ''' 284 # ___<start_post_call>___ # 285 286 # Save output of the last execution 287 if exist("ans") == 1 288 _ = ans; 289 else 290 _ = nan; 291 end 292 293 for f = __ipy_figures 294 outfile = sprintf('%(plot_dir)s/__ipy_oct_fig_%%03d.png', f); 295 try 296 print(f, outfile, '-d%(plot_format)s', '-tight', '-S%(size)s'); 297 end 298 end 299 300 ''' % locals() 301 302 code = ' '.join((pre_call, code, post_call)) 303 try: 304 text_output = self._oct.run(code, verbose=False) 305 except (oct2py.Oct2PyError) as exception: 306 msg = exception.message 307 msg = msg.split('# ___<end_pre_call>___ #')[1] 308 msg = msg.split('# ___<start_post_call>___ #')[0] 309 raise OctaveMagicError('Octave could not complete execution. 
' 310 'Traceback (currently broken in oct2py): %s' 311 % msg) 312 313 key = 'OctaveMagic.Octave' 314 display_data = [] 315 316 # Publish text output 317 if text_output: 318 display_data.append((key, {'text/plain': text_output})) 319 320 # Publish images 321 images = [open(imgfile, 'rb').read() for imgfile in \ 322 glob("%s/*" % plot_dir)] 323 rmtree(plot_dir) 324 325 plot_mime_type = _mimetypes.get(plot_format, 'image/png') 326 width, height = [int(s) for s in size.split(',')] 327 for image in images: 328 if plot_format == 'svg': 329 image = self._fix_gnuplot_svg_size(image, size=(width, height)) 330 display_data.append((key, {plot_mime_type: image})) 331 332 if args.output: 333 for output in ','.join(args.output).split(','): 334 output = unicode_to_str(output) 335 self.shell.push({output: self._oct.get(output)}) 336 337 for source, data in display_data: 338 self._publish_display_data(source, data) 339 340 if return_output: 341 ans = self._oct.get('_') 342 343 # Unfortunately, Octave doesn't have a "None" object, 344 # so we can't return any NaN outputs 345 if np.isscalar(ans) and np.isnan(ans): 346 ans = None 347 348 return ans 349 350 351 __doc__ = __doc__.format( 352 OCTAVE_DOC = ' '*8 + OctaveMagics.octave.__doc__, 353 OCTAVE_PUSH_DOC = ' '*8 + OctaveMagics.octave_push.__doc__, 354 OCTAVE_PULL_DOC = ' '*8 + OctaveMagics.octave_pull.__doc__ 355 ) 356 357 358 _loaded = False 359 def load_ipython_extension(ip): 360 """Load the extension in IPython.""" 361 global _loaded 362 if not _loaded: 363 ip.register_magics(OctaveMagics) 364 _loaded = True 365 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/IPython/extensions/octavemagic.py b/IPython/extensions/octavemagic.py --- a/IPython/extensions/octavemagic.py +++ b/IPython/extensions/octavemagic.py @@ -8,8 +8,8 @@ .. note:: - The ``oct2py`` module needs to be installed separately, and in turn depends - on ``h5py``. Both can be obtained using ``easy_install`` or ``pip``. + The ``oct2py`` module needs to be installed separately and + can be obtained using ``easy_install`` or ``pip``. Usage =====
{"golden_diff": "diff --git a/IPython/extensions/octavemagic.py b/IPython/extensions/octavemagic.py\n--- a/IPython/extensions/octavemagic.py\n+++ b/IPython/extensions/octavemagic.py\n@@ -8,8 +8,8 @@\n \n .. note::\n \n- The ``oct2py`` module needs to be installed separately, and in turn depends\n- on ``h5py``. Both can be obtained using ``easy_install`` or ``pip``.\n+ The ``oct2py`` module needs to be installed separately and\n+ can be obtained using ``easy_install`` or ``pip``.\n \n Usage\n =====\n", "issue": "oct2py v >= 0.3.1 doesn't need h5py anymore\nThe octave magic docs/examples should update this information.\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\n===========\noctavemagic\n===========\n\nMagics for interacting with Octave via oct2py.\n\n.. note::\n\n The ``oct2py`` module needs to be installed separately, and in turn depends\n on ``h5py``. Both can be obtained using ``easy_install`` or ``pip``.\n\nUsage\n=====\n\n``%octave``\n\n{OCTAVE_DOC}\n\n``%octave_push``\n\n{OCTAVE_PUSH_DOC}\n\n``%octave_pull``\n\n{OCTAVE_PULL_DOC}\n\n\"\"\"\n\n#-----------------------------------------------------------------------------\n# Copyright (C) 2012 The IPython Development Team\n#\n# Distributed under the terms of the BSD License. The full license is in\n# the file COPYING, distributed as part of this software.\n#-----------------------------------------------------------------------------\n\nimport tempfile\nfrom glob import glob\nfrom shutil import rmtree\n\nimport numpy as np\nimport oct2py\nfrom xml.dom import minidom\n\nfrom IPython.core.displaypub import publish_display_data\nfrom IPython.core.magic import (Magics, magics_class, line_magic,\n line_cell_magic)\nfrom IPython.testing.skipdoctest import skip_doctest\nfrom IPython.core.magic_arguments import (\n argument, magic_arguments, parse_argstring\n)\nfrom IPython.utils.py3compat import unicode_to_str\n\nclass OctaveMagicError(oct2py.Oct2PyError):\n pass\n\n_mimetypes = {'png' : 'image/png',\n 'svg' : 'image/svg+xml',\n 'jpg' : 'image/jpeg',\n 'jpeg': 'image/jpeg'}\n\n@magics_class\nclass OctaveMagics(Magics):\n \"\"\"A set of magics useful for interactive work with Octave via oct2py.\n \"\"\"\n def __init__(self, shell):\n \"\"\"\n Parameters\n ----------\n shell : IPython shell\n\n \"\"\"\n super(OctaveMagics, self).__init__(shell)\n self._oct = oct2py.Oct2Py()\n self._plot_format = 'png'\n\n # Allow publish_display_data to be overridden for\n # testing purposes.\n self._publish_display_data = publish_display_data\n\n\n def _fix_gnuplot_svg_size(self, image, size=None):\n \"\"\"\n GnuPlot SVGs do not have height/width attributes. 
Set\n these to be the same as the viewBox, so that the browser\n scales the image correctly.\n\n Parameters\n ----------\n image : str\n SVG data.\n size : tuple of int\n Image width, height.\n\n \"\"\"\n (svg,) = minidom.parseString(image).getElementsByTagName('svg')\n viewbox = svg.getAttribute('viewBox').split(' ')\n\n if size is not None:\n width, height = size\n else:\n width, height = viewbox[2:]\n\n svg.setAttribute('width', '%dpx' % width)\n svg.setAttribute('height', '%dpx' % height)\n return svg.toxml()\n\n\n @skip_doctest\n @line_magic\n def octave_push(self, line):\n '''\n Line-level magic that pushes a variable to Octave.\n\n `line` should be made up of whitespace separated variable names in the\n IPython namespace::\n\n In [7]: import numpy as np\n\n In [8]: X = np.arange(5)\n\n In [9]: X.mean()\n Out[9]: 2.0\n\n In [10]: %octave_push X\n\n In [11]: %octave mean(X)\n Out[11]: 2.0\n\n '''\n inputs = line.split(' ')\n for input in inputs:\n input = unicode_to_str(input)\n self._oct.put(input, self.shell.user_ns[input])\n\n\n @skip_doctest\n @line_magic\n def octave_pull(self, line):\n '''\n Line-level magic that pulls a variable from Octave.\n\n In [18]: _ = %octave x = [1 2; 3 4]; y = 'hello'\n\n In [19]: %octave_pull x y\n\n In [20]: x\n Out[20]:\n array([[ 1., 2.],\n [ 3., 4.]])\n\n In [21]: y\n Out[21]: 'hello'\n\n '''\n outputs = line.split(' ')\n for output in outputs:\n output = unicode_to_str(output)\n self.shell.push({output: self._oct.get(output)})\n\n\n @skip_doctest\n @magic_arguments()\n @argument(\n '-i', '--input', action='append',\n help='Names of input variables to be pushed to Octave. Multiple names '\n 'can be passed, separated by commas with no whitespace.'\n )\n @argument(\n '-o', '--output', action='append',\n help='Names of variables to be pulled from Octave after executing cell '\n 'body. Multiple names can be passed, separated by commas with no '\n 'whitespace.'\n )\n @argument(\n '-s', '--size', action='store',\n help='Pixel size of plots, \"width,height\". 
Default is \"-s 400,250\".'\n )\n @argument(\n '-f', '--format', action='store',\n help='Plot format (png, svg or jpg).'\n )\n\n @argument(\n 'code',\n nargs='*',\n )\n @line_cell_magic\n def octave(self, line, cell=None):\n '''\n Execute code in Octave, and pull some of the results back into the\n Python namespace.\n\n In [9]: %octave X = [1 2; 3 4]; mean(X)\n Out[9]: array([[ 2., 3.]])\n\n As a cell, this will run a block of Octave code, without returning any\n value::\n\n In [10]: %%octave\n ....: p = [-2, -1, 0, 1, 2]\n ....: polyout(p, 'x')\n\n -2*x^4 - 1*x^3 + 0*x^2 + 1*x^1 + 2\n\n In the notebook, plots are published as the output of the cell, e.g.\n\n %octave plot([1 2 3], [4 5 6])\n\n will create a line plot.\n\n Objects can be passed back and forth between Octave and IPython via the\n -i and -o flags in line::\n\n In [14]: Z = np.array([1, 4, 5, 10])\n\n In [15]: %octave -i Z mean(Z)\n Out[15]: array([ 5.])\n\n\n In [16]: %octave -o W W = Z * mean(Z)\n Out[16]: array([ 5., 20., 25., 50.])\n\n In [17]: W\n Out[17]: array([ 5., 20., 25., 50.])\n\n The size and format of output plots can be specified::\n\n In [18]: %%octave -s 600,800 -f svg\n ...: plot([1, 2, 3]);\n\n '''\n args = parse_argstring(self.octave, line)\n\n # arguments 'code' in line are prepended to the cell lines\n if cell is None:\n code = ''\n return_output = True\n line_mode = True\n else:\n code = cell\n return_output = False\n line_mode = False\n\n code = ' '.join(args.code) + code\n\n if args.input:\n for input in ','.join(args.input).split(','):\n input = unicode_to_str(input)\n self._oct.put(input, self.shell.user_ns[input])\n\n # generate plots in a temporary directory\n plot_dir = tempfile.mkdtemp()\n if args.size is not None:\n size = args.size\n else:\n size = '400,240'\n\n if args.format is not None:\n plot_format = args.format\n else:\n plot_format = 'png'\n\n pre_call = '''\n global __ipy_figures = [];\n page_screen_output(0);\n\n function fig_create(src, event)\n global __ipy_figures;\n __ipy_figures(size(__ipy_figures) + 1) = src;\n set(src, \"visible\", \"off\");\n end\n\n set(0, 'DefaultFigureCreateFcn', @fig_create);\n\n close all;\n clear ans;\n\n # ___<end_pre_call>___ #\n '''\n\n post_call = '''\n # ___<start_post_call>___ #\n\n # Save output of the last execution\n if exist(\"ans\") == 1\n _ = ans;\n else\n _ = nan;\n end\n\n for f = __ipy_figures\n outfile = sprintf('%(plot_dir)s/__ipy_oct_fig_%%03d.png', f);\n try\n print(f, outfile, '-d%(plot_format)s', '-tight', '-S%(size)s');\n end\n end\n\n ''' % locals()\n\n code = ' '.join((pre_call, code, post_call))\n try:\n text_output = self._oct.run(code, verbose=False)\n except (oct2py.Oct2PyError) as exception:\n msg = exception.message\n msg = msg.split('# ___<end_pre_call>___ #')[1]\n msg = msg.split('# ___<start_post_call>___ #')[0]\n raise OctaveMagicError('Octave could not complete execution. 
'\n 'Traceback (currently broken in oct2py): %s'\n % msg)\n\n key = 'OctaveMagic.Octave'\n display_data = []\n\n # Publish text output\n if text_output:\n display_data.append((key, {'text/plain': text_output}))\n\n # Publish images\n images = [open(imgfile, 'rb').read() for imgfile in \\\n glob(\"%s/*\" % plot_dir)]\n rmtree(plot_dir)\n\n plot_mime_type = _mimetypes.get(plot_format, 'image/png')\n width, height = [int(s) for s in size.split(',')]\n for image in images:\n if plot_format == 'svg':\n image = self._fix_gnuplot_svg_size(image, size=(width, height))\n display_data.append((key, {plot_mime_type: image}))\n\n if args.output:\n for output in ','.join(args.output).split(','):\n output = unicode_to_str(output)\n self.shell.push({output: self._oct.get(output)})\n\n for source, data in display_data:\n self._publish_display_data(source, data)\n\n if return_output:\n ans = self._oct.get('_')\n\n # Unfortunately, Octave doesn't have a \"None\" object,\n # so we can't return any NaN outputs\n if np.isscalar(ans) and np.isnan(ans):\n ans = None\n\n return ans\n\n\n__doc__ = __doc__.format(\n OCTAVE_DOC = ' '*8 + OctaveMagics.octave.__doc__,\n OCTAVE_PUSH_DOC = ' '*8 + OctaveMagics.octave_push.__doc__,\n OCTAVE_PULL_DOC = ' '*8 + OctaveMagics.octave_pull.__doc__\n )\n\n\n_loaded = False\ndef load_ipython_extension(ip):\n \"\"\"Load the extension in IPython.\"\"\"\n global _loaded\n if not _loaded:\n ip.register_magics(OctaveMagics)\n _loaded = True\n", "path": "IPython/extensions/octavemagic.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\n===========\noctavemagic\n===========\n\nMagics for interacting with Octave via oct2py.\n\n.. note::\n\n The ``oct2py`` module needs to be installed separately and\n can be obtained using ``easy_install`` or ``pip``.\n\nUsage\n=====\n\n``%octave``\n\n{OCTAVE_DOC}\n\n``%octave_push``\n\n{OCTAVE_PUSH_DOC}\n\n``%octave_pull``\n\n{OCTAVE_PULL_DOC}\n\n\"\"\"\n\n#-----------------------------------------------------------------------------\n# Copyright (C) 2012 The IPython Development Team\n#\n# Distributed under the terms of the BSD License. The full license is in\n# the file COPYING, distributed as part of this software.\n#-----------------------------------------------------------------------------\n\nimport tempfile\nfrom glob import glob\nfrom shutil import rmtree\n\nimport numpy as np\nimport oct2py\nfrom xml.dom import minidom\n\nfrom IPython.core.displaypub import publish_display_data\nfrom IPython.core.magic import (Magics, magics_class, line_magic,\n line_cell_magic)\nfrom IPython.testing.skipdoctest import skip_doctest\nfrom IPython.core.magic_arguments import (\n argument, magic_arguments, parse_argstring\n)\nfrom IPython.utils.py3compat import unicode_to_str\n\nclass OctaveMagicError(oct2py.Oct2PyError):\n pass\n\n_mimetypes = {'png' : 'image/png',\n 'svg' : 'image/svg+xml',\n 'jpg' : 'image/jpeg',\n 'jpeg': 'image/jpeg'}\n\n@magics_class\nclass OctaveMagics(Magics):\n \"\"\"A set of magics useful for interactive work with Octave via oct2py.\n \"\"\"\n def __init__(self, shell):\n \"\"\"\n Parameters\n ----------\n shell : IPython shell\n\n \"\"\"\n super(OctaveMagics, self).__init__(shell)\n self._oct = oct2py.Oct2Py()\n self._plot_format = 'png'\n\n # Allow publish_display_data to be overridden for\n # testing purposes.\n self._publish_display_data = publish_display_data\n\n\n def _fix_gnuplot_svg_size(self, image, size=None):\n \"\"\"\n GnuPlot SVGs do not have height/width attributes. 
Set\n these to be the same as the viewBox, so that the browser\n scales the image correctly.\n\n Parameters\n ----------\n image : str\n SVG data.\n size : tuple of int\n Image width, height.\n\n \"\"\"\n (svg,) = minidom.parseString(image).getElementsByTagName('svg')\n viewbox = svg.getAttribute('viewBox').split(' ')\n\n if size is not None:\n width, height = size\n else:\n width, height = viewbox[2:]\n\n svg.setAttribute('width', '%dpx' % width)\n svg.setAttribute('height', '%dpx' % height)\n return svg.toxml()\n\n\n @skip_doctest\n @line_magic\n def octave_push(self, line):\n '''\n Line-level magic that pushes a variable to Octave.\n\n `line` should be made up of whitespace separated variable names in the\n IPython namespace::\n\n In [7]: import numpy as np\n\n In [8]: X = np.arange(5)\n\n In [9]: X.mean()\n Out[9]: 2.0\n\n In [10]: %octave_push X\n\n In [11]: %octave mean(X)\n Out[11]: 2.0\n\n '''\n inputs = line.split(' ')\n for input in inputs:\n input = unicode_to_str(input)\n self._oct.put(input, self.shell.user_ns[input])\n\n\n @skip_doctest\n @line_magic\n def octave_pull(self, line):\n '''\n Line-level magic that pulls a variable from Octave.\n\n In [18]: _ = %octave x = [1 2; 3 4]; y = 'hello'\n\n In [19]: %octave_pull x y\n\n In [20]: x\n Out[20]:\n array([[ 1., 2.],\n [ 3., 4.]])\n\n In [21]: y\n Out[21]: 'hello'\n\n '''\n outputs = line.split(' ')\n for output in outputs:\n output = unicode_to_str(output)\n self.shell.push({output: self._oct.get(output)})\n\n\n @skip_doctest\n @magic_arguments()\n @argument(\n '-i', '--input', action='append',\n help='Names of input variables to be pushed to Octave. Multiple names '\n 'can be passed, separated by commas with no whitespace.'\n )\n @argument(\n '-o', '--output', action='append',\n help='Names of variables to be pulled from Octave after executing cell '\n 'body. Multiple names can be passed, separated by commas with no '\n 'whitespace.'\n )\n @argument(\n '-s', '--size', action='store',\n help='Pixel size of plots, \"width,height\". 
Default is \"-s 400,250\".'\n )\n @argument(\n '-f', '--format', action='store',\n help='Plot format (png, svg or jpg).'\n )\n\n @argument(\n 'code',\n nargs='*',\n )\n @line_cell_magic\n def octave(self, line, cell=None):\n '''\n Execute code in Octave, and pull some of the results back into the\n Python namespace.\n\n In [9]: %octave X = [1 2; 3 4]; mean(X)\n Out[9]: array([[ 2., 3.]])\n\n As a cell, this will run a block of Octave code, without returning any\n value::\n\n In [10]: %%octave\n ....: p = [-2, -1, 0, 1, 2]\n ....: polyout(p, 'x')\n\n -2*x^4 - 1*x^3 + 0*x^2 + 1*x^1 + 2\n\n In the notebook, plots are published as the output of the cell, e.g.\n\n %octave plot([1 2 3], [4 5 6])\n\n will create a line plot.\n\n Objects can be passed back and forth between Octave and IPython via the\n -i and -o flags in line::\n\n In [14]: Z = np.array([1, 4, 5, 10])\n\n In [15]: %octave -i Z mean(Z)\n Out[15]: array([ 5.])\n\n\n In [16]: %octave -o W W = Z * mean(Z)\n Out[16]: array([ 5., 20., 25., 50.])\n\n In [17]: W\n Out[17]: array([ 5., 20., 25., 50.])\n\n The size and format of output plots can be specified::\n\n In [18]: %%octave -s 600,800 -f svg\n ...: plot([1, 2, 3]);\n\n '''\n args = parse_argstring(self.octave, line)\n\n # arguments 'code' in line are prepended to the cell lines\n if cell is None:\n code = ''\n return_output = True\n line_mode = True\n else:\n code = cell\n return_output = False\n line_mode = False\n\n code = ' '.join(args.code) + code\n\n if args.input:\n for input in ','.join(args.input).split(','):\n input = unicode_to_str(input)\n self._oct.put(input, self.shell.user_ns[input])\n\n # generate plots in a temporary directory\n plot_dir = tempfile.mkdtemp()\n if args.size is not None:\n size = args.size\n else:\n size = '400,240'\n\n if args.format is not None:\n plot_format = args.format\n else:\n plot_format = 'png'\n\n pre_call = '''\n global __ipy_figures = [];\n page_screen_output(0);\n\n function fig_create(src, event)\n global __ipy_figures;\n __ipy_figures(size(__ipy_figures) + 1) = src;\n set(src, \"visible\", \"off\");\n end\n\n set(0, 'DefaultFigureCreateFcn', @fig_create);\n\n close all;\n clear ans;\n\n # ___<end_pre_call>___ #\n '''\n\n post_call = '''\n # ___<start_post_call>___ #\n\n # Save output of the last execution\n if exist(\"ans\") == 1\n _ = ans;\n else\n _ = nan;\n end\n\n for f = __ipy_figures\n outfile = sprintf('%(plot_dir)s/__ipy_oct_fig_%%03d.png', f);\n try\n print(f, outfile, '-d%(plot_format)s', '-tight', '-S%(size)s');\n end\n end\n\n ''' % locals()\n\n code = ' '.join((pre_call, code, post_call))\n try:\n text_output = self._oct.run(code, verbose=False)\n except (oct2py.Oct2PyError) as exception:\n msg = exception.message\n msg = msg.split('# ___<end_pre_call>___ #')[1]\n msg = msg.split('# ___<start_post_call>___ #')[0]\n raise OctaveMagicError('Octave could not complete execution. 
'\n 'Traceback (currently broken in oct2py): %s'\n % msg)\n\n key = 'OctaveMagic.Octave'\n display_data = []\n\n # Publish text output\n if text_output:\n display_data.append((key, {'text/plain': text_output}))\n\n # Publish images\n images = [open(imgfile, 'rb').read() for imgfile in \\\n glob(\"%s/*\" % plot_dir)]\n rmtree(plot_dir)\n\n plot_mime_type = _mimetypes.get(plot_format, 'image/png')\n width, height = [int(s) for s in size.split(',')]\n for image in images:\n if plot_format == 'svg':\n image = self._fix_gnuplot_svg_size(image, size=(width, height))\n display_data.append((key, {plot_mime_type: image}))\n\n if args.output:\n for output in ','.join(args.output).split(','):\n output = unicode_to_str(output)\n self.shell.push({output: self._oct.get(output)})\n\n for source, data in display_data:\n self._publish_display_data(source, data)\n\n if return_output:\n ans = self._oct.get('_')\n\n # Unfortunately, Octave doesn't have a \"None\" object,\n # so we can't return any NaN outputs\n if np.isscalar(ans) and np.isnan(ans):\n ans = None\n\n return ans\n\n\n__doc__ = __doc__.format(\n OCTAVE_DOC = ' '*8 + OctaveMagics.octave.__doc__,\n OCTAVE_PUSH_DOC = ' '*8 + OctaveMagics.octave_push.__doc__,\n OCTAVE_PULL_DOC = ' '*8 + OctaveMagics.octave_pull.__doc__\n )\n\n\n_loaded = False\ndef load_ipython_extension(ip):\n \"\"\"Load the extension in IPython.\"\"\"\n global _loaded\n if not _loaded:\n ip.register_magics(OctaveMagics)\n _loaded = True\n", "path": "IPython/extensions/octavemagic.py"}]}
3,883
139
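The record ending above centres on IPython's oct2py-backed `%octave` magic, whose `_fix_gnuplot_svg_size` helper rewrites an SVG's `width`/`height` attributes so browsers honour the requested plot size. A hedged, standalone distillation of that one step follows; the sample SVG string and the `fix_svg_size` name are invented for illustration, and only the `minidom` calls mirror the record.

```python
# Minimal sketch of the SVG-resizing step shown in the record above.
# The sample markup below is illustrative, not taken from the dataset.
from xml.dom import minidom

def fix_svg_size(svg_text, size):
    """Set explicit pixel width/height on the root <svg> element."""
    (svg,) = minidom.parseString(svg_text).getElementsByTagName('svg')
    width, height = size
    svg.setAttribute('width', '%dpx' % width)
    svg.setAttribute('height', '%dpx' % height)
    return svg.toxml()

sample = '<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 400 240"></svg>'
print(fix_svg_size(sample, (400, 240)))
```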
gh_patches_debug_30017
rasdani/github-patches
git_diff
docker__docker-py-1022
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Empty auth dictionary should be valid docker/compose#3265 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `docker/auth/auth.py` Content: ``` 1 # Copyright 2013 dotCloud inc. 2 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 7 # http://www.apache.org/licenses/LICENSE-2.0 8 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 14 15 import base64 16 import json 17 import logging 18 import os 19 20 import six 21 22 from .. import errors 23 24 INDEX_NAME = 'docker.io' 25 INDEX_URL = 'https://{0}/v1/'.format(INDEX_NAME) 26 DOCKER_CONFIG_FILENAME = os.path.join('.docker', 'config.json') 27 LEGACY_DOCKER_CONFIG_FILENAME = '.dockercfg' 28 29 log = logging.getLogger(__name__) 30 31 32 def resolve_repository_name(repo_name): 33 if '://' in repo_name: 34 raise errors.InvalidRepository( 35 'Repository name cannot contain a scheme ({0})'.format(repo_name) 36 ) 37 38 index_name, remote_name = split_repo_name(repo_name) 39 if index_name[0] == '-' or index_name[-1] == '-': 40 raise errors.InvalidRepository( 41 'Invalid index name ({0}). Cannot begin or end with a' 42 ' hyphen.'.format(index_name) 43 ) 44 return resolve_index_name(index_name), remote_name 45 46 47 def resolve_index_name(index_name): 48 index_name = convert_to_hostname(index_name) 49 if index_name == 'index.' + INDEX_NAME: 50 index_name = INDEX_NAME 51 return index_name 52 53 54 def split_repo_name(repo_name): 55 parts = repo_name.split('/', 1) 56 if len(parts) == 1 or ( 57 '.' not in parts[0] and ':' not in parts[0] and parts[0] != 'localhost' 58 ): 59 # This is a docker index repo (ex: username/foobar or ubuntu) 60 return INDEX_NAME, repo_name 61 return tuple(parts) 62 63 64 def resolve_authconfig(authconfig, registry=None): 65 """ 66 Returns the authentication data from the given auth configuration for a 67 specific registry. As with the Docker client, legacy entries in the config 68 with full URLs are stripped down to hostnames before checking for a match. 69 Returns None if no match was found. 
70 """ 71 # Default to the public index server 72 registry = resolve_index_name(registry) if registry else INDEX_NAME 73 log.debug("Looking for auth entry for {0}".format(repr(registry))) 74 75 if registry in authconfig: 76 log.debug("Found {0}".format(repr(registry))) 77 return authconfig[registry] 78 79 for key, config in six.iteritems(authconfig): 80 if resolve_index_name(key) == registry: 81 log.debug("Found {0}".format(repr(key))) 82 return config 83 84 log.debug("No entry found") 85 return None 86 87 88 def convert_to_hostname(url): 89 return url.replace('http://', '').replace('https://', '').split('/', 1)[0] 90 91 92 def decode_auth(auth): 93 if isinstance(auth, six.string_types): 94 auth = auth.encode('ascii') 95 s = base64.b64decode(auth) 96 login, pwd = s.split(b':', 1) 97 return login.decode('utf8'), pwd.decode('utf8') 98 99 100 def encode_header(auth): 101 auth_json = json.dumps(auth).encode('ascii') 102 return base64.urlsafe_b64encode(auth_json) 103 104 105 def parse_auth(entries, raise_on_error=False): 106 """ 107 Parses authentication entries 108 109 Args: 110 entries: Dict of authentication entries. 111 raise_on_error: If set to true, an invalid format will raise 112 InvalidConfigFile 113 114 Returns: 115 Authentication registry. 116 """ 117 118 conf = {} 119 for registry, entry in six.iteritems(entries): 120 if not (isinstance(entry, dict) and 'auth' in entry): 121 log.debug( 122 'Config entry for key {0} is not auth config'.format(registry) 123 ) 124 # We sometimes fall back to parsing the whole config as if it was 125 # the auth config by itself, for legacy purposes. In that case, we 126 # fail silently and return an empty conf if any of the keys is not 127 # formatted properly. 128 if raise_on_error: 129 raise errors.InvalidConfigFile( 130 'Invalid configuration for registry {0}'.format(registry) 131 ) 132 return {} 133 username, password = decode_auth(entry['auth']) 134 log.debug( 135 'Found entry (registry={0}, username={1})' 136 .format(repr(registry), repr(username)) 137 ) 138 conf[registry] = { 139 'username': username, 140 'password': password, 141 'email': entry.get('email'), 142 'serveraddress': registry, 143 } 144 return conf 145 146 147 def find_config_file(config_path=None): 148 environment_path = os.path.join( 149 os.environ.get('DOCKER_CONFIG'), 150 os.path.basename(DOCKER_CONFIG_FILENAME) 151 ) if os.environ.get('DOCKER_CONFIG') else None 152 153 paths = [ 154 config_path, # 1 155 environment_path, # 2 156 os.path.join(os.path.expanduser('~'), DOCKER_CONFIG_FILENAME), # 3 157 os.path.join( 158 os.path.expanduser('~'), LEGACY_DOCKER_CONFIG_FILENAME 159 ) # 4 160 ] 161 162 for path in paths: 163 if path and os.path.exists(path): 164 return path 165 return None 166 167 168 def load_config(config_path=None): 169 """ 170 Loads authentication data from a Docker configuration file in the given 171 root directory or if config_path is passed use given path. 
172 Lookup priority: 173 explicit config_path parameter > DOCKER_CONFIG environment variable > 174 ~/.docker/config.json > ~/.dockercfg 175 """ 176 config_file = find_config_file(config_path) 177 178 if not config_file: 179 log.debug("File doesn't exist") 180 return {} 181 182 try: 183 with open(config_file) as f: 184 data = json.load(f) 185 res = {} 186 if data.get('auths'): 187 log.debug("Found 'auths' section") 188 res.update(parse_auth(data['auths'], raise_on_error=True)) 189 if data.get('HttpHeaders'): 190 log.debug("Found 'HttpHeaders' section") 191 res.update({'HttpHeaders': data['HttpHeaders']}) 192 if res: 193 return res 194 else: 195 log.debug("Couldn't find 'auths' or 'HttpHeaders' sections") 196 f.seek(0) 197 return parse_auth(json.load(f)) 198 except (IOError, KeyError, ValueError) as e: 199 # Likely missing new Docker config file or it's in an 200 # unknown format, continue to attempt to read old location 201 # and format. 202 log.debug(e) 203 204 log.debug("Attempting to parse legacy auth file format") 205 try: 206 data = [] 207 with open(config_file) as f: 208 for line in f.readlines(): 209 data.append(line.strip().split(' = ')[1]) 210 if len(data) < 2: 211 # Not enough data 212 raise errors.InvalidConfigFile( 213 'Invalid or empty configuration file!' 214 ) 215 216 username, password = decode_auth(data[0]) 217 return { 218 INDEX_NAME: { 219 'username': username, 220 'password': password, 221 'email': data[1], 222 'serveraddress': INDEX_URL, 223 } 224 } 225 except Exception as e: 226 log.debug(e) 227 pass 228 229 log.debug("All parsing attempts failed - returning empty config") 230 return {} 231 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/docker/auth/auth.py b/docker/auth/auth.py --- a/docker/auth/auth.py +++ b/docker/auth/auth.py @@ -117,7 +117,7 @@ conf = {} for registry, entry in six.iteritems(entries): - if not (isinstance(entry, dict) and 'auth' in entry): + if not isinstance(entry, dict): log.debug( 'Config entry for key {0} is not auth config'.format(registry) ) @@ -130,6 +130,16 @@ 'Invalid configuration for registry {0}'.format(registry) ) return {} + if 'auth' not in entry: + # Starting with engine v1.11 (API 1.23), an empty dictionary is + # a valid value in the auths config. + # https://github.com/docker/compose/issues/3265 + log.debug( + 'Auth data for {0} is absent. Client might be using a ' + 'credentials store instead.' + ) + return {} + username, password = decode_auth(entry['auth']) log.debug( 'Found entry (registry={0}, username={1})' @@ -189,6 +199,9 @@ if data.get('HttpHeaders'): log.debug("Found 'HttpHeaders' section") res.update({'HttpHeaders': data['HttpHeaders']}) + if data.get('credsStore'): + log.debug("Found 'credsStore' section") + res.update({'credsStore': data['credsStore']}) if res: return res else:
{"golden_diff": "diff --git a/docker/auth/auth.py b/docker/auth/auth.py\n--- a/docker/auth/auth.py\n+++ b/docker/auth/auth.py\n@@ -117,7 +117,7 @@\n \n conf = {}\n for registry, entry in six.iteritems(entries):\n- if not (isinstance(entry, dict) and 'auth' in entry):\n+ if not isinstance(entry, dict):\n log.debug(\n 'Config entry for key {0} is not auth config'.format(registry)\n )\n@@ -130,6 +130,16 @@\n 'Invalid configuration for registry {0}'.format(registry)\n )\n return {}\n+ if 'auth' not in entry:\n+ # Starting with engine v1.11 (API 1.23), an empty dictionary is\n+ # a valid value in the auths config.\n+ # https://github.com/docker/compose/issues/3265\n+ log.debug(\n+ 'Auth data for {0} is absent. Client might be using a '\n+ 'credentials store instead.'\n+ )\n+ return {}\n+\n username, password = decode_auth(entry['auth'])\n log.debug(\n 'Found entry (registry={0}, username={1})'\n@@ -189,6 +199,9 @@\n if data.get('HttpHeaders'):\n log.debug(\"Found 'HttpHeaders' section\")\n res.update({'HttpHeaders': data['HttpHeaders']})\n+ if data.get('credsStore'):\n+ log.debug(\"Found 'credsStore' section\")\n+ res.update({'credsStore': data['credsStore']})\n if res:\n return res\n else:\n", "issue": "Empty auth dictionary should be valid\ndocker/compose#3265\n\n", "before_files": [{"content": "# Copyright 2013 dotCloud inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport base64\nimport json\nimport logging\nimport os\n\nimport six\n\nfrom .. import errors\n\nINDEX_NAME = 'docker.io'\nINDEX_URL = 'https://{0}/v1/'.format(INDEX_NAME)\nDOCKER_CONFIG_FILENAME = os.path.join('.docker', 'config.json')\nLEGACY_DOCKER_CONFIG_FILENAME = '.dockercfg'\n\nlog = logging.getLogger(__name__)\n\n\ndef resolve_repository_name(repo_name):\n if '://' in repo_name:\n raise errors.InvalidRepository(\n 'Repository name cannot contain a scheme ({0})'.format(repo_name)\n )\n\n index_name, remote_name = split_repo_name(repo_name)\n if index_name[0] == '-' or index_name[-1] == '-':\n raise errors.InvalidRepository(\n 'Invalid index name ({0}). Cannot begin or end with a'\n ' hyphen.'.format(index_name)\n )\n return resolve_index_name(index_name), remote_name\n\n\ndef resolve_index_name(index_name):\n index_name = convert_to_hostname(index_name)\n if index_name == 'index.' + INDEX_NAME:\n index_name = INDEX_NAME\n return index_name\n\n\ndef split_repo_name(repo_name):\n parts = repo_name.split('/', 1)\n if len(parts) == 1 or (\n '.' not in parts[0] and ':' not in parts[0] and parts[0] != 'localhost'\n ):\n # This is a docker index repo (ex: username/foobar or ubuntu)\n return INDEX_NAME, repo_name\n return tuple(parts)\n\n\ndef resolve_authconfig(authconfig, registry=None):\n \"\"\"\n Returns the authentication data from the given auth configuration for a\n specific registry. 
As with the Docker client, legacy entries in the config\n with full URLs are stripped down to hostnames before checking for a match.\n Returns None if no match was found.\n \"\"\"\n # Default to the public index server\n registry = resolve_index_name(registry) if registry else INDEX_NAME\n log.debug(\"Looking for auth entry for {0}\".format(repr(registry)))\n\n if registry in authconfig:\n log.debug(\"Found {0}\".format(repr(registry)))\n return authconfig[registry]\n\n for key, config in six.iteritems(authconfig):\n if resolve_index_name(key) == registry:\n log.debug(\"Found {0}\".format(repr(key)))\n return config\n\n log.debug(\"No entry found\")\n return None\n\n\ndef convert_to_hostname(url):\n return url.replace('http://', '').replace('https://', '').split('/', 1)[0]\n\n\ndef decode_auth(auth):\n if isinstance(auth, six.string_types):\n auth = auth.encode('ascii')\n s = base64.b64decode(auth)\n login, pwd = s.split(b':', 1)\n return login.decode('utf8'), pwd.decode('utf8')\n\n\ndef encode_header(auth):\n auth_json = json.dumps(auth).encode('ascii')\n return base64.urlsafe_b64encode(auth_json)\n\n\ndef parse_auth(entries, raise_on_error=False):\n \"\"\"\n Parses authentication entries\n\n Args:\n entries: Dict of authentication entries.\n raise_on_error: If set to true, an invalid format will raise\n InvalidConfigFile\n\n Returns:\n Authentication registry.\n \"\"\"\n\n conf = {}\n for registry, entry in six.iteritems(entries):\n if not (isinstance(entry, dict) and 'auth' in entry):\n log.debug(\n 'Config entry for key {0} is not auth config'.format(registry)\n )\n # We sometimes fall back to parsing the whole config as if it was\n # the auth config by itself, for legacy purposes. In that case, we\n # fail silently and return an empty conf if any of the keys is not\n # formatted properly.\n if raise_on_error:\n raise errors.InvalidConfigFile(\n 'Invalid configuration for registry {0}'.format(registry)\n )\n return {}\n username, password = decode_auth(entry['auth'])\n log.debug(\n 'Found entry (registry={0}, username={1})'\n .format(repr(registry), repr(username))\n )\n conf[registry] = {\n 'username': username,\n 'password': password,\n 'email': entry.get('email'),\n 'serveraddress': registry,\n }\n return conf\n\n\ndef find_config_file(config_path=None):\n environment_path = os.path.join(\n os.environ.get('DOCKER_CONFIG'),\n os.path.basename(DOCKER_CONFIG_FILENAME)\n ) if os.environ.get('DOCKER_CONFIG') else None\n\n paths = [\n config_path, # 1\n environment_path, # 2\n os.path.join(os.path.expanduser('~'), DOCKER_CONFIG_FILENAME), # 3\n os.path.join(\n os.path.expanduser('~'), LEGACY_DOCKER_CONFIG_FILENAME\n ) # 4\n ]\n\n for path in paths:\n if path and os.path.exists(path):\n return path\n return None\n\n\ndef load_config(config_path=None):\n \"\"\"\n Loads authentication data from a Docker configuration file in the given\n root directory or if config_path is passed use given path.\n Lookup priority:\n explicit config_path parameter > DOCKER_CONFIG environment variable >\n ~/.docker/config.json > ~/.dockercfg\n \"\"\"\n config_file = find_config_file(config_path)\n\n if not config_file:\n log.debug(\"File doesn't exist\")\n return {}\n\n try:\n with open(config_file) as f:\n data = json.load(f)\n res = {}\n if data.get('auths'):\n log.debug(\"Found 'auths' section\")\n res.update(parse_auth(data['auths'], raise_on_error=True))\n if data.get('HttpHeaders'):\n log.debug(\"Found 'HttpHeaders' section\")\n res.update({'HttpHeaders': data['HttpHeaders']})\n if res:\n return res\n 
else:\n log.debug(\"Couldn't find 'auths' or 'HttpHeaders' sections\")\n f.seek(0)\n return parse_auth(json.load(f))\n except (IOError, KeyError, ValueError) as e:\n # Likely missing new Docker config file or it's in an\n # unknown format, continue to attempt to read old location\n # and format.\n log.debug(e)\n\n log.debug(\"Attempting to parse legacy auth file format\")\n try:\n data = []\n with open(config_file) as f:\n for line in f.readlines():\n data.append(line.strip().split(' = ')[1])\n if len(data) < 2:\n # Not enough data\n raise errors.InvalidConfigFile(\n 'Invalid or empty configuration file!'\n )\n\n username, password = decode_auth(data[0])\n return {\n INDEX_NAME: {\n 'username': username,\n 'password': password,\n 'email': data[1],\n 'serveraddress': INDEX_URL,\n }\n }\n except Exception as e:\n log.debug(e)\n pass\n\n log.debug(\"All parsing attempts failed - returning empty config\")\n return {}\n", "path": "docker/auth/auth.py"}], "after_files": [{"content": "# Copyright 2013 dotCloud inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport base64\nimport json\nimport logging\nimport os\n\nimport six\n\nfrom .. import errors\n\nINDEX_NAME = 'docker.io'\nINDEX_URL = 'https://{0}/v1/'.format(INDEX_NAME)\nDOCKER_CONFIG_FILENAME = os.path.join('.docker', 'config.json')\nLEGACY_DOCKER_CONFIG_FILENAME = '.dockercfg'\n\nlog = logging.getLogger(__name__)\n\n\ndef resolve_repository_name(repo_name):\n if '://' in repo_name:\n raise errors.InvalidRepository(\n 'Repository name cannot contain a scheme ({0})'.format(repo_name)\n )\n\n index_name, remote_name = split_repo_name(repo_name)\n if index_name[0] == '-' or index_name[-1] == '-':\n raise errors.InvalidRepository(\n 'Invalid index name ({0}). Cannot begin or end with a'\n ' hyphen.'.format(index_name)\n )\n return resolve_index_name(index_name), remote_name\n\n\ndef resolve_index_name(index_name):\n index_name = convert_to_hostname(index_name)\n if index_name == 'index.' + INDEX_NAME:\n index_name = INDEX_NAME\n return index_name\n\n\ndef split_repo_name(repo_name):\n parts = repo_name.split('/', 1)\n if len(parts) == 1 or (\n '.' not in parts[0] and ':' not in parts[0] and parts[0] != 'localhost'\n ):\n # This is a docker index repo (ex: username/foobar or ubuntu)\n return INDEX_NAME, repo_name\n return tuple(parts)\n\n\ndef resolve_authconfig(authconfig, registry=None):\n \"\"\"\n Returns the authentication data from the given auth configuration for a\n specific registry. 
As with the Docker client, legacy entries in the config\n with full URLs are stripped down to hostnames before checking for a match.\n Returns None if no match was found.\n \"\"\"\n # Default to the public index server\n registry = resolve_index_name(registry) if registry else INDEX_NAME\n log.debug(\"Looking for auth entry for {0}\".format(repr(registry)))\n\n if registry in authconfig:\n log.debug(\"Found {0}\".format(repr(registry)))\n return authconfig[registry]\n\n for key, config in six.iteritems(authconfig):\n if resolve_index_name(key) == registry:\n log.debug(\"Found {0}\".format(repr(key)))\n return config\n\n log.debug(\"No entry found\")\n return None\n\n\ndef convert_to_hostname(url):\n return url.replace('http://', '').replace('https://', '').split('/', 1)[0]\n\n\ndef decode_auth(auth):\n if isinstance(auth, six.string_types):\n auth = auth.encode('ascii')\n s = base64.b64decode(auth)\n login, pwd = s.split(b':', 1)\n return login.decode('utf8'), pwd.decode('utf8')\n\n\ndef encode_header(auth):\n auth_json = json.dumps(auth).encode('ascii')\n return base64.urlsafe_b64encode(auth_json)\n\n\ndef parse_auth(entries, raise_on_error=False):\n \"\"\"\n Parses authentication entries\n\n Args:\n entries: Dict of authentication entries.\n raise_on_error: If set to true, an invalid format will raise\n InvalidConfigFile\n\n Returns:\n Authentication registry.\n \"\"\"\n\n conf = {}\n for registry, entry in six.iteritems(entries):\n if not isinstance(entry, dict):\n log.debug(\n 'Config entry for key {0} is not auth config'.format(registry)\n )\n # We sometimes fall back to parsing the whole config as if it was\n # the auth config by itself, for legacy purposes. In that case, we\n # fail silently and return an empty conf if any of the keys is not\n # formatted properly.\n if raise_on_error:\n raise errors.InvalidConfigFile(\n 'Invalid configuration for registry {0}'.format(registry)\n )\n return {}\n if 'auth' not in entry:\n # Starting with engine v1.11 (API 1.23), an empty dictionary is\n # a valid value in the auths config.\n # https://github.com/docker/compose/issues/3265\n log.debug(\n 'Auth data for {0} is absent. 
Client might be using a '\n 'credentials store instead.'\n )\n return {}\n\n username, password = decode_auth(entry['auth'])\n log.debug(\n 'Found entry (registry={0}, username={1})'\n .format(repr(registry), repr(username))\n )\n conf[registry] = {\n 'username': username,\n 'password': password,\n 'email': entry.get('email'),\n 'serveraddress': registry,\n }\n return conf\n\n\ndef find_config_file(config_path=None):\n environment_path = os.path.join(\n os.environ.get('DOCKER_CONFIG'),\n os.path.basename(DOCKER_CONFIG_FILENAME)\n ) if os.environ.get('DOCKER_CONFIG') else None\n\n paths = [\n config_path, # 1\n environment_path, # 2\n os.path.join(os.path.expanduser('~'), DOCKER_CONFIG_FILENAME), # 3\n os.path.join(\n os.path.expanduser('~'), LEGACY_DOCKER_CONFIG_FILENAME\n ) # 4\n ]\n\n for path in paths:\n if path and os.path.exists(path):\n return path\n return None\n\n\ndef load_config(config_path=None):\n \"\"\"\n Loads authentication data from a Docker configuration file in the given\n root directory or if config_path is passed use given path.\n Lookup priority:\n explicit config_path parameter > DOCKER_CONFIG environment variable >\n ~/.docker/config.json > ~/.dockercfg\n \"\"\"\n config_file = find_config_file(config_path)\n\n if not config_file:\n log.debug(\"File doesn't exist\")\n return {}\n\n try:\n with open(config_file) as f:\n data = json.load(f)\n res = {}\n if data.get('auths'):\n log.debug(\"Found 'auths' section\")\n res.update(parse_auth(data['auths'], raise_on_error=True))\n if data.get('HttpHeaders'):\n log.debug(\"Found 'HttpHeaders' section\")\n res.update({'HttpHeaders': data['HttpHeaders']})\n if data.get('credsStore'):\n log.debug(\"Found 'credsStore' section\")\n res.update({'credsStore': data['credsStore']})\n if res:\n return res\n else:\n log.debug(\"Couldn't find 'auths' or 'HttpHeaders' sections\")\n f.seek(0)\n return parse_auth(json.load(f))\n except (IOError, KeyError, ValueError) as e:\n # Likely missing new Docker config file or it's in an\n # unknown format, continue to attempt to read old location\n # and format.\n log.debug(e)\n\n log.debug(\"Attempting to parse legacy auth file format\")\n try:\n data = []\n with open(config_file) as f:\n for line in f.readlines():\n data.append(line.strip().split(' = ')[1])\n if len(data) < 2:\n # Not enough data\n raise errors.InvalidConfigFile(\n 'Invalid or empty configuration file!'\n )\n\n username, password = decode_auth(data[0])\n return {\n INDEX_NAME: {\n 'username': username,\n 'password': password,\n 'email': data[1],\n 'serveraddress': INDEX_URL,\n }\n }\n except Exception as e:\n log.debug(e)\n pass\n\n log.debug(\"All parsing attempts failed - returning empty config\")\n return {}\n", "path": "docker/auth/auth.py"}]}
2,522
363
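The docker-py record above hinges on one guard: a per-registry entry in `config.json` may legitimately be an empty dict when an external credentials store (the `credsStore` key) holds the secrets. Below is a hedged, self-contained sketch of that decision, kept separate from the library so it runs without docker-py installed; the `classify_entry` name and the sample entries are invented for illustration and are not part of the record.

```python
# Standalone mimic of the patched parse_auth() guard; not the library itself.
def classify_entry(entry):
    """Classify one registry entry from a Docker config.json 'auths' mapping."""
    if not isinstance(entry, dict):
        return 'invalid'                 # legacy or garbled entry
    if 'auth' not in entry:
        # Engine v1.11+ (API 1.23) writes an empty dict here when an
        # external credentials store ("credsStore") manages the secrets.
        return 'credentials-store'
    return 'inline-auth'

print(classify_entry({}))                        # credentials-store
print(classify_entry({'auth': 'dXNlcjpwYXNz'}))  # inline-auth (base64 "user:pass")
print(classify_entry('not-a-dict'))              # invalid
```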
gh_patches_debug_687
rasdani/github-patches
git_diff
hylang__hy-2220
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add header notice to "stable" line documentation to point users to the alpha cycle documentation I was reading documentation and noticed that hy.contrib.walk is mentioned there: https://docs.hylang.org/en/stable/contrib/walk.html however it appears that hy.contrib.walk file is no longer on the master branch. https://github.com/hylang/hy/blob/6ba90fd3f853b2ddc391aa3358f9386c41d831c4/hy/contrib/walk.hy is it a bug in documentation or I'm missing something? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `docs/conf.py` Content: ``` 1 # This file is execfile()d with the current directory set to its containing dir. 2 3 import re, os, sys, time, html 4 5 sys.path.insert(0, os.path.abspath('..')) 6 7 extensions = [ 8 'sphinx.ext.napoleon', 9 'sphinx.ext.intersphinx', 10 'sphinx.ext.autodoc', 11 'sphinx.ext.viewcode', 12 'sphinxcontrib.hydomain', 13 ] 14 15 from get_version import __version__ as hy_version 16 17 # Read the Docs might dirty its checkout, so strip the dirty flag. 18 hy_version = re.sub(r'[+.]dirty\Z', '', hy_version) 19 20 templates_path = ['_templates'] 21 source_suffix = '.rst' 22 23 master_doc = 'index' 24 25 # General information about the project. 26 project = 'hy' 27 copyright = '%s the authors' % time.strftime('%Y') 28 29 # The version info for the project you're documenting, acts as replacement for 30 # |version| and |release|, also used in various other places throughout the 31 # built documents. 32 # 33 # The short X.Y version. 34 version = ".".join(hy_version.split(".")[:-1]) 35 # The full version, including alpha/beta/rc tags. 36 release = hy_version 37 hy_descriptive_version = html.escape(hy_version) 38 if "+" in hy_version: 39 hy_descriptive_version += " <strong style='color: red;'>(unstable)</strong>" 40 41 exclude_patterns = ['_build', 'coreteam.rst'] 42 add_module_names = True 43 44 pygments_style = 'sphinx' 45 46 import sphinx_rtd_theme 47 html_theme = 'sphinx_rtd_theme' 48 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] 49 50 # Add any paths that contain custom static files (such as style sheets) here, 51 # relative to this directory. They are copied after the builtin static files, 52 # so a file named "default.css" will overwrite the builtin "default.css". 
53 html_static_path = ['_static'] 54 55 html_use_smartypants = False 56 html_show_sphinx = False 57 58 html_context = dict( 59 hy_descriptive_version = hy_descriptive_version) 60 61 highlight_language = 'clojure' 62 63 intersphinx_mapping = dict( 64 py = ('https://docs.python.org/3/', None), 65 py3_10 = ('https://docs.python.org/3.10/', None), 66 hyrule = ('https://hyrule.readthedocs.io/en/master/', None)) 67 # ** Generate Cheatsheet 68 import json 69 from pathlib import Path 70 from itertools import zip_longest 71 72 def refize(spec): 73 role = ':hy:func:' 74 if isinstance(spec, dict): 75 _name = spec['name'] 76 uri = spec['uri'] 77 if spec.get('internal'): 78 role = ':ref:' 79 else: 80 uri = spec 81 _name = str.split(uri, '.')[-1] 82 return '{}`{} <{}>`'.format(role, _name, uri) 83 84 85 def format_refs(refs, indent): 86 args = [iter(map(refize, refs))] 87 ref_groups = zip_longest(*args, fillvalue="") 88 return str.join( 89 ' \\\n' + ' ' * (indent + 3), 90 [str.join(' ', ref_group) for ref_group in ref_groups], 91 ) 92 93 94 def format_row(category, divider_loc): 95 return '{title: <{width}} | {methods}'.format( 96 width=divider_loc, 97 title=category['name'], 98 methods=format_refs(category['methods'], divider_loc) 99 ) 100 101 102 def format_table(table_spec): 103 table_name = table_spec['name'] 104 categories = table_spec['categories'] 105 longest_cat_name = max(len(category['name']) for category in categories) 106 table = [ 107 table_name, 108 '-' * len(table_name), 109 '', 110 '=' * longest_cat_name + ' ' + '=' * 25, 111 *(format_row(category, longest_cat_name) for category in categories), 112 '=' * longest_cat_name + ' ' + '=' * 25, 113 '' 114 ] 115 return '\n'.join(table) 116 117 118 # Modifications to the cheatsheet should be added in `cheatsheet.json` 119 cheatsheet_spec = json.loads(Path('./docs/cheatsheet.json').read_text()) 120 cheatsheet = [ 121 '..', 122 ' DO NOT MODIFY THIS FILE. IT IS AUTO GENERATED BY ``conf.py``', 123 ' If you need to change or add methods, modify ``cheatsheet_spec`` in ``conf.py``', 124 '', 125 '.. _cheatsheet:', 126 '', 127 'Cheatsheet', 128 '==========', 129 '', 130 *map(format_table, cheatsheet_spec), 131 ] 132 Path('./docs/cheatsheet.rst').write_text('\n'.join(cheatsheet)) 133 134 135 # ** Sphinx App Setup 136 137 138 def setup(app): 139 app.add_css_file('overrides.css') 140 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/docs/conf.py b/docs/conf.py --- a/docs/conf.py +++ b/docs/conf.py @@ -56,7 +56,9 @@ html_show_sphinx = False html_context = dict( - hy_descriptive_version = hy_descriptive_version) + hy_descriptive_version = hy_descriptive_version, + has_active_alpha = True, +) highlight_language = 'clojure'
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -56,7 +56,9 @@\n html_show_sphinx = False\n \n html_context = dict(\n- hy_descriptive_version = hy_descriptive_version)\n+ hy_descriptive_version = hy_descriptive_version,\n+ has_active_alpha = True,\n+)\n \n highlight_language = 'clojure'\n", "issue": "Add header notice to \"stable\" line documentation to point users to the alpha cycle documentation\nI was reading documentation and noticed that hy.contrib.walk is mentioned there:\r\nhttps://docs.hylang.org/en/stable/contrib/walk.html\r\n\r\nhowever it appears that hy.contrib.walk file is no longer on the master branch. \r\nhttps://github.com/hylang/hy/blob/6ba90fd3f853b2ddc391aa3358f9386c41d831c4/hy/contrib/walk.hy\r\n\r\nis it a bug in documentation or I'm missing something? \r\n\r\n\n", "before_files": [{"content": "# This file is execfile()d with the current directory set to its containing dir.\n\nimport re, os, sys, time, html\n\nsys.path.insert(0, os.path.abspath('..'))\n\nextensions = [\n 'sphinx.ext.napoleon',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.autodoc',\n 'sphinx.ext.viewcode',\n 'sphinxcontrib.hydomain',\n]\n\nfrom get_version import __version__ as hy_version\n\n# Read the Docs might dirty its checkout, so strip the dirty flag.\nhy_version = re.sub(r'[+.]dirty\\Z', '', hy_version)\n\ntemplates_path = ['_templates']\nsource_suffix = '.rst'\n\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'hy'\ncopyright = '%s the authors' % time.strftime('%Y')\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = \".\".join(hy_version.split(\".\")[:-1])\n# The full version, including alpha/beta/rc tags.\nrelease = hy_version\nhy_descriptive_version = html.escape(hy_version)\nif \"+\" in hy_version:\n hy_descriptive_version += \" <strong style='color: red;'>(unstable)</strong>\"\n\nexclude_patterns = ['_build', 'coreteam.rst']\nadd_module_names = True\n\npygments_style = 'sphinx'\n\nimport sphinx_rtd_theme\nhtml_theme = 'sphinx_rtd_theme'\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\nhtml_use_smartypants = False\nhtml_show_sphinx = False\n\nhtml_context = dict(\n hy_descriptive_version = hy_descriptive_version)\n\nhighlight_language = 'clojure'\n\nintersphinx_mapping = dict(\n py = ('https://docs.python.org/3/', None),\n py3_10 = ('https://docs.python.org/3.10/', None),\n hyrule = ('https://hyrule.readthedocs.io/en/master/', None))\n# ** Generate Cheatsheet\nimport json\nfrom pathlib import Path\nfrom itertools import zip_longest\n\ndef refize(spec):\n role = ':hy:func:'\n if isinstance(spec, dict):\n _name = spec['name']\n uri = spec['uri']\n if spec.get('internal'):\n role = ':ref:'\n else:\n uri = spec\n _name = str.split(uri, '.')[-1]\n return '{}`{} <{}>`'.format(role, _name, uri)\n\n\ndef format_refs(refs, indent):\n args = [iter(map(refize, refs))]\n ref_groups = zip_longest(*args, fillvalue=\"\")\n return str.join(\n ' \\\\\\n' + ' ' * (indent + 3),\n [str.join(' ', ref_group) for ref_group in ref_groups],\n )\n\n\ndef format_row(category, divider_loc):\n return '{title: <{width}} | {methods}'.format(\n width=divider_loc,\n title=category['name'],\n methods=format_refs(category['methods'], divider_loc)\n )\n\n\ndef format_table(table_spec):\n table_name = table_spec['name']\n categories = table_spec['categories']\n longest_cat_name = max(len(category['name']) for category in categories)\n table = [\n table_name,\n '-' * len(table_name),\n '',\n '=' * longest_cat_name + ' ' + '=' * 25,\n *(format_row(category, longest_cat_name) for category in categories),\n '=' * longest_cat_name + ' ' + '=' * 25,\n ''\n ]\n return '\\n'.join(table)\n\n\n# Modifications to the cheatsheet should be added in `cheatsheet.json`\ncheatsheet_spec = json.loads(Path('./docs/cheatsheet.json').read_text())\ncheatsheet = [\n '..',\n ' DO NOT MODIFY THIS FILE. IT IS AUTO GENERATED BY ``conf.py``',\n ' If you need to change or add methods, modify ``cheatsheet_spec`` in ``conf.py``',\n '',\n '.. 
_cheatsheet:',\n '',\n 'Cheatsheet',\n '==========',\n '',\n *map(format_table, cheatsheet_spec),\n]\nPath('./docs/cheatsheet.rst').write_text('\\n'.join(cheatsheet))\n\n\n# ** Sphinx App Setup\n\n\ndef setup(app):\n app.add_css_file('overrides.css')\n", "path": "docs/conf.py"}], "after_files": [{"content": "# This file is execfile()d with the current directory set to its containing dir.\n\nimport re, os, sys, time, html\n\nsys.path.insert(0, os.path.abspath('..'))\n\nextensions = [\n 'sphinx.ext.napoleon',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.autodoc',\n 'sphinx.ext.viewcode',\n 'sphinxcontrib.hydomain',\n]\n\nfrom get_version import __version__ as hy_version\n\n# Read the Docs might dirty its checkout, so strip the dirty flag.\nhy_version = re.sub(r'[+.]dirty\\Z', '', hy_version)\n\ntemplates_path = ['_templates']\nsource_suffix = '.rst'\n\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'hy'\ncopyright = '%s the authors' % time.strftime('%Y')\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = \".\".join(hy_version.split(\".\")[:-1])\n# The full version, including alpha/beta/rc tags.\nrelease = hy_version\nhy_descriptive_version = html.escape(hy_version)\nif \"+\" in hy_version:\n hy_descriptive_version += \" <strong style='color: red;'>(unstable)</strong>\"\n\nexclude_patterns = ['_build', 'coreteam.rst']\nadd_module_names = True\n\npygments_style = 'sphinx'\n\nimport sphinx_rtd_theme\nhtml_theme = 'sphinx_rtd_theme'\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\nhtml_use_smartypants = False\nhtml_show_sphinx = False\n\nhtml_context = dict(\n hy_descriptive_version = hy_descriptive_version,\n has_active_alpha = True,\n)\n\nhighlight_language = 'clojure'\n\nintersphinx_mapping = dict(\n py = ('https://docs.python.org/3/', None),\n py3_10 = ('https://docs.python.org/3.10/', None),\n hyrule = ('https://hyrule.readthedocs.io/en/master/', None))\n# ** Generate Cheatsheet\nimport json\nfrom pathlib import Path\nfrom itertools import zip_longest\n\ndef refize(spec):\n role = ':hy:func:'\n if isinstance(spec, dict):\n _name = spec['name']\n uri = spec['uri']\n if spec.get('internal'):\n role = ':ref:'\n else:\n uri = spec\n _name = str.split(uri, '.')[-1]\n return '{}`{} <{}>`'.format(role, _name, uri)\n\n\ndef format_refs(refs, indent):\n args = [iter(map(refize, refs))]\n ref_groups = zip_longest(*args, fillvalue=\"\")\n return str.join(\n ' \\\\\\n' + ' ' * (indent + 3),\n [str.join(' ', ref_group) for ref_group in ref_groups],\n )\n\n\ndef format_row(category, divider_loc):\n return '{title: <{width}} | {methods}'.format(\n width=divider_loc,\n title=category['name'],\n methods=format_refs(category['methods'], divider_loc)\n )\n\n\ndef format_table(table_spec):\n table_name = table_spec['name']\n categories = table_spec['categories']\n longest_cat_name = max(len(category['name']) for category in categories)\n table = [\n table_name,\n '-' * len(table_name),\n '',\n '=' * longest_cat_name + ' ' + '=' * 25,\n *(format_row(category, longest_cat_name) for category in categories),\n '=' * longest_cat_name + ' ' + '=' * 25,\n ''\n ]\n return '\\n'.join(table)\n\n\n# Modifications to the cheatsheet should be added in `cheatsheet.json`\ncheatsheet_spec = json.loads(Path('./docs/cheatsheet.json').read_text())\ncheatsheet = [\n '..',\n ' DO NOT MODIFY THIS FILE. IT IS AUTO GENERATED BY ``conf.py``',\n ' If you need to change or add methods, modify ``cheatsheet_spec`` in ``conf.py``',\n '',\n '.. _cheatsheet:',\n '',\n 'Cheatsheet',\n '==========',\n '',\n *map(format_table, cheatsheet_spec),\n]\nPath('./docs/cheatsheet.rst').write_text('\\n'.join(cheatsheet))\n\n\n# ** Sphinx App Setup\n\n\ndef setup(app):\n app.add_css_file('overrides.css')\n", "path": "docs/conf.py"}]}
1,737
90
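The hy record above only threads a `has_active_alpha` flag into Sphinx's `html_context`; how the theme consumes it is left to templates outside the diff. One hedged, Python-side way to act on such a flag is Sphinx's `html-page-context` event. This is illustrative of the mechanism only, not what the hy patch does; the `add_banner_flag` name and banner text are invented.

```python
# conf.py fragment (sketch): surface the flag to every rendered page via an event hook.
def setup(app):
    def add_banner_flag(app, pagename, templatename, context, doctree):
        if context.get('has_active_alpha'):
            context['alpha_banner'] = (
                'A newer alpha release line is documented separately.'
            )
    app.connect('html-page-context', add_banner_flag)
```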
gh_patches_debug_27568
rasdani/github-patches
git_diff
spack__spack-18325
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Installation issue: py-lxml doesn't use the spack libxslt and libexslt libraries It looks like py-lxml should have AUTO_RPATH set to true. Otherwise it picks up the OS versions of libxslt instead of the spack built versions. I added this to the package.py and the library dependencies were correct: ``` def setup_build_environment(self, env): env.set('AUTO_RPATH', 'true') ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `var/spack/repos/builtin/packages/py-lxml/package.py` Content: ``` 1 # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other 2 # Spack Project Developers. See the top-level COPYRIGHT file for details. 3 # 4 # SPDX-License-Identifier: (Apache-2.0 OR MIT) 5 6 from spack import * 7 8 9 class PyLxml(PythonPackage): 10 """lxml is the most feature-rich and easy-to-use library for processing 11 XML and HTML in the Python language.""" 12 13 homepage = "http://lxml.de/" 14 url = "https://pypi.io/packages/source/l/lxml/lxml-4.4.1.tar.gz" 15 16 version('4.4.1', sha256='c81cb40bff373ab7a7446d6bbca0190bccc5be3448b47b51d729e37799bb5692') 17 version('4.3.3', sha256='4a03dd682f8e35a10234904e0b9508d705ff98cf962c5851ed052e9340df3d90') 18 version('4.2.5', sha256='36720698c29e7a9626a0dc802ef8885f8f0239bfd1689628ecd459a061f2807f') 19 version('3.7.3', sha256='aa502d78a51ee7d127b4824ff96500f0181d3c7826e6ee7b800d068be79361c7') 20 version('2.3', sha256='eea1b8d29532739c1383cb4794c5eacd6176f0972b59e8d29348335b87ff2e66') 21 22 variant('html5', default=False, description='Enable html5lib backend') 23 variant('htmlsoup', default=False, description='Enable BeautifulSoup4 backend') 24 variant('cssselect', default=False, description='Enable cssselect module') 25 26 depends_on('[email protected]:2.8,3.5:', type=('build', 'run')) 27 depends_on('py-setuptools', type='build') 28 depends_on('libxml2', type=('build', 'run')) 29 depends_on('libxslt', type=('build', 'run')) 30 depends_on('py-html5lib', when='+html5', type=('build', 'run')) 31 depends_on('py-beautifulsoup4', when='+htmlsoup', type=('build', 'run')) 32 depends_on('[email protected]:', when='+cssselect', type=('build', 'run')) 33 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/var/spack/repos/builtin/packages/py-lxml/package.py b/var/spack/repos/builtin/packages/py-lxml/package.py --- a/var/spack/repos/builtin/packages/py-lxml/package.py +++ b/var/spack/repos/builtin/packages/py-lxml/package.py @@ -13,6 +13,7 @@ homepage = "http://lxml.de/" url = "https://pypi.io/packages/source/l/lxml/lxml-4.4.1.tar.gz" + version('4.5.2', sha256='cdc13a1682b2a6241080745b1953719e7fe0850b40a5c71ca574f090a1391df6') version('4.4.1', sha256='c81cb40bff373ab7a7446d6bbca0190bccc5be3448b47b51d729e37799bb5692') version('4.3.3', sha256='4a03dd682f8e35a10234904e0b9508d705ff98cf962c5851ed052e9340df3d90') version('4.2.5', sha256='36720698c29e7a9626a0dc802ef8885f8f0239bfd1689628ecd459a061f2807f') @@ -25,8 +26,8 @@ depends_on('[email protected]:2.8,3.5:', type=('build', 'run')) depends_on('py-setuptools', type='build') - depends_on('libxml2', type=('build', 'run')) - depends_on('libxslt', type=('build', 'run')) + depends_on('libxml2', type=('build', 'link', 'run')) + depends_on('libxslt', type=('build', 'link', 'run')) depends_on('py-html5lib', when='+html5', type=('build', 'run')) depends_on('py-beautifulsoup4', when='+htmlsoup', type=('build', 'run')) depends_on('[email protected]:', when='+cssselect', type=('build', 'run'))
{"golden_diff": "diff --git a/var/spack/repos/builtin/packages/py-lxml/package.py b/var/spack/repos/builtin/packages/py-lxml/package.py\n--- a/var/spack/repos/builtin/packages/py-lxml/package.py\n+++ b/var/spack/repos/builtin/packages/py-lxml/package.py\n@@ -13,6 +13,7 @@\n homepage = \"http://lxml.de/\"\n url = \"https://pypi.io/packages/source/l/lxml/lxml-4.4.1.tar.gz\"\n \n+ version('4.5.2', sha256='cdc13a1682b2a6241080745b1953719e7fe0850b40a5c71ca574f090a1391df6')\n version('4.4.1', sha256='c81cb40bff373ab7a7446d6bbca0190bccc5be3448b47b51d729e37799bb5692')\n version('4.3.3', sha256='4a03dd682f8e35a10234904e0b9508d705ff98cf962c5851ed052e9340df3d90')\n version('4.2.5', sha256='36720698c29e7a9626a0dc802ef8885f8f0239bfd1689628ecd459a061f2807f')\n@@ -25,8 +26,8 @@\n \n depends_on('[email protected]:2.8,3.5:', type=('build', 'run'))\n depends_on('py-setuptools', type='build')\n- depends_on('libxml2', type=('build', 'run'))\n- depends_on('libxslt', type=('build', 'run'))\n+ depends_on('libxml2', type=('build', 'link', 'run'))\n+ depends_on('libxslt', type=('build', 'link', 'run'))\n depends_on('py-html5lib', when='+html5', type=('build', 'run'))\n depends_on('py-beautifulsoup4', when='+htmlsoup', type=('build', 'run'))\n depends_on('[email protected]:', when='+cssselect', type=('build', 'run'))\n", "issue": "Installation issue: py-lxml doesn't use the spack libxslt and libexslt libraries\nIt looks like py-lxml should have AUTO_RPATH set to true. Otherwise it picks up the OS versions of libxslt instead of the spack built versions. I added this to the package.py and the library dependencies were correct:\r\n\r\n```\r\n def setup_build_environment(self, env):\r\n env.set('AUTO_RPATH', 'true')\r\n```\n", "before_files": [{"content": "# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. 
See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack import *\n\n\nclass PyLxml(PythonPackage):\n \"\"\"lxml is the most feature-rich and easy-to-use library for processing\n XML and HTML in the Python language.\"\"\"\n\n homepage = \"http://lxml.de/\"\n url = \"https://pypi.io/packages/source/l/lxml/lxml-4.4.1.tar.gz\"\n\n version('4.4.1', sha256='c81cb40bff373ab7a7446d6bbca0190bccc5be3448b47b51d729e37799bb5692')\n version('4.3.3', sha256='4a03dd682f8e35a10234904e0b9508d705ff98cf962c5851ed052e9340df3d90')\n version('4.2.5', sha256='36720698c29e7a9626a0dc802ef8885f8f0239bfd1689628ecd459a061f2807f')\n version('3.7.3', sha256='aa502d78a51ee7d127b4824ff96500f0181d3c7826e6ee7b800d068be79361c7')\n version('2.3', sha256='eea1b8d29532739c1383cb4794c5eacd6176f0972b59e8d29348335b87ff2e66')\n\n variant('html5', default=False, description='Enable html5lib backend')\n variant('htmlsoup', default=False, description='Enable BeautifulSoup4 backend')\n variant('cssselect', default=False, description='Enable cssselect module')\n\n depends_on('[email protected]:2.8,3.5:', type=('build', 'run'))\n depends_on('py-setuptools', type='build')\n depends_on('libxml2', type=('build', 'run'))\n depends_on('libxslt', type=('build', 'run'))\n depends_on('py-html5lib', when='+html5', type=('build', 'run'))\n depends_on('py-beautifulsoup4', when='+htmlsoup', type=('build', 'run'))\n depends_on('[email protected]:', when='+cssselect', type=('build', 'run'))\n", "path": "var/spack/repos/builtin/packages/py-lxml/package.py"}], "after_files": [{"content": "# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nfrom spack import *\n\n\nclass PyLxml(PythonPackage):\n \"\"\"lxml is the most feature-rich and easy-to-use library for processing\n XML and HTML in the Python language.\"\"\"\n\n homepage = \"http://lxml.de/\"\n url = \"https://pypi.io/packages/source/l/lxml/lxml-4.4.1.tar.gz\"\n\n version('4.5.2', sha256='cdc13a1682b2a6241080745b1953719e7fe0850b40a5c71ca574f090a1391df6')\n version('4.4.1', sha256='c81cb40bff373ab7a7446d6bbca0190bccc5be3448b47b51d729e37799bb5692')\n version('4.3.3', sha256='4a03dd682f8e35a10234904e0b9508d705ff98cf962c5851ed052e9340df3d90')\n version('4.2.5', sha256='36720698c29e7a9626a0dc802ef8885f8f0239bfd1689628ecd459a061f2807f')\n version('3.7.3', sha256='aa502d78a51ee7d127b4824ff96500f0181d3c7826e6ee7b800d068be79361c7')\n version('2.3', sha256='eea1b8d29532739c1383cb4794c5eacd6176f0972b59e8d29348335b87ff2e66')\n\n variant('html5', default=False, description='Enable html5lib backend')\n variant('htmlsoup', default=False, description='Enable BeautifulSoup4 backend')\n variant('cssselect', default=False, description='Enable cssselect module')\n\n depends_on('[email protected]:2.8,3.5:', type=('build', 'run'))\n depends_on('py-setuptools', type='build')\n depends_on('libxml2', type=('build', 'link', 'run'))\n depends_on('libxslt', type=('build', 'link', 'run'))\n depends_on('py-html5lib', when='+html5', type=('build', 'run'))\n depends_on('py-beautifulsoup4', when='+htmlsoup', type=('build', 'run'))\n depends_on('[email protected]:', when='+cssselect', type=('build', 'run'))\n", "path": "var/spack/repos/builtin/packages/py-lxml/package.py"}]}
1,096
587
gh_patches_debug_34919
rasdani/github-patches
git_diff
iterative__dvc-380
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Revisit dvc target from config in relation to phony stage files --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `dvc/command/repro.py` Content: ``` 1 from dvc.command.common.base import CmdBase 2 3 class CmdRepro(CmdBase): 4 def run(self): 5 recursive = not self.args.single_item 6 self.project.reproduce(self.args.targets, 7 recursive=recursive, 8 force=self.args.force) 9 ``` Path: `dvc/project.py` Content: ``` 1 import os 2 import itertools 3 import networkx as nx 4 5 from dvc.logger import Logger 6 from dvc.exceptions import DvcException 7 from dvc.stage import Stage, Output, Dependency 8 from dvc.config import Config 9 from dvc.state import State 10 from dvc.lock import Lock 11 from dvc.scm import SCM 12 from dvc.cache import Cache 13 from dvc.data_cloud import DataCloud 14 15 16 class PipelineError(DvcException): 17 pass 18 19 20 class StageNotInPipelineError(PipelineError): 21 pass 22 23 24 class StageNotFoundError(DvcException): 25 pass 26 27 28 class Pipeline(object): 29 30 def __init__(self, project, G): 31 self.project = project 32 self.G = G 33 34 def graph(self): 35 return self.G 36 37 def stages(self): 38 return nx.get_node_attributes(self.G, 'stage') 39 40 def changed(self, stage): 41 for node in nx.dfs_postorder_nodes(G, stage.path.relative_to(self.project.root_dir)): 42 if self.stages[node].changed(): 43 return True 44 return False 45 46 def reproduce(self, stage): 47 if stage not in self.stages(): 48 raise StageNotInPipelineError() 49 50 if not self.changed(stage): 51 raise PipelineNotChangedError() 52 53 for node in nx.dfs_postorder_nodes(G, stage.path.relative_to(self.project.root_dir)): 54 self.stages[node].reproduce() 55 56 stage.reproduce() 57 58 59 class Project(object): 60 DVC_DIR = '.dvc' 61 62 def __init__(self, root_dir): 63 self.root_dir = os.path.abspath(os.path.realpath(root_dir)) 64 self.dvc_dir = os.path.join(self.root_dir, self.DVC_DIR) 65 66 self.scm = SCM(self.root_dir) 67 self.lock = Lock(self.dvc_dir) 68 self.cache = Cache(self.dvc_dir) 69 self.state = State(self.root_dir, self.dvc_dir) 70 self.config = Config(self.dvc_dir) 71 self.logger = Logger() 72 self.cloud = DataCloud(self.config._config) 73 74 @staticmethod 75 def init(root_dir): 76 """ 77 Initiate dvc project in directory. 78 79 Args: 80 root_dir: Path to project's root directory. 81 82 Returns: 83 Project instance. 84 85 Raises: 86 KeyError: Raises an exception. 
87 """ 88 root_dir = os.path.abspath(root_dir) 89 dvc_dir = os.path.join(root_dir, Project.DVC_DIR) 90 os.mkdir(dvc_dir) 91 92 config = Config.init(dvc_dir) 93 cache = Cache.init(dvc_dir) 94 state = State.init(root_dir, dvc_dir) 95 lock = Lock(dvc_dir) 96 97 scm = SCM(root_dir) 98 scm.ignore_list([cache.cache_dir, 99 state.state_file, 100 lock.lock_file]) 101 102 ignore_file = os.path.join(dvc_dir, scm.ignore_file()) 103 scm.add([config.config_file, ignore_file]) 104 scm.commit('DVC init') 105 106 return Project(root_dir) 107 108 def add(self, fname): 109 path = os.path.abspath(fname) + Stage.STAGE_FILE_SUFFIX 110 cwd = os.path.dirname(path) 111 outputs = [Output.loads(self, os.path.basename(fname), use_cache=True, cwd=cwd)] 112 stage = Stage(project=self, 113 path=path, 114 cmd=None, 115 cwd=cwd, 116 outs=outputs, 117 deps=[], 118 locked=True) 119 stage.save() 120 stage.dump() 121 return stage 122 123 def remove(self, fname): 124 stages = [] 125 output = Output.loads(self, fname) 126 for out in self.outs(): 127 if out.path == output.path: 128 stage = out.stage() 129 stages.append(stage) 130 131 if len(stages) == 0: 132 raise StageNotFoundError(fname) 133 134 for stage in stages: 135 stage.remove() 136 137 return stages 138 139 def run(self, 140 cmd=None, 141 deps=[], 142 outs=[], 143 outs_no_cache=[], 144 locked=False, 145 fname=Stage.STAGE_FILE, 146 cwd=os.curdir): 147 cwd = os.path.abspath(cwd) 148 path = os.path.join(cwd, fname) 149 outputs = Output.loads_from(self, outs, use_cache=True, cwd=cwd) 150 outputs += Output.loads_from(self, outs_no_cache, use_cache=False, cwd=cwd) 151 deps = Dependency.loads_from(self, deps, use_cache=False, cwd=cwd) 152 153 stage = Stage(project=self, 154 path=path, 155 cmd=cmd, 156 cwd=cwd, 157 outs=outputs, 158 deps=deps, 159 locked=locked) 160 stage.run() 161 stage.dump() 162 return stage 163 164 def reproduce(self, targets, recursive=True, force=False): 165 reproduced = [] 166 stages = nx.get_node_attributes(self.graph(), 'stage') 167 for target in targets: 168 node = os.path.relpath(os.path.abspath(target), self.root_dir) 169 if node not in stages: 170 raise StageNotFoundError(target) 171 172 if recursive: 173 for n in nx.dfs_postorder_nodes(self.graph(), node): 174 stages[n].reproduce(force=force) 175 stages[n].dump() 176 reproduced.append(stages[n]) 177 178 stages[node].reproduce(force=force) 179 stages[node].dump() 180 reproduced.append(stages[node]) 181 182 return reproduced 183 184 def checkout(self): 185 for stage in self.stages(): 186 stage.checkout() 187 188 def _used_cache(self): 189 clist = [] 190 for stage in self.stages(): 191 for entry in itertools.chain(stage.outs, stage.deps): 192 if not entry.use_cache: 193 continue 194 if entry.cache not in clist: 195 clist.append(entry.cache) 196 return clist 197 198 def gc(self): 199 clist = self._used_cache() 200 for cache in self.cache.all(): 201 if cache in clist: 202 continue 203 os.unlink(cache) 204 self.logger.info('\'{}\' was removed'.format(cache)) 205 206 def push(self, jobs=1): 207 self.cloud.push(self._used_cache(), jobs) 208 209 def pull(self, jobs=1): 210 self.cloud.pull(self._used_cache(), jobs) 211 for stage in self.stages(): 212 for entry in itertools.chain(stage.outs, stage.deps): 213 if entry.use_cache: 214 entry.link() 215 216 def status(self, jobs=1): 217 return self.cloud.status(self._used_cache(), jobs) 218 219 def graph(self): 220 G = nx.DiGraph() 221 222 for stage in self.stages(): 223 node = os.path.relpath(stage.path, self.root_dir) 224 G.add_node(node, stage=stage) 225 for 
dep in stage.deps: 226 dep_stage = dep.stage() 227 if not dep_stage: 228 continue 229 dep_node = os.path.relpath(dep_stage.path, self.root_dir) 230 G.add_node(dep_node, stage=dep_stage) 231 G.add_edge(node, dep_node) 232 233 return G 234 235 def stages(self): 236 stages = [] 237 for root, dirs, files in os.walk(self.root_dir): 238 for fname in files: 239 path = os.path.join(root, fname) 240 if not Stage.is_stage_file(path): 241 continue 242 stages.append(Stage.load(self, path)) 243 return stages 244 245 def outs(self): 246 outs = [] 247 for stage in self.stages(): 248 outs += stage.outs 249 return outs 250 251 def pipelines(self): 252 pipelines = [] 253 for G in nx.weakly_connected_component_subgraphs(self.graph()): 254 pipeline = Pipeline(self, G) 255 pipelines.append(pipeline) 256 257 return pipelines 258 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/dvc/command/repro.py b/dvc/command/repro.py --- a/dvc/command/repro.py +++ b/dvc/command/repro.py @@ -1,8 +1,16 @@ +from dvc.exceptions import DvcException from dvc.command.common.base import CmdBase class CmdRepro(CmdBase): def run(self): recursive = not self.args.single_item - self.project.reproduce(self.args.targets, - recursive=recursive, - force=self.args.force) + for target in self.args.targets: + try: + self.project.reproduce(target, + recursive=recursive, + force=self.args.force) + except DvcException as ex: + msg = 'Failed to reproduce {}: {}'.format(target, str(ex)) + self.project.logger.error(msg) + return 1 + return 0 diff --git a/dvc/project.py b/dvc/project.py --- a/dvc/project.py +++ b/dvc/project.py @@ -22,7 +22,9 @@ class StageNotFoundError(DvcException): - pass + def __init__(self, path): + msg = 'Stage file {} does not exist'.format(path) + super(StageNotFoundError, self).__init__(msg) class Pipeline(object): @@ -161,23 +163,22 @@ stage.dump() return stage - def reproduce(self, targets, recursive=True, force=False): + def reproduce(self, target, recursive=True, force=False): reproduced = [] stages = nx.get_node_attributes(self.graph(), 'stage') - for target in targets: - node = os.path.relpath(os.path.abspath(target), self.root_dir) - if node not in stages: - raise StageNotFoundError(target) - - if recursive: - for n in nx.dfs_postorder_nodes(self.graph(), node): - stages[n].reproduce(force=force) - stages[n].dump() - reproduced.append(stages[n]) - - stages[node].reproduce(force=force) - stages[node].dump() - reproduced.append(stages[node]) + node = os.path.relpath(os.path.abspath(target), self.root_dir) + if node not in stages: + raise StageNotFoundError(target) + + if recursive: + for n in nx.dfs_postorder_nodes(self.graph(), node): + stages[n].reproduce(force=force) + stages[n].dump() + reproduced.append(stages[n]) + + stages[node].reproduce(force=force) + stages[node].dump() + reproduced.append(stages[node]) return reproduced
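The patch above moves error handling into the command loop: each target is reproduced independently, and the first `DvcException` aborts with a non-zero exit code. A small self-contained sketch of that control flow (the stub `reproduce` below stands in for `Project.reproduce` and is purely illustrative):

```python
# Illustrative stand-ins; only the loop / except / return-code shape mirrors the patch.
class DvcException(Exception):
    pass

def reproduce(target):
    # Pretend every stage file except "ok.dvc" is missing.
    if target != "ok.dvc":
        raise DvcException("Stage file {} does not exist".format(target))

def run(targets):
    for target in targets:
        try:
            reproduce(target)
        except DvcException as ex:
            print("Failed to reproduce {}: {}".format(target, str(ex)))
            return 1
    return 0

print(run(["ok.dvc", "missing.dvc"]))  # prints the failure message, then 1
```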
{"golden_diff": "diff --git a/dvc/command/repro.py b/dvc/command/repro.py\n--- a/dvc/command/repro.py\n+++ b/dvc/command/repro.py\n@@ -1,8 +1,16 @@\n+from dvc.exceptions import DvcException\n from dvc.command.common.base import CmdBase\n \n class CmdRepro(CmdBase):\n def run(self):\n recursive = not self.args.single_item\n- self.project.reproduce(self.args.targets,\n- recursive=recursive,\n- force=self.args.force)\n+ for target in self.args.targets:\n+ try:\n+ self.project.reproduce(target,\n+ recursive=recursive,\n+ force=self.args.force)\n+ except DvcException as ex:\n+ msg = 'Failed to reproduce {}: {}'.format(target, str(ex))\n+ self.project.logger.error(msg)\n+ return 1\n+ return 0\ndiff --git a/dvc/project.py b/dvc/project.py\n--- a/dvc/project.py\n+++ b/dvc/project.py\n@@ -22,7 +22,9 @@\n \n \n class StageNotFoundError(DvcException):\n- pass\n+ def __init__(self, path):\n+ msg = 'Stage file {} does not exist'.format(path)\n+ super(StageNotFoundError, self).__init__(msg)\n \n \n class Pipeline(object):\n@@ -161,23 +163,22 @@\n stage.dump()\n return stage\n \n- def reproduce(self, targets, recursive=True, force=False):\n+ def reproduce(self, target, recursive=True, force=False):\n reproduced = []\n stages = nx.get_node_attributes(self.graph(), 'stage')\n- for target in targets:\n- node = os.path.relpath(os.path.abspath(target), self.root_dir)\n- if node not in stages:\n- raise StageNotFoundError(target)\n-\n- if recursive:\n- for n in nx.dfs_postorder_nodes(self.graph(), node):\n- stages[n].reproduce(force=force)\n- stages[n].dump()\n- reproduced.append(stages[n])\n-\n- stages[node].reproduce(force=force)\n- stages[node].dump()\n- reproduced.append(stages[node])\n+ node = os.path.relpath(os.path.abspath(target), self.root_dir)\n+ if node not in stages:\n+ raise StageNotFoundError(target)\n+\n+ if recursive:\n+ for n in nx.dfs_postorder_nodes(self.graph(), node):\n+ stages[n].reproduce(force=force)\n+ stages[n].dump()\n+ reproduced.append(stages[n])\n+\n+ stages[node].reproduce(force=force)\n+ stages[node].dump()\n+ reproduced.append(stages[node])\n \n return reproduced\n", "issue": "Revisit dvc target from config in relation to phony stage files\n\n", "before_files": [{"content": "from dvc.command.common.base import CmdBase\n\nclass CmdRepro(CmdBase):\n def run(self):\n recursive = not self.args.single_item\n self.project.reproduce(self.args.targets,\n recursive=recursive,\n force=self.args.force)\n", "path": "dvc/command/repro.py"}, {"content": "import os\nimport itertools\nimport networkx as nx\n\nfrom dvc.logger import Logger\nfrom dvc.exceptions import DvcException\nfrom dvc.stage import Stage, Output, Dependency\nfrom dvc.config import Config\nfrom dvc.state import State\nfrom dvc.lock import Lock\nfrom dvc.scm import SCM\nfrom dvc.cache import Cache\nfrom dvc.data_cloud import DataCloud\n\n\nclass PipelineError(DvcException):\n pass\n\n\nclass StageNotInPipelineError(PipelineError):\n pass\n\n\nclass StageNotFoundError(DvcException):\n pass\n\n\nclass Pipeline(object):\n\n def __init__(self, project, G):\n self.project = project\n self.G = G\n\n def graph(self):\n return self.G\n\n def stages(self):\n return nx.get_node_attributes(self.G, 'stage')\n\n def changed(self, stage):\n for node in nx.dfs_postorder_nodes(G, stage.path.relative_to(self.project.root_dir)):\n if self.stages[node].changed():\n return True\n return False\n\n def reproduce(self, stage):\n if stage not in self.stages():\n raise StageNotInPipelineError()\n\n if not self.changed(stage):\n raise 
PipelineNotChangedError()\n\n for node in nx.dfs_postorder_nodes(G, stage.path.relative_to(self.project.root_dir)):\n self.stages[node].reproduce()\n\n stage.reproduce()\n\n\nclass Project(object):\n DVC_DIR = '.dvc'\n\n def __init__(self, root_dir):\n self.root_dir = os.path.abspath(os.path.realpath(root_dir))\n self.dvc_dir = os.path.join(self.root_dir, self.DVC_DIR)\n\n self.scm = SCM(self.root_dir)\n self.lock = Lock(self.dvc_dir)\n self.cache = Cache(self.dvc_dir)\n self.state = State(self.root_dir, self.dvc_dir)\n self.config = Config(self.dvc_dir)\n self.logger = Logger()\n self.cloud = DataCloud(self.config._config)\n\n @staticmethod\n def init(root_dir):\n \"\"\"\n Initiate dvc project in directory.\n\n Args:\n root_dir: Path to project's root directory.\n\n Returns:\n Project instance.\n\n Raises:\n KeyError: Raises an exception.\n \"\"\"\n root_dir = os.path.abspath(root_dir)\n dvc_dir = os.path.join(root_dir, Project.DVC_DIR)\n os.mkdir(dvc_dir)\n\n config = Config.init(dvc_dir)\n cache = Cache.init(dvc_dir)\n state = State.init(root_dir, dvc_dir)\n lock = Lock(dvc_dir)\n\n scm = SCM(root_dir)\n scm.ignore_list([cache.cache_dir,\n state.state_file,\n lock.lock_file])\n\n ignore_file = os.path.join(dvc_dir, scm.ignore_file())\n scm.add([config.config_file, ignore_file])\n scm.commit('DVC init')\n\n return Project(root_dir)\n\n def add(self, fname):\n path = os.path.abspath(fname) + Stage.STAGE_FILE_SUFFIX\n cwd = os.path.dirname(path)\n outputs = [Output.loads(self, os.path.basename(fname), use_cache=True, cwd=cwd)]\n stage = Stage(project=self,\n path=path,\n cmd=None,\n cwd=cwd,\n outs=outputs,\n deps=[],\n locked=True)\n stage.save()\n stage.dump()\n return stage\n\n def remove(self, fname):\n stages = []\n output = Output.loads(self, fname)\n for out in self.outs():\n if out.path == output.path:\n stage = out.stage()\n stages.append(stage)\n\n if len(stages) == 0:\n raise StageNotFoundError(fname) \n\n for stage in stages:\n stage.remove()\n\n return stages\n\n def run(self,\n cmd=None,\n deps=[],\n outs=[],\n outs_no_cache=[],\n locked=False,\n fname=Stage.STAGE_FILE,\n cwd=os.curdir):\n cwd = os.path.abspath(cwd)\n path = os.path.join(cwd, fname)\n outputs = Output.loads_from(self, outs, use_cache=True, cwd=cwd)\n outputs += Output.loads_from(self, outs_no_cache, use_cache=False, cwd=cwd)\n deps = Dependency.loads_from(self, deps, use_cache=False, cwd=cwd)\n\n stage = Stage(project=self,\n path=path,\n cmd=cmd,\n cwd=cwd,\n outs=outputs,\n deps=deps,\n locked=locked)\n stage.run()\n stage.dump()\n return stage\n\n def reproduce(self, targets, recursive=True, force=False):\n reproduced = []\n stages = nx.get_node_attributes(self.graph(), 'stage')\n for target in targets:\n node = os.path.relpath(os.path.abspath(target), self.root_dir)\n if node not in stages:\n raise StageNotFoundError(target)\n\n if recursive:\n for n in nx.dfs_postorder_nodes(self.graph(), node):\n stages[n].reproduce(force=force)\n stages[n].dump()\n reproduced.append(stages[n])\n\n stages[node].reproduce(force=force)\n stages[node].dump()\n reproduced.append(stages[node])\n\n return reproduced\n\n def checkout(self):\n for stage in self.stages():\n stage.checkout()\n\n def _used_cache(self):\n clist = []\n for stage in self.stages():\n for entry in itertools.chain(stage.outs, stage.deps):\n if not entry.use_cache:\n continue\n if entry.cache not in clist:\n clist.append(entry.cache)\n return clist\n\n def gc(self):\n clist = self._used_cache()\n for cache in self.cache.all():\n if cache in clist:\n 
continue\n os.unlink(cache)\n self.logger.info('\\'{}\\' was removed'.format(cache))\n\n def push(self, jobs=1):\n self.cloud.push(self._used_cache(), jobs)\n\n def pull(self, jobs=1):\n self.cloud.pull(self._used_cache(), jobs)\n for stage in self.stages():\n for entry in itertools.chain(stage.outs, stage.deps):\n if entry.use_cache:\n entry.link()\n\n def status(self, jobs=1):\n return self.cloud.status(self._used_cache(), jobs)\n\n def graph(self):\n G = nx.DiGraph()\n\n for stage in self.stages():\n node = os.path.relpath(stage.path, self.root_dir)\n G.add_node(node, stage=stage)\n for dep in stage.deps:\n dep_stage = dep.stage()\n if not dep_stage:\n continue\n dep_node = os.path.relpath(dep_stage.path, self.root_dir)\n G.add_node(dep_node, stage=dep_stage)\n G.add_edge(node, dep_node)\n\n return G\n\n def stages(self):\n stages = []\n for root, dirs, files in os.walk(self.root_dir):\n for fname in files:\n path = os.path.join(root, fname)\n if not Stage.is_stage_file(path):\n continue\n stages.append(Stage.load(self, path))\n return stages\n\n def outs(self):\n outs = []\n for stage in self.stages():\n outs += stage.outs\n return outs\n\n def pipelines(self):\n pipelines = []\n for G in nx.weakly_connected_component_subgraphs(self.graph()):\n pipeline = Pipeline(self, G)\n pipelines.append(pipeline)\n\n return pipelines\n", "path": "dvc/project.py"}], "after_files": [{"content": "from dvc.exceptions import DvcException\nfrom dvc.command.common.base import CmdBase\n\nclass CmdRepro(CmdBase):\n def run(self):\n recursive = not self.args.single_item\n for target in self.args.targets:\n try:\n self.project.reproduce(target,\n recursive=recursive,\n force=self.args.force)\n except DvcException as ex:\n msg = 'Failed to reproduce {}: {}'.format(target, str(ex))\n self.project.logger.error(msg)\n return 1\n return 0\n", "path": "dvc/command/repro.py"}, {"content": "import os\nimport itertools\nimport networkx as nx\n\nfrom dvc.logger import Logger\nfrom dvc.exceptions import DvcException\nfrom dvc.stage import Stage, Output, Dependency\nfrom dvc.config import Config\nfrom dvc.state import State\nfrom dvc.lock import Lock\nfrom dvc.scm import SCM\nfrom dvc.cache import Cache\nfrom dvc.data_cloud import DataCloud\n\n\nclass PipelineError(DvcException):\n pass\n\n\nclass StageNotInPipelineError(PipelineError):\n pass\n\n\nclass StageNotFoundError(DvcException):\n def __init__(self, path):\n msg = 'Stage file {} does not exist'.format(path)\n super(StageNotFoundError, self).__init__(msg)\n\n\nclass Pipeline(object):\n\n def __init__(self, project, G):\n self.project = project\n self.G = G\n\n def graph(self):\n return self.G\n\n def stages(self):\n return nx.get_node_attributes(self.G, 'stage')\n\n def changed(self, stage):\n for node in nx.dfs_postorder_nodes(G, stage.path.relative_to(self.project.root_dir)):\n if self.stages[node].changed():\n return True\n return False\n\n def reproduce(self, stage):\n if stage not in self.stages():\n raise StageNotInPipelineError()\n\n if not self.changed(stage):\n raise PipelineNotChangedError()\n\n for node in nx.dfs_postorder_nodes(G, stage.path.relative_to(self.project.root_dir)):\n self.stages[node].reproduce()\n\n stage.reproduce()\n\n\nclass Project(object):\n DVC_DIR = '.dvc'\n\n def __init__(self, root_dir):\n self.root_dir = os.path.abspath(os.path.realpath(root_dir))\n self.dvc_dir = os.path.join(self.root_dir, self.DVC_DIR)\n\n self.scm = SCM(self.root_dir)\n self.lock = Lock(self.dvc_dir)\n self.cache = Cache(self.dvc_dir)\n self.state = 
State(self.root_dir, self.dvc_dir)\n self.config = Config(self.dvc_dir)\n self.logger = Logger()\n self.cloud = DataCloud(self.config._config)\n\n @staticmethod\n def init(root_dir):\n \"\"\"\n Initiate dvc project in directory.\n\n Args:\n root_dir: Path to project's root directory.\n\n Returns:\n Project instance.\n\n Raises:\n KeyError: Raises an exception.\n \"\"\"\n root_dir = os.path.abspath(root_dir)\n dvc_dir = os.path.join(root_dir, Project.DVC_DIR)\n os.mkdir(dvc_dir)\n\n config = Config.init(dvc_dir)\n cache = Cache.init(dvc_dir)\n state = State.init(root_dir, dvc_dir)\n lock = Lock(dvc_dir)\n\n scm = SCM(root_dir)\n scm.ignore_list([cache.cache_dir,\n state.state_file,\n lock.lock_file])\n\n ignore_file = os.path.join(dvc_dir, scm.ignore_file())\n scm.add([config.config_file, ignore_file])\n scm.commit('DVC init')\n\n return Project(root_dir)\n\n def add(self, fname):\n path = os.path.abspath(fname) + Stage.STAGE_FILE_SUFFIX\n cwd = os.path.dirname(path)\n outputs = [Output.loads(self, os.path.basename(fname), use_cache=True, cwd=cwd)]\n stage = Stage(project=self,\n path=path,\n cmd=None,\n cwd=cwd,\n outs=outputs,\n deps=[],\n locked=True)\n stage.save()\n stage.dump()\n return stage\n\n def remove(self, fname):\n stages = []\n output = Output.loads(self, fname)\n for out in self.outs():\n if out.path == output.path:\n stage = out.stage()\n stages.append(stage)\n\n if len(stages) == 0:\n raise StageNotFoundError(fname) \n\n for stage in stages:\n stage.remove()\n\n return stages\n\n def run(self,\n cmd=None,\n deps=[],\n outs=[],\n outs_no_cache=[],\n locked=False,\n fname=Stage.STAGE_FILE,\n cwd=os.curdir):\n cwd = os.path.abspath(cwd)\n path = os.path.join(cwd, fname)\n outputs = Output.loads_from(self, outs, use_cache=True, cwd=cwd)\n outputs += Output.loads_from(self, outs_no_cache, use_cache=False, cwd=cwd)\n deps = Dependency.loads_from(self, deps, use_cache=False, cwd=cwd)\n\n stage = Stage(project=self,\n path=path,\n cmd=cmd,\n cwd=cwd,\n outs=outputs,\n deps=deps,\n locked=locked)\n stage.run()\n stage.dump()\n return stage\n\n def reproduce(self, target, recursive=True, force=False):\n reproduced = []\n stages = nx.get_node_attributes(self.graph(), 'stage')\n node = os.path.relpath(os.path.abspath(target), self.root_dir)\n if node not in stages:\n raise StageNotFoundError(target)\n\n if recursive:\n for n in nx.dfs_postorder_nodes(self.graph(), node):\n stages[n].reproduce(force=force)\n stages[n].dump()\n reproduced.append(stages[n])\n\n stages[node].reproduce(force=force)\n stages[node].dump()\n reproduced.append(stages[node])\n\n return reproduced\n\n def checkout(self):\n for stage in self.stages():\n stage.checkout()\n\n def _used_cache(self):\n clist = []\n for stage in self.stages():\n for entry in itertools.chain(stage.outs, stage.deps):\n if not entry.use_cache:\n continue\n if entry.cache not in clist:\n clist.append(entry.cache)\n return clist\n\n def gc(self):\n clist = self._used_cache()\n for cache in self.cache.all():\n if cache in clist:\n continue\n os.unlink(cache)\n self.logger.info('\\'{}\\' was removed'.format(cache))\n\n def push(self, jobs=1):\n self.cloud.push(self._used_cache(), jobs)\n\n def pull(self, jobs=1):\n self.cloud.pull(self._used_cache(), jobs)\n for stage in self.stages():\n for entry in itertools.chain(stage.outs, stage.deps):\n if entry.use_cache:\n entry.link()\n\n def status(self, jobs=1):\n return self.cloud.status(self._used_cache(), jobs)\n\n def graph(self):\n G = nx.DiGraph()\n\n for stage in self.stages():\n node = 
os.path.relpath(stage.path, self.root_dir)\n G.add_node(node, stage=stage)\n for dep in stage.deps:\n dep_stage = dep.stage()\n if not dep_stage:\n continue\n dep_node = os.path.relpath(dep_stage.path, self.root_dir)\n G.add_node(dep_node, stage=dep_stage)\n G.add_edge(node, dep_node)\n\n return G\n\n def stages(self):\n stages = []\n for root, dirs, files in os.walk(self.root_dir):\n for fname in files:\n path = os.path.join(root, fname)\n if not Stage.is_stage_file(path):\n continue\n stages.append(Stage.load(self, path))\n return stages\n\n def outs(self):\n outs = []\n for stage in self.stages():\n outs += stage.outs\n return outs\n\n def pipelines(self):\n pipelines = []\n for G in nx.weakly_connected_component_subgraphs(self.graph()):\n pipeline = Pipeline(self, G)\n pipelines.append(pipeline)\n\n return pipelines\n", "path": "dvc/project.py"}]}
num_tokens: 2,621
num_tokens_diff: 581
problem_id: gh_patches_debug_35275
source: rasdani/github-patches
task_type: git_diff
in_source_id: conan-io__conan-center-index-11634
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [package] xorg/system: generated pkg-config files lack variables While trying to add xkbcommon 1.0.1, I had to add pkg-config generator to xkbcommon recipe because since 1.0.0 a new optional component depends on libxml2, and meson uses pkg-config files. xkbcommon depends on xorg, but currently xkbcommon's recipe relies on xorg system installed pkg-config files. As soon as conan pkg-config files are generated for xkbcommon build, they take precedence, but one of them lacks an important variable for xkbcommon: `xkb_base` in `xkeyboard-config` xorg's component, used to set a definition at build time. xkbcommon can build without this variable, but il will fail at runtime. I'm pretty sure that this issue can't be fixed right now. It requires https://github.com/conan-io/conan/issues/7720 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `recipes/xorg/all/conanfile.py` Content: ``` 1 from conans import ConanFile, tools 2 from conans.errors import ConanException, ConanInvalidConfiguration 3 import os 4 5 required_conan_version = ">=1.32" 6 7 class ConanXOrg(ConanFile): 8 name = "xorg" 9 url = "https://github.com/conan-io/conan-center-index" 10 license = "MIT" 11 homepage = "https://www.x.org/wiki/" 12 description = "The X.Org project provides an open source implementation of the X Window System." 13 settings = "os" 14 topics = ("x11", "xorg") 15 16 def configure(self): 17 if self.settings.os not in ["Linux", "FreeBSD"]: 18 raise ConanInvalidConfiguration("This recipe supports only Linux and FreeBSD") 19 20 def package_id(self): 21 self.info.header_only() 22 23 def _fill_cppinfo_from_pkgconfig(self, name): 24 pkg_config = tools.PkgConfig(name) 25 if not pkg_config.provides: 26 raise ConanException("OpenGL development files aren't available, give up") 27 libs = [lib[2:] for lib in pkg_config.libs_only_l] 28 lib_dirs = [lib[2:] for lib in pkg_config.libs_only_L] 29 ldflags = [flag for flag in pkg_config.libs_only_other] 30 include_dirs = [include[2:] for include in pkg_config.cflags_only_I] 31 cflags = [flag for flag in pkg_config.cflags_only_other if not flag.startswith("-D")] 32 defines = [flag[2:] for flag in pkg_config.cflags_only_other if flag.startswith("-D")] 33 34 self.cpp_info.components[name].system_libs = libs 35 self.cpp_info.components[name].libdirs = lib_dirs 36 self.cpp_info.components[name].sharedlinkflags = ldflags 37 self.cpp_info.components[name].exelinkflags = ldflags 38 self.cpp_info.components[name].defines = defines 39 self.cpp_info.components[name].includedirs = include_dirs 40 self.cpp_info.components[name].cflags = cflags 41 self.cpp_info.components[name].cxxflags = cflags 42 self.cpp_info.components[name].version = pkg_config.version[0] 43 44 def system_requirements(self): 45 packages = [] 46 if tools.os_info.is_linux and self.settings.os == "Linux": 47 if tools.os_info.with_apt: 48 packages = ["libx11-dev", "libx11-xcb-dev", "libfontenc-dev", "libice-dev", "libsm-dev", "libxau-dev", "libxaw7-dev", 49 "libxcomposite-dev", "libxcursor-dev", "libxdamage-dev", "libxdmcp-dev", "libxext-dev", "libxfixes-dev", 50 "libxi-dev", "libxinerama-dev", "libxkbfile-dev", "libxmu-dev", "libxmuu-dev", 51 "libxpm-dev", "libxrandr-dev", "libxrender-dev", "libxres-dev", "libxss-dev", "libxt-dev", "libxtst-dev", 52 "libxv-dev", "libxvmc-dev", "libxxf86vm-dev", "xtrans-dev", 
"libxcb-render0-dev", 53 "libxcb-render-util0-dev", "libxcb-xkb-dev", "libxcb-icccm4-dev", "libxcb-image0-dev", 54 "libxcb-keysyms1-dev", "libxcb-randr0-dev", "libxcb-shape0-dev", "libxcb-sync-dev", "libxcb-xfixes0-dev", 55 "libxcb-xinerama0-dev", "xkb-data", "libxcb-dri3-dev", "uuid-dev"] 56 if (tools.os_info.linux_distro == "ubuntu" and tools.os_info.os_version < "15") or\ 57 (tools.os_info.linux_distro == "debian" and tools.os_info.os_version < "12") or\ 58 (tools.os_info.linux_distro == "raspbian" and tools.os_info.os_version < "12"): 59 packages.append( "libxcb-util0-dev" ) 60 else: 61 packages.append( "libxcb-util-dev" ) 62 elif tools.os_info.with_yum or tools.os_info.with_dnf or tools.os_info.with_zypper: 63 packages = ["libxcb-devel", "libfontenc-devel", "libXaw-devel", "libXcomposite-devel", 64 "libXcursor-devel", "libXdmcp-devel", "libXtst-devel", "libXinerama-devel", 65 "libxkbfile-devel", "libXrandr-devel", "libXres-devel", "libXScrnSaver-devel", "libXvMC-devel", 66 "xorg-x11-xtrans-devel", "xcb-util-wm-devel", "xcb-util-image-devel", "xcb-util-keysyms-devel", 67 "xcb-util-renderutil-devel", "libXdamage-devel", "libXxf86vm-devel", "libXv-devel", 68 "xcb-util-devel", "libuuid-devel"] 69 packages.append("xkeyboard-config" if tools.os_info.with_zypper else "xkeyboard-config-devel") 70 elif tools.os_info.with_pacman: 71 packages = ["libxcb", "libfontenc", "libice", "libsm", "libxaw", "libxcomposite", "libxcursor", 72 "libxdamage", "libxdmcp", "libxtst", "libxinerama", "libxkbfile", "libxrandr", "libxres", 73 "libxss", "libxvmc", "xtrans", "xcb-util-wm", "xcb-util-image","xcb-util-keysyms", "xcb-util-renderutil", 74 "libxxf86vm", "libxv", "xkeyboard-config", "xcb-util", "util-linux-libs"] 75 else: 76 self.output.warn("Do not know how to install 'xorg' for {}.".format(tools.os_info.linux_distro)) 77 78 elif tools.os_info.is_freebsd and self.settings.os == "FreeBSD": 79 packages = ["libX11", "libfontenc", "libice", "libsm", "libxaw", "libxcomposite", "libxcursor", 80 "libxdamage", "libxdmcp", "libxtst", "libxinerama", "libxkbfile", "libxrandr", "libxres", 81 "libXScrnSaver", "libxvmc", "xtrans", "xcb-util-wm", "xcb-util-image", "xcb-util-keysyms", "xcb-util-renderutil", 82 "libxxf86vm", "libxv", "xkeyboard-config", "xcb-util"] 83 if packages: 84 package_tool = tools.SystemPackageTool(conanfile=self, default_mode="verify") 85 package_tool.install_packages(update=True, packages=packages) 86 87 def package_info(self): 88 for name in ["x11", "x11-xcb", "fontenc", "ice", "sm", "xau", "xaw7", 89 "xcomposite", "xcursor", "xdamage", "xdmcp", "xext", "xfixes", "xi", 90 "xinerama", "xkbfile", "xmu", "xmuu", "xpm", "xrandr", "xrender", "xres", 91 "xscrnsaver", "xt", "xtst", "xv", "xvmc", "xxf86vm", "xtrans", 92 "xcb-xkb", "xcb-icccm", "xcb-image", "xcb-keysyms", "xcb-randr", "xcb-render", 93 "xcb-renderutil", "xcb-shape", "xcb-shm", "xcb-sync", "xcb-xfixes", 94 "xcb-xinerama", "xcb", "xkeyboard-config", "xcb-atom", "xcb-aux", "xcb-event", "xcb-util", 95 "xcb-dri3"] + ([] if self.settings.os == "FreeBSD" else ["uuid"]): 96 self._fill_cppinfo_from_pkgconfig(name) 97 if self.settings.os == "Linux": 98 self.cpp_info.components["sm"].requires.append("uuid") 99 100 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/recipes/xorg/all/conanfile.py b/recipes/xorg/all/conanfile.py --- a/recipes/xorg/all/conanfile.py +++ b/recipes/xorg/all/conanfile.py @@ -1,6 +1,5 @@ from conans import ConanFile, tools from conans.errors import ConanException, ConanInvalidConfiguration -import os required_conan_version = ">=1.32" @@ -30,6 +29,7 @@ include_dirs = [include[2:] for include in pkg_config.cflags_only_I] cflags = [flag for flag in pkg_config.cflags_only_other if not flag.startswith("-D")] defines = [flag[2:] for flag in pkg_config.cflags_only_other if flag.startswith("-D")] + variables = pkg_config.variables self.cpp_info.components[name].system_libs = libs self.cpp_info.components[name].libdirs = lib_dirs @@ -40,6 +40,10 @@ self.cpp_info.components[name].cflags = cflags self.cpp_info.components[name].cxxflags = cflags self.cpp_info.components[name].version = pkg_config.version[0] + self.cpp_info.components[name].set_property("component_version", pkg_config.version[0]) + self.cpp_info.components[name].set_property( + "pkg_config_custom_content", + "\n".join("%s=%s" % (key, value) for key,value in variables.items())) def system_requirements(self): packages = [] @@ -94,6 +98,8 @@ "xcb-xinerama", "xcb", "xkeyboard-config", "xcb-atom", "xcb-aux", "xcb-event", "xcb-util", "xcb-dri3"] + ([] if self.settings.os == "FreeBSD" else ["uuid"]): self._fill_cppinfo_from_pkgconfig(name) + self.cpp_info.components[name].set_property("pkg_config_name", name) + if self.settings.os == "Linux": self.cpp_info.components["sm"].requires.append("uuid")
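The fix above forwards each component's pkg-config variables through Conan's `pkg_config_custom_content` property, so the `.pc` files Conan generates keep entries such as `xkb_base`. A rough, runnable illustration of what that custom-content string ends up looking like for the `xkeyboard-config` component (the variable values are typical system defaults, not read from the recipe):

```python
# Assumed example values for the xkeyboard-config component; on a real system they
# would come from tools.PkgConfig("xkeyboard-config").variables as in the patch above.
variables = {
    "prefix": "/usr",
    "datadir": "${prefix}/share",
    "xkb_base": "${datadir}/X11/xkb",
}
custom_content = "\n".join("%s=%s" % (key, value) for key, value in variables.items())
print(custom_content)
# prefix=/usr
# datadir=${prefix}/share
# xkb_base=${datadir}/X11/xkb
```

With that content present in the generated file, a consumer such as xkbcommon's Meson build can resolve the layout path again with something like `pkg-config --variable=xkb_base xkeyboard-config`, which is exactly what breaks when the variable is dropped.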
{"golden_diff": "diff --git a/recipes/xorg/all/conanfile.py b/recipes/xorg/all/conanfile.py\n--- a/recipes/xorg/all/conanfile.py\n+++ b/recipes/xorg/all/conanfile.py\n@@ -1,6 +1,5 @@\n from conans import ConanFile, tools\n from conans.errors import ConanException, ConanInvalidConfiguration\n-import os\n \n required_conan_version = \">=1.32\"\n \n@@ -30,6 +29,7 @@\n include_dirs = [include[2:] for include in pkg_config.cflags_only_I]\n cflags = [flag for flag in pkg_config.cflags_only_other if not flag.startswith(\"-D\")]\n defines = [flag[2:] for flag in pkg_config.cflags_only_other if flag.startswith(\"-D\")]\n+ variables = pkg_config.variables\n \n self.cpp_info.components[name].system_libs = libs\n self.cpp_info.components[name].libdirs = lib_dirs\n@@ -40,6 +40,10 @@\n self.cpp_info.components[name].cflags = cflags\n self.cpp_info.components[name].cxxflags = cflags\n self.cpp_info.components[name].version = pkg_config.version[0]\n+ self.cpp_info.components[name].set_property(\"component_version\", pkg_config.version[0])\n+ self.cpp_info.components[name].set_property(\n+ \"pkg_config_custom_content\",\n+ \"\\n\".join(\"%s=%s\" % (key, value) for key,value in variables.items()))\n \n def system_requirements(self):\n packages = []\n@@ -94,6 +98,8 @@\n \"xcb-xinerama\", \"xcb\", \"xkeyboard-config\", \"xcb-atom\", \"xcb-aux\", \"xcb-event\", \"xcb-util\",\n \"xcb-dri3\"] + ([] if self.settings.os == \"FreeBSD\" else [\"uuid\"]):\n self._fill_cppinfo_from_pkgconfig(name)\n+ self.cpp_info.components[name].set_property(\"pkg_config_name\", name)\n+ \n if self.settings.os == \"Linux\":\n self.cpp_info.components[\"sm\"].requires.append(\"uuid\")\n", "issue": "[package] xorg/system: generated pkg-config files lack variables\nWhile trying to add xkbcommon 1.0.1, I had to add pkg-config generator to xkbcommon recipe because since 1.0.0 a new optional component depends on libxml2, and meson uses pkg-config files.\r\nxkbcommon depends on xorg, but currently xkbcommon's recipe relies on xorg system installed pkg-config files.\r\nAs soon as conan pkg-config files are generated for xkbcommon build, they take precedence, but one of them lacks an important variable for xkbcommon: `xkb_base` in `xkeyboard-config` xorg's component, used to set a definition at build time.\r\n\r\nxkbcommon can build without this variable, but il will fail at runtime.\r\n\r\nI'm pretty sure that this issue can't be fixed right now. 
It requires https://github.com/conan-io/conan/issues/7720\n", "before_files": [{"content": "from conans import ConanFile, tools\nfrom conans.errors import ConanException, ConanInvalidConfiguration\nimport os\n\nrequired_conan_version = \">=1.32\"\n\nclass ConanXOrg(ConanFile):\n name = \"xorg\"\n url = \"https://github.com/conan-io/conan-center-index\"\n license = \"MIT\"\n homepage = \"https://www.x.org/wiki/\"\n description = \"The X.Org project provides an open source implementation of the X Window System.\"\n settings = \"os\"\n topics = (\"x11\", \"xorg\")\n\n def configure(self):\n if self.settings.os not in [\"Linux\", \"FreeBSD\"]:\n raise ConanInvalidConfiguration(\"This recipe supports only Linux and FreeBSD\")\n\n def package_id(self):\n self.info.header_only()\n\n def _fill_cppinfo_from_pkgconfig(self, name):\n pkg_config = tools.PkgConfig(name)\n if not pkg_config.provides:\n raise ConanException(\"OpenGL development files aren't available, give up\")\n libs = [lib[2:] for lib in pkg_config.libs_only_l]\n lib_dirs = [lib[2:] for lib in pkg_config.libs_only_L]\n ldflags = [flag for flag in pkg_config.libs_only_other]\n include_dirs = [include[2:] for include in pkg_config.cflags_only_I]\n cflags = [flag for flag in pkg_config.cflags_only_other if not flag.startswith(\"-D\")]\n defines = [flag[2:] for flag in pkg_config.cflags_only_other if flag.startswith(\"-D\")]\n\n self.cpp_info.components[name].system_libs = libs\n self.cpp_info.components[name].libdirs = lib_dirs\n self.cpp_info.components[name].sharedlinkflags = ldflags\n self.cpp_info.components[name].exelinkflags = ldflags\n self.cpp_info.components[name].defines = defines\n self.cpp_info.components[name].includedirs = include_dirs\n self.cpp_info.components[name].cflags = cflags\n self.cpp_info.components[name].cxxflags = cflags\n self.cpp_info.components[name].version = pkg_config.version[0]\n\n def system_requirements(self):\n packages = []\n if tools.os_info.is_linux and self.settings.os == \"Linux\":\n if tools.os_info.with_apt:\n packages = [\"libx11-dev\", \"libx11-xcb-dev\", \"libfontenc-dev\", \"libice-dev\", \"libsm-dev\", \"libxau-dev\", \"libxaw7-dev\",\n \"libxcomposite-dev\", \"libxcursor-dev\", \"libxdamage-dev\", \"libxdmcp-dev\", \"libxext-dev\", \"libxfixes-dev\", \n \"libxi-dev\", \"libxinerama-dev\", \"libxkbfile-dev\", \"libxmu-dev\", \"libxmuu-dev\",\n \"libxpm-dev\", \"libxrandr-dev\", \"libxrender-dev\", \"libxres-dev\", \"libxss-dev\", \"libxt-dev\", \"libxtst-dev\", \n \"libxv-dev\", \"libxvmc-dev\", \"libxxf86vm-dev\", \"xtrans-dev\", \"libxcb-render0-dev\",\n \"libxcb-render-util0-dev\", \"libxcb-xkb-dev\", \"libxcb-icccm4-dev\", \"libxcb-image0-dev\",\n \"libxcb-keysyms1-dev\", \"libxcb-randr0-dev\", \"libxcb-shape0-dev\", \"libxcb-sync-dev\", \"libxcb-xfixes0-dev\",\n \"libxcb-xinerama0-dev\", \"xkb-data\", \"libxcb-dri3-dev\", \"uuid-dev\"]\n if (tools.os_info.linux_distro == \"ubuntu\" and tools.os_info.os_version < \"15\") or\\\n (tools.os_info.linux_distro == \"debian\" and tools.os_info.os_version < \"12\") or\\\n (tools.os_info.linux_distro == \"raspbian\" and tools.os_info.os_version < \"12\"):\n packages.append( \"libxcb-util0-dev\" )\n else:\n packages.append( \"libxcb-util-dev\" )\n elif tools.os_info.with_yum or tools.os_info.with_dnf or tools.os_info.with_zypper:\n packages = [\"libxcb-devel\", \"libfontenc-devel\", \"libXaw-devel\", \"libXcomposite-devel\",\n \"libXcursor-devel\", \"libXdmcp-devel\", \"libXtst-devel\", \"libXinerama-devel\",\n \"libxkbfile-devel\", 
\"libXrandr-devel\", \"libXres-devel\", \"libXScrnSaver-devel\", \"libXvMC-devel\",\n \"xorg-x11-xtrans-devel\", \"xcb-util-wm-devel\", \"xcb-util-image-devel\", \"xcb-util-keysyms-devel\",\n \"xcb-util-renderutil-devel\", \"libXdamage-devel\", \"libXxf86vm-devel\", \"libXv-devel\",\n \"xcb-util-devel\", \"libuuid-devel\"]\n packages.append(\"xkeyboard-config\" if tools.os_info.with_zypper else \"xkeyboard-config-devel\")\n elif tools.os_info.with_pacman:\n packages = [\"libxcb\", \"libfontenc\", \"libice\", \"libsm\", \"libxaw\", \"libxcomposite\", \"libxcursor\",\n \"libxdamage\", \"libxdmcp\", \"libxtst\", \"libxinerama\", \"libxkbfile\", \"libxrandr\", \"libxres\",\n \"libxss\", \"libxvmc\", \"xtrans\", \"xcb-util-wm\", \"xcb-util-image\",\"xcb-util-keysyms\", \"xcb-util-renderutil\",\n \"libxxf86vm\", \"libxv\", \"xkeyboard-config\", \"xcb-util\", \"util-linux-libs\"]\n else:\n self.output.warn(\"Do not know how to install 'xorg' for {}.\".format(tools.os_info.linux_distro))\n \n elif tools.os_info.is_freebsd and self.settings.os == \"FreeBSD\":\n packages = [\"libX11\", \"libfontenc\", \"libice\", \"libsm\", \"libxaw\", \"libxcomposite\", \"libxcursor\",\n \"libxdamage\", \"libxdmcp\", \"libxtst\", \"libxinerama\", \"libxkbfile\", \"libxrandr\", \"libxres\",\n \"libXScrnSaver\", \"libxvmc\", \"xtrans\", \"xcb-util-wm\", \"xcb-util-image\", \"xcb-util-keysyms\", \"xcb-util-renderutil\",\n \"libxxf86vm\", \"libxv\", \"xkeyboard-config\", \"xcb-util\"]\n if packages:\n package_tool = tools.SystemPackageTool(conanfile=self, default_mode=\"verify\")\n package_tool.install_packages(update=True, packages=packages)\n\n def package_info(self):\n for name in [\"x11\", \"x11-xcb\", \"fontenc\", \"ice\", \"sm\", \"xau\", \"xaw7\",\n \"xcomposite\", \"xcursor\", \"xdamage\", \"xdmcp\", \"xext\", \"xfixes\", \"xi\",\n \"xinerama\", \"xkbfile\", \"xmu\", \"xmuu\", \"xpm\", \"xrandr\", \"xrender\", \"xres\",\n \"xscrnsaver\", \"xt\", \"xtst\", \"xv\", \"xvmc\", \"xxf86vm\", \"xtrans\",\n \"xcb-xkb\", \"xcb-icccm\", \"xcb-image\", \"xcb-keysyms\", \"xcb-randr\", \"xcb-render\",\n \"xcb-renderutil\", \"xcb-shape\", \"xcb-shm\", \"xcb-sync\", \"xcb-xfixes\",\n \"xcb-xinerama\", \"xcb\", \"xkeyboard-config\", \"xcb-atom\", \"xcb-aux\", \"xcb-event\", \"xcb-util\",\n \"xcb-dri3\"] + ([] if self.settings.os == \"FreeBSD\" else [\"uuid\"]):\n self._fill_cppinfo_from_pkgconfig(name)\n if self.settings.os == \"Linux\":\n self.cpp_info.components[\"sm\"].requires.append(\"uuid\")\n\n", "path": "recipes/xorg/all/conanfile.py"}], "after_files": [{"content": "from conans import ConanFile, tools\nfrom conans.errors import ConanException, ConanInvalidConfiguration\n\nrequired_conan_version = \">=1.32\"\n\nclass ConanXOrg(ConanFile):\n name = \"xorg\"\n url = \"https://github.com/conan-io/conan-center-index\"\n license = \"MIT\"\n homepage = \"https://www.x.org/wiki/\"\n description = \"The X.Org project provides an open source implementation of the X Window System.\"\n settings = \"os\"\n topics = (\"x11\", \"xorg\")\n\n def configure(self):\n if self.settings.os not in [\"Linux\", \"FreeBSD\"]:\n raise ConanInvalidConfiguration(\"This recipe supports only Linux and FreeBSD\")\n\n def package_id(self):\n self.info.header_only()\n\n def _fill_cppinfo_from_pkgconfig(self, name):\n pkg_config = tools.PkgConfig(name)\n if not pkg_config.provides:\n raise ConanException(\"OpenGL development files aren't available, give up\")\n libs = [lib[2:] for lib in pkg_config.libs_only_l]\n lib_dirs = [lib[2:] for lib in 
pkg_config.libs_only_L]\n ldflags = [flag for flag in pkg_config.libs_only_other]\n include_dirs = [include[2:] for include in pkg_config.cflags_only_I]\n cflags = [flag for flag in pkg_config.cflags_only_other if not flag.startswith(\"-D\")]\n defines = [flag[2:] for flag in pkg_config.cflags_only_other if flag.startswith(\"-D\")]\n variables = pkg_config.variables\n\n self.cpp_info.components[name].system_libs = libs\n self.cpp_info.components[name].libdirs = lib_dirs\n self.cpp_info.components[name].sharedlinkflags = ldflags\n self.cpp_info.components[name].exelinkflags = ldflags\n self.cpp_info.components[name].defines = defines\n self.cpp_info.components[name].includedirs = include_dirs\n self.cpp_info.components[name].cflags = cflags\n self.cpp_info.components[name].cxxflags = cflags\n self.cpp_info.components[name].version = pkg_config.version[0]\n self.cpp_info.components[name].set_property(\"component_version\", pkg_config.version[0])\n self.cpp_info.components[name].set_property(\n \"pkg_config_custom_content\",\n \"\\n\".join(\"%s=%s\" % (key, value) for key,value in variables.items()))\n\n def system_requirements(self):\n packages = []\n if tools.os_info.is_linux and self.settings.os == \"Linux\":\n if tools.os_info.with_apt:\n packages = [\"libx11-dev\", \"libx11-xcb-dev\", \"libfontenc-dev\", \"libice-dev\", \"libsm-dev\", \"libxau-dev\", \"libxaw7-dev\",\n \"libxcomposite-dev\", \"libxcursor-dev\", \"libxdamage-dev\", \"libxdmcp-dev\", \"libxext-dev\", \"libxfixes-dev\", \n \"libxi-dev\", \"libxinerama-dev\", \"libxkbfile-dev\", \"libxmu-dev\", \"libxmuu-dev\",\n \"libxpm-dev\", \"libxrandr-dev\", \"libxrender-dev\", \"libxres-dev\", \"libxss-dev\", \"libxt-dev\", \"libxtst-dev\", \n \"libxv-dev\", \"libxvmc-dev\", \"libxxf86vm-dev\", \"xtrans-dev\", \"libxcb-render0-dev\",\n \"libxcb-render-util0-dev\", \"libxcb-xkb-dev\", \"libxcb-icccm4-dev\", \"libxcb-image0-dev\",\n \"libxcb-keysyms1-dev\", \"libxcb-randr0-dev\", \"libxcb-shape0-dev\", \"libxcb-sync-dev\", \"libxcb-xfixes0-dev\",\n \"libxcb-xinerama0-dev\", \"xkb-data\", \"libxcb-dri3-dev\", \"uuid-dev\"]\n if (tools.os_info.linux_distro == \"ubuntu\" and tools.os_info.os_version < \"15\") or\\\n (tools.os_info.linux_distro == \"debian\" and tools.os_info.os_version < \"12\") or\\\n (tools.os_info.linux_distro == \"raspbian\" and tools.os_info.os_version < \"12\"):\n packages.append( \"libxcb-util0-dev\" )\n else:\n packages.append( \"libxcb-util-dev\" )\n elif tools.os_info.with_yum or tools.os_info.with_dnf or tools.os_info.with_zypper:\n packages = [\"libxcb-devel\", \"libfontenc-devel\", \"libXaw-devel\", \"libXcomposite-devel\",\n \"libXcursor-devel\", \"libXdmcp-devel\", \"libXtst-devel\", \"libXinerama-devel\",\n \"libxkbfile-devel\", \"libXrandr-devel\", \"libXres-devel\", \"libXScrnSaver-devel\", \"libXvMC-devel\",\n \"xorg-x11-xtrans-devel\", \"xcb-util-wm-devel\", \"xcb-util-image-devel\", \"xcb-util-keysyms-devel\",\n \"xcb-util-renderutil-devel\", \"libXdamage-devel\", \"libXxf86vm-devel\", \"libXv-devel\",\n \"xcb-util-devel\", \"libuuid-devel\"]\n packages.append(\"xkeyboard-config\" if tools.os_info.with_zypper else \"xkeyboard-config-devel\")\n elif tools.os_info.with_pacman:\n packages = [\"libxcb\", \"libfontenc\", \"libice\", \"libsm\", \"libxaw\", \"libxcomposite\", \"libxcursor\",\n \"libxdamage\", \"libxdmcp\", \"libxtst\", \"libxinerama\", \"libxkbfile\", \"libxrandr\", \"libxres\",\n \"libxss\", \"libxvmc\", \"xtrans\", \"xcb-util-wm\", \"xcb-util-image\",\"xcb-util-keysyms\", 
\"xcb-util-renderutil\",\n \"libxxf86vm\", \"libxv\", \"xkeyboard-config\", \"xcb-util\", \"util-linux-libs\"]\n else:\n self.output.warn(\"Do not know how to install 'xorg' for {}.\".format(tools.os_info.linux_distro))\n \n elif tools.os_info.is_freebsd and self.settings.os == \"FreeBSD\":\n packages = [\"libX11\", \"libfontenc\", \"libice\", \"libsm\", \"libxaw\", \"libxcomposite\", \"libxcursor\",\n \"libxdamage\", \"libxdmcp\", \"libxtst\", \"libxinerama\", \"libxkbfile\", \"libxrandr\", \"libxres\",\n \"libXScrnSaver\", \"libxvmc\", \"xtrans\", \"xcb-util-wm\", \"xcb-util-image\", \"xcb-util-keysyms\", \"xcb-util-renderutil\",\n \"libxxf86vm\", \"libxv\", \"xkeyboard-config\", \"xcb-util\"]\n if packages:\n package_tool = tools.SystemPackageTool(conanfile=self, default_mode=\"verify\")\n package_tool.install_packages(update=True, packages=packages)\n\n def package_info(self):\n for name in [\"x11\", \"x11-xcb\", \"fontenc\", \"ice\", \"sm\", \"xau\", \"xaw7\",\n \"xcomposite\", \"xcursor\", \"xdamage\", \"xdmcp\", \"xext\", \"xfixes\", \"xi\",\n \"xinerama\", \"xkbfile\", \"xmu\", \"xmuu\", \"xpm\", \"xrandr\", \"xrender\", \"xres\",\n \"xscrnsaver\", \"xt\", \"xtst\", \"xv\", \"xvmc\", \"xxf86vm\", \"xtrans\",\n \"xcb-xkb\", \"xcb-icccm\", \"xcb-image\", \"xcb-keysyms\", \"xcb-randr\", \"xcb-render\",\n \"xcb-renderutil\", \"xcb-shape\", \"xcb-shm\", \"xcb-sync\", \"xcb-xfixes\",\n \"xcb-xinerama\", \"xcb\", \"xkeyboard-config\", \"xcb-atom\", \"xcb-aux\", \"xcb-event\", \"xcb-util\",\n \"xcb-dri3\"] + ([] if self.settings.os == \"FreeBSD\" else [\"uuid\"]):\n self._fill_cppinfo_from_pkgconfig(name)\n self.cpp_info.components[name].set_property(\"pkg_config_name\", name)\n \n if self.settings.os == \"Linux\":\n self.cpp_info.components[\"sm\"].requires.append(\"uuid\")\n\n", "path": "recipes/xorg/all/conanfile.py"}]}
num_tokens: 2,420
num_tokens_diff: 453
problem_id: gh_patches_debug_12522
source: rasdani/github-patches
task_type: git_diff
in_source_id: goauthentik__authentik-6105
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- OAuth source is added to user attributes each time user write is run **Describe the bug** The user attribute `goauthentik.io/user/sources` gets appended each time an user write stage is run. ``` goauthentik.io/user/sources: - Wordpress - Wordpress - Wordpress - Wordpress - Wordpress ... ``` **To Reproduce** Steps to reproduce the behavior: 1. Configure OAuth source 2. Add User Write Stage to default-source-authentication flow 3. Login a few times 4. Each login adds a new entry under `goauthentik.io/user/sources` **Expected behavior** It should only add the source if it isn't there already **Version and Deployment (please complete the following information):** - authentik version: 2023.5.4 - Deployment: docker --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `authentik/stages/user_write/stage.py` Content: ``` 1 """Write stage logic""" 2 from typing import Any, Optional 3 4 from django.contrib.auth import update_session_auth_hash 5 from django.db import transaction 6 from django.db.utils import IntegrityError, InternalError 7 from django.http import HttpRequest, HttpResponse 8 from django.utils.translation import gettext as _ 9 from rest_framework.exceptions import ValidationError 10 11 from authentik.core.middleware import SESSION_KEY_IMPERSONATE_USER 12 from authentik.core.models import USER_ATTRIBUTE_SOURCES, User, UserSourceConnection 13 from authentik.core.sources.stage import PLAN_CONTEXT_SOURCES_CONNECTION 14 from authentik.flows.planner import PLAN_CONTEXT_PENDING_USER 15 from authentik.flows.stage import StageView 16 from authentik.flows.views.executor import FlowExecutorView 17 from authentik.stages.password import BACKEND_INBUILT 18 from authentik.stages.password.stage import PLAN_CONTEXT_AUTHENTICATION_BACKEND 19 from authentik.stages.prompt.stage import PLAN_CONTEXT_PROMPT 20 from authentik.stages.user_write.models import UserCreationMode 21 from authentik.stages.user_write.signals import user_write 22 23 PLAN_CONTEXT_GROUPS = "groups" 24 PLAN_CONTEXT_USER_PATH = "user_path" 25 26 27 class UserWriteStageView(StageView): 28 """Finalise Enrollment flow by creating a user object.""" 29 30 def __init__(self, executor: FlowExecutorView, **kwargs): 31 super().__init__(executor, **kwargs) 32 self.disallowed_user_attributes = [ 33 "groups", 34 ] 35 36 @staticmethod 37 def write_attribute(user: User, key: str, value: Any): 38 """Allow use of attributes.foo.bar when writing to a user, with full 39 recursion""" 40 parts = key.replace("_", ".").split(".") 41 if len(parts) < 1: # pragma: no cover 42 return 43 # Function will always be called with a key like attributes. 
44 # this is just a sanity check to ensure that is removed 45 if parts[0] == "attributes": 46 parts = parts[1:] 47 attrs = user.attributes 48 for comp in parts[:-1]: 49 if comp not in attrs: 50 attrs[comp] = {} 51 attrs = attrs.get(comp) 52 attrs[parts[-1]] = value 53 54 def post(self, request: HttpRequest) -> HttpResponse: 55 """Wrapper for post requests""" 56 return self.get(request) 57 58 def ensure_user(self) -> tuple[Optional[User], bool]: 59 """Ensure a user exists""" 60 user_created = False 61 path = self.executor.plan.context.get( 62 PLAN_CONTEXT_USER_PATH, self.executor.current_stage.user_path_template 63 ) 64 if path == "": 65 path = User.default_path() 66 if not self.request.user.is_anonymous: 67 self.executor.plan.context.setdefault(PLAN_CONTEXT_PENDING_USER, self.request.user) 68 if ( 69 PLAN_CONTEXT_PENDING_USER not in self.executor.plan.context 70 or self.executor.current_stage.user_creation_mode == UserCreationMode.ALWAYS_CREATE 71 ): 72 if self.executor.current_stage.user_creation_mode == UserCreationMode.NEVER_CREATE: 73 return None, False 74 self.executor.plan.context[PLAN_CONTEXT_PENDING_USER] = User( 75 is_active=not self.executor.current_stage.create_users_as_inactive, 76 path=path, 77 ) 78 self.executor.plan.context[PLAN_CONTEXT_AUTHENTICATION_BACKEND] = BACKEND_INBUILT 79 self.logger.debug( 80 "Created new user", 81 flow_slug=self.executor.flow.slug, 82 ) 83 user_created = True 84 user: User = self.executor.plan.context[PLAN_CONTEXT_PENDING_USER] 85 return user, user_created 86 87 def update_user(self, user: User): 88 """Update `user` with data from plan context 89 90 Only simple attributes are updated, nothing which requires a foreign key or m2m""" 91 data: dict = self.executor.plan.context[PLAN_CONTEXT_PROMPT] 92 # This is always sent back but not written to the user 93 data.pop("component", None) 94 for key, value in data.items(): 95 setter_name = f"set_{key}" 96 # Check if user has a setter for this key, like set_password 97 if hasattr(user, setter_name): 98 setter = getattr(user, setter_name) 99 if callable(setter): 100 setter(value) 101 elif key in self.disallowed_user_attributes: 102 self.logger.info("discarding key", key=key) 103 continue 104 # For exact attributes match, update the dictionary in place 105 elif key == "attributes": 106 user.attributes.update(value) 107 # If using dot notation, use the correct helper to update the nested value 108 elif key.startswith("attributes.") or key.startswith("attributes_"): 109 UserWriteStageView.write_attribute(user, key, value) 110 # User has this key already 111 elif hasattr(user, key): 112 setattr(user, key, value) 113 # If none of the cases above matched, we have an attribute that the user doesn't have, 114 # has no setter for, is not a nested attributes value and as such is invalid 115 else: 116 self.logger.info("discarding key", key=key) 117 continue 118 # Check if we're writing from a source, and save the source to the attributes 119 if PLAN_CONTEXT_SOURCES_CONNECTION in self.executor.plan.context: 120 if USER_ATTRIBUTE_SOURCES not in user.attributes or not isinstance( 121 user.attributes.get(USER_ATTRIBUTE_SOURCES), list 122 ): 123 user.attributes[USER_ATTRIBUTE_SOURCES] = [] 124 connection: UserSourceConnection = self.executor.plan.context[ 125 PLAN_CONTEXT_SOURCES_CONNECTION 126 ] 127 user.attributes[USER_ATTRIBUTE_SOURCES].append(connection.source.name) 128 129 def get(self, request: HttpRequest) -> HttpResponse: 130 """Save data in the current flow to the currently pending user. 
If no user is pending, 131 a new user is created.""" 132 if PLAN_CONTEXT_PROMPT not in self.executor.plan.context: 133 message = _("No Pending data.") 134 self.logger.debug(message) 135 return self.executor.stage_invalid(message) 136 data = self.executor.plan.context[PLAN_CONTEXT_PROMPT] 137 user, user_created = self.ensure_user() 138 if not user: 139 message = _("No user found and can't create new user.") 140 self.logger.info(message) 141 return self.executor.stage_invalid(message) 142 # Before we change anything, check if the user is the same as in the request 143 # and we're updating a password. In that case we need to update the session hash 144 # Also check that we're not currently impersonating, so we don't update the session 145 should_update_session = False 146 if ( 147 any("password" in x for x in data.keys()) 148 and self.request.user.pk == user.pk 149 and SESSION_KEY_IMPERSONATE_USER not in self.request.session 150 ): 151 should_update_session = True 152 try: 153 self.update_user(user) 154 except ValidationError as exc: 155 self.logger.warning("failed to update user", exc=exc) 156 return self.executor.stage_invalid(_("Failed to update user. Please try again later.")) 157 # Extra check to prevent flows from saving a user with a blank username 158 if user.username == "": 159 self.logger.warning("Aborting write to empty username", user=user) 160 return self.executor.stage_invalid() 161 try: 162 with transaction.atomic(): 163 user.save() 164 if self.executor.current_stage.create_users_group: 165 user.ak_groups.add(self.executor.current_stage.create_users_group) 166 if PLAN_CONTEXT_GROUPS in self.executor.plan.context: 167 user.ak_groups.add(*self.executor.plan.context[PLAN_CONTEXT_GROUPS]) 168 except (IntegrityError, ValueError, TypeError, InternalError) as exc: 169 self.logger.warning("Failed to save user", exc=exc) 170 return self.executor.stage_invalid(_("Failed to update user. Please try again later.")) 171 user_write.send(sender=self, request=request, user=user, data=data, created=user_created) 172 # Check if the password has been updated, and update the session auth hash 173 if should_update_session: 174 update_session_auth_hash(self.request, user) 175 self.logger.debug("Updated session hash", user=user) 176 self.logger.debug( 177 "Updated existing user", 178 user=user, 179 flow_slug=self.executor.flow.slug, 180 ) 181 return self.executor.stage_ok() 182 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/authentik/stages/user_write/stage.py b/authentik/stages/user_write/stage.py --- a/authentik/stages/user_write/stage.py +++ b/authentik/stages/user_write/stage.py @@ -124,7 +124,8 @@ connection: UserSourceConnection = self.executor.plan.context[ PLAN_CONTEXT_SOURCES_CONNECTION ] - user.attributes[USER_ATTRIBUTE_SOURCES].append(connection.source.name) + if connection.source.name not in user.attributes[USER_ATTRIBUTE_SOURCES]: + user.attributes[USER_ATTRIBUTE_SOURCES].append(connection.source.name) def get(self, request: HttpRequest) -> HttpResponse: """Save data in the current flow to the currently pending user. If no user is pending,
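The membership check added above is what makes the source bookkeeping idempotent. A minimal standalone sketch of the same pattern, using a plain dictionary in place of authentik's `User.attributes` (the attribute key is copied from the issue report; everything else is illustrative):

```python
USER_ATTRIBUTE_SOURCES = "goauthentik.io/user/sources"  # key shown in the issue report

def record_source(attributes: dict, source_name: str) -> None:
    sources = attributes.setdefault(USER_ATTRIBUTE_SOURCES, [])
    if source_name not in sources:  # the guard the patch introduces
        sources.append(source_name)

attrs = {}
for _ in range(3):  # three logins through the same source
    record_source(attrs, "Wordpress")
print(attrs)  # {'goauthentik.io/user/sources': ['Wordpress']}
```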
{"golden_diff": "diff --git a/authentik/stages/user_write/stage.py b/authentik/stages/user_write/stage.py\n--- a/authentik/stages/user_write/stage.py\n+++ b/authentik/stages/user_write/stage.py\n@@ -124,7 +124,8 @@\n connection: UserSourceConnection = self.executor.plan.context[\n PLAN_CONTEXT_SOURCES_CONNECTION\n ]\n- user.attributes[USER_ATTRIBUTE_SOURCES].append(connection.source.name)\n+ if connection.source.name not in user.attributes[USER_ATTRIBUTE_SOURCES]:\n+ user.attributes[USER_ATTRIBUTE_SOURCES].append(connection.source.name)\n \n def get(self, request: HttpRequest) -> HttpResponse:\n \"\"\"Save data in the current flow to the currently pending user. If no user is pending,\n", "issue": "OAuth source is added to user attributes each time user write is run\n**Describe the bug**\r\nThe user attribute `goauthentik.io/user/sources` gets appended each time an user write stage is run.\r\n\r\n```\r\ngoauthentik.io/user/sources:\r\n - Wordpress\r\n - Wordpress\r\n - Wordpress\r\n - Wordpress\r\n - Wordpress\r\n ...\r\n```\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n\r\n1. Configure OAuth source\r\n2. Add User Write Stage to default-source-authentication flow\r\n3. Login a few times\r\n4. Each login adds a new entry under `goauthentik.io/user/sources`\r\n\r\n**Expected behavior**\r\nIt should only add the source if it isn't there already\r\n\r\n**Version and Deployment (please complete the following information):**\r\n\r\n- authentik version: 2023.5.4\r\n- Deployment: docker\n", "before_files": [{"content": "\"\"\"Write stage logic\"\"\"\nfrom typing import Any, Optional\n\nfrom django.contrib.auth import update_session_auth_hash\nfrom django.db import transaction\nfrom django.db.utils import IntegrityError, InternalError\nfrom django.http import HttpRequest, HttpResponse\nfrom django.utils.translation import gettext as _\nfrom rest_framework.exceptions import ValidationError\n\nfrom authentik.core.middleware import SESSION_KEY_IMPERSONATE_USER\nfrom authentik.core.models import USER_ATTRIBUTE_SOURCES, User, UserSourceConnection\nfrom authentik.core.sources.stage import PLAN_CONTEXT_SOURCES_CONNECTION\nfrom authentik.flows.planner import PLAN_CONTEXT_PENDING_USER\nfrom authentik.flows.stage import StageView\nfrom authentik.flows.views.executor import FlowExecutorView\nfrom authentik.stages.password import BACKEND_INBUILT\nfrom authentik.stages.password.stage import PLAN_CONTEXT_AUTHENTICATION_BACKEND\nfrom authentik.stages.prompt.stage import PLAN_CONTEXT_PROMPT\nfrom authentik.stages.user_write.models import UserCreationMode\nfrom authentik.stages.user_write.signals import user_write\n\nPLAN_CONTEXT_GROUPS = \"groups\"\nPLAN_CONTEXT_USER_PATH = \"user_path\"\n\n\nclass UserWriteStageView(StageView):\n \"\"\"Finalise Enrollment flow by creating a user object.\"\"\"\n\n def __init__(self, executor: FlowExecutorView, **kwargs):\n super().__init__(executor, **kwargs)\n self.disallowed_user_attributes = [\n \"groups\",\n ]\n\n @staticmethod\n def write_attribute(user: User, key: str, value: Any):\n \"\"\"Allow use of attributes.foo.bar when writing to a user, with full\n recursion\"\"\"\n parts = key.replace(\"_\", \".\").split(\".\")\n if len(parts) < 1: # pragma: no cover\n return\n # Function will always be called with a key like attributes.\n # this is just a sanity check to ensure that is removed\n if parts[0] == \"attributes\":\n parts = parts[1:]\n attrs = user.attributes\n for comp in parts[:-1]:\n if comp not in attrs:\n attrs[comp] = {}\n attrs = attrs.get(comp)\n 
attrs[parts[-1]] = value\n\n def post(self, request: HttpRequest) -> HttpResponse:\n \"\"\"Wrapper for post requests\"\"\"\n return self.get(request)\n\n def ensure_user(self) -> tuple[Optional[User], bool]:\n \"\"\"Ensure a user exists\"\"\"\n user_created = False\n path = self.executor.plan.context.get(\n PLAN_CONTEXT_USER_PATH, self.executor.current_stage.user_path_template\n )\n if path == \"\":\n path = User.default_path()\n if not self.request.user.is_anonymous:\n self.executor.plan.context.setdefault(PLAN_CONTEXT_PENDING_USER, self.request.user)\n if (\n PLAN_CONTEXT_PENDING_USER not in self.executor.plan.context\n or self.executor.current_stage.user_creation_mode == UserCreationMode.ALWAYS_CREATE\n ):\n if self.executor.current_stage.user_creation_mode == UserCreationMode.NEVER_CREATE:\n return None, False\n self.executor.plan.context[PLAN_CONTEXT_PENDING_USER] = User(\n is_active=not self.executor.current_stage.create_users_as_inactive,\n path=path,\n )\n self.executor.plan.context[PLAN_CONTEXT_AUTHENTICATION_BACKEND] = BACKEND_INBUILT\n self.logger.debug(\n \"Created new user\",\n flow_slug=self.executor.flow.slug,\n )\n user_created = True\n user: User = self.executor.plan.context[PLAN_CONTEXT_PENDING_USER]\n return user, user_created\n\n def update_user(self, user: User):\n \"\"\"Update `user` with data from plan context\n\n Only simple attributes are updated, nothing which requires a foreign key or m2m\"\"\"\n data: dict = self.executor.plan.context[PLAN_CONTEXT_PROMPT]\n # This is always sent back but not written to the user\n data.pop(\"component\", None)\n for key, value in data.items():\n setter_name = f\"set_{key}\"\n # Check if user has a setter for this key, like set_password\n if hasattr(user, setter_name):\n setter = getattr(user, setter_name)\n if callable(setter):\n setter(value)\n elif key in self.disallowed_user_attributes:\n self.logger.info(\"discarding key\", key=key)\n continue\n # For exact attributes match, update the dictionary in place\n elif key == \"attributes\":\n user.attributes.update(value)\n # If using dot notation, use the correct helper to update the nested value\n elif key.startswith(\"attributes.\") or key.startswith(\"attributes_\"):\n UserWriteStageView.write_attribute(user, key, value)\n # User has this key already\n elif hasattr(user, key):\n setattr(user, key, value)\n # If none of the cases above matched, we have an attribute that the user doesn't have,\n # has no setter for, is not a nested attributes value and as such is invalid\n else:\n self.logger.info(\"discarding key\", key=key)\n continue\n # Check if we're writing from a source, and save the source to the attributes\n if PLAN_CONTEXT_SOURCES_CONNECTION in self.executor.plan.context:\n if USER_ATTRIBUTE_SOURCES not in user.attributes or not isinstance(\n user.attributes.get(USER_ATTRIBUTE_SOURCES), list\n ):\n user.attributes[USER_ATTRIBUTE_SOURCES] = []\n connection: UserSourceConnection = self.executor.plan.context[\n PLAN_CONTEXT_SOURCES_CONNECTION\n ]\n user.attributes[USER_ATTRIBUTE_SOURCES].append(connection.source.name)\n\n def get(self, request: HttpRequest) -> HttpResponse:\n \"\"\"Save data in the current flow to the currently pending user. 
If no user is pending,\n a new user is created.\"\"\"\n if PLAN_CONTEXT_PROMPT not in self.executor.plan.context:\n message = _(\"No Pending data.\")\n self.logger.debug(message)\n return self.executor.stage_invalid(message)\n data = self.executor.plan.context[PLAN_CONTEXT_PROMPT]\n user, user_created = self.ensure_user()\n if not user:\n message = _(\"No user found and can't create new user.\")\n self.logger.info(message)\n return self.executor.stage_invalid(message)\n # Before we change anything, check if the user is the same as in the request\n # and we're updating a password. In that case we need to update the session hash\n # Also check that we're not currently impersonating, so we don't update the session\n should_update_session = False\n if (\n any(\"password\" in x for x in data.keys())\n and self.request.user.pk == user.pk\n and SESSION_KEY_IMPERSONATE_USER not in self.request.session\n ):\n should_update_session = True\n try:\n self.update_user(user)\n except ValidationError as exc:\n self.logger.warning(\"failed to update user\", exc=exc)\n return self.executor.stage_invalid(_(\"Failed to update user. Please try again later.\"))\n # Extra check to prevent flows from saving a user with a blank username\n if user.username == \"\":\n self.logger.warning(\"Aborting write to empty username\", user=user)\n return self.executor.stage_invalid()\n try:\n with transaction.atomic():\n user.save()\n if self.executor.current_stage.create_users_group:\n user.ak_groups.add(self.executor.current_stage.create_users_group)\n if PLAN_CONTEXT_GROUPS in self.executor.plan.context:\n user.ak_groups.add(*self.executor.plan.context[PLAN_CONTEXT_GROUPS])\n except (IntegrityError, ValueError, TypeError, InternalError) as exc:\n self.logger.warning(\"Failed to save user\", exc=exc)\n return self.executor.stage_invalid(_(\"Failed to update user. 
Please try again later.\"))\n user_write.send(sender=self, request=request, user=user, data=data, created=user_created)\n # Check if the password has been updated, and update the session auth hash\n if should_update_session:\n update_session_auth_hash(self.request, user)\n self.logger.debug(\"Updated session hash\", user=user)\n self.logger.debug(\n \"Updated existing user\",\n user=user,\n flow_slug=self.executor.flow.slug,\n )\n return self.executor.stage_ok()\n", "path": "authentik/stages/user_write/stage.py"}], "after_files": [{"content": "\"\"\"Write stage logic\"\"\"\nfrom typing import Any, Optional\n\nfrom django.contrib.auth import update_session_auth_hash\nfrom django.db import transaction\nfrom django.db.utils import IntegrityError, InternalError\nfrom django.http import HttpRequest, HttpResponse\nfrom django.utils.translation import gettext as _\nfrom rest_framework.exceptions import ValidationError\n\nfrom authentik.core.middleware import SESSION_KEY_IMPERSONATE_USER\nfrom authentik.core.models import USER_ATTRIBUTE_SOURCES, User, UserSourceConnection\nfrom authentik.core.sources.stage import PLAN_CONTEXT_SOURCES_CONNECTION\nfrom authentik.flows.planner import PLAN_CONTEXT_PENDING_USER\nfrom authentik.flows.stage import StageView\nfrom authentik.flows.views.executor import FlowExecutorView\nfrom authentik.stages.password import BACKEND_INBUILT\nfrom authentik.stages.password.stage import PLAN_CONTEXT_AUTHENTICATION_BACKEND\nfrom authentik.stages.prompt.stage import PLAN_CONTEXT_PROMPT\nfrom authentik.stages.user_write.models import UserCreationMode\nfrom authentik.stages.user_write.signals import user_write\n\nPLAN_CONTEXT_GROUPS = \"groups\"\nPLAN_CONTEXT_USER_PATH = \"user_path\"\n\n\nclass UserWriteStageView(StageView):\n \"\"\"Finalise Enrollment flow by creating a user object.\"\"\"\n\n def __init__(self, executor: FlowExecutorView, **kwargs):\n super().__init__(executor, **kwargs)\n self.disallowed_user_attributes = [\n \"groups\",\n ]\n\n @staticmethod\n def write_attribute(user: User, key: str, value: Any):\n \"\"\"Allow use of attributes.foo.bar when writing to a user, with full\n recursion\"\"\"\n parts = key.replace(\"_\", \".\").split(\".\")\n if len(parts) < 1: # pragma: no cover\n return\n # Function will always be called with a key like attributes.\n # this is just a sanity check to ensure that is removed\n if parts[0] == \"attributes\":\n parts = parts[1:]\n attrs = user.attributes\n for comp in parts[:-1]:\n if comp not in attrs:\n attrs[comp] = {}\n attrs = attrs.get(comp)\n attrs[parts[-1]] = value\n\n def post(self, request: HttpRequest) -> HttpResponse:\n \"\"\"Wrapper for post requests\"\"\"\n return self.get(request)\n\n def ensure_user(self) -> tuple[Optional[User], bool]:\n \"\"\"Ensure a user exists\"\"\"\n user_created = False\n path = self.executor.plan.context.get(\n PLAN_CONTEXT_USER_PATH, self.executor.current_stage.user_path_template\n )\n if path == \"\":\n path = User.default_path()\n if not self.request.user.is_anonymous:\n self.executor.plan.context.setdefault(PLAN_CONTEXT_PENDING_USER, self.request.user)\n if (\n PLAN_CONTEXT_PENDING_USER not in self.executor.plan.context\n or self.executor.current_stage.user_creation_mode == UserCreationMode.ALWAYS_CREATE\n ):\n if self.executor.current_stage.user_creation_mode == UserCreationMode.NEVER_CREATE:\n return None, False\n self.executor.plan.context[PLAN_CONTEXT_PENDING_USER] = User(\n is_active=not self.executor.current_stage.create_users_as_inactive,\n path=path,\n )\n 
self.executor.plan.context[PLAN_CONTEXT_AUTHENTICATION_BACKEND] = BACKEND_INBUILT\n self.logger.debug(\n \"Created new user\",\n flow_slug=self.executor.flow.slug,\n )\n user_created = True\n user: User = self.executor.plan.context[PLAN_CONTEXT_PENDING_USER]\n return user, user_created\n\n def update_user(self, user: User):\n \"\"\"Update `user` with data from plan context\n\n Only simple attributes are updated, nothing which requires a foreign key or m2m\"\"\"\n data: dict = self.executor.plan.context[PLAN_CONTEXT_PROMPT]\n # This is always sent back but not written to the user\n data.pop(\"component\", None)\n for key, value in data.items():\n setter_name = f\"set_{key}\"\n # Check if user has a setter for this key, like set_password\n if hasattr(user, setter_name):\n setter = getattr(user, setter_name)\n if callable(setter):\n setter(value)\n elif key in self.disallowed_user_attributes:\n self.logger.info(\"discarding key\", key=key)\n continue\n # For exact attributes match, update the dictionary in place\n elif key == \"attributes\":\n user.attributes.update(value)\n # If using dot notation, use the correct helper to update the nested value\n elif key.startswith(\"attributes.\") or key.startswith(\"attributes_\"):\n UserWriteStageView.write_attribute(user, key, value)\n # User has this key already\n elif hasattr(user, key):\n setattr(user, key, value)\n # If none of the cases above matched, we have an attribute that the user doesn't have,\n # has no setter for, is not a nested attributes value and as such is invalid\n else:\n self.logger.info(\"discarding key\", key=key)\n continue\n # Check if we're writing from a source, and save the source to the attributes\n if PLAN_CONTEXT_SOURCES_CONNECTION in self.executor.plan.context:\n if USER_ATTRIBUTE_SOURCES not in user.attributes or not isinstance(\n user.attributes.get(USER_ATTRIBUTE_SOURCES), list\n ):\n user.attributes[USER_ATTRIBUTE_SOURCES] = []\n connection: UserSourceConnection = self.executor.plan.context[\n PLAN_CONTEXT_SOURCES_CONNECTION\n ]\n if connection.source.name not in user.attributes[USER_ATTRIBUTE_SOURCES]:\n user.attributes[USER_ATTRIBUTE_SOURCES].append(connection.source.name)\n\n def get(self, request: HttpRequest) -> HttpResponse:\n \"\"\"Save data in the current flow to the currently pending user. If no user is pending,\n a new user is created.\"\"\"\n if PLAN_CONTEXT_PROMPT not in self.executor.plan.context:\n message = _(\"No Pending data.\")\n self.logger.debug(message)\n return self.executor.stage_invalid(message)\n data = self.executor.plan.context[PLAN_CONTEXT_PROMPT]\n user, user_created = self.ensure_user()\n if not user:\n message = _(\"No user found and can't create new user.\")\n self.logger.info(message)\n return self.executor.stage_invalid(message)\n # Before we change anything, check if the user is the same as in the request\n # and we're updating a password. In that case we need to update the session hash\n # Also check that we're not currently impersonating, so we don't update the session\n should_update_session = False\n if (\n any(\"password\" in x for x in data.keys())\n and self.request.user.pk == user.pk\n and SESSION_KEY_IMPERSONATE_USER not in self.request.session\n ):\n should_update_session = True\n try:\n self.update_user(user)\n except ValidationError as exc:\n self.logger.warning(\"failed to update user\", exc=exc)\n return self.executor.stage_invalid(_(\"Failed to update user. 
Please try again later.\"))\n # Extra check to prevent flows from saving a user with a blank username\n if user.username == \"\":\n self.logger.warning(\"Aborting write to empty username\", user=user)\n return self.executor.stage_invalid()\n try:\n with transaction.atomic():\n user.save()\n if self.executor.current_stage.create_users_group:\n user.ak_groups.add(self.executor.current_stage.create_users_group)\n if PLAN_CONTEXT_GROUPS in self.executor.plan.context:\n user.ak_groups.add(*self.executor.plan.context[PLAN_CONTEXT_GROUPS])\n except (IntegrityError, ValueError, TypeError, InternalError) as exc:\n self.logger.warning(\"Failed to save user\", exc=exc)\n return self.executor.stage_invalid(_(\"Failed to update user. Please try again later.\"))\n user_write.send(sender=self, request=request, user=user, data=data, created=user_created)\n # Check if the password has been updated, and update the session auth hash\n if should_update_session:\n update_session_auth_hash(self.request, user)\n self.logger.debug(\"Updated session hash\", user=user)\n self.logger.debug(\n \"Updated existing user\",\n user=user,\n flow_slug=self.executor.flow.slug,\n )\n return self.executor.stage_ok()\n", "path": "authentik/stages/user_write/stage.py"}]}
2590
166
gh_patches_debug_33559
rasdani/github-patches
git_diff
aws-cloudformation__cfn-lint-954
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Condition: <list> on Resource triggers "Unknown exception while processing rule" *cfn-lint version: (`cfn-lint --version`)* cfn-lint 0.21.4 *Description of issue.* A resource with a Condition property defined as a list triggers: ``` E0002 Unknown exception while processing rule E8002: unhashable type: 'list_node' /tmp/cfn-lint-condition-list-error.yaml:1:1 ``` I believe that the use of lists / multiple values for a Condition property of a Resource is probably not legal (although I was unable to find clear confirmation of that in the documentation during a quick scan), but it should probably trigger a lint error rather than an exception. It would also be helpful, if possible, to include the template line-number where the exception was triggered, rather than line:char 1:1 to make tracking the cause of such problems easier. I have also seen the same exception, but for rule W1001, I though it was the same cause, but my reproduce test didn't re-trigger the W1001 case. *Reproduce example* ``` AWSTemplateFormatVersion: 2010-09-09 Description: "cfn-lint condition list error" Conditions: Cond1: !Equals [ !Ref 'AWS::Region', 'us-east-1' ] Cond2: !Equals [ !Ref 'AWS::Region', 'eu-west-1' ] Resources: EIP1: Type: AWS::EC2::EIP Condition: - Cond1 - Cond2 Properties: Domain: 'vpc' EIP2: Type: AWS::EC2::EIP Condition: Cond1 Properties: Domain: 'vpc' EIP3: Type: AWS::EC2::EIP Condition: Cond2 Properties: Domain: 'vpc' ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/cfnlint/rules/conditions/Exists.py` Content: ``` 1 """ 2 Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 4 Permission is hereby granted, free of charge, to any person obtaining a copy of this 5 software and associated documentation files (the "Software"), to deal in the Software 6 without restriction, including without limitation the rights to use, copy, modify, 7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to 8 permit persons to whom the Software is furnished to do so. 9 10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, 11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT 13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
16 """ 17 import six 18 from cfnlint import CloudFormationLintRule 19 from cfnlint import RuleMatch 20 21 22 class Exists(CloudFormationLintRule): 23 """Check if used Conditions are defined """ 24 id = 'E8002' 25 shortdesc = 'Check if the referenced Conditions are defined' 26 description = 'Making sure the used conditions are actually defined in the Conditions section' 27 source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/conditions-section-structure.html' 28 tags = ['conditions'] 29 30 def match(self, cfn): 31 """Check CloudFormation Conditions""" 32 33 matches = [] 34 ref_conditions = {} 35 36 # Get all defined conditions 37 conditions = cfn.template.get('Conditions', {}) 38 39 # Get all "If's" that reference a Condition 40 iftrees = cfn.search_deep_keys('Fn::If') 41 for iftree in iftrees: 42 if isinstance(iftree[-1], list): 43 ref_conditions[iftree[-1][0]] = iftree 44 else: 45 ref_conditions[iftree[-1]] = iftree 46 47 # Get resource's Conditions 48 for resource_name, resource_values in cfn.get_resources().items(): 49 if 'Condition' in resource_values: 50 path = ['Resources', resource_name, 'Condition'] 51 ref_conditions[resource_values['Condition']] = path 52 53 # Get conditions used by another condition 54 condtrees = cfn.search_deep_keys('Condition') 55 56 for condtree in condtrees: 57 if condtree[0] == 'Conditions': 58 if isinstance(condtree[-1], (str, six.text_type, six.string_types)): 59 path = ['Conditions', condtree[-1]] 60 ref_conditions[condtree[-1]] = path 61 62 # Get Output Conditions 63 for _, output_values in cfn.template.get('Outputs', {}).items(): 64 if 'Condition' in output_values: 65 path = ['Outputs', output_values['Condition']] 66 ref_conditions[output_values['Condition']] = path 67 68 # Check if all the conditions are defined 69 for ref_condition, ref_path in ref_conditions.items(): 70 if ref_condition not in conditions: 71 message = 'Condition {0} is not defined.' 72 matches.append(RuleMatch( 73 ref_path, 74 message.format(ref_condition) 75 )) 76 77 return matches 78 ``` Path: `src/cfnlint/rules/resources/Configuration.py` Content: ``` 1 """ 2 Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 4 Permission is hereby granted, free of charge, to any person obtaining a copy of this 5 software and associated documentation files (the "Software"), to deal in the Software 6 without restriction, including without limitation the rights to use, copy, modify, 7 merge, publish, distribute, sublicense, and/or sell copies of the Software, and to 8 permit persons to whom the Software is furnished to do so. 9 10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, 11 INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A 12 PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT 13 HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 14 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 15 SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
16 """ 17 from cfnlint import CloudFormationLintRule 18 from cfnlint import RuleMatch 19 import cfnlint.helpers 20 21 22 class Configuration(CloudFormationLintRule): 23 """Check Base Resource Configuration""" 24 id = 'E3001' 25 shortdesc = 'Basic CloudFormation Resource Check' 26 description = 'Making sure the basic CloudFormation resources ' + \ 27 'are properly configured' 28 source_url = 'https://github.com/aws-cloudformation/cfn-python-lint' 29 tags = ['resources'] 30 31 def match(self, cfn): 32 """Check CloudFormation Resources""" 33 34 matches = [] 35 36 valid_attributes = [ 37 'CreationPolicy', 38 'DeletionPolicy', 39 'DependsOn', 40 'Metadata', 41 'UpdatePolicy', 42 'UpdateReplacePolicy', 43 'Properties', 44 'Type', 45 'Condition' 46 ] 47 48 valid_custom_attributes = [ 49 'Version', 50 'Properties', 51 'DependsOn', 52 'Metadata', 53 'Condition', 54 'Type', 55 ] 56 57 resources = cfn.template.get('Resources', {}) 58 if not isinstance(resources, dict): 59 message = 'Resource not properly configured' 60 matches.append(RuleMatch(['Resources'], message)) 61 else: 62 for resource_name, resource_values in cfn.template.get('Resources', {}).items(): 63 self.logger.debug('Validating resource %s base configuration', resource_name) 64 if not isinstance(resource_values, dict): 65 message = 'Resource not properly configured at {0}' 66 matches.append(RuleMatch( 67 ['Resources', resource_name], 68 message.format(resource_name) 69 )) 70 continue 71 resource_type = resource_values.get('Type', '') 72 check_attributes = [] 73 if resource_type.startswith('Custom::') or resource_type == 'AWS::CloudFormation::CustomResource': 74 check_attributes = valid_custom_attributes 75 else: 76 check_attributes = valid_attributes 77 78 for property_key, _ in resource_values.items(): 79 if property_key not in check_attributes: 80 message = 'Invalid resource attribute {0} for resource {1}' 81 matches.append(RuleMatch( 82 ['Resources', resource_name, property_key], 83 message.format(property_key, resource_name))) 84 85 resource_type = resource_values.get('Type', '') 86 if not resource_type: 87 message = 'Type not defined for resource {0}' 88 matches.append(RuleMatch( 89 ['Resources', resource_name], 90 message.format(resource_name) 91 )) 92 else: 93 self.logger.debug('Check resource types by region...') 94 for region, specs in cfnlint.helpers.RESOURCE_SPECS.items(): 95 if region in cfn.regions: 96 if resource_type not in specs['ResourceTypes']: 97 if not resource_type.startswith(('Custom::', 'AWS::Serverless::')): 98 message = 'Invalid or unsupported Type {0} for resource {1} in {2}' 99 matches.append(RuleMatch( 100 ['Resources', resource_name, 'Type'], 101 message.format(resource_type, resource_name, region) 102 )) 103 104 if 'Properties' not in resource_values: 105 resource_spec = cfnlint.helpers.RESOURCE_SPECS[cfn.regions[0]] 106 if resource_type in resource_spec['ResourceTypes']: 107 properties_spec = resource_spec['ResourceTypes'][resource_type]['Properties'] 108 # pylint: disable=len-as-condition 109 if len(properties_spec) > 0: 110 required = 0 111 for _, property_spec in properties_spec.items(): 112 if property_spec.get('Required', False): 113 required += 1 114 if required > 0: 115 if resource_type == 'AWS::CloudFormation::WaitCondition' and 'CreationPolicy' in resource_values.keys(): 116 self.logger.debug('Exception to required properties section as CreationPolicy is defined.') 117 else: 118 message = 'Properties not defined for resource {0}' 119 matches.append(RuleMatch( 120 ['Resources', resource_name], 121 
message.format(resource_name) 122 )) 123 124 return matches 125 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/cfnlint/rules/conditions/Exists.py b/src/cfnlint/rules/conditions/Exists.py --- a/src/cfnlint/rules/conditions/Exists.py +++ b/src/cfnlint/rules/conditions/Exists.py @@ -46,9 +46,10 @@ # Get resource's Conditions for resource_name, resource_values in cfn.get_resources().items(): - if 'Condition' in resource_values: + condition = resource_values.get('Condition') + if isinstance(condition, six.string_types): # make sure its a string path = ['Resources', resource_name, 'Condition'] - ref_conditions[resource_values['Condition']] = path + ref_conditions[condition] = path # Get conditions used by another condition condtrees = cfn.search_deep_keys('Condition') diff --git a/src/cfnlint/rules/resources/Configuration.py b/src/cfnlint/rules/resources/Configuration.py --- a/src/cfnlint/rules/resources/Configuration.py +++ b/src/cfnlint/rules/resources/Configuration.py @@ -14,6 +14,7 @@ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ +import six from cfnlint import CloudFormationLintRule from cfnlint import RuleMatch import cfnlint.helpers @@ -82,6 +83,15 @@ ['Resources', resource_name, property_key], message.format(property_key, resource_name))) + # validate condition is a string + condition = resource_values.get('Condition', '') + if not isinstance(condition, six.string_types): + message = 'Condition for resource {0} should be a string' + matches.append(RuleMatch( + ['Resources', resource_name, 'Condition'], + message.format(resource_name) + )) + resource_type = resource_values.get('Type', '') if not resource_type: message = 'Type not defined for resource {0}'
{"golden_diff": "diff --git a/src/cfnlint/rules/conditions/Exists.py b/src/cfnlint/rules/conditions/Exists.py\n--- a/src/cfnlint/rules/conditions/Exists.py\n+++ b/src/cfnlint/rules/conditions/Exists.py\n@@ -46,9 +46,10 @@\n \n # Get resource's Conditions\n for resource_name, resource_values in cfn.get_resources().items():\n- if 'Condition' in resource_values:\n+ condition = resource_values.get('Condition')\n+ if isinstance(condition, six.string_types): # make sure its a string\n path = ['Resources', resource_name, 'Condition']\n- ref_conditions[resource_values['Condition']] = path\n+ ref_conditions[condition] = path\n \n # Get conditions used by another condition\n condtrees = cfn.search_deep_keys('Condition')\ndiff --git a/src/cfnlint/rules/resources/Configuration.py b/src/cfnlint/rules/resources/Configuration.py\n--- a/src/cfnlint/rules/resources/Configuration.py\n+++ b/src/cfnlint/rules/resources/Configuration.py\n@@ -14,6 +14,7 @@\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n \"\"\"\n+import six\n from cfnlint import CloudFormationLintRule\n from cfnlint import RuleMatch\n import cfnlint.helpers\n@@ -82,6 +83,15 @@\n ['Resources', resource_name, property_key],\n message.format(property_key, resource_name)))\n \n+ # validate condition is a string\n+ condition = resource_values.get('Condition', '')\n+ if not isinstance(condition, six.string_types):\n+ message = 'Condition for resource {0} should be a string'\n+ matches.append(RuleMatch(\n+ ['Resources', resource_name, 'Condition'],\n+ message.format(resource_name)\n+ ))\n+\n resource_type = resource_values.get('Type', '')\n if not resource_type:\n message = 'Type not defined for resource {0}'\n", "issue": "Condition: <list> on Resource triggers \"Unknown exception while processing rule\"\n*cfn-lint version: (`cfn-lint --version`)*\r\ncfn-lint 0.21.4\r\n\r\n*Description of issue.*\r\nA resource with a Condition property defined as a list triggers:\r\n```\r\nE0002 Unknown exception while processing rule E8002: unhashable type: 'list_node'\r\n/tmp/cfn-lint-condition-list-error.yaml:1:1\r\n```\r\nI believe that the use of lists / multiple values for a Condition property of a Resource is probably not legal (although I was unable to find clear confirmation of that in the documentation during a quick scan), but it should probably trigger a lint error rather than an exception.\r\n\r\nIt would also be helpful, if possible, to include the template line-number where the exception was triggered, rather than line:char 1:1 to make tracking the cause of such problems easier.\r\n\r\nI have also seen the same exception, but for rule W1001, I though it was the same cause, but my reproduce test didn't re-trigger the W1001 case.\r\n\r\n*Reproduce example*\r\n```\r\nAWSTemplateFormatVersion: 2010-09-09\r\nDescription: \"cfn-lint condition list error\"\r\n\r\nConditions:\r\n Cond1: !Equals [ !Ref 'AWS::Region', 'us-east-1' ]\r\n Cond2: !Equals [ !Ref 'AWS::Region', 'eu-west-1' ]\r\n\r\nResources:\r\n\r\n EIP1:\r\n Type: AWS::EC2::EIP\r\n Condition:\r\n - Cond1\r\n - Cond2\r\n Properties:\r\n Domain: 'vpc'\r\n\r\n EIP2:\r\n Type: AWS::EC2::EIP\r\n Condition: Cond1\r\n Properties:\r\n Domain: 'vpc'\r\n\r\n EIP3:\r\n Type: AWS::EC2::EIP\r\n Condition: Cond2\r\n Properties:\r\n Domain: 'vpc'\r\n```\r\n\n", "before_files": [{"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport six\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\n\n\nclass Exists(CloudFormationLintRule):\n \"\"\"Check if used Conditions are defined \"\"\"\n id = 'E8002'\n shortdesc = 'Check if the referenced Conditions are defined'\n description = 'Making sure the used conditions are actually defined in the Conditions section'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/conditions-section-structure.html'\n tags = ['conditions']\n\n def match(self, cfn):\n \"\"\"Check CloudFormation Conditions\"\"\"\n\n matches = []\n ref_conditions = {}\n\n # Get all defined conditions\n conditions = cfn.template.get('Conditions', {})\n\n # Get all \"If's\" that reference a Condition\n iftrees = cfn.search_deep_keys('Fn::If')\n for iftree in iftrees:\n if isinstance(iftree[-1], list):\n ref_conditions[iftree[-1][0]] = iftree\n else:\n ref_conditions[iftree[-1]] = iftree\n\n # Get resource's Conditions\n for resource_name, resource_values in cfn.get_resources().items():\n if 'Condition' in resource_values:\n path = ['Resources', resource_name, 'Condition']\n ref_conditions[resource_values['Condition']] = path\n\n # Get conditions used by another condition\n condtrees = cfn.search_deep_keys('Condition')\n\n for condtree in condtrees:\n if condtree[0] == 'Conditions':\n if isinstance(condtree[-1], (str, six.text_type, six.string_types)):\n path = ['Conditions', condtree[-1]]\n ref_conditions[condtree[-1]] = path\n\n # Get Output Conditions\n for _, output_values in cfn.template.get('Outputs', {}).items():\n if 'Condition' in output_values:\n path = ['Outputs', output_values['Condition']]\n ref_conditions[output_values['Condition']] = path\n\n # Check if all the conditions are defined\n for ref_condition, ref_path in ref_conditions.items():\n if ref_condition not in conditions:\n message = 'Condition {0} is not defined.'\n matches.append(RuleMatch(\n ref_path,\n message.format(ref_condition)\n ))\n\n return matches\n", "path": "src/cfnlint/rules/conditions/Exists.py"}, {"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. 
All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\nimport cfnlint.helpers\n\n\nclass Configuration(CloudFormationLintRule):\n \"\"\"Check Base Resource Configuration\"\"\"\n id = 'E3001'\n shortdesc = 'Basic CloudFormation Resource Check'\n description = 'Making sure the basic CloudFormation resources ' + \\\n 'are properly configured'\n source_url = 'https://github.com/aws-cloudformation/cfn-python-lint'\n tags = ['resources']\n\n def match(self, cfn):\n \"\"\"Check CloudFormation Resources\"\"\"\n\n matches = []\n\n valid_attributes = [\n 'CreationPolicy',\n 'DeletionPolicy',\n 'DependsOn',\n 'Metadata',\n 'UpdatePolicy',\n 'UpdateReplacePolicy',\n 'Properties',\n 'Type',\n 'Condition'\n ]\n\n valid_custom_attributes = [\n 'Version',\n 'Properties',\n 'DependsOn',\n 'Metadata',\n 'Condition',\n 'Type',\n ]\n\n resources = cfn.template.get('Resources', {})\n if not isinstance(resources, dict):\n message = 'Resource not properly configured'\n matches.append(RuleMatch(['Resources'], message))\n else:\n for resource_name, resource_values in cfn.template.get('Resources', {}).items():\n self.logger.debug('Validating resource %s base configuration', resource_name)\n if not isinstance(resource_values, dict):\n message = 'Resource not properly configured at {0}'\n matches.append(RuleMatch(\n ['Resources', resource_name],\n message.format(resource_name)\n ))\n continue\n resource_type = resource_values.get('Type', '')\n check_attributes = []\n if resource_type.startswith('Custom::') or resource_type == 'AWS::CloudFormation::CustomResource':\n check_attributes = valid_custom_attributes\n else:\n check_attributes = valid_attributes\n\n for property_key, _ in resource_values.items():\n if property_key not in check_attributes:\n message = 'Invalid resource attribute {0} for resource {1}'\n matches.append(RuleMatch(\n ['Resources', resource_name, property_key],\n message.format(property_key, resource_name)))\n\n resource_type = resource_values.get('Type', '')\n if not resource_type:\n message = 'Type not defined for resource {0}'\n matches.append(RuleMatch(\n ['Resources', resource_name],\n message.format(resource_name)\n ))\n else:\n self.logger.debug('Check resource types by region...')\n for region, specs in cfnlint.helpers.RESOURCE_SPECS.items():\n if region in cfn.regions:\n if resource_type not in specs['ResourceTypes']:\n if not resource_type.startswith(('Custom::', 'AWS::Serverless::')):\n message = 'Invalid or unsupported Type {0} for resource {1} in {2}'\n matches.append(RuleMatch(\n ['Resources', resource_name, 'Type'],\n message.format(resource_type, resource_name, 
region)\n ))\n\n if 'Properties' not in resource_values:\n resource_spec = cfnlint.helpers.RESOURCE_SPECS[cfn.regions[0]]\n if resource_type in resource_spec['ResourceTypes']:\n properties_spec = resource_spec['ResourceTypes'][resource_type]['Properties']\n # pylint: disable=len-as-condition\n if len(properties_spec) > 0:\n required = 0\n for _, property_spec in properties_spec.items():\n if property_spec.get('Required', False):\n required += 1\n if required > 0:\n if resource_type == 'AWS::CloudFormation::WaitCondition' and 'CreationPolicy' in resource_values.keys():\n self.logger.debug('Exception to required properties section as CreationPolicy is defined.')\n else:\n message = 'Properties not defined for resource {0}'\n matches.append(RuleMatch(\n ['Resources', resource_name],\n message.format(resource_name)\n ))\n\n return matches\n", "path": "src/cfnlint/rules/resources/Configuration.py"}], "after_files": [{"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport six\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\n\n\nclass Exists(CloudFormationLintRule):\n \"\"\"Check if used Conditions are defined \"\"\"\n id = 'E8002'\n shortdesc = 'Check if the referenced Conditions are defined'\n description = 'Making sure the used conditions are actually defined in the Conditions section'\n source_url = 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/conditions-section-structure.html'\n tags = ['conditions']\n\n def match(self, cfn):\n \"\"\"Check CloudFormation Conditions\"\"\"\n\n matches = []\n ref_conditions = {}\n\n # Get all defined conditions\n conditions = cfn.template.get('Conditions', {})\n\n # Get all \"If's\" that reference a Condition\n iftrees = cfn.search_deep_keys('Fn::If')\n for iftree in iftrees:\n if isinstance(iftree[-1], list):\n ref_conditions[iftree[-1][0]] = iftree\n else:\n ref_conditions[iftree[-1]] = iftree\n\n # Get resource's Conditions\n for resource_name, resource_values in cfn.get_resources().items():\n condition = resource_values.get('Condition')\n if isinstance(condition, six.string_types): # make sure its a string\n path = ['Resources', resource_name, 'Condition']\n ref_conditions[condition] = path\n\n # Get conditions used by another condition\n condtrees = cfn.search_deep_keys('Condition')\n\n for condtree in condtrees:\n if condtree[0] == 'Conditions':\n if isinstance(condtree[-1], (str, six.text_type, six.string_types)):\n path = ['Conditions', condtree[-1]]\n ref_conditions[condtree[-1]] = path\n\n # Get Output Conditions\n for _, output_values in cfn.template.get('Outputs', {}).items():\n if 
'Condition' in output_values:\n path = ['Outputs', output_values['Condition']]\n ref_conditions[output_values['Condition']] = path\n\n # Check if all the conditions are defined\n for ref_condition, ref_path in ref_conditions.items():\n if ref_condition not in conditions:\n message = 'Condition {0} is not defined.'\n matches.append(RuleMatch(\n ref_path,\n message.format(ref_condition)\n ))\n\n return matches\n", "path": "src/cfnlint/rules/conditions/Exists.py"}, {"content": "\"\"\"\n Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of this\n software and associated documentation files (the \"Software\"), to deal in the Software\n without restriction, including without limitation the rights to use, copy, modify,\n merge, publish, distribute, sublicense, and/or sell copies of the Software, and to\n permit persons to whom the Software is furnished to do so.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A\n PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION\n OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE\n SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\nimport six\nfrom cfnlint import CloudFormationLintRule\nfrom cfnlint import RuleMatch\nimport cfnlint.helpers\n\n\nclass Configuration(CloudFormationLintRule):\n \"\"\"Check Base Resource Configuration\"\"\"\n id = 'E3001'\n shortdesc = 'Basic CloudFormation Resource Check'\n description = 'Making sure the basic CloudFormation resources ' + \\\n 'are properly configured'\n source_url = 'https://github.com/aws-cloudformation/cfn-python-lint'\n tags = ['resources']\n\n def match(self, cfn):\n \"\"\"Check CloudFormation Resources\"\"\"\n\n matches = []\n\n valid_attributes = [\n 'CreationPolicy',\n 'DeletionPolicy',\n 'DependsOn',\n 'Metadata',\n 'UpdatePolicy',\n 'UpdateReplacePolicy',\n 'Properties',\n 'Type',\n 'Condition'\n ]\n\n valid_custom_attributes = [\n 'Version',\n 'Properties',\n 'DependsOn',\n 'Metadata',\n 'Condition',\n 'Type',\n ]\n\n resources = cfn.template.get('Resources', {})\n if not isinstance(resources, dict):\n message = 'Resource not properly configured'\n matches.append(RuleMatch(['Resources'], message))\n else:\n for resource_name, resource_values in cfn.template.get('Resources', {}).items():\n self.logger.debug('Validating resource %s base configuration', resource_name)\n if not isinstance(resource_values, dict):\n message = 'Resource not properly configured at {0}'\n matches.append(RuleMatch(\n ['Resources', resource_name],\n message.format(resource_name)\n ))\n continue\n resource_type = resource_values.get('Type', '')\n check_attributes = []\n if resource_type.startswith('Custom::') or resource_type == 'AWS::CloudFormation::CustomResource':\n check_attributes = valid_custom_attributes\n else:\n check_attributes = valid_attributes\n\n for property_key, _ in resource_values.items():\n if property_key not in check_attributes:\n message = 'Invalid resource attribute {0} for resource {1}'\n matches.append(RuleMatch(\n ['Resources', resource_name, property_key],\n message.format(property_key, resource_name)))\n\n # validate condition is a string\n condition = resource_values.get('Condition', '')\n if not isinstance(condition, 
six.string_types):\n message = 'Condition for resource {0} should be a string'\n matches.append(RuleMatch(\n ['Resources', resource_name, 'Condition'],\n message.format(resource_name)\n ))\n\n resource_type = resource_values.get('Type', '')\n if not resource_type:\n message = 'Type not defined for resource {0}'\n matches.append(RuleMatch(\n ['Resources', resource_name],\n message.format(resource_name)\n ))\n else:\n self.logger.debug('Check resource types by region...')\n for region, specs in cfnlint.helpers.RESOURCE_SPECS.items():\n if region in cfn.regions:\n if resource_type not in specs['ResourceTypes']:\n if not resource_type.startswith(('Custom::', 'AWS::Serverless::')):\n message = 'Invalid or unsupported Type {0} for resource {1} in {2}'\n matches.append(RuleMatch(\n ['Resources', resource_name, 'Type'],\n message.format(resource_type, resource_name, region)\n ))\n\n if 'Properties' not in resource_values:\n resource_spec = cfnlint.helpers.RESOURCE_SPECS[cfn.regions[0]]\n if resource_type in resource_spec['ResourceTypes']:\n properties_spec = resource_spec['ResourceTypes'][resource_type]['Properties']\n # pylint: disable=len-as-condition\n if len(properties_spec) > 0:\n required = 0\n for _, property_spec in properties_spec.items():\n if property_spec.get('Required', False):\n required += 1\n if required > 0:\n if resource_type == 'AWS::CloudFormation::WaitCondition' and 'CreationPolicy' in resource_values.keys():\n self.logger.debug('Exception to required properties section as CreationPolicy is defined.')\n else:\n message = 'Properties not defined for resource {0}'\n matches.append(RuleMatch(\n ['Resources', resource_name],\n message.format(resource_name)\n ))\n\n return matches\n", "path": "src/cfnlint/rules/resources/Configuration.py"}]}
2860
442
gh_patches_debug_1252
rasdani/github-patches
git_diff
mitmproxy__mitmproxy-4762
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- When too many requests come simultaneously, mitmdump called an error and quited [ValueError: too many file descriptors in select()] #### Problem Description A clear and concise description of what the bug is. When too many requests come simultaneously, mitmdump called an error and quited. Traceback (most recent call last): File "mitmdump", line 3, in <module> File "mitmproxy\tools\main.py", line 147, in mitmdump File "mitmproxy\tools\main.py", line 114, in run File "mitmproxy\master.py", line 76, in run File "mitmproxy\master.py", line 59, in run_loop File "mitmproxy\master.py", line 95, in shutdown File "asyncio\base_events.py", line 629, in run_until_complete File "asyncio\base_events.py", line 596, in run_forever File "asyncio\base_events.py", line 1854, in _run_once File "selectors.py", line 324, in select File "selectors.py", line 315, in _select ValueError: too many file descriptors in select() [77436] Failed to execute script 'mitmdump' due to unhandled exception! I googled the error message, and found the following answer. Don't know if it's related. https://stackoverflow.com/questions/57182009/why-am-i-getting-an-valueerror-too-many-file-descriptors-in-select #### Steps to reproduce the behavior: 1. I use the following command `mitmdump.exe -p 8080 --anticomp -q -s "d:\redirect-router.py"` In the script, I re-write the host for a specific URL 2. 3. #### System Information Paste the output of "mitmproxy --version" here. mitmproxy --version Mitmproxy: 7.0.2 binary Python: 3.9.6 OpenSSL: OpenSSL 1.1.1k 25 Mar 2021 Platform: Windows-10-10.0.18363-SP0 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mitmproxy/__init__.py` Content: ``` 1 import asyncio 2 import sys 3 4 if sys.platform == 'win32': 5 # workaround for 6 # https://github.com/tornadoweb/tornado/issues/2751 7 # https://www.tornadoweb.org/en/stable/index.html#installation 8 # (copied multiple times in the codebase, please remove all occurrences) 9 asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) 10 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mitmproxy/__init__.py b/mitmproxy/__init__.py --- a/mitmproxy/__init__.py +++ b/mitmproxy/__init__.py @@ -1,9 +0,0 @@ -import asyncio -import sys - -if sys.platform == 'win32': - # workaround for - # https://github.com/tornadoweb/tornado/issues/2751 - # https://www.tornadoweb.org/en/stable/index.html#installation - # (copied multiple times in the codebase, please remove all occurrences) - asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
{"golden_diff": "diff --git a/mitmproxy/__init__.py b/mitmproxy/__init__.py\n--- a/mitmproxy/__init__.py\n+++ b/mitmproxy/__init__.py\n@@ -1,9 +0,0 @@\n-import asyncio\n-import sys\n-\n-if sys.platform == 'win32':\n- # workaround for\n- # https://github.com/tornadoweb/tornado/issues/2751\n- # https://www.tornadoweb.org/en/stable/index.html#installation\n- # (copied multiple times in the codebase, please remove all occurrences)\n- asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())\n", "issue": "When too many requests come simultaneously, mitmdump called an error and quited [ValueError: too many file descriptors in select()]\n#### Problem Description\r\nA clear and concise description of what the bug is.\r\nWhen too many requests come simultaneously, mitmdump called an error and quited.\r\nTraceback (most recent call last):\r\n File \"mitmdump\", line 3, in <module>\r\n File \"mitmproxy\\tools\\main.py\", line 147, in mitmdump\r\n File \"mitmproxy\\tools\\main.py\", line 114, in run\r\n File \"mitmproxy\\master.py\", line 76, in run\r\n File \"mitmproxy\\master.py\", line 59, in run_loop\r\n File \"mitmproxy\\master.py\", line 95, in shutdown\r\n File \"asyncio\\base_events.py\", line 629, in run_until_complete\r\n File \"asyncio\\base_events.py\", line 596, in run_forever\r\n File \"asyncio\\base_events.py\", line 1854, in _run_once\r\n File \"selectors.py\", line 324, in select\r\n File \"selectors.py\", line 315, in _select\r\nValueError: too many file descriptors in select()\r\n[77436] Failed to execute script 'mitmdump' due to unhandled exception!\r\n\r\nI googled the error message, and found the following answer. Don't know if it's related.\r\nhttps://stackoverflow.com/questions/57182009/why-am-i-getting-an-valueerror-too-many-file-descriptors-in-select\r\n\r\n#### Steps to reproduce the behavior:\r\n1. I use the following command\r\n`mitmdump.exe -p 8080 --anticomp -q -s \"d:\\redirect-router.py\"`\r\nIn the script, I re-write the host for a specific URL\r\n2. \r\n3. \r\n\r\n#### System Information\r\nPaste the output of \"mitmproxy --version\" here.\r\nmitmproxy --version\r\nMitmproxy: 7.0.2 binary\r\nPython: 3.9.6\r\nOpenSSL: OpenSSL 1.1.1k 25 Mar 2021\r\nPlatform: Windows-10-10.0.18363-SP0\n", "before_files": [{"content": "import asyncio\nimport sys\n\nif sys.platform == 'win32':\n # workaround for\n # https://github.com/tornadoweb/tornado/issues/2751\n # https://www.tornadoweb.org/en/stable/index.html#installation\n # (copied multiple times in the codebase, please remove all occurrences)\n asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())\n", "path": "mitmproxy/__init__.py"}], "after_files": [{"content": "", "path": "mitmproxy/__init__.py"}]}
871
144
gh_patches_debug_30874
rasdani/github-patches
git_diff
fal-ai__dbt-fal-544
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add the dbt target.name as a property on the context object **Context** I am sending slack messages and to keep things simple I have opted for one channel. (for some given set of models, that's handled through meta anyway i.e. slack_channel_id). However, doing this - it's hard to tell what the given environment was if you use one channel for all dbt invocations. **Describe the solution you'd like** I would like to be able to use context to access dbt properties such as the target.name e.g. ``` context.target.name # dev, test, prod, etc. ``` **Describe alternatives you've considered** This can be worked around by injecting the {{ target.name }} as a meta field to a model.yml e.g: ``` version: 2 models: - name: my_funky_model meta: SLACK_CHANNEL_ID: XXXXXXXXXX TARGET: "{{ target.name }}" fal: scripts: - fal_scripts/notify_slack.py ``` However this does seem redundant/laborious to add to all .yml definitions --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/fal/fal_script.py` Content: ``` 1 import os 2 import json 3 from typing import Dict, Any, List, Optional, Union, Callable 4 from pathlib import Path 5 from functools import partial 6 from dataclasses import dataclass, field 7 from deprecation import deprecated 8 9 from faldbt.parse import normalize_path 10 from faldbt.project import DbtModel, FalDbt 11 12 from dbt.contracts.results import RunStatus 13 from dbt.config.runtime import RuntimeConfig 14 from fal.logger import LOGGER 15 16 from dbt.contracts.graph.parsed import ColumnInfo 17 18 19 class Hook: 20 path: str 21 arguments: Dict[str, Any] 22 23 24 @dataclass 25 class LocalHook(Hook): 26 path: str 27 arguments: Dict[str, Any] = field(default_factory=dict) 28 29 30 @dataclass 31 class IsolatedHook(Hook): 32 path: str 33 environment_name: str 34 arguments: Dict[str, Any] = field(default_factory=dict) 35 36 37 def _is_local_environment(environment_name: str) -> None: 38 return environment_name == "local" 39 40 41 def create_hook(raw_hook: Any, default_environment_name: Optional[str] = None) -> Hook: 42 if isinstance(raw_hook, str): 43 raw_hook = {"path": raw_hook} 44 45 if not isinstance(raw_hook, dict): 46 raise ValueError(f"Unrecognized hook value: {raw_hook}") 47 48 if "path" not in raw_hook: 49 raise ValueError(f"A hook must specify path.") 50 51 environment_name = raw_hook.get("environment", default_environment_name) 52 if environment_name and not _is_local_environment(environment_name): 53 return IsolatedHook( 54 raw_hook["path"], 55 environment_name, 56 raw_hook.get("with", {}), 57 ) 58 else: 59 return LocalHook(raw_hook["path"], raw_hook.get("with", {})) 60 61 62 @dataclass 63 class CurrentAdapterResponse: 64 message: str 65 code: Optional[str] 66 rows_affected: Optional[int] 67 68 69 @dataclass 70 class CurrentModel: 71 name: str 72 alias: str 73 status: RunStatus 74 columns: Dict[str, ColumnInfo] 75 tests: List[Any] 76 meta: Dict[Any, Any] 77 adapter_response: Optional[CurrentAdapterResponse] 78 79 80 @dataclass 81 class CurrentTest: 82 name: str 83 model_name: str 84 column: str 85 status: str 86 87 @property 88 @deprecated(details="Use 'model_name' instead") 89 def modelname(self): 90 return self.model_name 91 92 93 @dataclass 94 class ContextConfig: 95 target_path: Path 96 97 def __init__(self, config: RuntimeConfig): 98 self.target_path = Path( 99 
os.path.realpath(os.path.join(config.project_root, config.target_path)) 100 ) 101 102 103 @dataclass 104 class Context: 105 current_model: Union[CurrentModel, None] 106 config: ContextConfig 107 _arguments: Optional[Dict[str, Any]] = field(repr=False, default=None) 108 109 @property 110 def arguments(self) -> Dict[str, Any]: 111 if self._arguments is None: 112 raise ValueError( 113 "'context.arguments' is only accessible from hooks, " 114 "not from scripts/models" 115 ) 116 return self._arguments 117 118 119 @dataclass(frozen=True, init=False) 120 class FalScript: 121 model: Optional[DbtModel] 122 path: Path 123 faldbt: FalDbt 124 hook_arguments: Optional[Dict[str, Any]] 125 is_hook: bool 126 127 def __init__( 128 self, 129 faldbt: FalDbt, 130 model: Optional[DbtModel], 131 path: str, 132 hook_arguments: Optional[Dict[str, Any]] = None, 133 is_hook: bool = False, 134 ): 135 # Necessary because of frozen=True 136 object.__setattr__(self, "model", model) 137 object.__setattr__(self, "path", normalize_path(faldbt.scripts_dir, path)) 138 object.__setattr__(self, "faldbt", faldbt) 139 object.__setattr__(self, "hook_arguments", hook_arguments) 140 object.__setattr__(self, "is_hook", is_hook) 141 142 @classmethod 143 def from_hook(cls, faldbt: FalDbt, model: DbtModel, hook: Hook): 144 """ 145 Creates a FalScript from a hook 146 """ 147 assert isinstance(hook, LocalHook) 148 return cls( 149 faldbt=faldbt, 150 model=model, 151 path=hook.path, 152 hook_arguments=hook.arguments, 153 is_hook=True, 154 ) 155 156 @classmethod 157 def model_script(cls, faldbt: FalDbt, model: DbtModel): 158 script = FalScript(faldbt, model, "") 159 # HACK: Set the script path specially for this case 160 object.__setattr__(script, "path", model.python_model) 161 return script 162 163 def exec(self): 164 """ 165 Executes the script 166 """ 167 # Enable local imports 168 try: 169 source_code = python_from_file(self.path) 170 program = compile(source_code, self.path, "exec") 171 172 exec_globals = { 173 "__name__": "__main__", 174 "context": self._build_script_context(), 175 "ref": self.faldbt.ref, 176 "source": self.faldbt.source, 177 "write_to_firestore": self.faldbt.write_to_firestore, 178 "list_models": self.faldbt.list_models, 179 "list_models_ids": self.faldbt.list_models_ids, 180 "list_sources": self.faldbt.list_sources, 181 "list_features": self.faldbt.list_features, 182 "el": self.faldbt.el, 183 "execute_sql": self.faldbt.execute_sql, 184 } 185 186 if not self.is_hook: 187 exec_globals["write_to_source"] = self.faldbt.write_to_source 188 189 if self.model is not None: 190 # Hard-wire the model 191 exec_globals["write_to_model"] = partial( 192 self.faldbt.write_to_model, 193 target_1=self.model.name, 194 target_2=None, 195 ) 196 197 else: 198 exec_globals["write_to_source"] = _not_allowed_function_maker( 199 "write_to_source" 200 ) 201 exec_globals["write_to_model"] = _not_allowed_function_maker( 202 "write_to_model" 203 ) 204 exec(program, exec_globals) 205 finally: 206 pass 207 208 @property 209 def relative_path(self): 210 if self.is_model: 211 return self.path.relative_to(self.faldbt.project_dir) 212 else: 213 return self.path.relative_to(self.faldbt.scripts_dir) 214 215 @property 216 def id(self): 217 if self.is_model: 218 return f"(model: {self.relative_path})" 219 else: 220 return f"({self.model_name}, {self.relative_path})" 221 222 @property 223 def is_global(self): 224 return self.model is None 225 226 @property 227 def is_model(self): 228 if self.model is not None and self.model.python_model is not None: 229 
return self.model.python_model == self.path 230 231 @property 232 def model_name(self): 233 return "<GLOBAL>" if self.is_global else self.model.name # type: ignore 234 235 def _build_script_context(self) -> Context: 236 context_config = ContextConfig(self.faldbt._config) 237 if self.is_global: 238 return Context(current_model=None, config=context_config) 239 240 model: DbtModel = self.model # type: ignore 241 242 meta = model.meta or {} 243 _del_key(meta, self.faldbt.keyword) 244 245 tests = _process_tests(model.tests) 246 247 current_adapter_response = None 248 if model.adapter_response: 249 current_adapter_response = CurrentAdapterResponse( 250 message=str(model.adapter_response), 251 code=model.adapter_response.code, 252 rows_affected=model.adapter_response.rows_affected, 253 ) 254 255 current_model = CurrentModel( 256 name=model.name, 257 alias=model.alias, 258 status=model.status, 259 columns=model.columns, 260 tests=tests, 261 meta=meta, 262 adapter_response=current_adapter_response, 263 ) 264 265 return Context( 266 current_model=current_model, 267 config=context_config, 268 _arguments=self.hook_arguments, 269 ) 270 271 272 def _del_key(dict: Dict[str, Any], key: str): 273 try: 274 del dict[key] 275 except KeyError: 276 pass 277 278 279 def _process_tests(tests: List[Any]): 280 return list( 281 map( 282 lambda test: CurrentTest( 283 name=test.name, 284 column=test.column, 285 status=test.status, 286 model_name=test.model, 287 ), 288 tests, 289 ) 290 ) 291 292 293 def python_from_file(path: Path) -> str: 294 with open(path) as file: 295 raw_source_code = file.read() 296 if path.suffix == ".ipynb": 297 raw_source_code = _process_ipynb(raw_source_code) 298 return raw_source_code 299 300 301 def _process_ipynb(raw_source_code: str) -> str: 302 def strip_magic(source: List[str]) -> List[str]: 303 NOTEBOOK_LIB = "faldbt.magics" 304 return [item for item in source if item[0] != "%" and NOTEBOOK_LIB not in item] 305 306 ipynb_struct = json.loads(raw_source_code) 307 308 script_list = [] 309 for cell in ipynb_struct["cells"]: 310 if cell["cell_type"] == "code": 311 source = strip_magic(cell["source"]) 312 script_list.append("".join(source)) 313 314 joined_script = "\n #cell \n".join(script_list) 315 316 LOGGER.debug(f"Joined .ipynb cells to:\n{joined_script}") 317 318 return joined_script 319 320 321 def _not_allowed_function_maker(function_name: str) -> Callable[[Any], None]: 322 def not_allowed_function(*args, **kwargs): 323 raise Exception( 324 ( 325 f"{function_name} is not allowed in hooks." 326 " Consider using a Python model." 327 ) 328 ) 329 330 return not_allowed_function 331 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/fal/fal_script.py b/src/fal/fal_script.py --- a/src/fal/fal_script.py +++ b/src/fal/fal_script.py @@ -99,11 +99,21 @@ os.path.realpath(os.path.join(config.project_root, config.target_path)) ) +@dataclass +class ContextTarget: + def __init__(self, config: RuntimeConfig): + self.profile_name = config.profile_name + self.name = config.target_name + self.threads = config.threads + self.type = config.credentials.type + self.database = config.credentials.database + self.schema = config.credentials.schema @dataclass class Context: current_model: Union[CurrentModel, None] config: ContextConfig + target: ContextTarget _arguments: Optional[Dict[str, Any]] = field(repr=False, default=None) @property @@ -233,9 +243,12 @@ return "<GLOBAL>" if self.is_global else self.model.name # type: ignore def _build_script_context(self) -> Context: - context_config = ContextConfig(self.faldbt._config) + config: RuntimeConfig = self.faldbt._config + context_config = ContextConfig(config) + target = ContextTarget(config) + if self.is_global: - return Context(current_model=None, config=context_config) + return Context(current_model=None, target=target, config=context_config) model: DbtModel = self.model # type: ignore @@ -264,6 +277,7 @@ return Context( current_model=current_model, + target=target, config=context_config, _arguments=self.hook_arguments, )
{"golden_diff": "diff --git a/src/fal/fal_script.py b/src/fal/fal_script.py\n--- a/src/fal/fal_script.py\n+++ b/src/fal/fal_script.py\n@@ -99,11 +99,21 @@\n os.path.realpath(os.path.join(config.project_root, config.target_path))\n )\n \n+@dataclass\n+class ContextTarget:\n+ def __init__(self, config: RuntimeConfig):\n+ self.profile_name = config.profile_name\n+ self.name = config.target_name\n+ self.threads = config.threads\n+ self.type = config.credentials.type\n+ self.database = config.credentials.database\n+ self.schema = config.credentials.schema\n \n @dataclass\n class Context:\n current_model: Union[CurrentModel, None]\n config: ContextConfig\n+ target: ContextTarget\n _arguments: Optional[Dict[str, Any]] = field(repr=False, default=None)\n \n @property\n@@ -233,9 +243,12 @@\n return \"<GLOBAL>\" if self.is_global else self.model.name # type: ignore\n \n def _build_script_context(self) -> Context:\n- context_config = ContextConfig(self.faldbt._config)\n+ config: RuntimeConfig = self.faldbt._config\n+ context_config = ContextConfig(config)\n+ target = ContextTarget(config)\n+\n if self.is_global:\n- return Context(current_model=None, config=context_config)\n+ return Context(current_model=None, target=target, config=context_config)\n \n model: DbtModel = self.model # type: ignore\n \n@@ -264,6 +277,7 @@\n \n return Context(\n current_model=current_model,\n+ target=target,\n config=context_config,\n _arguments=self.hook_arguments,\n )\n", "issue": "Add the dbt target.name as a property on the context object\n**Context**\r\nI am sending slack messages and to keep things simple I have opted for one channel. (for some given set of models, that's handled through meta anyway i.e. slack_channel_id). \r\n\r\nHowever, doing this - it's hard to tell what the given environment was if you use one channel for all dbt invocations.\r\n\r\n**Describe the solution you'd like**\r\nI would like to be able to use context to access dbt properties such as the target.name e.g.\r\n\r\n```\r\n context.target.name # dev, test, prod, etc.\r\n```\r\n**Describe alternatives you've considered**\r\nThis can be worked around by injecting the {{ target.name }} as a meta field to a model.yml e.g:\r\n\r\n```\r\nversion: 2\r\n\r\nmodels:\r\n - name: my_funky_model\r\n meta:\r\n SLACK_CHANNEL_ID: XXXXXXXXXX\r\n TARGET: \"{{ target.name }}\"\r\n fal:\r\n scripts:\r\n - fal_scripts/notify_slack.py\r\n```\r\n\r\nHowever this does seem redundant/laborious to add to all .yml definitions \n", "before_files": [{"content": "import os\nimport json\nfrom typing import Dict, Any, List, Optional, Union, Callable\nfrom pathlib import Path\nfrom functools import partial\nfrom dataclasses import dataclass, field\nfrom deprecation import deprecated\n\nfrom faldbt.parse import normalize_path\nfrom faldbt.project import DbtModel, FalDbt\n\nfrom dbt.contracts.results import RunStatus\nfrom dbt.config.runtime import RuntimeConfig\nfrom fal.logger import LOGGER\n\nfrom dbt.contracts.graph.parsed import ColumnInfo\n\n\nclass Hook:\n path: str\n arguments: Dict[str, Any]\n\n\n@dataclass\nclass LocalHook(Hook):\n path: str\n arguments: Dict[str, Any] = field(default_factory=dict)\n\n\n@dataclass\nclass IsolatedHook(Hook):\n path: str\n environment_name: str\n arguments: Dict[str, Any] = field(default_factory=dict)\n\n\ndef _is_local_environment(environment_name: str) -> None:\n return environment_name == \"local\"\n\n\ndef create_hook(raw_hook: Any, default_environment_name: Optional[str] = None) -> Hook:\n if isinstance(raw_hook, str):\n 
raw_hook = {\"path\": raw_hook}\n\n if not isinstance(raw_hook, dict):\n raise ValueError(f\"Unrecognized hook value: {raw_hook}\")\n\n if \"path\" not in raw_hook:\n raise ValueError(f\"A hook must specify path.\")\n\n environment_name = raw_hook.get(\"environment\", default_environment_name)\n if environment_name and not _is_local_environment(environment_name):\n return IsolatedHook(\n raw_hook[\"path\"],\n environment_name,\n raw_hook.get(\"with\", {}),\n )\n else:\n return LocalHook(raw_hook[\"path\"], raw_hook.get(\"with\", {}))\n\n\n@dataclass\nclass CurrentAdapterResponse:\n message: str\n code: Optional[str]\n rows_affected: Optional[int]\n\n\n@dataclass\nclass CurrentModel:\n name: str\n alias: str\n status: RunStatus\n columns: Dict[str, ColumnInfo]\n tests: List[Any]\n meta: Dict[Any, Any]\n adapter_response: Optional[CurrentAdapterResponse]\n\n\n@dataclass\nclass CurrentTest:\n name: str\n model_name: str\n column: str\n status: str\n\n @property\n @deprecated(details=\"Use 'model_name' instead\")\n def modelname(self):\n return self.model_name\n\n\n@dataclass\nclass ContextConfig:\n target_path: Path\n\n def __init__(self, config: RuntimeConfig):\n self.target_path = Path(\n os.path.realpath(os.path.join(config.project_root, config.target_path))\n )\n\n\n@dataclass\nclass Context:\n current_model: Union[CurrentModel, None]\n config: ContextConfig\n _arguments: Optional[Dict[str, Any]] = field(repr=False, default=None)\n\n @property\n def arguments(self) -> Dict[str, Any]:\n if self._arguments is None:\n raise ValueError(\n \"'context.arguments' is only accessible from hooks, \"\n \"not from scripts/models\"\n )\n return self._arguments\n\n\n@dataclass(frozen=True, init=False)\nclass FalScript:\n model: Optional[DbtModel]\n path: Path\n faldbt: FalDbt\n hook_arguments: Optional[Dict[str, Any]]\n is_hook: bool\n\n def __init__(\n self,\n faldbt: FalDbt,\n model: Optional[DbtModel],\n path: str,\n hook_arguments: Optional[Dict[str, Any]] = None,\n is_hook: bool = False,\n ):\n # Necessary because of frozen=True\n object.__setattr__(self, \"model\", model)\n object.__setattr__(self, \"path\", normalize_path(faldbt.scripts_dir, path))\n object.__setattr__(self, \"faldbt\", faldbt)\n object.__setattr__(self, \"hook_arguments\", hook_arguments)\n object.__setattr__(self, \"is_hook\", is_hook)\n\n @classmethod\n def from_hook(cls, faldbt: FalDbt, model: DbtModel, hook: Hook):\n \"\"\"\n Creates a FalScript from a hook\n \"\"\"\n assert isinstance(hook, LocalHook)\n return cls(\n faldbt=faldbt,\n model=model,\n path=hook.path,\n hook_arguments=hook.arguments,\n is_hook=True,\n )\n\n @classmethod\n def model_script(cls, faldbt: FalDbt, model: DbtModel):\n script = FalScript(faldbt, model, \"\")\n # HACK: Set the script path specially for this case\n object.__setattr__(script, \"path\", model.python_model)\n return script\n\n def exec(self):\n \"\"\"\n Executes the script\n \"\"\"\n # Enable local imports\n try:\n source_code = python_from_file(self.path)\n program = compile(source_code, self.path, \"exec\")\n\n exec_globals = {\n \"__name__\": \"__main__\",\n \"context\": self._build_script_context(),\n \"ref\": self.faldbt.ref,\n \"source\": self.faldbt.source,\n \"write_to_firestore\": self.faldbt.write_to_firestore,\n \"list_models\": self.faldbt.list_models,\n \"list_models_ids\": self.faldbt.list_models_ids,\n \"list_sources\": self.faldbt.list_sources,\n \"list_features\": self.faldbt.list_features,\n \"el\": self.faldbt.el,\n \"execute_sql\": self.faldbt.execute_sql,\n }\n\n if 
not self.is_hook:\n exec_globals[\"write_to_source\"] = self.faldbt.write_to_source\n\n if self.model is not None:\n # Hard-wire the model\n exec_globals[\"write_to_model\"] = partial(\n self.faldbt.write_to_model,\n target_1=self.model.name,\n target_2=None,\n )\n\n else:\n exec_globals[\"write_to_source\"] = _not_allowed_function_maker(\n \"write_to_source\"\n )\n exec_globals[\"write_to_model\"] = _not_allowed_function_maker(\n \"write_to_model\"\n )\n exec(program, exec_globals)\n finally:\n pass\n\n @property\n def relative_path(self):\n if self.is_model:\n return self.path.relative_to(self.faldbt.project_dir)\n else:\n return self.path.relative_to(self.faldbt.scripts_dir)\n\n @property\n def id(self):\n if self.is_model:\n return f\"(model: {self.relative_path})\"\n else:\n return f\"({self.model_name}, {self.relative_path})\"\n\n @property\n def is_global(self):\n return self.model is None\n\n @property\n def is_model(self):\n if self.model is not None and self.model.python_model is not None:\n return self.model.python_model == self.path\n\n @property\n def model_name(self):\n return \"<GLOBAL>\" if self.is_global else self.model.name # type: ignore\n\n def _build_script_context(self) -> Context:\n context_config = ContextConfig(self.faldbt._config)\n if self.is_global:\n return Context(current_model=None, config=context_config)\n\n model: DbtModel = self.model # type: ignore\n\n meta = model.meta or {}\n _del_key(meta, self.faldbt.keyword)\n\n tests = _process_tests(model.tests)\n\n current_adapter_response = None\n if model.adapter_response:\n current_adapter_response = CurrentAdapterResponse(\n message=str(model.adapter_response),\n code=model.adapter_response.code,\n rows_affected=model.adapter_response.rows_affected,\n )\n\n current_model = CurrentModel(\n name=model.name,\n alias=model.alias,\n status=model.status,\n columns=model.columns,\n tests=tests,\n meta=meta,\n adapter_response=current_adapter_response,\n )\n\n return Context(\n current_model=current_model,\n config=context_config,\n _arguments=self.hook_arguments,\n )\n\n\ndef _del_key(dict: Dict[str, Any], key: str):\n try:\n del dict[key]\n except KeyError:\n pass\n\n\ndef _process_tests(tests: List[Any]):\n return list(\n map(\n lambda test: CurrentTest(\n name=test.name,\n column=test.column,\n status=test.status,\n model_name=test.model,\n ),\n tests,\n )\n )\n\n\ndef python_from_file(path: Path) -> str:\n with open(path) as file:\n raw_source_code = file.read()\n if path.suffix == \".ipynb\":\n raw_source_code = _process_ipynb(raw_source_code)\n return raw_source_code\n\n\ndef _process_ipynb(raw_source_code: str) -> str:\n def strip_magic(source: List[str]) -> List[str]:\n NOTEBOOK_LIB = \"faldbt.magics\"\n return [item for item in source if item[0] != \"%\" and NOTEBOOK_LIB not in item]\n\n ipynb_struct = json.loads(raw_source_code)\n\n script_list = []\n for cell in ipynb_struct[\"cells\"]:\n if cell[\"cell_type\"] == \"code\":\n source = strip_magic(cell[\"source\"])\n script_list.append(\"\".join(source))\n\n joined_script = \"\\n #cell \\n\".join(script_list)\n\n LOGGER.debug(f\"Joined .ipynb cells to:\\n{joined_script}\")\n\n return joined_script\n\n\ndef _not_allowed_function_maker(function_name: str) -> Callable[[Any], None]:\n def not_allowed_function(*args, **kwargs):\n raise Exception(\n (\n f\"{function_name} is not allowed in hooks.\"\n \" Consider using a Python model.\"\n )\n )\n\n return not_allowed_function\n", "path": "src/fal/fal_script.py"}], "after_files": [{"content": "import os\nimport 
json\nfrom typing import Dict, Any, List, Optional, Union, Callable\nfrom pathlib import Path\nfrom functools import partial\nfrom dataclasses import dataclass, field\nfrom deprecation import deprecated\n\nfrom faldbt.parse import normalize_path\nfrom faldbt.project import DbtModel, FalDbt\n\nfrom dbt.contracts.results import RunStatus\nfrom dbt.config.runtime import RuntimeConfig\nfrom fal.logger import LOGGER\n\nfrom dbt.contracts.graph.parsed import ColumnInfo\n\n\nclass Hook:\n path: str\n arguments: Dict[str, Any]\n\n\n@dataclass\nclass LocalHook(Hook):\n path: str\n arguments: Dict[str, Any] = field(default_factory=dict)\n\n\n@dataclass\nclass IsolatedHook(Hook):\n path: str\n environment_name: str\n arguments: Dict[str, Any] = field(default_factory=dict)\n\n\ndef _is_local_environment(environment_name: str) -> None:\n return environment_name == \"local\"\n\n\ndef create_hook(raw_hook: Any, default_environment_name: Optional[str] = None) -> Hook:\n if isinstance(raw_hook, str):\n raw_hook = {\"path\": raw_hook}\n\n if not isinstance(raw_hook, dict):\n raise ValueError(f\"Unrecognized hook value: {raw_hook}\")\n\n if \"path\" not in raw_hook:\n raise ValueError(f\"A hook must specify path.\")\n\n environment_name = raw_hook.get(\"environment\", default_environment_name)\n if environment_name and not _is_local_environment(environment_name):\n return IsolatedHook(\n raw_hook[\"path\"],\n environment_name,\n raw_hook.get(\"with\", {}),\n )\n else:\n return LocalHook(raw_hook[\"path\"], raw_hook.get(\"with\", {}))\n\n\n@dataclass\nclass CurrentAdapterResponse:\n message: str\n code: Optional[str]\n rows_affected: Optional[int]\n\n\n@dataclass\nclass CurrentModel:\n name: str\n alias: str\n status: RunStatus\n columns: Dict[str, ColumnInfo]\n tests: List[Any]\n meta: Dict[Any, Any]\n adapter_response: Optional[CurrentAdapterResponse]\n\n\n@dataclass\nclass CurrentTest:\n name: str\n model_name: str\n column: str\n status: str\n\n @property\n @deprecated(details=\"Use 'model_name' instead\")\n def modelname(self):\n return self.model_name\n\n\n@dataclass\nclass ContextConfig:\n target_path: Path\n\n def __init__(self, config: RuntimeConfig):\n self.target_path = Path(\n os.path.realpath(os.path.join(config.project_root, config.target_path))\n )\n\n@dataclass\nclass ContextTarget:\n def __init__(self, config: RuntimeConfig):\n self.profile_name = config.profile_name\n self.name = config.target_name\n self.threads = config.threads\n self.type = config.credentials.type\n self.database = config.credentials.database\n self.schema = config.credentials.schema\n\n@dataclass\nclass Context:\n current_model: Union[CurrentModel, None]\n config: ContextConfig\n target: ContextTarget\n _arguments: Optional[Dict[str, Any]] = field(repr=False, default=None)\n\n @property\n def arguments(self) -> Dict[str, Any]:\n if self._arguments is None:\n raise ValueError(\n \"'context.arguments' is only accessible from hooks, \"\n \"not from scripts/models\"\n )\n return self._arguments\n\n\n@dataclass(frozen=True, init=False)\nclass FalScript:\n model: Optional[DbtModel]\n path: Path\n faldbt: FalDbt\n hook_arguments: Optional[Dict[str, Any]]\n is_hook: bool\n\n def __init__(\n self,\n faldbt: FalDbt,\n model: Optional[DbtModel],\n path: str,\n hook_arguments: Optional[Dict[str, Any]] = None,\n is_hook: bool = False,\n ):\n # Necessary because of frozen=True\n object.__setattr__(self, \"model\", model)\n object.__setattr__(self, \"path\", normalize_path(faldbt.scripts_dir, path))\n object.__setattr__(self, 
\"faldbt\", faldbt)\n object.__setattr__(self, \"hook_arguments\", hook_arguments)\n object.__setattr__(self, \"is_hook\", is_hook)\n\n @classmethod\n def from_hook(cls, faldbt: FalDbt, model: DbtModel, hook: Hook):\n \"\"\"\n Creates a FalScript from a hook\n \"\"\"\n assert isinstance(hook, LocalHook)\n return cls(\n faldbt=faldbt,\n model=model,\n path=hook.path,\n hook_arguments=hook.arguments,\n is_hook=True,\n )\n\n @classmethod\n def model_script(cls, faldbt: FalDbt, model: DbtModel):\n script = FalScript(faldbt, model, \"\")\n # HACK: Set the script path specially for this case\n object.__setattr__(script, \"path\", model.python_model)\n return script\n\n def exec(self):\n \"\"\"\n Executes the script\n \"\"\"\n # Enable local imports\n try:\n source_code = python_from_file(self.path)\n program = compile(source_code, self.path, \"exec\")\n\n exec_globals = {\n \"__name__\": \"__main__\",\n \"context\": self._build_script_context(),\n \"ref\": self.faldbt.ref,\n \"source\": self.faldbt.source,\n \"write_to_firestore\": self.faldbt.write_to_firestore,\n \"list_models\": self.faldbt.list_models,\n \"list_models_ids\": self.faldbt.list_models_ids,\n \"list_sources\": self.faldbt.list_sources,\n \"list_features\": self.faldbt.list_features,\n \"el\": self.faldbt.el,\n \"execute_sql\": self.faldbt.execute_sql,\n }\n\n if not self.is_hook:\n exec_globals[\"write_to_source\"] = self.faldbt.write_to_source\n\n if self.model is not None:\n # Hard-wire the model\n exec_globals[\"write_to_model\"] = partial(\n self.faldbt.write_to_model,\n target_1=self.model.name,\n target_2=None,\n )\n\n else:\n exec_globals[\"write_to_source\"] = _not_allowed_function_maker(\n \"write_to_source\"\n )\n exec_globals[\"write_to_model\"] = _not_allowed_function_maker(\n \"write_to_model\"\n )\n exec(program, exec_globals)\n finally:\n pass\n\n @property\n def relative_path(self):\n if self.is_model:\n return self.path.relative_to(self.faldbt.project_dir)\n else:\n return self.path.relative_to(self.faldbt.scripts_dir)\n\n @property\n def id(self):\n if self.is_model:\n return f\"(model: {self.relative_path})\"\n else:\n return f\"({self.model_name}, {self.relative_path})\"\n\n @property\n def is_global(self):\n return self.model is None\n\n @property\n def is_model(self):\n if self.model is not None and self.model.python_model is not None:\n return self.model.python_model == self.path\n\n @property\n def model_name(self):\n return \"<GLOBAL>\" if self.is_global else self.model.name # type: ignore\n\n def _build_script_context(self) -> Context:\n config: RuntimeConfig = self.faldbt._config\n context_config = ContextConfig(config)\n target = ContextTarget(config)\n\n if self.is_global:\n return Context(current_model=None, target=target, config=context_config)\n\n model: DbtModel = self.model # type: ignore\n\n meta = model.meta or {}\n _del_key(meta, self.faldbt.keyword)\n\n tests = _process_tests(model.tests)\n\n current_adapter_response = None\n if model.adapter_response:\n current_adapter_response = CurrentAdapterResponse(\n message=str(model.adapter_response),\n code=model.adapter_response.code,\n rows_affected=model.adapter_response.rows_affected,\n )\n\n current_model = CurrentModel(\n name=model.name,\n alias=model.alias,\n status=model.status,\n columns=model.columns,\n tests=tests,\n meta=meta,\n adapter_response=current_adapter_response,\n )\n\n return Context(\n current_model=current_model,\n target=target,\n config=context_config,\n _arguments=self.hook_arguments,\n )\n\n\ndef _del_key(dict: Dict[str, 
Any], key: str):\n try:\n del dict[key]\n except KeyError:\n pass\n\n\ndef _process_tests(tests: List[Any]):\n return list(\n map(\n lambda test: CurrentTest(\n name=test.name,\n column=test.column,\n status=test.status,\n model_name=test.model,\n ),\n tests,\n )\n )\n\n\ndef python_from_file(path: Path) -> str:\n with open(path) as file:\n raw_source_code = file.read()\n if path.suffix == \".ipynb\":\n raw_source_code = _process_ipynb(raw_source_code)\n return raw_source_code\n\n\ndef _process_ipynb(raw_source_code: str) -> str:\n def strip_magic(source: List[str]) -> List[str]:\n NOTEBOOK_LIB = \"faldbt.magics\"\n return [item for item in source if item[0] != \"%\" and NOTEBOOK_LIB not in item]\n\n ipynb_struct = json.loads(raw_source_code)\n\n script_list = []\n for cell in ipynb_struct[\"cells\"]:\n if cell[\"cell_type\"] == \"code\":\n source = strip_magic(cell[\"source\"])\n script_list.append(\"\".join(source))\n\n joined_script = \"\\n #cell \\n\".join(script_list)\n\n LOGGER.debug(f\"Joined .ipynb cells to:\\n{joined_script}\")\n\n return joined_script\n\n\ndef _not_allowed_function_maker(function_name: str) -> Callable[[Any], None]:\n def not_allowed_function(*args, **kwargs):\n raise Exception(\n (\n f\"{function_name} is not allowed in hooks.\"\n \" Consider using a Python model.\"\n )\n )\n\n return not_allowed_function\n", "path": "src/fal/fal_script.py"}]}
3,522
394
gh_patches_debug_23336
rasdani/github-patches
git_diff
pytorch__ignite-286
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- tqm_logger: metric_names is currently not optional Hi, https://github.com/pytorch/ignite/blob/master/ignite/contrib/handlers/tqdm_logger.py#L75 This line should be modified to make `metric_names` optional. Here is a suggestion: ``` if metric_names is not None and not isinstance(metric_names, list): raise TypeError("metric_names should be a list, got {} instead".format(type(metric_names))) ``` Thanks --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `ignite/contrib/handlers/tqdm_logger.py` Content: ``` 1 try: 2 from tqdm import tqdm 3 except ImportError: 4 raise RuntimeError("This contrib module requires tqdm to be installed") 5 6 from ignite.engine import Events 7 8 9 class ProgressBar: 10 """ 11 TQDM progress bar handler to log training progress and computed metrics. 12 13 Examples: 14 15 Create a progress bar that shows you some metrics as they are computed, 16 by simply attaching the progress bar object to your engine. 17 18 .. code-block:: python 19 20 pbar = ProgressBar() 21 pbar.attach(trainer, ['loss']) 22 23 Note: 24 When adding attaching the progress bar to an engine, it is recommend that you replace 25 every print operation in the engine's handlers triggered every iteration with 26 ``pbar.log_message`` to guarantee the correct format of the stdout. 27 """ 28 29 def __init__(self): 30 self.pbar = None 31 32 def _reset(self, engine): 33 self.pbar = tqdm( 34 total=len(engine.state.dataloader), 35 leave=False, 36 bar_format='{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]') 37 38 def _close(self, engine): 39 self.pbar.close() 40 self.pbar = None 41 42 def _update(self, engine, metric_names=None): 43 if self.pbar is None: 44 self._reset(engine) 45 46 self.pbar.set_description('Epoch {}'.format(engine.state.epoch)) 47 48 if metric_names is not None: 49 if not all(metric in engine.state.metrics for metric in metric_names): 50 raise KeyError("metrics not found in engine.state.metrics") 51 52 metrics = {name: '{:.2e}'.format(engine.state.metrics[name]) for name in metric_names} 53 self.pbar.set_postfix(**metrics) 54 55 self.pbar.update() 56 57 @staticmethod 58 def log_message(message): 59 """ 60 Logs a message, preserving the progress bar correct output format 61 62 Args: 63 message (str): string you wish to log 64 """ 65 tqdm.write(message) 66 67 def attach(self, engine, metric_names=None): 68 """ 69 Attaches the progress bar to an engine object 70 71 Args: 72 engine (Engine): engine object 73 metric_names (list): (Optional) list of the metrics names to log as the bar progresses 74 """ 75 if not isinstance(metric_names, list): 76 raise TypeError("metric_names should be a list, got {} instead".format(type(metric_names))) 77 78 engine.add_event_handler(Events.EPOCH_COMPLETED, self._close) 79 engine.add_event_handler(Events.ITERATION_COMPLETED, self._update, metric_names) 80 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. 
Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/ignite/contrib/handlers/tqdm_logger.py b/ignite/contrib/handlers/tqdm_logger.py --- a/ignite/contrib/handlers/tqdm_logger.py +++ b/ignite/contrib/handlers/tqdm_logger.py @@ -47,6 +47,7 @@ if metric_names is not None: if not all(metric in engine.state.metrics for metric in metric_names): + self._close(engine) raise KeyError("metrics not found in engine.state.metrics") metrics = {name: '{:.2e}'.format(engine.state.metrics[name]) for name in metric_names} @@ -72,7 +73,7 @@ engine (Engine): engine object metric_names (list): (Optional) list of the metrics names to log as the bar progresses """ - if not isinstance(metric_names, list): + if metric_names is not None and not isinstance(metric_names, list): raise TypeError("metric_names should be a list, got {} instead".format(type(metric_names))) engine.add_event_handler(Events.EPOCH_COMPLETED, self._close)
{"golden_diff": "diff --git a/ignite/contrib/handlers/tqdm_logger.py b/ignite/contrib/handlers/tqdm_logger.py\n--- a/ignite/contrib/handlers/tqdm_logger.py\n+++ b/ignite/contrib/handlers/tqdm_logger.py\n@@ -47,6 +47,7 @@\n \n if metric_names is not None:\n if not all(metric in engine.state.metrics for metric in metric_names):\n+ self._close(engine)\n raise KeyError(\"metrics not found in engine.state.metrics\")\n \n metrics = {name: '{:.2e}'.format(engine.state.metrics[name]) for name in metric_names}\n@@ -72,7 +73,7 @@\n engine (Engine): engine object\n metric_names (list): (Optional) list of the metrics names to log as the bar progresses\n \"\"\"\n- if not isinstance(metric_names, list):\n+ if metric_names is not None and not isinstance(metric_names, list):\n raise TypeError(\"metric_names should be a list, got {} instead\".format(type(metric_names)))\n \n engine.add_event_handler(Events.EPOCH_COMPLETED, self._close)\n", "issue": "tqm_logger: metric_names is currently not optional \nHi,\r\n\r\nhttps://github.com/pytorch/ignite/blob/master/ignite/contrib/handlers/tqdm_logger.py#L75\r\nThis line should be modified to make `metric_names` optional. Here is a suggestion:\r\n```\r\nif metric_names is not None and not isinstance(metric_names, list):\r\n raise TypeError(\"metric_names should be a list, got {} instead\".format(type(metric_names)))\r\n```\r\n\r\nThanks\n", "before_files": [{"content": "try:\n from tqdm import tqdm\nexcept ImportError:\n raise RuntimeError(\"This contrib module requires tqdm to be installed\")\n\nfrom ignite.engine import Events\n\n\nclass ProgressBar:\n \"\"\"\n TQDM progress bar handler to log training progress and computed metrics.\n\n Examples:\n\n Create a progress bar that shows you some metrics as they are computed,\n by simply attaching the progress bar object to your engine.\n\n .. 
code-block:: python\n\n pbar = ProgressBar()\n pbar.attach(trainer, ['loss'])\n\n Note:\n When adding attaching the progress bar to an engine, it is recommend that you replace\n every print operation in the engine's handlers triggered every iteration with\n ``pbar.log_message`` to guarantee the correct format of the stdout.\n \"\"\"\n\n def __init__(self):\n self.pbar = None\n\n def _reset(self, engine):\n self.pbar = tqdm(\n total=len(engine.state.dataloader),\n leave=False,\n bar_format='{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]')\n\n def _close(self, engine):\n self.pbar.close()\n self.pbar = None\n\n def _update(self, engine, metric_names=None):\n if self.pbar is None:\n self._reset(engine)\n\n self.pbar.set_description('Epoch {}'.format(engine.state.epoch))\n\n if metric_names is not None:\n if not all(metric in engine.state.metrics for metric in metric_names):\n raise KeyError(\"metrics not found in engine.state.metrics\")\n\n metrics = {name: '{:.2e}'.format(engine.state.metrics[name]) for name in metric_names}\n self.pbar.set_postfix(**metrics)\n\n self.pbar.update()\n\n @staticmethod\n def log_message(message):\n \"\"\"\n Logs a message, preserving the progress bar correct output format\n\n Args:\n message (str): string you wish to log\n \"\"\"\n tqdm.write(message)\n\n def attach(self, engine, metric_names=None):\n \"\"\"\n Attaches the progress bar to an engine object\n\n Args:\n engine (Engine): engine object\n metric_names (list): (Optional) list of the metrics names to log as the bar progresses\n \"\"\"\n if not isinstance(metric_names, list):\n raise TypeError(\"metric_names should be a list, got {} instead\".format(type(metric_names)))\n\n engine.add_event_handler(Events.EPOCH_COMPLETED, self._close)\n engine.add_event_handler(Events.ITERATION_COMPLETED, self._update, metric_names)\n", "path": "ignite/contrib/handlers/tqdm_logger.py"}], "after_files": [{"content": "try:\n from tqdm import tqdm\nexcept ImportError:\n raise RuntimeError(\"This contrib module requires tqdm to be installed\")\n\nfrom ignite.engine import Events\n\n\nclass ProgressBar:\n \"\"\"\n TQDM progress bar handler to log training progress and computed metrics.\n\n Examples:\n\n Create a progress bar that shows you some metrics as they are computed,\n by simply attaching the progress bar object to your engine.\n\n .. 
code-block:: python\n\n pbar = ProgressBar()\n pbar.attach(trainer, ['loss'])\n\n Note:\n When adding attaching the progress bar to an engine, it is recommend that you replace\n every print operation in the engine's handlers triggered every iteration with\n ``pbar.log_message`` to guarantee the correct format of the stdout.\n \"\"\"\n\n def __init__(self):\n self.pbar = None\n\n def _reset(self, engine):\n self.pbar = tqdm(\n total=len(engine.state.dataloader),\n leave=False,\n bar_format='{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]')\n\n def _close(self, engine):\n self.pbar.close()\n self.pbar = None\n\n def _update(self, engine, metric_names=None):\n if self.pbar is None:\n self._reset(engine)\n\n self.pbar.set_description('Epoch {}'.format(engine.state.epoch))\n\n if metric_names is not None:\n if not all(metric in engine.state.metrics for metric in metric_names):\n self._close(engine)\n raise KeyError(\"metrics not found in engine.state.metrics\")\n\n metrics = {name: '{:.2e}'.format(engine.state.metrics[name]) for name in metric_names}\n self.pbar.set_postfix(**metrics)\n\n self.pbar.update()\n\n @staticmethod\n def log_message(message):\n \"\"\"\n Logs a message, preserving the progress bar correct output format\n\n Args:\n message (str): string you wish to log\n \"\"\"\n tqdm.write(message)\n\n def attach(self, engine, metric_names=None):\n \"\"\"\n Attaches the progress bar to an engine object\n\n Args:\n engine (Engine): engine object\n metric_names (list): (Optional) list of the metrics names to log as the bar progresses\n \"\"\"\n if metric_names is not None and not isinstance(metric_names, list):\n raise TypeError(\"metric_names should be a list, got {} instead\".format(type(metric_names)))\n\n engine.add_event_handler(Events.EPOCH_COMPLETED, self._close)\n engine.add_event_handler(Events.ITERATION_COMPLETED, self._update, metric_names)\n", "path": "ignite/contrib/handlers/tqdm_logger.py"}]}
1,069
244
gh_patches_debug_60627
rasdani/github-patches
git_diff
CTPUG__wafer-111
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Wafer page editing fails on Django 1.8 with ImproperlyConfigured: error As seen on Travis, and confirmed locally, attempting to edit a page bombs out, ending with "Specifying both 'fields' and 'form_class' is not permitted." ImproperlyConfigured: Specifying both 'fields' and 'form_class' is not permitted. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `wafer/pages/views.py` Content: ``` 1 from django.http import Http404 2 from django.core.exceptions import PermissionDenied 3 from django.views.generic import DetailView, TemplateView, UpdateView 4 5 from wafer.pages.models import Page 6 from wafer.pages.forms import PageForm 7 8 9 class ShowPage(DetailView): 10 template_name = 'wafer.pages/page.html' 11 model = Page 12 13 14 class EditPage(UpdateView): 15 template_name = 'wafer.pages/page_form.html' 16 model = Page 17 form_class = PageForm 18 fields = ['name', 'content'] 19 20 21 def slug(request, url): 22 """Look up a page by url (which is a tree of slugs)""" 23 page = None 24 for slug in url.split('/'): 25 if not slug: 26 continue 27 try: 28 page = Page.objects.get(slug=slug, parent=page) 29 except Page.DoesNotExist: 30 raise Http404 31 32 if page is None: 33 try: 34 page = Page.objects.get(slug='index') 35 except Page.DoesNotExist: 36 return TemplateView.as_view( 37 template_name='wafer/index.html')(request) 38 39 if 'edit' in request.GET.keys(): 40 if not request.user.has_perm('pages.change_page'): 41 raise PermissionDenied 42 return EditPage.as_view()(request, pk=page.id) 43 44 return ShowPage.as_view()(request, pk=page.id) 45 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/wafer/pages/views.py b/wafer/pages/views.py --- a/wafer/pages/views.py +++ b/wafer/pages/views.py @@ -15,7 +15,6 @@ template_name = 'wafer.pages/page_form.html' model = Page form_class = PageForm - fields = ['name', 'content'] def slug(request, url):
{"golden_diff": "diff --git a/wafer/pages/views.py b/wafer/pages/views.py\n--- a/wafer/pages/views.py\n+++ b/wafer/pages/views.py\n@@ -15,7 +15,6 @@\n template_name = 'wafer.pages/page_form.html'\n model = Page\n form_class = PageForm\n- fields = ['name', 'content']\n \n \n def slug(request, url):\n", "issue": "Wafer page editing fails on Django 1.8 with ImproperlyConfigured: error\nAs seen on Travis, and confirmed locally, attempting to edit a page bombs out, ending with\n\n\"Specifying both 'fields' and 'form_class' is not permitted.\"\nImproperlyConfigured: Specifying both 'fields' and 'form_class' is not permitted.\n\n", "before_files": [{"content": "from django.http import Http404\nfrom django.core.exceptions import PermissionDenied\nfrom django.views.generic import DetailView, TemplateView, UpdateView\n\nfrom wafer.pages.models import Page\nfrom wafer.pages.forms import PageForm\n\n\nclass ShowPage(DetailView):\n template_name = 'wafer.pages/page.html'\n model = Page\n\n\nclass EditPage(UpdateView):\n template_name = 'wafer.pages/page_form.html'\n model = Page\n form_class = PageForm\n fields = ['name', 'content']\n\n\ndef slug(request, url):\n \"\"\"Look up a page by url (which is a tree of slugs)\"\"\"\n page = None\n for slug in url.split('/'):\n if not slug:\n continue\n try:\n page = Page.objects.get(slug=slug, parent=page)\n except Page.DoesNotExist:\n raise Http404\n\n if page is None:\n try:\n page = Page.objects.get(slug='index')\n except Page.DoesNotExist:\n return TemplateView.as_view(\n template_name='wafer/index.html')(request)\n\n if 'edit' in request.GET.keys():\n if not request.user.has_perm('pages.change_page'):\n raise PermissionDenied\n return EditPage.as_view()(request, pk=page.id)\n\n return ShowPage.as_view()(request, pk=page.id)\n", "path": "wafer/pages/views.py"}], "after_files": [{"content": "from django.http import Http404\nfrom django.core.exceptions import PermissionDenied\nfrom django.views.generic import DetailView, TemplateView, UpdateView\n\nfrom wafer.pages.models import Page\nfrom wafer.pages.forms import PageForm\n\n\nclass ShowPage(DetailView):\n template_name = 'wafer.pages/page.html'\n model = Page\n\n\nclass EditPage(UpdateView):\n template_name = 'wafer.pages/page_form.html'\n model = Page\n form_class = PageForm\n\n\ndef slug(request, url):\n \"\"\"Look up a page by url (which is a tree of slugs)\"\"\"\n page = None\n for slug in url.split('/'):\n if not slug:\n continue\n try:\n page = Page.objects.get(slug=slug, parent=page)\n except Page.DoesNotExist:\n raise Http404\n\n if page is None:\n try:\n page = Page.objects.get(slug='index')\n except Page.DoesNotExist:\n return TemplateView.as_view(\n template_name='wafer/index.html')(request)\n\n if 'edit' in request.GET.keys():\n if not request.user.has_perm('pages.change_page'):\n raise PermissionDenied\n return EditPage.as_view()(request, pk=page.id)\n\n return ShowPage.as_view()(request, pk=page.id)\n", "path": "wafer/pages/views.py"}]}
709
89
gh_patches_debug_38612
rasdani/github-patches
git_diff
microsoft__botbuilder-python-903
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- [PORT] Updated MicrosoftGovernmentAppCredentials to support Skills in Azure Gov > Port this change from botbuilder-dotnet/master branch: https://github.com/microsoft/botbuilder-dotnet/pull/3353 Fixes https://github.com/microsoft/botbuilder-dotnet/issues/3233 Added constructor to MicrosoftGovernmentAppCredentials that takes OAuthScope to support skills in gov. Updated BotFrameworkHttpClient and BotFrameworkAdapter to pass the OAuthScope to MicrosoftGovernmentAppCredentials Added SimpleBotToBot functional test for testing. Added Microsoft.Bot.Framework.Skills.sln to load skills test projects. # Changed projects * Microsoft.Bot.Builder * Microsoft.Bot.Connector * integration * Microsoft.Bot.Connector.Tests [Skills] --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `libraries/botframework-connector/botframework/connector/auth/microsoft_government_app_credentials.py` Content: ``` 1 # Copyright (c) Microsoft Corporation. All rights reserved. 2 # Licensed under the MIT License. 3 4 from botframework.connector.auth import MicrosoftAppCredentials, GovernmentConstants 5 6 7 class MicrosoftGovernmentAppCredentials(MicrosoftAppCredentials): 8 """ 9 MicrosoftGovernmentAppCredentials auth implementation. 10 """ 11 12 def __init__( 13 self, 14 app_id: str, 15 app_password: str, 16 channel_auth_tenant: str = None, 17 scope: str = GovernmentConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE, 18 ): 19 super().__init__(app_id, app_password, channel_auth_tenant, scope) 20 self.oauth_endpoint = GovernmentConstants.TO_CHANNEL_FROM_BOT_LOGIN_URL 21 22 @staticmethod 23 def empty(): 24 return MicrosoftGovernmentAppCredentials("", "") 25 ``` Path: `libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py` Content: ``` 1 # Copyright (c) Microsoft Corporation. All rights reserved. 2 # Licensed under the MIT License. 3 # pylint: disable=no-member 4 5 import json 6 from typing import Dict 7 from logging import Logger 8 9 import aiohttp 10 from botbuilder.core import InvokeResponse 11 from botbuilder.core.skills import BotFrameworkClient 12 from botbuilder.schema import ( 13 Activity, 14 ExpectedReplies, 15 ConversationReference, 16 ConversationAccount, 17 ) 18 from botframework.connector.auth import ( 19 ChannelProvider, 20 CredentialProvider, 21 GovernmentConstants, 22 MicrosoftAppCredentials, 23 ) 24 25 26 class BotFrameworkHttpClient(BotFrameworkClient): 27 28 """ 29 A skill host adapter implements API to forward activity to a skill and 30 implements routing ChannelAPI calls from the Skill up through the bot/adapter. 
31 """ 32 33 INVOKE_ACTIVITY_NAME = "SkillEvents.ChannelApiInvoke" 34 _BOT_IDENTITY_KEY = "BotIdentity" 35 _APP_CREDENTIALS_CACHE: Dict[str, MicrosoftAppCredentials] = {} 36 37 def __init__( 38 self, 39 credential_provider: CredentialProvider, 40 channel_provider: ChannelProvider = None, 41 logger: Logger = None, 42 ): 43 if not credential_provider: 44 raise TypeError("credential_provider can't be None") 45 46 self._credential_provider = credential_provider 47 self._channel_provider = channel_provider 48 self._logger = logger 49 self._session = aiohttp.ClientSession() 50 51 async def post_activity( 52 self, 53 from_bot_id: str, 54 to_bot_id: str, 55 to_url: str, 56 service_url: str, 57 conversation_id: str, 58 activity: Activity, 59 ) -> InvokeResponse: 60 app_credentials = await self._get_app_credentials(from_bot_id, to_bot_id) 61 62 if not app_credentials: 63 raise KeyError("Unable to get appCredentials to connect to the skill") 64 65 # Get token for the skill call 66 token = ( 67 app_credentials.get_access_token() 68 if app_credentials.microsoft_app_id 69 else None 70 ) 71 72 # Capture current activity settings before changing them. 73 # TODO: DO we need to set the activity ID? (events that are created manually don't have it). 74 original_conversation_id = activity.conversation.id 75 original_service_url = activity.service_url 76 original_caller_id = activity.caller_id 77 original_relates_to = activity.relates_to 78 79 try: 80 # TODO: The relato has to be ported to the adapter in the new integration library when 81 # resolving conflicts in merge 82 activity.relates_to = ConversationReference( 83 service_url=activity.service_url, 84 activity_id=activity.id, 85 channel_id=activity.channel_id, 86 conversation=ConversationAccount( 87 id=activity.conversation.id, 88 name=activity.conversation.name, 89 conversation_type=activity.conversation.conversation_type, 90 aad_object_id=activity.conversation.aad_object_id, 91 is_group=activity.conversation.is_group, 92 role=activity.conversation.role, 93 tenant_id=activity.conversation.tenant_id, 94 properties=activity.conversation.properties, 95 ), 96 bot=None, 97 ) 98 activity.conversation.id = conversation_id 99 activity.service_url = service_url 100 activity.caller_id = f"urn:botframework:aadappid:{from_bot_id}" 101 102 headers_dict = { 103 "Content-type": "application/json; charset=utf-8", 104 } 105 if token: 106 headers_dict.update( 107 {"Authorization": f"Bearer {token}",} 108 ) 109 110 json_content = json.dumps(activity.serialize()) 111 resp = await self._session.post( 112 to_url, data=json_content.encode("utf-8"), headers=headers_dict, 113 ) 114 resp.raise_for_status() 115 data = (await resp.read()).decode() 116 content = json.loads(data) if data else None 117 118 return InvokeResponse(status=resp.status, body=content) 119 120 finally: 121 # Restore activity properties. 122 activity.conversation.id = original_conversation_id 123 activity.service_url = original_service_url 124 activity.caller_id = original_caller_id 125 activity.relates_to = original_relates_to 126 127 async def post_buffered_activity( 128 self, 129 from_bot_id: str, 130 to_bot_id: str, 131 to_url: str, 132 service_url: str, 133 conversation_id: str, 134 activity: Activity, 135 ) -> [Activity]: 136 """ 137 Helper method to return a list of activities when an Activity is being 138 sent with DeliveryMode == expectReplies. 
139 """ 140 response = await self.post_activity( 141 from_bot_id, to_bot_id, to_url, service_url, conversation_id, activity 142 ) 143 if not response or (response.status / 100) != 2: 144 return [] 145 return ExpectedReplies().deserialize(response.body).activities 146 147 async def _get_app_credentials( 148 self, app_id: str, oauth_scope: str 149 ) -> MicrosoftAppCredentials: 150 if not app_id: 151 return MicrosoftAppCredentials(None, None) 152 153 cache_key = f"{app_id}{oauth_scope}" 154 app_credentials = BotFrameworkHttpClient._APP_CREDENTIALS_CACHE.get(cache_key) 155 156 if app_credentials: 157 return app_credentials 158 159 app_password = await self._credential_provider.get_app_password(app_id) 160 app_credentials = MicrosoftAppCredentials( 161 app_id, app_password, oauth_scope=oauth_scope 162 ) 163 if self._channel_provider and self._channel_provider.is_government(): 164 app_credentials.oauth_endpoint = ( 165 GovernmentConstants.TO_CHANNEL_FROM_BOT_LOGIN_URL 166 ) 167 app_credentials.oauth_scope = ( 168 GovernmentConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE 169 ) 170 171 BotFrameworkHttpClient._APP_CREDENTIALS_CACHE[cache_key] = app_credentials 172 return app_credentials 173 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py b/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py --- a/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py +++ b/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py @@ -18,8 +18,9 @@ from botframework.connector.auth import ( ChannelProvider, CredentialProvider, - GovernmentConstants, MicrosoftAppCredentials, + AppCredentials, + MicrosoftGovernmentAppCredentials, ) @@ -146,27 +147,26 @@ async def _get_app_credentials( self, app_id: str, oauth_scope: str - ) -> MicrosoftAppCredentials: + ) -> AppCredentials: if not app_id: - return MicrosoftAppCredentials(None, None) + return MicrosoftAppCredentials.empty() + # in the cache? cache_key = f"{app_id}{oauth_scope}" app_credentials = BotFrameworkHttpClient._APP_CREDENTIALS_CACHE.get(cache_key) - if app_credentials: return app_credentials + # create a new AppCredentials app_password = await self._credential_provider.get_app_password(app_id) - app_credentials = MicrosoftAppCredentials( - app_id, app_password, oauth_scope=oauth_scope + + app_credentials = ( + MicrosoftGovernmentAppCredentials(app_id, app_password, scope=oauth_scope) + if self._credential_provider and self._channel_provider.is_government() + else MicrosoftAppCredentials(app_id, app_password, oauth_scope=oauth_scope) ) - if self._channel_provider and self._channel_provider.is_government(): - app_credentials.oauth_endpoint = ( - GovernmentConstants.TO_CHANNEL_FROM_BOT_LOGIN_URL - ) - app_credentials.oauth_scope = ( - GovernmentConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE - ) + # put it in the cache BotFrameworkHttpClient._APP_CREDENTIALS_CACHE[cache_key] = app_credentials + return app_credentials diff --git a/libraries/botframework-connector/botframework/connector/auth/microsoft_government_app_credentials.py b/libraries/botframework-connector/botframework/connector/auth/microsoft_government_app_credentials.py --- a/libraries/botframework-connector/botframework/connector/auth/microsoft_government_app_credentials.py +++ b/libraries/botframework-connector/botframework/connector/auth/microsoft_government_app_credentials.py @@ -14,10 +14,13 @@ app_id: str, app_password: str, channel_auth_tenant: str = None, - scope: str = GovernmentConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE, + scope: str = None, ): super().__init__(app_id, app_password, channel_auth_tenant, scope) self.oauth_endpoint = GovernmentConstants.TO_CHANNEL_FROM_BOT_LOGIN_URL + self.oauth_scope = ( + scope if scope else GovernmentConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE + ) @staticmethod def empty():
{"golden_diff": "diff --git a/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py b/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py\n--- a/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py\n+++ b/libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py\n@@ -18,8 +18,9 @@\n from botframework.connector.auth import (\n ChannelProvider,\n CredentialProvider,\n- GovernmentConstants,\n MicrosoftAppCredentials,\n+ AppCredentials,\n+ MicrosoftGovernmentAppCredentials,\n )\n \n \n@@ -146,27 +147,26 @@\n \n async def _get_app_credentials(\n self, app_id: str, oauth_scope: str\n- ) -> MicrosoftAppCredentials:\n+ ) -> AppCredentials:\n if not app_id:\n- return MicrosoftAppCredentials(None, None)\n+ return MicrosoftAppCredentials.empty()\n \n+ # in the cache?\n cache_key = f\"{app_id}{oauth_scope}\"\n app_credentials = BotFrameworkHttpClient._APP_CREDENTIALS_CACHE.get(cache_key)\n-\n if app_credentials:\n return app_credentials\n \n+ # create a new AppCredentials\n app_password = await self._credential_provider.get_app_password(app_id)\n- app_credentials = MicrosoftAppCredentials(\n- app_id, app_password, oauth_scope=oauth_scope\n+\n+ app_credentials = (\n+ MicrosoftGovernmentAppCredentials(app_id, app_password, scope=oauth_scope)\n+ if self._credential_provider and self._channel_provider.is_government()\n+ else MicrosoftAppCredentials(app_id, app_password, oauth_scope=oauth_scope)\n )\n- if self._channel_provider and self._channel_provider.is_government():\n- app_credentials.oauth_endpoint = (\n- GovernmentConstants.TO_CHANNEL_FROM_BOT_LOGIN_URL\n- )\n- app_credentials.oauth_scope = (\n- GovernmentConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE\n- )\n \n+ # put it in the cache\n BotFrameworkHttpClient._APP_CREDENTIALS_CACHE[cache_key] = app_credentials\n+\n return app_credentials\ndiff --git a/libraries/botframework-connector/botframework/connector/auth/microsoft_government_app_credentials.py b/libraries/botframework-connector/botframework/connector/auth/microsoft_government_app_credentials.py\n--- a/libraries/botframework-connector/botframework/connector/auth/microsoft_government_app_credentials.py\n+++ b/libraries/botframework-connector/botframework/connector/auth/microsoft_government_app_credentials.py\n@@ -14,10 +14,13 @@\n app_id: str,\r\n app_password: str,\r\n channel_auth_tenant: str = None,\r\n- scope: str = GovernmentConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE,\r\n+ scope: str = None,\r\n ):\r\n super().__init__(app_id, app_password, channel_auth_tenant, scope)\r\n self.oauth_endpoint = GovernmentConstants.TO_CHANNEL_FROM_BOT_LOGIN_URL\r\n+ self.oauth_scope = (\r\n+ scope if scope else GovernmentConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE\r\n+ )\r\n \r\n @staticmethod\r\n def empty():\n", "issue": "[PORT] Updated MicrosoftGovernmentAppCredentials to support Skills in Azure Gov\n> Port this change from botbuilder-dotnet/master branch:\nhttps://github.com/microsoft/botbuilder-dotnet/pull/3353\n\nFixes https://github.com/microsoft/botbuilder-dotnet/issues/3233\r\n\r\nAdded constructor to MicrosoftGovernmentAppCredentials that takes OAuthScope to support skills in gov.\r\nUpdated BotFrameworkHttpClient and BotFrameworkAdapter to pass the OAuthScope to MicrosoftGovernmentAppCredentials\r\nAdded SimpleBotToBot functional test for testing.\r\nAdded Microsoft.Bot.Framework.Skills.sln to load skills test 
projects.\n\n\r\n# Changed projects\r\n* Microsoft.Bot.Builder\r\n* Microsoft.Bot.Connector\r\n* integration\r\n* Microsoft.Bot.Connector.Tests\r\n\r\n[Skills]\r\n\r\n\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\r\n# Licensed under the MIT License.\r\n\r\nfrom botframework.connector.auth import MicrosoftAppCredentials, GovernmentConstants\r\n\r\n\r\nclass MicrosoftGovernmentAppCredentials(MicrosoftAppCredentials):\r\n \"\"\"\r\n MicrosoftGovernmentAppCredentials auth implementation.\r\n \"\"\"\r\n\r\n def __init__(\r\n self,\r\n app_id: str,\r\n app_password: str,\r\n channel_auth_tenant: str = None,\r\n scope: str = GovernmentConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE,\r\n ):\r\n super().__init__(app_id, app_password, channel_auth_tenant, scope)\r\n self.oauth_endpoint = GovernmentConstants.TO_CHANNEL_FROM_BOT_LOGIN_URL\r\n\r\n @staticmethod\r\n def empty():\r\n return MicrosoftGovernmentAppCredentials(\"\", \"\")\r\n", "path": "libraries/botframework-connector/botframework/connector/auth/microsoft_government_app_credentials.py"}, {"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n# pylint: disable=no-member\n\nimport json\nfrom typing import Dict\nfrom logging import Logger\n\nimport aiohttp\nfrom botbuilder.core import InvokeResponse\nfrom botbuilder.core.skills import BotFrameworkClient\nfrom botbuilder.schema import (\n Activity,\n ExpectedReplies,\n ConversationReference,\n ConversationAccount,\n)\nfrom botframework.connector.auth import (\n ChannelProvider,\n CredentialProvider,\n GovernmentConstants,\n MicrosoftAppCredentials,\n)\n\n\nclass BotFrameworkHttpClient(BotFrameworkClient):\n\n \"\"\"\n A skill host adapter implements API to forward activity to a skill and\n implements routing ChannelAPI calls from the Skill up through the bot/adapter.\n \"\"\"\n\n INVOKE_ACTIVITY_NAME = \"SkillEvents.ChannelApiInvoke\"\n _BOT_IDENTITY_KEY = \"BotIdentity\"\n _APP_CREDENTIALS_CACHE: Dict[str, MicrosoftAppCredentials] = {}\n\n def __init__(\n self,\n credential_provider: CredentialProvider,\n channel_provider: ChannelProvider = None,\n logger: Logger = None,\n ):\n if not credential_provider:\n raise TypeError(\"credential_provider can't be None\")\n\n self._credential_provider = credential_provider\n self._channel_provider = channel_provider\n self._logger = logger\n self._session = aiohttp.ClientSession()\n\n async def post_activity(\n self,\n from_bot_id: str,\n to_bot_id: str,\n to_url: str,\n service_url: str,\n conversation_id: str,\n activity: Activity,\n ) -> InvokeResponse:\n app_credentials = await self._get_app_credentials(from_bot_id, to_bot_id)\n\n if not app_credentials:\n raise KeyError(\"Unable to get appCredentials to connect to the skill\")\n\n # Get token for the skill call\n token = (\n app_credentials.get_access_token()\n if app_credentials.microsoft_app_id\n else None\n )\n\n # Capture current activity settings before changing them.\n # TODO: DO we need to set the activity ID? 
(events that are created manually don't have it).\n original_conversation_id = activity.conversation.id\n original_service_url = activity.service_url\n original_caller_id = activity.caller_id\n original_relates_to = activity.relates_to\n\n try:\n # TODO: The relato has to be ported to the adapter in the new integration library when\n # resolving conflicts in merge\n activity.relates_to = ConversationReference(\n service_url=activity.service_url,\n activity_id=activity.id,\n channel_id=activity.channel_id,\n conversation=ConversationAccount(\n id=activity.conversation.id,\n name=activity.conversation.name,\n conversation_type=activity.conversation.conversation_type,\n aad_object_id=activity.conversation.aad_object_id,\n is_group=activity.conversation.is_group,\n role=activity.conversation.role,\n tenant_id=activity.conversation.tenant_id,\n properties=activity.conversation.properties,\n ),\n bot=None,\n )\n activity.conversation.id = conversation_id\n activity.service_url = service_url\n activity.caller_id = f\"urn:botframework:aadappid:{from_bot_id}\"\n\n headers_dict = {\n \"Content-type\": \"application/json; charset=utf-8\",\n }\n if token:\n headers_dict.update(\n {\"Authorization\": f\"Bearer {token}\",}\n )\n\n json_content = json.dumps(activity.serialize())\n resp = await self._session.post(\n to_url, data=json_content.encode(\"utf-8\"), headers=headers_dict,\n )\n resp.raise_for_status()\n data = (await resp.read()).decode()\n content = json.loads(data) if data else None\n\n return InvokeResponse(status=resp.status, body=content)\n\n finally:\n # Restore activity properties.\n activity.conversation.id = original_conversation_id\n activity.service_url = original_service_url\n activity.caller_id = original_caller_id\n activity.relates_to = original_relates_to\n\n async def post_buffered_activity(\n self,\n from_bot_id: str,\n to_bot_id: str,\n to_url: str,\n service_url: str,\n conversation_id: str,\n activity: Activity,\n ) -> [Activity]:\n \"\"\"\n Helper method to return a list of activities when an Activity is being\n sent with DeliveryMode == expectReplies.\n \"\"\"\n response = await self.post_activity(\n from_bot_id, to_bot_id, to_url, service_url, conversation_id, activity\n )\n if not response or (response.status / 100) != 2:\n return []\n return ExpectedReplies().deserialize(response.body).activities\n\n async def _get_app_credentials(\n self, app_id: str, oauth_scope: str\n ) -> MicrosoftAppCredentials:\n if not app_id:\n return MicrosoftAppCredentials(None, None)\n\n cache_key = f\"{app_id}{oauth_scope}\"\n app_credentials = BotFrameworkHttpClient._APP_CREDENTIALS_CACHE.get(cache_key)\n\n if app_credentials:\n return app_credentials\n\n app_password = await self._credential_provider.get_app_password(app_id)\n app_credentials = MicrosoftAppCredentials(\n app_id, app_password, oauth_scope=oauth_scope\n )\n if self._channel_provider and self._channel_provider.is_government():\n app_credentials.oauth_endpoint = (\n GovernmentConstants.TO_CHANNEL_FROM_BOT_LOGIN_URL\n )\n app_credentials.oauth_scope = (\n GovernmentConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE\n )\n\n BotFrameworkHttpClient._APP_CREDENTIALS_CACHE[cache_key] = app_credentials\n return app_credentials\n", "path": "libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\r\n# Licensed under the MIT License.\r\n\r\nfrom botframework.connector.auth import MicrosoftAppCredentials, GovernmentConstants\r\n\r\n\r\nclass MicrosoftGovernmentAppCredentials(MicrosoftAppCredentials):\r\n \"\"\"\r\n MicrosoftGovernmentAppCredentials auth implementation.\r\n \"\"\"\r\n\r\n def __init__(\r\n self,\r\n app_id: str,\r\n app_password: str,\r\n channel_auth_tenant: str = None,\r\n scope: str = None,\r\n ):\r\n super().__init__(app_id, app_password, channel_auth_tenant, scope)\r\n self.oauth_endpoint = GovernmentConstants.TO_CHANNEL_FROM_BOT_LOGIN_URL\r\n self.oauth_scope = (\r\n scope if scope else GovernmentConstants.TO_CHANNEL_FROM_BOT_OAUTH_SCOPE\r\n )\r\n\r\n @staticmethod\r\n def empty():\r\n return MicrosoftGovernmentAppCredentials(\"\", \"\")\r\n", "path": "libraries/botframework-connector/botframework/connector/auth/microsoft_government_app_credentials.py"}, {"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n# pylint: disable=no-member\n\nimport json\nfrom typing import Dict\nfrom logging import Logger\n\nimport aiohttp\nfrom botbuilder.core import InvokeResponse\nfrom botbuilder.core.skills import BotFrameworkClient\nfrom botbuilder.schema import (\n Activity,\n ExpectedReplies,\n ConversationReference,\n ConversationAccount,\n)\nfrom botframework.connector.auth import (\n ChannelProvider,\n CredentialProvider,\n MicrosoftAppCredentials,\n AppCredentials,\n MicrosoftGovernmentAppCredentials,\n)\n\n\nclass BotFrameworkHttpClient(BotFrameworkClient):\n\n \"\"\"\n A skill host adapter implements API to forward activity to a skill and\n implements routing ChannelAPI calls from the Skill up through the bot/adapter.\n \"\"\"\n\n INVOKE_ACTIVITY_NAME = \"SkillEvents.ChannelApiInvoke\"\n _BOT_IDENTITY_KEY = \"BotIdentity\"\n _APP_CREDENTIALS_CACHE: Dict[str, MicrosoftAppCredentials] = {}\n\n def __init__(\n self,\n credential_provider: CredentialProvider,\n channel_provider: ChannelProvider = None,\n logger: Logger = None,\n ):\n if not credential_provider:\n raise TypeError(\"credential_provider can't be None\")\n\n self._credential_provider = credential_provider\n self._channel_provider = channel_provider\n self._logger = logger\n self._session = aiohttp.ClientSession()\n\n async def post_activity(\n self,\n from_bot_id: str,\n to_bot_id: str,\n to_url: str,\n service_url: str,\n conversation_id: str,\n activity: Activity,\n ) -> InvokeResponse:\n app_credentials = await self._get_app_credentials(from_bot_id, to_bot_id)\n\n if not app_credentials:\n raise KeyError(\"Unable to get appCredentials to connect to the skill\")\n\n # Get token for the skill call\n token = (\n app_credentials.get_access_token()\n if app_credentials.microsoft_app_id\n else None\n )\n\n # Capture current activity settings before changing them.\n # TODO: DO we need to set the activity ID? 
(events that are created manually don't have it).\n original_conversation_id = activity.conversation.id\n original_service_url = activity.service_url\n original_caller_id = activity.caller_id\n original_relates_to = activity.relates_to\n\n try:\n # TODO: The relato has to be ported to the adapter in the new integration library when\n # resolving conflicts in merge\n activity.relates_to = ConversationReference(\n service_url=activity.service_url,\n activity_id=activity.id,\n channel_id=activity.channel_id,\n conversation=ConversationAccount(\n id=activity.conversation.id,\n name=activity.conversation.name,\n conversation_type=activity.conversation.conversation_type,\n aad_object_id=activity.conversation.aad_object_id,\n is_group=activity.conversation.is_group,\n role=activity.conversation.role,\n tenant_id=activity.conversation.tenant_id,\n properties=activity.conversation.properties,\n ),\n bot=None,\n )\n activity.conversation.id = conversation_id\n activity.service_url = service_url\n activity.caller_id = f\"urn:botframework:aadappid:{from_bot_id}\"\n\n headers_dict = {\n \"Content-type\": \"application/json; charset=utf-8\",\n }\n if token:\n headers_dict.update(\n {\"Authorization\": f\"Bearer {token}\",}\n )\n\n json_content = json.dumps(activity.serialize())\n resp = await self._session.post(\n to_url, data=json_content.encode(\"utf-8\"), headers=headers_dict,\n )\n resp.raise_for_status()\n data = (await resp.read()).decode()\n content = json.loads(data) if data else None\n\n return InvokeResponse(status=resp.status, body=content)\n\n finally:\n # Restore activity properties.\n activity.conversation.id = original_conversation_id\n activity.service_url = original_service_url\n activity.caller_id = original_caller_id\n activity.relates_to = original_relates_to\n\n async def post_buffered_activity(\n self,\n from_bot_id: str,\n to_bot_id: str,\n to_url: str,\n service_url: str,\n conversation_id: str,\n activity: Activity,\n ) -> [Activity]:\n \"\"\"\n Helper method to return a list of activities when an Activity is being\n sent with DeliveryMode == expectReplies.\n \"\"\"\n response = await self.post_activity(\n from_bot_id, to_bot_id, to_url, service_url, conversation_id, activity\n )\n if not response or (response.status / 100) != 2:\n return []\n return ExpectedReplies().deserialize(response.body).activities\n\n async def _get_app_credentials(\n self, app_id: str, oauth_scope: str\n ) -> AppCredentials:\n if not app_id:\n return MicrosoftAppCredentials.empty()\n\n # in the cache?\n cache_key = f\"{app_id}{oauth_scope}\"\n app_credentials = BotFrameworkHttpClient._APP_CREDENTIALS_CACHE.get(cache_key)\n if app_credentials:\n return app_credentials\n\n # create a new AppCredentials\n app_password = await self._credential_provider.get_app_password(app_id)\n\n app_credentials = (\n MicrosoftGovernmentAppCredentials(app_id, app_password, scope=oauth_scope)\n if self._credential_provider and self._channel_provider.is_government()\n else MicrosoftAppCredentials(app_id, app_password, oauth_scope=oauth_scope)\n )\n\n # put it in the cache\n BotFrameworkHttpClient._APP_CREDENTIALS_CACHE[cache_key] = app_credentials\n\n return app_credentials\n", "path": "libraries/botbuilder-integration-aiohttp/botbuilder/integration/aiohttp/bot_framework_http_client.py"}]}
2,288
713
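The record that ends here fixes `_get_app_credentials` in the aiohttp Bot Framework client so that government channels receive a `MicrosoftGovernmentAppCredentials` instance instead of a mutated `MicrosoftAppCredentials`. As a reading aid, here is a minimal sketch of that select-and-cache pattern; the two credential classes, `empty()`, and `get_app_password` come from the record's files, but the standalone async function and module-level cache are illustrative assumptions, not the library's actual surface.

```python
from botframework.connector.auth import (
    MicrosoftAppCredentials,
    MicrosoftGovernmentAppCredentials,
)

# Illustrative module-level cache; the real client keeps this as a class attribute.
_APP_CREDENTIALS_CACHE = {}

async def get_app_credentials(app_id, oauth_scope, credential_provider, channel_provider):
    """Sketch of the fixed pattern: pick the credential class by cloud, cache per (app_id, scope)."""
    if not app_id:
        return MicrosoftAppCredentials.empty()

    cache_key = f"{app_id}{oauth_scope}"
    cached = _APP_CREDENTIALS_CACHE.get(cache_key)
    if cached:
        return cached

    app_password = await credential_provider.get_app_password(app_id)
    credentials = (
        MicrosoftGovernmentAppCredentials(app_id, app_password, scope=oauth_scope)
        if channel_provider and channel_provider.is_government()
        else MicrosoftAppCredentials(app_id, app_password, oauth_scope=oauth_scope)
    )
    _APP_CREDENTIALS_CACHE[cache_key] = credentials
    return credentials
```

Keying the cache on both app id and scope matters because the same bot id can need tokens for different scopes, and a cache keyed on app id alone could hand back the wrong credential type.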
gh_patches_debug_19417
rasdani/github-patches
git_diff
projectmesa__mesa-1796
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- JupyterViz: Changing simulation parameters shouldn't automatically start the simulation **Describe the bug** Currently, changing the parameters auto-play the simulation. **Expected behavior** It shouldn't. **To Reproduce** Run any `JupyterViz` simulation. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mesa/experimental/jupyter_viz.py` Content: ``` 1 import threading 2 3 import matplotlib.pyplot as plt 4 import networkx as nx 5 import reacton.ipywidgets as widgets 6 import solara 7 from matplotlib.figure import Figure 8 from matplotlib.ticker import MaxNLocator 9 10 import mesa 11 12 # Avoid interactive backend 13 plt.switch_backend("agg") 14 15 16 @solara.component 17 def JupyterViz( 18 model_class, 19 model_params, 20 measures=None, 21 name="Mesa Model", 22 agent_portrayal=None, 23 space_drawer="default", 24 play_interval=400, 25 ): 26 """Initialize a component to visualize a model. 27 Args: 28 model_class: class of the model to instantiate 29 model_params: parameters for initializing the model 30 measures: list of callables or data attributes to plot 31 name: name for display 32 agent_portrayal: options for rendering agents (dictionary) 33 space_drawer: method to render the agent space for 34 the model; default implementation is :meth:`make_space`; 35 simulations with no space to visualize should 36 specify `space_drawer=False` 37 play_interval: play interval (default: 400) 38 """ 39 40 current_step, set_current_step = solara.use_state(0) 41 42 # 1. Set up model parameters 43 user_params, fixed_params = split_model_params(model_params) 44 model_parameters, set_model_parameters = solara.use_state( 45 {**fixed_params, **{k: v["value"] for k, v in user_params.items()}} 46 ) 47 48 # 2. Set up Model 49 def make_model(): 50 model = model_class(**model_parameters) 51 set_current_step(0) 52 return model 53 54 reset_counter = solara.use_reactive(0) 55 model = solara.use_memo( 56 make_model, dependencies=[*list(model_parameters.values()), reset_counter.value] 57 ) 58 59 def handle_change_model_params(name: str, value: any): 60 set_model_parameters({**model_parameters, name: value}) 61 62 # 3. Set up UI 63 solara.Markdown(name) 64 UserInputs(user_params, on_change=handle_change_model_params) 65 ModelController(model, play_interval, current_step, set_current_step, reset_counter) 66 67 with solara.GridFixed(columns=2): 68 # 4. Space 69 if space_drawer == "default": 70 # draw with the default implementation 71 make_space(model, agent_portrayal) 72 elif space_drawer: 73 # if specified, draw agent space with an alternate renderer 74 space_drawer(model, agent_portrayal) 75 # otherwise, do nothing (do not draw space) 76 77 # 5. 
Plots 78 for measure in measures: 79 if callable(measure): 80 # Is a custom object 81 measure(model) 82 else: 83 make_plot(model, measure) 84 85 86 @solara.component 87 def ModelController( 88 model, play_interval, current_step, set_current_step, reset_counter 89 ): 90 playing = solara.use_reactive(False) 91 thread = solara.use_reactive(None) 92 93 def on_value_play(change): 94 if model.running: 95 do_step() 96 else: 97 playing.value = False 98 99 def do_step(): 100 model.step() 101 set_current_step(model.schedule.steps) 102 103 def do_play(): 104 model.running = True 105 while model.running: 106 do_step() 107 108 def threaded_do_play(): 109 if thread is not None and thread.is_alive(): 110 return 111 thread.value = threading.Thread(target=do_play) 112 thread.start() 113 114 def do_pause(): 115 if (thread is None) or (not thread.is_alive()): 116 return 117 model.running = False 118 thread.join() 119 120 def do_reset(): 121 reset_counter.value += 1 122 123 with solara.Row(): 124 solara.Button(label="Step", color="primary", on_click=do_step) 125 # This style is necessary so that the play widget has almost the same 126 # height as typical Solara buttons. 127 solara.Style( 128 """ 129 .widget-play { 130 height: 30px; 131 } 132 """ 133 ) 134 widgets.Play( 135 value=0, 136 interval=play_interval, 137 repeat=True, 138 show_repeat=False, 139 on_value=on_value_play, 140 playing=playing.value, 141 on_playing=playing.set, 142 ) 143 solara.Button(label="Reset", color="primary", on_click=do_reset) 144 solara.Markdown(md_text=f"**Step:** {current_step}") 145 # threaded_do_play is not used for now because it 146 # doesn't work in Google colab. We use 147 # ipywidgets.Play until it is fixed. The threading 148 # version is definite a much better implementation, 149 # if it works. 150 # solara.Button(label="▶", color="primary", on_click=viz.threaded_do_play) 151 # solara.Button(label="⏸︎", color="primary", on_click=viz.do_pause) 152 # solara.Button(label="Reset", color="primary", on_click=do_reset) 153 154 155 def split_model_params(model_params): 156 model_params_input = {} 157 model_params_fixed = {} 158 for k, v in model_params.items(): 159 if check_param_is_fixed(v): 160 model_params_fixed[k] = v 161 else: 162 model_params_input[k] = v 163 return model_params_input, model_params_fixed 164 165 166 def check_param_is_fixed(param): 167 if not isinstance(param, dict): 168 return True 169 if "type" not in param: 170 return True 171 172 173 @solara.component 174 def UserInputs(user_params, on_change=None): 175 """Initialize user inputs for configurable model parameters. 176 Currently supports :class:`solara.SliderInt`, :class:`solara.SliderFloat`, 177 and :class:`solara.Select`. 178 179 Props: 180 user_params: dictionary with options for the input, including label, 181 min and max values, and other fields specific to the input type. 182 on_change: function to be called with (name, value) when the value of an input changes. 
183 """ 184 185 for name, options in user_params.items(): 186 # label for the input is "label" from options or name 187 label = options.get("label", name) 188 input_type = options.get("type") 189 190 def change_handler(value, name=name): 191 on_change(name, value) 192 193 if input_type == "SliderInt": 194 solara.SliderInt( 195 label, 196 value=options.get("value"), 197 on_value=change_handler, 198 min=options.get("min"), 199 max=options.get("max"), 200 step=options.get("step"), 201 ) 202 elif input_type == "SliderFloat": 203 solara.SliderFloat( 204 label, 205 value=options.get("value"), 206 on_value=change_handler, 207 min=options.get("min"), 208 max=options.get("max"), 209 step=options.get("step"), 210 ) 211 elif input_type == "Select": 212 solara.Select( 213 label, 214 value=options.get("value"), 215 on_value=change_handler, 216 values=options.get("values"), 217 ) 218 else: 219 raise ValueError(f"{input_type} is not a supported input type") 220 221 222 def make_space(model, agent_portrayal): 223 def portray(g): 224 x = [] 225 y = [] 226 s = [] # size 227 c = [] # color 228 for i in range(g.width): 229 for j in range(g.height): 230 content = g._grid[i][j] 231 if not content: 232 continue 233 if not hasattr(content, "__iter__"): 234 # Is a single grid 235 content = [content] 236 for agent in content: 237 data = agent_portrayal(agent) 238 x.append(i) 239 y.append(j) 240 if "size" in data: 241 s.append(data["size"]) 242 if "color" in data: 243 c.append(data["color"]) 244 out = {"x": x, "y": y} 245 if len(s) > 0: 246 out["s"] = s 247 if len(c) > 0: 248 out["c"] = c 249 return out 250 251 space_fig = Figure() 252 space_ax = space_fig.subplots() 253 if isinstance(model.grid, mesa.space.NetworkGrid): 254 _draw_network_grid(model, space_ax, agent_portrayal) 255 else: 256 space_ax.scatter(**portray(model.grid)) 257 space_ax.set_axis_off() 258 solara.FigureMatplotlib(space_fig) 259 260 261 def _draw_network_grid(model, space_ax, agent_portrayal): 262 graph = model.grid.G 263 pos = nx.spring_layout(graph, seed=0) 264 nx.draw( 265 graph, 266 ax=space_ax, 267 pos=pos, 268 **agent_portrayal(graph), 269 ) 270 271 272 def make_plot(model, measure): 273 fig = Figure() 274 ax = fig.subplots() 275 df = model.datacollector.get_model_vars_dataframe() 276 ax.plot(df.loc[:, measure]) 277 ax.set_ylabel(measure) 278 # Set integer x axis 279 ax.xaxis.set_major_locator(MaxNLocator(integer=True)) 280 solara.FigureMatplotlib(fig) 281 282 283 def make_text(renderer): 284 def function(model): 285 solara.Markdown(renderer(model)) 286 287 return function 288 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mesa/experimental/jupyter_viz.py b/mesa/experimental/jupyter_viz.py --- a/mesa/experimental/jupyter_viz.py +++ b/mesa/experimental/jupyter_viz.py @@ -89,15 +89,25 @@ ): playing = solara.use_reactive(False) thread = solara.use_reactive(None) + # We track the previous step to detect if user resets the model via + # clicking the reset button or changing the parameters. If previous_step > + # current_step, it means a model reset happens while the simulation is + # still playing. + previous_step = solara.use_reactive(0) def on_value_play(change): - if model.running: + if previous_step.value > current_step and current_step == 0: + # We add extra checks for current_step == 0, just to be sure. + # We automatically stop the playing if a model is reset. + playing.value = False + elif model.running: do_step() else: playing.value = False def do_step(): model.step() + previous_step.value = current_step set_current_step(model.schedule.steps) def do_play():
{"golden_diff": "diff --git a/mesa/experimental/jupyter_viz.py b/mesa/experimental/jupyter_viz.py\n--- a/mesa/experimental/jupyter_viz.py\n+++ b/mesa/experimental/jupyter_viz.py\n@@ -89,15 +89,25 @@\n ):\n playing = solara.use_reactive(False)\n thread = solara.use_reactive(None)\n+ # We track the previous step to detect if user resets the model via\n+ # clicking the reset button or changing the parameters. If previous_step >\n+ # current_step, it means a model reset happens while the simulation is\n+ # still playing.\n+ previous_step = solara.use_reactive(0)\n \n def on_value_play(change):\n- if model.running:\n+ if previous_step.value > current_step and current_step == 0:\n+ # We add extra checks for current_step == 0, just to be sure.\n+ # We automatically stop the playing if a model is reset.\n+ playing.value = False\n+ elif model.running:\n do_step()\n else:\n playing.value = False\n \n def do_step():\n model.step()\n+ previous_step.value = current_step\n set_current_step(model.schedule.steps)\n \n def do_play():\n", "issue": "JupyterViz: Changing simulation parameters shouldn't automatically start the simulation\n**Describe the bug**\r\nCurrently, changing the parameters auto-play the simulation.\r\n\r\n**Expected behavior**\r\nIt shouldn't.\r\n\r\n**To Reproduce**\r\nRun any `JupyterViz` simulation.\n", "before_files": [{"content": "import threading\n\nimport matplotlib.pyplot as plt\nimport networkx as nx\nimport reacton.ipywidgets as widgets\nimport solara\nfrom matplotlib.figure import Figure\nfrom matplotlib.ticker import MaxNLocator\n\nimport mesa\n\n# Avoid interactive backend\nplt.switch_backend(\"agg\")\n\n\[email protected]\ndef JupyterViz(\n model_class,\n model_params,\n measures=None,\n name=\"Mesa Model\",\n agent_portrayal=None,\n space_drawer=\"default\",\n play_interval=400,\n):\n \"\"\"Initialize a component to visualize a model.\n Args:\n model_class: class of the model to instantiate\n model_params: parameters for initializing the model\n measures: list of callables or data attributes to plot\n name: name for display\n agent_portrayal: options for rendering agents (dictionary)\n space_drawer: method to render the agent space for\n the model; default implementation is :meth:`make_space`;\n simulations with no space to visualize should\n specify `space_drawer=False`\n play_interval: play interval (default: 400)\n \"\"\"\n\n current_step, set_current_step = solara.use_state(0)\n\n # 1. Set up model parameters\n user_params, fixed_params = split_model_params(model_params)\n model_parameters, set_model_parameters = solara.use_state(\n {**fixed_params, **{k: v[\"value\"] for k, v in user_params.items()}}\n )\n\n # 2. Set up Model\n def make_model():\n model = model_class(**model_parameters)\n set_current_step(0)\n return model\n\n reset_counter = solara.use_reactive(0)\n model = solara.use_memo(\n make_model, dependencies=[*list(model_parameters.values()), reset_counter.value]\n )\n\n def handle_change_model_params(name: str, value: any):\n set_model_parameters({**model_parameters, name: value})\n\n # 3. Set up UI\n solara.Markdown(name)\n UserInputs(user_params, on_change=handle_change_model_params)\n ModelController(model, play_interval, current_step, set_current_step, reset_counter)\n\n with solara.GridFixed(columns=2):\n # 4. 
Space\n if space_drawer == \"default\":\n # draw with the default implementation\n make_space(model, agent_portrayal)\n elif space_drawer:\n # if specified, draw agent space with an alternate renderer\n space_drawer(model, agent_portrayal)\n # otherwise, do nothing (do not draw space)\n\n # 5. Plots\n for measure in measures:\n if callable(measure):\n # Is a custom object\n measure(model)\n else:\n make_plot(model, measure)\n\n\[email protected]\ndef ModelController(\n model, play_interval, current_step, set_current_step, reset_counter\n):\n playing = solara.use_reactive(False)\n thread = solara.use_reactive(None)\n\n def on_value_play(change):\n if model.running:\n do_step()\n else:\n playing.value = False\n\n def do_step():\n model.step()\n set_current_step(model.schedule.steps)\n\n def do_play():\n model.running = True\n while model.running:\n do_step()\n\n def threaded_do_play():\n if thread is not None and thread.is_alive():\n return\n thread.value = threading.Thread(target=do_play)\n thread.start()\n\n def do_pause():\n if (thread is None) or (not thread.is_alive()):\n return\n model.running = False\n thread.join()\n\n def do_reset():\n reset_counter.value += 1\n\n with solara.Row():\n solara.Button(label=\"Step\", color=\"primary\", on_click=do_step)\n # This style is necessary so that the play widget has almost the same\n # height as typical Solara buttons.\n solara.Style(\n \"\"\"\n .widget-play {\n height: 30px;\n }\n \"\"\"\n )\n widgets.Play(\n value=0,\n interval=play_interval,\n repeat=True,\n show_repeat=False,\n on_value=on_value_play,\n playing=playing.value,\n on_playing=playing.set,\n )\n solara.Button(label=\"Reset\", color=\"primary\", on_click=do_reset)\n solara.Markdown(md_text=f\"**Step:** {current_step}\")\n # threaded_do_play is not used for now because it\n # doesn't work in Google colab. We use\n # ipywidgets.Play until it is fixed. 
The threading\n # version is definite a much better implementation,\n # if it works.\n # solara.Button(label=\"\u25b6\", color=\"primary\", on_click=viz.threaded_do_play)\n # solara.Button(label=\"\u23f8\ufe0e\", color=\"primary\", on_click=viz.do_pause)\n # solara.Button(label=\"Reset\", color=\"primary\", on_click=do_reset)\n\n\ndef split_model_params(model_params):\n model_params_input = {}\n model_params_fixed = {}\n for k, v in model_params.items():\n if check_param_is_fixed(v):\n model_params_fixed[k] = v\n else:\n model_params_input[k] = v\n return model_params_input, model_params_fixed\n\n\ndef check_param_is_fixed(param):\n if not isinstance(param, dict):\n return True\n if \"type\" not in param:\n return True\n\n\[email protected]\ndef UserInputs(user_params, on_change=None):\n \"\"\"Initialize user inputs for configurable model parameters.\n Currently supports :class:`solara.SliderInt`, :class:`solara.SliderFloat`,\n and :class:`solara.Select`.\n\n Props:\n user_params: dictionary with options for the input, including label,\n min and max values, and other fields specific to the input type.\n on_change: function to be called with (name, value) when the value of an input changes.\n \"\"\"\n\n for name, options in user_params.items():\n # label for the input is \"label\" from options or name\n label = options.get(\"label\", name)\n input_type = options.get(\"type\")\n\n def change_handler(value, name=name):\n on_change(name, value)\n\n if input_type == \"SliderInt\":\n solara.SliderInt(\n label,\n value=options.get(\"value\"),\n on_value=change_handler,\n min=options.get(\"min\"),\n max=options.get(\"max\"),\n step=options.get(\"step\"),\n )\n elif input_type == \"SliderFloat\":\n solara.SliderFloat(\n label,\n value=options.get(\"value\"),\n on_value=change_handler,\n min=options.get(\"min\"),\n max=options.get(\"max\"),\n step=options.get(\"step\"),\n )\n elif input_type == \"Select\":\n solara.Select(\n label,\n value=options.get(\"value\"),\n on_value=change_handler,\n values=options.get(\"values\"),\n )\n else:\n raise ValueError(f\"{input_type} is not a supported input type\")\n\n\ndef make_space(model, agent_portrayal):\n def portray(g):\n x = []\n y = []\n s = [] # size\n c = [] # color\n for i in range(g.width):\n for j in range(g.height):\n content = g._grid[i][j]\n if not content:\n continue\n if not hasattr(content, \"__iter__\"):\n # Is a single grid\n content = [content]\n for agent in content:\n data = agent_portrayal(agent)\n x.append(i)\n y.append(j)\n if \"size\" in data:\n s.append(data[\"size\"])\n if \"color\" in data:\n c.append(data[\"color\"])\n out = {\"x\": x, \"y\": y}\n if len(s) > 0:\n out[\"s\"] = s\n if len(c) > 0:\n out[\"c\"] = c\n return out\n\n space_fig = Figure()\n space_ax = space_fig.subplots()\n if isinstance(model.grid, mesa.space.NetworkGrid):\n _draw_network_grid(model, space_ax, agent_portrayal)\n else:\n space_ax.scatter(**portray(model.grid))\n space_ax.set_axis_off()\n solara.FigureMatplotlib(space_fig)\n\n\ndef _draw_network_grid(model, space_ax, agent_portrayal):\n graph = model.grid.G\n pos = nx.spring_layout(graph, seed=0)\n nx.draw(\n graph,\n ax=space_ax,\n pos=pos,\n **agent_portrayal(graph),\n )\n\n\ndef make_plot(model, measure):\n fig = Figure()\n ax = fig.subplots()\n df = model.datacollector.get_model_vars_dataframe()\n ax.plot(df.loc[:, measure])\n ax.set_ylabel(measure)\n # Set integer x axis\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n solara.FigureMatplotlib(fig)\n\n\ndef make_text(renderer):\n def 
function(model):\n solara.Markdown(renderer(model))\n\n return function\n", "path": "mesa/experimental/jupyter_viz.py"}], "after_files": [{"content": "import threading\n\nimport matplotlib.pyplot as plt\nimport networkx as nx\nimport reacton.ipywidgets as widgets\nimport solara\nfrom matplotlib.figure import Figure\nfrom matplotlib.ticker import MaxNLocator\n\nimport mesa\n\n# Avoid interactive backend\nplt.switch_backend(\"agg\")\n\n\[email protected]\ndef JupyterViz(\n model_class,\n model_params,\n measures=None,\n name=\"Mesa Model\",\n agent_portrayal=None,\n space_drawer=\"default\",\n play_interval=400,\n):\n \"\"\"Initialize a component to visualize a model.\n Args:\n model_class: class of the model to instantiate\n model_params: parameters for initializing the model\n measures: list of callables or data attributes to plot\n name: name for display\n agent_portrayal: options for rendering agents (dictionary)\n space_drawer: method to render the agent space for\n the model; default implementation is :meth:`make_space`;\n simulations with no space to visualize should\n specify `space_drawer=False`\n play_interval: play interval (default: 400)\n \"\"\"\n\n current_step, set_current_step = solara.use_state(0)\n\n # 1. Set up model parameters\n user_params, fixed_params = split_model_params(model_params)\n model_parameters, set_model_parameters = solara.use_state(\n {**fixed_params, **{k: v[\"value\"] for k, v in user_params.items()}}\n )\n\n # 2. Set up Model\n def make_model():\n model = model_class(**model_parameters)\n set_current_step(0)\n return model\n\n reset_counter = solara.use_reactive(0)\n model = solara.use_memo(\n make_model, dependencies=[*list(model_parameters.values()), reset_counter.value]\n )\n\n def handle_change_model_params(name: str, value: any):\n set_model_parameters({**model_parameters, name: value})\n\n # 3. Set up UI\n solara.Markdown(name)\n UserInputs(user_params, on_change=handle_change_model_params)\n ModelController(model, play_interval, current_step, set_current_step, reset_counter)\n\n with solara.GridFixed(columns=2):\n # 4. Space\n if space_drawer == \"default\":\n # draw with the default implementation\n make_space(model, agent_portrayal)\n elif space_drawer:\n # if specified, draw agent space with an alternate renderer\n space_drawer(model, agent_portrayal)\n # otherwise, do nothing (do not draw space)\n\n # 5. Plots\n for measure in measures:\n if callable(measure):\n # Is a custom object\n measure(model)\n else:\n make_plot(model, measure)\n\n\[email protected]\ndef ModelController(\n model, play_interval, current_step, set_current_step, reset_counter\n):\n playing = solara.use_reactive(False)\n thread = solara.use_reactive(None)\n # We track the previous step to detect if user resets the model via\n # clicking the reset button or changing the parameters. 
If previous_step >\n # current_step, it means a model reset happens while the simulation is\n # still playing.\n previous_step = solara.use_reactive(0)\n\n def on_value_play(change):\n if previous_step.value > current_step and current_step == 0:\n # We add extra checks for current_step == 0, just to be sure.\n # We automatically stop the playing if a model is reset.\n playing.value = False\n elif model.running:\n do_step()\n else:\n playing.value = False\n\n def do_step():\n model.step()\n previous_step.value = current_step\n set_current_step(model.schedule.steps)\n\n def do_play():\n model.running = True\n while model.running:\n do_step()\n\n def threaded_do_play():\n if thread is not None and thread.is_alive():\n return\n thread.value = threading.Thread(target=do_play)\n thread.start()\n\n def do_pause():\n if (thread is None) or (not thread.is_alive()):\n return\n model.running = False\n thread.join()\n\n def do_reset():\n reset_counter.value += 1\n\n with solara.Row():\n solara.Button(label=\"Step\", color=\"primary\", on_click=do_step)\n # This style is necessary so that the play widget has almost the same\n # height as typical Solara buttons.\n solara.Style(\n \"\"\"\n .widget-play {\n height: 30px;\n }\n \"\"\"\n )\n widgets.Play(\n value=0,\n interval=play_interval,\n repeat=True,\n show_repeat=False,\n on_value=on_value_play,\n playing=playing.value,\n on_playing=playing.set,\n )\n solara.Button(label=\"Reset\", color=\"primary\", on_click=do_reset)\n solara.Markdown(md_text=f\"**Step:** {current_step}\")\n # threaded_do_play is not used for now because it\n # doesn't work in Google colab. We use\n # ipywidgets.Play until it is fixed. The threading\n # version is definite a much better implementation,\n # if it works.\n # solara.Button(label=\"\u25b6\", color=\"primary\", on_click=viz.threaded_do_play)\n # solara.Button(label=\"\u23f8\ufe0e\", color=\"primary\", on_click=viz.do_pause)\n # solara.Button(label=\"Reset\", color=\"primary\", on_click=do_reset)\n\n\ndef split_model_params(model_params):\n model_params_input = {}\n model_params_fixed = {}\n for k, v in model_params.items():\n if check_param_is_fixed(v):\n model_params_fixed[k] = v\n else:\n model_params_input[k] = v\n return model_params_input, model_params_fixed\n\n\ndef check_param_is_fixed(param):\n if not isinstance(param, dict):\n return True\n if \"type\" not in param:\n return True\n\n\[email protected]\ndef UserInputs(user_params, on_change=None):\n \"\"\"Initialize user inputs for configurable model parameters.\n Currently supports :class:`solara.SliderInt`, :class:`solara.SliderFloat`,\n and :class:`solara.Select`.\n\n Props:\n user_params: dictionary with options for the input, including label,\n min and max values, and other fields specific to the input type.\n on_change: function to be called with (name, value) when the value of an input changes.\n \"\"\"\n\n for name, options in user_params.items():\n # label for the input is \"label\" from options or name\n label = options.get(\"label\", name)\n input_type = options.get(\"type\")\n\n def change_handler(value, name=name):\n on_change(name, value)\n\n if input_type == \"SliderInt\":\n solara.SliderInt(\n label,\n value=options.get(\"value\"),\n on_value=change_handler,\n min=options.get(\"min\"),\n max=options.get(\"max\"),\n step=options.get(\"step\"),\n )\n elif input_type == \"SliderFloat\":\n solara.SliderFloat(\n label,\n value=options.get(\"value\"),\n on_value=change_handler,\n min=options.get(\"min\"),\n max=options.get(\"max\"),\n 
step=options.get(\"step\"),\n )\n elif input_type == \"Select\":\n solara.Select(\n label,\n value=options.get(\"value\"),\n on_value=change_handler,\n values=options.get(\"values\"),\n )\n else:\n raise ValueError(f\"{input_type} is not a supported input type\")\n\n\ndef make_space(model, agent_portrayal):\n def portray(g):\n x = []\n y = []\n s = [] # size\n c = [] # color\n for i in range(g.width):\n for j in range(g.height):\n content = g._grid[i][j]\n if not content:\n continue\n if not hasattr(content, \"__iter__\"):\n # Is a single grid\n content = [content]\n for agent in content:\n data = agent_portrayal(agent)\n x.append(i)\n y.append(j)\n if \"size\" in data:\n s.append(data[\"size\"])\n if \"color\" in data:\n c.append(data[\"color\"])\n out = {\"x\": x, \"y\": y}\n if len(s) > 0:\n out[\"s\"] = s\n if len(c) > 0:\n out[\"c\"] = c\n return out\n\n space_fig = Figure()\n space_ax = space_fig.subplots()\n if isinstance(model.grid, mesa.space.NetworkGrid):\n _draw_network_grid(model, space_ax, agent_portrayal)\n else:\n space_ax.scatter(**portray(model.grid))\n space_ax.set_axis_off()\n solara.FigureMatplotlib(space_fig)\n\n\ndef _draw_network_grid(model, space_ax, agent_portrayal):\n graph = model.grid.G\n pos = nx.spring_layout(graph, seed=0)\n nx.draw(\n graph,\n ax=space_ax,\n pos=pos,\n **agent_portrayal(graph),\n )\n\n\ndef make_plot(model, measure):\n fig = Figure()\n ax = fig.subplots()\n df = model.datacollector.get_model_vars_dataframe()\n ax.plot(df.loc[:, measure])\n ax.set_ylabel(measure)\n # Set integer x axis\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n solara.FigureMatplotlib(fig)\n\n\ndef make_text(renderer):\n def function(model):\n solara.Markdown(renderer(model))\n\n return function\n", "path": "mesa/experimental/jupyter_viz.py"}]}
3,025
273
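The golden diff in the Mesa record above stops the unwanted auto-play by remembering the step count seen on the previous play tick and pausing when the counter has jumped back to zero, i.e. the model was reset by a parameter change or the Reset button. Below is a framework-free sketch of that control logic, assuming only a Mesa-style model exposing `step()`, `running`, and `schedule.steps`; the `PlayController` class is an illustrative stand-in for the solara component, not Mesa's actual API.

```python
class PlayController:
    """Stand-in for the JupyterViz model controller from the diff above."""

    def __init__(self, model):
        self.model = model            # any Mesa-style model: .step(), .running, .schedule.steps
        self.playing = False
        self.previous_step = 0        # step seen on the previous tick; survives a reset
        self.current_step = 0

    def reset(self, new_model):
        # What a parameter change does: fresh model, counter back to 0, previous_step untouched.
        self.model = new_model
        self.current_step = 0

    def do_step(self):
        self.model.step()
        self.previous_step = self.current_step
        self.current_step = self.model.schedule.steps

    def on_play_tick(self):
        # Called once per tick of the play widget.
        if self.previous_step > self.current_step and self.current_step == 0:
            # A reset happened while playing: pause instead of silently restarting.
            self.playing = False
        elif self.model.running:
            self.do_step()
        else:
            self.playing = False
```

The extra `current_step == 0` test mirrors the diff's own comment: the counter only ever moves backwards on a reset, so the zero check is a deliberate belt-and-braces guard rather than a functional requirement.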
gh_patches_debug_42830
rasdani/github-patches
git_diff
horovod__horovod-3148
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Support MPICH when using horovodrun I'm using MPICH instead of OpenMPI. I'm able to train models using mpiexec and mpirun on my cluster but when I try to use horovodrun, it fails with the following error ```python [ec2-user@master ~]$python3 ./horovodrun -np 8 python3 <train command> [[email protected]] match_arg (utils/args/args.c:159): unrecognized argument allow-run-as-root [[email protected]] HYDU_parse_array (utils/args/args.c:174): argument matching returned error [[email protected]] parse_args (ui/mpich/utils.c:1596): error parsing input array [[email protected]] HYD_uii_mpx_get_parameters (ui/mpich/utils.c:1648): unable to parse user arguments [[email protected]] main (ui/mpich/mpiexec.c:153): error parsing parameters ``` Horovod version: 0.18.2 MPICH version: 3.2 From looking at it, it looks like certain args are not available in MPICH. Is horovodrun compatible with MPICH? Support MPICH when using horovodrun I'm using MPICH instead of OpenMPI. I'm able to train models using mpiexec and mpirun on my cluster but when I try to use horovodrun, it fails with the following error ```python [ec2-user@master ~]$python3 ./horovodrun -np 8 python3 <train command> [[email protected]] match_arg (utils/args/args.c:159): unrecognized argument allow-run-as-root [[email protected]] HYDU_parse_array (utils/args/args.c:174): argument matching returned error [[email protected]] parse_args (ui/mpich/utils.c:1596): error parsing input array [[email protected]] HYD_uii_mpx_get_parameters (ui/mpich/utils.c:1648): unable to parse user arguments [[email protected]] main (ui/mpich/mpiexec.c:153): error parsing parameters ``` Horovod version: 0.18.2 MPICH version: 3.2 From looking at it, it looks like certain args are not available in MPICH. Is horovodrun compatible with MPICH? --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `horovod/runner/mpi_run.py` Content: ``` 1 # Copyright 2019 Uber Technologies, Inc. All Rights Reserved. 2 # 3 # Licensed under the Apache License, Version 2.0 (the "License"); 4 # you may not use this file except in compliance with the License. 5 # You may obtain a copy of the License at 6 # 7 # http://www.apache.org/licenses/LICENSE-2.0 8 # 9 # Unless required by applicable law or agreed to in writing, software 10 # distributed under the License is distributed on an "AS IS" BASIS, 11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 # See the License for the specific language governing permissions and 13 # limitations under the License. 
14 # ============================================================================== 15 16 import copy 17 import os 18 import sys 19 20 from shlex import quote 21 22 from horovod.runner.common.util import env as env_util, hosts, safe_shell_exec, tiny_shell_exec 23 24 # MPI implementations 25 _OMPI_IMPL = 'OpenMPI' 26 _SMPI_IMPL = 'SpectrumMPI' 27 _MPICH_IMPL = 'MPICH' 28 _IMPI_IMPL = "IntelMPI" 29 _UNKNOWN_IMPL = 'Unknown' 30 _MISSING_IMPL = 'Missing' 31 32 # Open MPI Flags 33 _OMPI_FLAGS = ['-mca pml ob1', '-mca btl ^openib'] 34 # Spectrum MPI Flags 35 _SMPI_FLAGS = [] 36 _SMPI_FLAGS_TCP = ['-tcp'] 37 # MPICH Flags 38 _MPICH_FLAGS = [] 39 # Intel MPI Flags 40 _IMPI_FLAGS = [] 41 42 # Threshold for large cluster MPI issues: 43 _LARGE_CLUSTER_THRESHOLD = 64 44 # No process binding args 45 _NO_BINDING_ARGS = ['-bind-to none', '-map-by slot'] 46 # Process socket binding args 47 _SOCKET_BINDING_ARGS = ['-bind-to socket', '-map-by socket', '-rank-by core'] 48 49 # MPI not found error message 50 _MPI_NOT_FOUND_ERROR_MSG= ('horovod does not find an installed MPI.\n\n' 51 'Choose one of:\n' 52 '1. Install Open MPI 4.0.0+ or IBM Spectrum MPI or MPICH and re-install Horovod ' 53 '(use --no-cache-dir pip option).\n' 54 '2. Run distributed ' 55 'training script using the standard way provided by your' 56 ' MPI distribution (usually mpirun, srun, or jsrun).\n' 57 '3. Use built-in gloo option (horovodrun --gloo ...).') 58 59 60 def mpi_available(env=None): 61 return _get_mpi_implementation(env) not in {_UNKNOWN_IMPL, _MISSING_IMPL} 62 63 64 def is_open_mpi(env=None): 65 return _get_mpi_implementation(env) == _OMPI_IMPL 66 67 68 def is_spectrum_mpi(env=None): 69 return _get_mpi_implementation(env) == _SMPI_IMPL 70 71 72 def is_mpich(env=None): 73 return _get_mpi_implementation(env) == _MPICH_IMPL 74 75 76 def is_intel_mpi(env=None): 77 return _get_mpi_implementation(env) == _IMPI_IMPL 78 79 80 def _get_mpi_implementation(env=None): 81 """ 82 Detects the available MPI implementation by invoking `mpirun --version`. 83 This command is executed by the given execute function, which takes the 84 command as the only argument and returns (output, exit code). Output 85 represents the stdout and stderr as a string. 86 87 Returns one of: 88 - _OMPI_IMPL, _SMPI_IMPL, _MPICH_IMPL or _IMPI_IMPL for known implementations 89 - _UNKNOWN_IMPL for any unknown implementation 90 - _MISSING_IMPL if `mpirun --version` could not be executed. 
91 92 :param env: environment variable to use to run mpirun 93 :return: string representing identified implementation 94 """ 95 command = 'mpirun --version' 96 res = tiny_shell_exec.execute(command, env) 97 if res is None: 98 return _MISSING_IMPL 99 (output, exit_code) = res 100 101 if exit_code == 0: 102 if 'Open MPI' in output or 'OpenRTE' in output: 103 return _OMPI_IMPL 104 elif 'IBM Spectrum MPI' in output: 105 return _SMPI_IMPL 106 elif 'MPICH' in output: 107 return _MPICH_IMPL 108 elif 'Intel(R) MPI' in output: 109 return _IMPI_IMPL 110 111 print('Unknown MPI implementation given in output of mpirun --version:', file=sys.stderr) 112 print(output, file=sys.stderr) 113 return _UNKNOWN_IMPL 114 else: 115 print('Was unable to run {command}:'.format(command=command), file=sys.stderr) 116 print(output, file=sys.stderr) 117 return _MISSING_IMPL 118 119 120 def _get_mpi_implementation_flags(tcp_flag, env=None): 121 if is_open_mpi(env): 122 return list(_OMPI_FLAGS), list(_NO_BINDING_ARGS), _OMPI_IMPL 123 elif is_spectrum_mpi(env): 124 return (list(_SMPI_FLAGS_TCP) if tcp_flag else list(_SMPI_FLAGS)), list(_SOCKET_BINDING_ARGS), _SMPI_IMPL 125 elif is_mpich(env): 126 return list(_MPICH_FLAGS), list(_NO_BINDING_ARGS), _MPICH_IMPL 127 elif is_intel_mpi(env): 128 return list(_IMPI_FLAGS), [], _IMPI_IMPL 129 else: 130 return None, None, None 131 132 133 def mpi_run(settings, nics, env, command, stdout=None, stderr=None): 134 """ 135 Runs mpi_run. 136 137 Args: 138 settings: Settings for running MPI. 139 Note: settings.num_proc and settings.hosts must not be None. 140 nics: Interfaces to include by MPI. 141 env: Environment dictionary to use for running command. 142 command: Command and arguments to run as a list of string. 143 stdout: Stdout of the mpi process. 144 Only used when settings.run_func_mode is True. 145 stderr: Stderr of the mpi process. 146 Only used when settings.run_func_mode is True. 147 """ 148 if env is not None and not isinstance(env, dict): 149 raise Exception('env argument must be a dict, not {type}: {env}' 150 .format(type=type(env), env=env)) 151 152 mpi_impl_flags, impl_binding_args, mpi = _get_mpi_implementation_flags(settings.tcp_flag, env=env) 153 if mpi_impl_flags is None: 154 raise Exception(_MPI_NOT_FOUND_ERROR_MSG) 155 156 impi = _IMPI_IMPL == mpi 157 158 ssh_args = [] 159 if settings.ssh_port: 160 ssh_args += [f'-p {settings.ssh_port}'] 161 if settings.ssh_identity_file: 162 ssh_args += [f'-i {settings.ssh_identity_file}'] 163 164 mpi_ssh_args = '' 165 if ssh_args: 166 joined_ssh_args = ' '.join(ssh_args) 167 mpi_ssh_args = f'-bootstrap=ssh -bootstrap-exec-args \"{joined_ssh_args}\"' if impi else f'-mca plm_rsh_args \"{joined_ssh_args}\"' 168 169 tcp_intf_arg = '-mca btl_tcp_if_include {nics}'.format( 170 nics=','.join(nics)) if nics and not impi else '' 171 nccl_socket_intf_arg = '-{opt} NCCL_SOCKET_IFNAME={nics}'.format( 172 opt='genv' if impi else 'x', 173 nics=','.join(nics)) if nics else '' 174 175 # On large cluster runs (e.g. Summit), we need extra settings to work around OpenMPI issues 176 host_names, host_to_slots = hosts.parse_hosts_and_slots(settings.hosts) 177 if not impi and host_names and len(host_names) >= _LARGE_CLUSTER_THRESHOLD: 178 mpi_impl_flags.append('-mca plm_rsh_no_tree_spawn true') 179 mpi_impl_flags.append('-mca plm_rsh_num_concurrent {}'.format(len(host_names))) 180 181 # if user does not specify any hosts, mpirun by default uses local host. 182 # There is no need to specify localhost. 
183 hosts_arg = '-{opt} {hosts}'.format(opt='hosts' if impi else 'H', 184 hosts=','.join(host_names) if host_names and impi else settings.hosts) 185 186 ppn_arg = ' ' 187 if host_to_slots and impi: 188 ppn = host_to_slots[host_names[0]] 189 for h_name in host_names[1:]: 190 if ppn != host_to_slots[h_name]: 191 raise Exception('''Different slots in -hosts parameter are not supported in Intel(R) MPI. 192 Use -machinefile <machine_file> for this purpose.''') 193 ppn_arg = ' -ppn {} '.format(ppn) 194 195 if settings.prefix_output_with_timestamp and not impi: 196 mpi_impl_flags.append('--timestamp-output') 197 198 binding_args = settings.binding_args if settings.binding_args and not impi else ' '.join(impl_binding_args) 199 200 basic_args = '-l' if impi else '--allow-run-as-root --tag-output' 201 202 output = [] 203 if settings.output_filename: 204 output.append('-outfile-pattern' if impi else '--output-filename') 205 output.append(settings.output_filename) 206 207 env_list = '' if impi else ' '.join( 208 '-x %s' % key for key in sorted(env.keys()) if env_util.is_exportable(key)) 209 210 # Pass all the env variables to the mpirun command. 211 mpirun_command = ( 212 'mpirun {basic_args} ' 213 '-np {num_proc}{ppn_arg}{hosts_arg} ' 214 '{binding_args} ' 215 '{mpi_args} ' 216 '{mpi_ssh_args} ' 217 '{tcp_intf_arg} ' 218 '{nccl_socket_intf_arg} ' 219 '{output_filename_arg} ' 220 '{env} {extra_mpi_args} {command}' # expect a lot of environment variables 221 .format(basic_args=basic_args, 222 num_proc=settings.num_proc, 223 ppn_arg=ppn_arg, 224 hosts_arg=hosts_arg, 225 binding_args=binding_args, 226 mpi_args=' '.join(mpi_impl_flags), 227 tcp_intf_arg=tcp_intf_arg, 228 nccl_socket_intf_arg=nccl_socket_intf_arg, 229 mpi_ssh_args=mpi_ssh_args, 230 output_filename_arg=' '.join(output), 231 env=env_list, 232 extra_mpi_args=settings.extra_mpi_args if settings.extra_mpi_args else '', 233 command=' '.join(quote(par) for par in command)) 234 ) 235 236 if settings.verbose >= 2: 237 print(mpirun_command) 238 239 # we need the driver's PATH and PYTHONPATH in env to run mpirun, 240 # env for mpirun is different to env encoded in mpirun_command 241 for var in ['PATH', 'PYTHONPATH']: 242 if var not in env and var in os.environ: 243 # copy env so we do not leak env modifications 244 env = copy.copy(env) 245 # copy var over from os.environ 246 env[var] = os.environ[var] 247 248 # Execute the mpirun command. 249 if settings.run_func_mode: 250 exit_code = safe_shell_exec.execute(mpirun_command, env=env, stdout=stdout, stderr=stderr) 251 if exit_code != 0: 252 raise RuntimeError("mpirun failed with exit code {exit_code}".format(exit_code=exit_code)) 253 else: 254 os.execve('/bin/sh', ['/bin/sh', '-c', mpirun_command], env) 255 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/horovod/runner/mpi_run.py b/horovod/runner/mpi_run.py --- a/horovod/runner/mpi_run.py +++ b/horovod/runner/mpi_run.py @@ -153,7 +153,7 @@ if mpi_impl_flags is None: raise Exception(_MPI_NOT_FOUND_ERROR_MSG) - impi = _IMPI_IMPL == mpi + impi_or_mpich = mpi in (_IMPI_IMPL, _MPICH_IMPL) ssh_args = [] if settings.ssh_port: @@ -164,27 +164,27 @@ mpi_ssh_args = '' if ssh_args: joined_ssh_args = ' '.join(ssh_args) - mpi_ssh_args = f'-bootstrap=ssh -bootstrap-exec-args \"{joined_ssh_args}\"' if impi else f'-mca plm_rsh_args \"{joined_ssh_args}\"' + mpi_ssh_args = f'-bootstrap=ssh -bootstrap-exec-args \"{joined_ssh_args}\"' if impi_or_mpich else f'-mca plm_rsh_args \"{joined_ssh_args}\"' tcp_intf_arg = '-mca btl_tcp_if_include {nics}'.format( - nics=','.join(nics)) if nics and not impi else '' + nics=','.join(nics)) if nics and not impi_or_mpich else '' nccl_socket_intf_arg = '-{opt} NCCL_SOCKET_IFNAME={nics}'.format( - opt='genv' if impi else 'x', + opt='genv' if impi_or_mpich else 'x', nics=','.join(nics)) if nics else '' # On large cluster runs (e.g. Summit), we need extra settings to work around OpenMPI issues host_names, host_to_slots = hosts.parse_hosts_and_slots(settings.hosts) - if not impi and host_names and len(host_names) >= _LARGE_CLUSTER_THRESHOLD: + if not impi_or_mpich and host_names and len(host_names) >= _LARGE_CLUSTER_THRESHOLD: mpi_impl_flags.append('-mca plm_rsh_no_tree_spawn true') mpi_impl_flags.append('-mca plm_rsh_num_concurrent {}'.format(len(host_names))) # if user does not specify any hosts, mpirun by default uses local host. # There is no need to specify localhost. - hosts_arg = '-{opt} {hosts}'.format(opt='hosts' if impi else 'H', - hosts=','.join(host_names) if host_names and impi else settings.hosts) + hosts_arg = '-{opt} {hosts}'.format(opt='hosts' if impi_or_mpich else 'H', + hosts=','.join(host_names) if host_names and impi_or_mpich else settings.hosts) ppn_arg = ' ' - if host_to_slots and impi: + if host_to_slots and impi_or_mpich: ppn = host_to_slots[host_names[0]] for h_name in host_names[1:]: if ppn != host_to_slots[h_name]: @@ -192,19 +192,19 @@ Use -machinefile <machine_file> for this purpose.''') ppn_arg = ' -ppn {} '.format(ppn) - if settings.prefix_output_with_timestamp and not impi: + if settings.prefix_output_with_timestamp and not impi_or_mpich: mpi_impl_flags.append('--timestamp-output') - binding_args = settings.binding_args if settings.binding_args and not impi else ' '.join(impl_binding_args) + binding_args = settings.binding_args if settings.binding_args and not impi_or_mpich else ' '.join(impl_binding_args) - basic_args = '-l' if impi else '--allow-run-as-root --tag-output' + basic_args = '-l' if impi_or_mpich else '--allow-run-as-root --tag-output' output = [] if settings.output_filename: - output.append('-outfile-pattern' if impi else '--output-filename') + output.append('-outfile-pattern' if impi_or_mpich else '--output-filename') output.append(settings.output_filename) - env_list = '' if impi else ' '.join( + env_list = '' if impi_or_mpich else ' '.join( '-x %s' % key for key in sorted(env.keys()) if env_util.is_exportable(key)) # Pass all the env variables to the mpirun command.
{"golden_diff": "diff --git a/horovod/runner/mpi_run.py b/horovod/runner/mpi_run.py\n--- a/horovod/runner/mpi_run.py\n+++ b/horovod/runner/mpi_run.py\n@@ -153,7 +153,7 @@\n if mpi_impl_flags is None:\n raise Exception(_MPI_NOT_FOUND_ERROR_MSG)\n \n- impi = _IMPI_IMPL == mpi\n+ impi_or_mpich = mpi in (_IMPI_IMPL, _MPICH_IMPL)\n \n ssh_args = []\n if settings.ssh_port:\n@@ -164,27 +164,27 @@\n mpi_ssh_args = ''\n if ssh_args:\n joined_ssh_args = ' '.join(ssh_args)\n- mpi_ssh_args = f'-bootstrap=ssh -bootstrap-exec-args \\\"{joined_ssh_args}\\\"' if impi else f'-mca plm_rsh_args \\\"{joined_ssh_args}\\\"'\n+ mpi_ssh_args = f'-bootstrap=ssh -bootstrap-exec-args \\\"{joined_ssh_args}\\\"' if impi_or_mpich else f'-mca plm_rsh_args \\\"{joined_ssh_args}\\\"'\n \n tcp_intf_arg = '-mca btl_tcp_if_include {nics}'.format(\n- nics=','.join(nics)) if nics and not impi else ''\n+ nics=','.join(nics)) if nics and not impi_or_mpich else ''\n nccl_socket_intf_arg = '-{opt} NCCL_SOCKET_IFNAME={nics}'.format(\n- opt='genv' if impi else 'x',\n+ opt='genv' if impi_or_mpich else 'x',\n nics=','.join(nics)) if nics else ''\n \n # On large cluster runs (e.g. Summit), we need extra settings to work around OpenMPI issues\n host_names, host_to_slots = hosts.parse_hosts_and_slots(settings.hosts)\n- if not impi and host_names and len(host_names) >= _LARGE_CLUSTER_THRESHOLD:\n+ if not impi_or_mpich and host_names and len(host_names) >= _LARGE_CLUSTER_THRESHOLD:\n mpi_impl_flags.append('-mca plm_rsh_no_tree_spawn true')\n mpi_impl_flags.append('-mca plm_rsh_num_concurrent {}'.format(len(host_names)))\n \n # if user does not specify any hosts, mpirun by default uses local host.\n # There is no need to specify localhost.\n- hosts_arg = '-{opt} {hosts}'.format(opt='hosts' if impi else 'H',\n- hosts=','.join(host_names) if host_names and impi else settings.hosts)\n+ hosts_arg = '-{opt} {hosts}'.format(opt='hosts' if impi_or_mpich else 'H',\n+ hosts=','.join(host_names) if host_names and impi_or_mpich else settings.hosts)\n \n ppn_arg = ' '\n- if host_to_slots and impi:\n+ if host_to_slots and impi_or_mpich:\n ppn = host_to_slots[host_names[0]]\n for h_name in host_names[1:]:\n if ppn != host_to_slots[h_name]:\n@@ -192,19 +192,19 @@\n Use -machinefile <machine_file> for this purpose.''')\n ppn_arg = ' -ppn {} '.format(ppn)\n \n- if settings.prefix_output_with_timestamp and not impi:\n+ if settings.prefix_output_with_timestamp and not impi_or_mpich:\n mpi_impl_flags.append('--timestamp-output')\n \n- binding_args = settings.binding_args if settings.binding_args and not impi else ' '.join(impl_binding_args)\n+ binding_args = settings.binding_args if settings.binding_args and not impi_or_mpich else ' '.join(impl_binding_args)\n \n- basic_args = '-l' if impi else '--allow-run-as-root --tag-output'\n+ basic_args = '-l' if impi_or_mpich else '--allow-run-as-root --tag-output'\n \n output = []\n if settings.output_filename:\n- output.append('-outfile-pattern' if impi else '--output-filename')\n+ output.append('-outfile-pattern' if impi_or_mpich else '--output-filename')\n output.append(settings.output_filename)\n \n- env_list = '' if impi else ' '.join(\n+ env_list = '' if impi_or_mpich else ' '.join(\n '-x %s' % key for key in sorted(env.keys()) if env_util.is_exportable(key))\n \n # Pass all the env variables to the mpirun command.\n", "issue": "Support MPICH when using horovodrun\nI'm using MPICH instead of OpenMPI. 
\r\nI'm able to train models using mpiexec and mpirun on my cluster but when I try to use horovodrun, it fails with the following error\r\n\r\n```python\r\n[ec2-user@master ~]$python3 ./horovodrun -np 8 python3 <train command>\r\n[[email protected]] match_arg (utils/args/args.c:159): unrecognized argument allow-run-as-root\r\n[[email protected]] HYDU_parse_array (utils/args/args.c:174): argument matching returned error\r\n[[email protected]] parse_args (ui/mpich/utils.c:1596): error parsing input array\r\n[[email protected]] HYD_uii_mpx_get_parameters (ui/mpich/utils.c:1648): unable to parse user arguments\r\n[[email protected]] main (ui/mpich/mpiexec.c:153): error parsing parameters\r\n```\r\n\r\nHorovod version: 0.18.2\r\nMPICH version: 3.2\r\n\r\nFrom looking at it, it looks like certain args are not available in MPICH.\r\nIs horovodrun compatible with MPICH?\nSupport MPICH when using horovodrun\nI'm using MPICH instead of OpenMPI. \r\nI'm able to train models using mpiexec and mpirun on my cluster but when I try to use horovodrun, it fails with the following error\r\n\r\n```python\r\n[ec2-user@master ~]$python3 ./horovodrun -np 8 python3 <train command>\r\n[[email protected]] match_arg (utils/args/args.c:159): unrecognized argument allow-run-as-root\r\n[[email protected]] HYDU_parse_array (utils/args/args.c:174): argument matching returned error\r\n[[email protected]] parse_args (ui/mpich/utils.c:1596): error parsing input array\r\n[[email protected]] HYD_uii_mpx_get_parameters (ui/mpich/utils.c:1648): unable to parse user arguments\r\n[[email protected]] main (ui/mpich/mpiexec.c:153): error parsing parameters\r\n```\r\n\r\nHorovod version: 0.18.2\r\nMPICH version: 3.2\r\n\r\nFrom looking at it, it looks like certain args are not available in MPICH.\r\nIs horovodrun compatible with MPICH?\n", "before_files": [{"content": "# Copyright 2019 Uber Technologies, Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport copy\nimport os\nimport sys\n\nfrom shlex import quote\n\nfrom horovod.runner.common.util import env as env_util, hosts, safe_shell_exec, tiny_shell_exec\n\n# MPI implementations\n_OMPI_IMPL = 'OpenMPI'\n_SMPI_IMPL = 'SpectrumMPI'\n_MPICH_IMPL = 'MPICH'\n_IMPI_IMPL = \"IntelMPI\"\n_UNKNOWN_IMPL = 'Unknown'\n_MISSING_IMPL = 'Missing'\n\n# Open MPI Flags\n_OMPI_FLAGS = ['-mca pml ob1', '-mca btl ^openib']\n# Spectrum MPI Flags\n_SMPI_FLAGS = []\n_SMPI_FLAGS_TCP = ['-tcp']\n# MPICH Flags\n_MPICH_FLAGS = []\n# Intel MPI Flags\n_IMPI_FLAGS = []\n\n# Threshold for large cluster MPI issues:\n_LARGE_CLUSTER_THRESHOLD = 64\n# No process binding args\n_NO_BINDING_ARGS = ['-bind-to none', '-map-by slot']\n# Process socket binding args\n_SOCKET_BINDING_ARGS = ['-bind-to socket', '-map-by socket', '-rank-by core']\n\n# MPI not found error message\n_MPI_NOT_FOUND_ERROR_MSG= ('horovod does not find an installed MPI.\\n\\n'\n 'Choose one of:\\n'\n '1. 
Install Open MPI 4.0.0+ or IBM Spectrum MPI or MPICH and re-install Horovod '\n '(use --no-cache-dir pip option).\\n'\n '2. Run distributed '\n 'training script using the standard way provided by your'\n ' MPI distribution (usually mpirun, srun, or jsrun).\\n'\n '3. Use built-in gloo option (horovodrun --gloo ...).')\n\n\ndef mpi_available(env=None):\n return _get_mpi_implementation(env) not in {_UNKNOWN_IMPL, _MISSING_IMPL}\n\n\ndef is_open_mpi(env=None):\n return _get_mpi_implementation(env) == _OMPI_IMPL\n\n\ndef is_spectrum_mpi(env=None):\n return _get_mpi_implementation(env) == _SMPI_IMPL\n\n\ndef is_mpich(env=None):\n return _get_mpi_implementation(env) == _MPICH_IMPL\n\n\ndef is_intel_mpi(env=None):\n return _get_mpi_implementation(env) == _IMPI_IMPL\n\n\ndef _get_mpi_implementation(env=None):\n \"\"\"\n Detects the available MPI implementation by invoking `mpirun --version`.\n This command is executed by the given execute function, which takes the\n command as the only argument and returns (output, exit code). Output\n represents the stdout and stderr as a string.\n\n Returns one of:\n - _OMPI_IMPL, _SMPI_IMPL, _MPICH_IMPL or _IMPI_IMPL for known implementations\n - _UNKNOWN_IMPL for any unknown implementation\n - _MISSING_IMPL if `mpirun --version` could not be executed.\n\n :param env: environment variable to use to run mpirun\n :return: string representing identified implementation\n \"\"\"\n command = 'mpirun --version'\n res = tiny_shell_exec.execute(command, env)\n if res is None:\n return _MISSING_IMPL\n (output, exit_code) = res\n\n if exit_code == 0:\n if 'Open MPI' in output or 'OpenRTE' in output:\n return _OMPI_IMPL\n elif 'IBM Spectrum MPI' in output:\n return _SMPI_IMPL\n elif 'MPICH' in output:\n return _MPICH_IMPL\n elif 'Intel(R) MPI' in output:\n return _IMPI_IMPL\n\n print('Unknown MPI implementation given in output of mpirun --version:', file=sys.stderr)\n print(output, file=sys.stderr)\n return _UNKNOWN_IMPL\n else:\n print('Was unable to run {command}:'.format(command=command), file=sys.stderr)\n print(output, file=sys.stderr)\n return _MISSING_IMPL\n\n\ndef _get_mpi_implementation_flags(tcp_flag, env=None):\n if is_open_mpi(env):\n return list(_OMPI_FLAGS), list(_NO_BINDING_ARGS), _OMPI_IMPL\n elif is_spectrum_mpi(env):\n return (list(_SMPI_FLAGS_TCP) if tcp_flag else list(_SMPI_FLAGS)), list(_SOCKET_BINDING_ARGS), _SMPI_IMPL\n elif is_mpich(env):\n return list(_MPICH_FLAGS), list(_NO_BINDING_ARGS), _MPICH_IMPL\n elif is_intel_mpi(env):\n return list(_IMPI_FLAGS), [], _IMPI_IMPL\n else:\n return None, None, None\n\n\ndef mpi_run(settings, nics, env, command, stdout=None, stderr=None):\n \"\"\"\n Runs mpi_run.\n\n Args:\n settings: Settings for running MPI.\n Note: settings.num_proc and settings.hosts must not be None.\n nics: Interfaces to include by MPI.\n env: Environment dictionary to use for running command.\n command: Command and arguments to run as a list of string.\n stdout: Stdout of the mpi process.\n Only used when settings.run_func_mode is True.\n stderr: Stderr of the mpi process.\n Only used when settings.run_func_mode is True.\n \"\"\"\n if env is not None and not isinstance(env, dict):\n raise Exception('env argument must be a dict, not {type}: {env}'\n .format(type=type(env), env=env))\n\n mpi_impl_flags, impl_binding_args, mpi = _get_mpi_implementation_flags(settings.tcp_flag, env=env)\n if mpi_impl_flags is None:\n raise Exception(_MPI_NOT_FOUND_ERROR_MSG)\n\n impi = _IMPI_IMPL == mpi\n\n ssh_args = []\n if settings.ssh_port:\n ssh_args += 
[f'-p {settings.ssh_port}']\n if settings.ssh_identity_file:\n ssh_args += [f'-i {settings.ssh_identity_file}']\n\n mpi_ssh_args = ''\n if ssh_args:\n joined_ssh_args = ' '.join(ssh_args)\n mpi_ssh_args = f'-bootstrap=ssh -bootstrap-exec-args \\\"{joined_ssh_args}\\\"' if impi else f'-mca plm_rsh_args \\\"{joined_ssh_args}\\\"'\n\n tcp_intf_arg = '-mca btl_tcp_if_include {nics}'.format(\n nics=','.join(nics)) if nics and not impi else ''\n nccl_socket_intf_arg = '-{opt} NCCL_SOCKET_IFNAME={nics}'.format(\n opt='genv' if impi else 'x',\n nics=','.join(nics)) if nics else ''\n\n # On large cluster runs (e.g. Summit), we need extra settings to work around OpenMPI issues\n host_names, host_to_slots = hosts.parse_hosts_and_slots(settings.hosts)\n if not impi and host_names and len(host_names) >= _LARGE_CLUSTER_THRESHOLD:\n mpi_impl_flags.append('-mca plm_rsh_no_tree_spawn true')\n mpi_impl_flags.append('-mca plm_rsh_num_concurrent {}'.format(len(host_names)))\n\n # if user does not specify any hosts, mpirun by default uses local host.\n # There is no need to specify localhost.\n hosts_arg = '-{opt} {hosts}'.format(opt='hosts' if impi else 'H',\n hosts=','.join(host_names) if host_names and impi else settings.hosts)\n\n ppn_arg = ' '\n if host_to_slots and impi:\n ppn = host_to_slots[host_names[0]]\n for h_name in host_names[1:]:\n if ppn != host_to_slots[h_name]:\n raise Exception('''Different slots in -hosts parameter are not supported in Intel(R) MPI.\n Use -machinefile <machine_file> for this purpose.''')\n ppn_arg = ' -ppn {} '.format(ppn)\n\n if settings.prefix_output_with_timestamp and not impi:\n mpi_impl_flags.append('--timestamp-output')\n\n binding_args = settings.binding_args if settings.binding_args and not impi else ' '.join(impl_binding_args)\n\n basic_args = '-l' if impi else '--allow-run-as-root --tag-output'\n\n output = []\n if settings.output_filename:\n output.append('-outfile-pattern' if impi else '--output-filename')\n output.append(settings.output_filename)\n\n env_list = '' if impi else ' '.join(\n '-x %s' % key for key in sorted(env.keys()) if env_util.is_exportable(key))\n\n # Pass all the env variables to the mpirun command.\n mpirun_command = (\n 'mpirun {basic_args} '\n '-np {num_proc}{ppn_arg}{hosts_arg} '\n '{binding_args} '\n '{mpi_args} '\n '{mpi_ssh_args} '\n '{tcp_intf_arg} '\n '{nccl_socket_intf_arg} '\n '{output_filename_arg} '\n '{env} {extra_mpi_args} {command}' # expect a lot of environment variables\n .format(basic_args=basic_args,\n num_proc=settings.num_proc,\n ppn_arg=ppn_arg,\n hosts_arg=hosts_arg,\n binding_args=binding_args,\n mpi_args=' '.join(mpi_impl_flags),\n tcp_intf_arg=tcp_intf_arg,\n nccl_socket_intf_arg=nccl_socket_intf_arg,\n mpi_ssh_args=mpi_ssh_args,\n output_filename_arg=' '.join(output),\n env=env_list,\n extra_mpi_args=settings.extra_mpi_args if settings.extra_mpi_args else '',\n command=' '.join(quote(par) for par in command))\n )\n\n if settings.verbose >= 2:\n print(mpirun_command)\n\n # we need the driver's PATH and PYTHONPATH in env to run mpirun,\n # env for mpirun is different to env encoded in mpirun_command\n for var in ['PATH', 'PYTHONPATH']:\n if var not in env and var in os.environ:\n # copy env so we do not leak env modifications\n env = copy.copy(env)\n # copy var over from os.environ\n env[var] = os.environ[var]\n\n # Execute the mpirun command.\n if settings.run_func_mode:\n exit_code = safe_shell_exec.execute(mpirun_command, env=env, stdout=stdout, stderr=stderr)\n if exit_code != 0:\n raise RuntimeError(\"mpirun 
failed with exit code {exit_code}\".format(exit_code=exit_code))\n else:\n os.execve('/bin/sh', ['/bin/sh', '-c', mpirun_command], env)\n", "path": "horovod/runner/mpi_run.py"}], "after_files": [{"content": "# Copyright 2019 Uber Technologies, Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport copy\nimport os\nimport sys\n\nfrom shlex import quote\n\nfrom horovod.runner.common.util import env as env_util, hosts, safe_shell_exec, tiny_shell_exec\n\n# MPI implementations\n_OMPI_IMPL = 'OpenMPI'\n_SMPI_IMPL = 'SpectrumMPI'\n_MPICH_IMPL = 'MPICH'\n_IMPI_IMPL = \"IntelMPI\"\n_UNKNOWN_IMPL = 'Unknown'\n_MISSING_IMPL = 'Missing'\n\n# Open MPI Flags\n_OMPI_FLAGS = ['-mca pml ob1', '-mca btl ^openib']\n# Spectrum MPI Flags\n_SMPI_FLAGS = []\n_SMPI_FLAGS_TCP = ['-tcp']\n# MPICH Flags\n_MPICH_FLAGS = []\n# Intel MPI Flags\n_IMPI_FLAGS = []\n\n# Threshold for large cluster MPI issues:\n_LARGE_CLUSTER_THRESHOLD = 64\n# No process binding args\n_NO_BINDING_ARGS = ['-bind-to none', '-map-by slot']\n# Process socket binding args\n_SOCKET_BINDING_ARGS = ['-bind-to socket', '-map-by socket', '-rank-by core']\n\n# MPI not found error message\n_MPI_NOT_FOUND_ERROR_MSG= ('horovod does not find an installed MPI.\\n\\n'\n 'Choose one of:\\n'\n '1. Install Open MPI 4.0.0+ or IBM Spectrum MPI or MPICH and re-install Horovod '\n '(use --no-cache-dir pip option).\\n'\n '2. Run distributed '\n 'training script using the standard way provided by your'\n ' MPI distribution (usually mpirun, srun, or jsrun).\\n'\n '3. Use built-in gloo option (horovodrun --gloo ...).')\n\n\ndef mpi_available(env=None):\n return _get_mpi_implementation(env) not in {_UNKNOWN_IMPL, _MISSING_IMPL}\n\n\ndef is_open_mpi(env=None):\n return _get_mpi_implementation(env) == _OMPI_IMPL\n\n\ndef is_spectrum_mpi(env=None):\n return _get_mpi_implementation(env) == _SMPI_IMPL\n\n\ndef is_mpich(env=None):\n return _get_mpi_implementation(env) == _MPICH_IMPL\n\n\ndef is_intel_mpi(env=None):\n return _get_mpi_implementation(env) == _IMPI_IMPL\n\n\ndef _get_mpi_implementation(env=None):\n \"\"\"\n Detects the available MPI implementation by invoking `mpirun --version`.\n This command is executed by the given execute function, which takes the\n command as the only argument and returns (output, exit code). 
Output\n represents the stdout and stderr as a string.\n\n Returns one of:\n - _OMPI_IMPL, _SMPI_IMPL, _MPICH_IMPL or _IMPI_IMPL for known implementations\n - _UNKNOWN_IMPL for any unknown implementation\n - _MISSING_IMPL if `mpirun --version` could not be executed.\n\n :param env: environment variable to use to run mpirun\n :return: string representing identified implementation\n \"\"\"\n command = 'mpirun --version'\n res = tiny_shell_exec.execute(command, env)\n if res is None:\n return _MISSING_IMPL\n (output, exit_code) = res\n\n if exit_code == 0:\n if 'Open MPI' in output or 'OpenRTE' in output:\n return _OMPI_IMPL\n elif 'IBM Spectrum MPI' in output:\n return _SMPI_IMPL\n elif 'MPICH' in output:\n return _MPICH_IMPL\n elif 'Intel(R) MPI' in output:\n return _IMPI_IMPL\n\n print('Unknown MPI implementation given in output of mpirun --version:', file=sys.stderr)\n print(output, file=sys.stderr)\n return _UNKNOWN_IMPL\n else:\n print('Was unable to run {command}:'.format(command=command), file=sys.stderr)\n print(output, file=sys.stderr)\n return _MISSING_IMPL\n\n\ndef _get_mpi_implementation_flags(tcp_flag, env=None):\n if is_open_mpi(env):\n return list(_OMPI_FLAGS), list(_NO_BINDING_ARGS), _OMPI_IMPL\n elif is_spectrum_mpi(env):\n return (list(_SMPI_FLAGS_TCP) if tcp_flag else list(_SMPI_FLAGS)), list(_SOCKET_BINDING_ARGS), _SMPI_IMPL\n elif is_mpich(env):\n return list(_MPICH_FLAGS), list(_NO_BINDING_ARGS), _MPICH_IMPL\n elif is_intel_mpi(env):\n return list(_IMPI_FLAGS), [], _IMPI_IMPL\n else:\n return None, None, None\n\n\ndef mpi_run(settings, nics, env, command, stdout=None, stderr=None):\n \"\"\"\n Runs mpi_run.\n\n Args:\n settings: Settings for running MPI.\n Note: settings.num_proc and settings.hosts must not be None.\n nics: Interfaces to include by MPI.\n env: Environment dictionary to use for running command.\n command: Command and arguments to run as a list of string.\n stdout: Stdout of the mpi process.\n Only used when settings.run_func_mode is True.\n stderr: Stderr of the mpi process.\n Only used when settings.run_func_mode is True.\n \"\"\"\n if env is not None and not isinstance(env, dict):\n raise Exception('env argument must be a dict, not {type}: {env}'\n .format(type=type(env), env=env))\n\n mpi_impl_flags, impl_binding_args, mpi = _get_mpi_implementation_flags(settings.tcp_flag, env=env)\n if mpi_impl_flags is None:\n raise Exception(_MPI_NOT_FOUND_ERROR_MSG)\n\n impi_or_mpich = mpi in (_IMPI_IMPL, _MPICH_IMPL)\n\n ssh_args = []\n if settings.ssh_port:\n ssh_args += [f'-p {settings.ssh_port}']\n if settings.ssh_identity_file:\n ssh_args += [f'-i {settings.ssh_identity_file}']\n\n mpi_ssh_args = ''\n if ssh_args:\n joined_ssh_args = ' '.join(ssh_args)\n mpi_ssh_args = f'-bootstrap=ssh -bootstrap-exec-args \\\"{joined_ssh_args}\\\"' if impi_or_mpich else f'-mca plm_rsh_args \\\"{joined_ssh_args}\\\"'\n\n tcp_intf_arg = '-mca btl_tcp_if_include {nics}'.format(\n nics=','.join(nics)) if nics and not impi_or_mpich else ''\n nccl_socket_intf_arg = '-{opt} NCCL_SOCKET_IFNAME={nics}'.format(\n opt='genv' if impi_or_mpich else 'x',\n nics=','.join(nics)) if nics else ''\n\n # On large cluster runs (e.g. 
Summit), we need extra settings to work around OpenMPI issues\n host_names, host_to_slots = hosts.parse_hosts_and_slots(settings.hosts)\n if not impi_or_mpich and host_names and len(host_names) >= _LARGE_CLUSTER_THRESHOLD:\n mpi_impl_flags.append('-mca plm_rsh_no_tree_spawn true')\n mpi_impl_flags.append('-mca plm_rsh_num_concurrent {}'.format(len(host_names)))\n\n # if user does not specify any hosts, mpirun by default uses local host.\n # There is no need to specify localhost.\n hosts_arg = '-{opt} {hosts}'.format(opt='hosts' if impi_or_mpich else 'H',\n hosts=','.join(host_names) if host_names and impi_or_mpich else settings.hosts)\n\n ppn_arg = ' '\n if host_to_slots and impi_or_mpich:\n ppn = host_to_slots[host_names[0]]\n for h_name in host_names[1:]:\n if ppn != host_to_slots[h_name]:\n raise Exception('''Different slots in -hosts parameter are not supported in Intel(R) MPI.\n Use -machinefile <machine_file> for this purpose.''')\n ppn_arg = ' -ppn {} '.format(ppn)\n\n if settings.prefix_output_with_timestamp and not impi_or_mpich:\n mpi_impl_flags.append('--timestamp-output')\n\n binding_args = settings.binding_args if settings.binding_args and not impi_or_mpich else ' '.join(impl_binding_args)\n\n basic_args = '-l' if impi_or_mpich else '--allow-run-as-root --tag-output'\n\n output = []\n if settings.output_filename:\n output.append('-outfile-pattern' if impi_or_mpich else '--output-filename')\n output.append(settings.output_filename)\n\n env_list = '' if impi_or_mpich else ' '.join(\n '-x %s' % key for key in sorted(env.keys()) if env_util.is_exportable(key))\n\n # Pass all the env variables to the mpirun command.\n mpirun_command = (\n 'mpirun {basic_args} '\n '-np {num_proc}{ppn_arg}{hosts_arg} '\n '{binding_args} '\n '{mpi_args} '\n '{mpi_ssh_args} '\n '{tcp_intf_arg} '\n '{nccl_socket_intf_arg} '\n '{output_filename_arg} '\n '{env} {extra_mpi_args} {command}' # expect a lot of environment variables\n .format(basic_args=basic_args,\n num_proc=settings.num_proc,\n ppn_arg=ppn_arg,\n hosts_arg=hosts_arg,\n binding_args=binding_args,\n mpi_args=' '.join(mpi_impl_flags),\n tcp_intf_arg=tcp_intf_arg,\n nccl_socket_intf_arg=nccl_socket_intf_arg,\n mpi_ssh_args=mpi_ssh_args,\n output_filename_arg=' '.join(output),\n env=env_list,\n extra_mpi_args=settings.extra_mpi_args if settings.extra_mpi_args else '',\n command=' '.join(quote(par) for par in command))\n )\n\n if settings.verbose >= 2:\n print(mpirun_command)\n\n # we need the driver's PATH and PYTHONPATH in env to run mpirun,\n # env for mpirun is different to env encoded in mpirun_command\n for var in ['PATH', 'PYTHONPATH']:\n if var not in env and var in os.environ:\n # copy env so we do not leak env modifications\n env = copy.copy(env)\n # copy var over from os.environ\n env[var] = os.environ[var]\n\n # Execute the mpirun command.\n if settings.run_func_mode:\n exit_code = safe_shell_exec.execute(mpirun_command, env=env, stdout=stdout, stderr=stderr)\n if exit_code != 0:\n raise RuntimeError(\"mpirun failed with exit code {exit_code}\".format(exit_code=exit_code))\n else:\n os.execve('/bin/sh', ['/bin/sh', '-c', mpirun_command], env)\n", "path": "horovod/runner/mpi_run.py"}]}
3,896
1,004
gh_patches_debug_20149
rasdani/github-patches
git_diff
hydroshare__hydroshare-5219
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- User account deletion doesn't remove resources from SOLR index **Description of the bug** Deleting a user in mezzanine also cascades to delete the user's resources. However it looks like the resources are not removed from the SOLR index. They still show up in discover. Steps to reproduce the bug: 1. make a new user account 2. add a resource and make it discoverable 3. login as admin user and delete the account that you created in step 1 (via the mezzanine admin panel) 4. See that the resource listing persists on the Discover search page **Expected behavior** User account deletion should remove the user's resources from the SOLR index **Additional information** HS v 2.9.2 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `hs_core/hydro_realtime_signal_processor.py` Content: ``` 1 import logging 2 3 from django.conf import settings 4 from django.db import models 5 from hs_core.models import Date, BaseResource 6 from hs_access_control.models import ResourceAccess 7 from haystack.exceptions import NotHandled 8 from haystack.signals import BaseSignalProcessor 9 10 logger = logging.getLogger(__name__) 11 12 13 class HydroRealtimeSignalProcessor(BaseSignalProcessor): 14 """ 15 Notes: 16 1. We assume everytime metadata is updated the modified datetime is updated 17 2. ResourceAccess does not update the modified datetime (it is not scientific metadata) 18 """ 19 20 def setup(self): 21 if not getattr(settings, "DISABLE_HAYSTACK", False): 22 models.signals.post_save.connect(self.handle_update, sender=Date) 23 models.signals.post_save.connect(self.handle_access, sender=ResourceAccess) 24 25 def teardown(self): 26 if not getattr(settings, "DISABLE_HAYSTACK", False): 27 models.signals.post_save.disconnect(self.handle_update, sender=Date) 28 models.signals.post_save.disconnect(self.handle_access, sender=ResourceAccess) 29 30 def handle_update(self, sender, instance, **kwargs): 31 try: 32 # resolve the BaseResource corresponding to the metadata element. 33 newbase = instance.metadata.resource 34 index_resource(self, newbase) 35 except Exception as e: 36 logger.exception("{} exception: {}".format(type(instance), str(e))) 37 38 def handle_access(self, sender, instance, **kwargs): 39 try: 40 newbase = instance.resource 41 index_resource(self, newbase) 42 except Exception as e: 43 logger.exception("{} exception: {}".format(type(instance), str(e))) 44 45 46 def index_resource(signal_processor, instance: BaseResource): 47 if hasattr(instance, 'raccess') and hasattr(instance, 'metadata'): 48 # work around for failure of super(BaseResource, instance) to work properly. 49 # this always succeeds because this is a post-save object action. 50 newbase = BaseResource.objects.get(pk=instance.pk) 51 newsender = BaseResource 52 using_backends = signal_processor.connection_router.for_write(instance=newbase) 53 for using in using_backends: 54 # if object is public/discoverable or becoming public/discoverable, index it 55 # test whether the object should be exposed. 
56 if instance.show_in_discover: 57 try: 58 index = signal_processor.connections[using].get_unified_index().get_index(newsender) 59 index.update_object(newbase, using=using) 60 except NotHandled: 61 logger.exception("Failure: changes to %s with short_id %s not added to Solr Index.", 62 str(type(instance)), newbase.short_id) 63 64 # if object is private or becoming private, delete from index 65 else: # not to be shown in discover 66 try: 67 index = signal_processor.connections[using].get_unified_index().get_index(newsender) 68 index.remove_object(newbase, using=using) 69 except NotHandled: 70 logger.exception("Failure: delete of %s with short_id %s failed.", 71 str(type(instance)), newbase.short_id) 72 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/hs_core/hydro_realtime_signal_processor.py b/hs_core/hydro_realtime_signal_processor.py --- a/hs_core/hydro_realtime_signal_processor.py +++ b/hs_core/hydro_realtime_signal_processor.py @@ -21,11 +21,13 @@ if not getattr(settings, "DISABLE_HAYSTACK", False): models.signals.post_save.connect(self.handle_update, sender=Date) models.signals.post_save.connect(self.handle_access, sender=ResourceAccess) + models.signals.post_delete.connect(self.handle_delete, sender=BaseResource) def teardown(self): if not getattr(settings, "DISABLE_HAYSTACK", False): models.signals.post_save.disconnect(self.handle_update, sender=Date) models.signals.post_save.disconnect(self.handle_access, sender=ResourceAccess) + models.signals.post_delete.disconnect(self.handle_delete, sender=BaseResource) def handle_update(self, sender, instance, **kwargs): try:
{"golden_diff": "diff --git a/hs_core/hydro_realtime_signal_processor.py b/hs_core/hydro_realtime_signal_processor.py\n--- a/hs_core/hydro_realtime_signal_processor.py\n+++ b/hs_core/hydro_realtime_signal_processor.py\n@@ -21,11 +21,13 @@\n if not getattr(settings, \"DISABLE_HAYSTACK\", False):\n models.signals.post_save.connect(self.handle_update, sender=Date)\n models.signals.post_save.connect(self.handle_access, sender=ResourceAccess)\n+ models.signals.post_delete.connect(self.handle_delete, sender=BaseResource)\n \n def teardown(self):\n if not getattr(settings, \"DISABLE_HAYSTACK\", False):\n models.signals.post_save.disconnect(self.handle_update, sender=Date)\n models.signals.post_save.disconnect(self.handle_access, sender=ResourceAccess)\n+ models.signals.post_delete.disconnect(self.handle_delete, sender=BaseResource)\n \n def handle_update(self, sender, instance, **kwargs):\n try:\n", "issue": "User account deletion doesn't remove resources from SOLR index\n**Description of the bug**\r\nDeleting a user in mezzanine also cascades to delete the user's resources. However it looks like the resources are not removed from the SOLR index. They still show up in discover.\r\n\r\nSteps to reproduce the bug:\r\n1. make a new user account\r\n2. add a resource and make it discoverable\r\n3. login as admin user and delete the account that you created in step 1 (via the mezzanine admin panel)\r\n4. See that the resource listing persists on the Discover search page\r\n\r\n**Expected behavior**\r\nUser account deletion should remove the user's resources from the SOLR index\r\n\r\n**Additional information**\r\nHS v 2.9.2\r\n\n", "before_files": [{"content": "import logging\n\nfrom django.conf import settings\nfrom django.db import models\nfrom hs_core.models import Date, BaseResource\nfrom hs_access_control.models import ResourceAccess\nfrom haystack.exceptions import NotHandled\nfrom haystack.signals import BaseSignalProcessor\n\nlogger = logging.getLogger(__name__)\n\n\nclass HydroRealtimeSignalProcessor(BaseSignalProcessor):\n \"\"\"\n Notes:\n 1. We assume everytime metadata is updated the modified datetime is updated\n 2. 
ResourceAccess does not update the modified datetime (it is not scientific metadata)\n \"\"\"\n\n def setup(self):\n if not getattr(settings, \"DISABLE_HAYSTACK\", False):\n models.signals.post_save.connect(self.handle_update, sender=Date)\n models.signals.post_save.connect(self.handle_access, sender=ResourceAccess)\n\n def teardown(self):\n if not getattr(settings, \"DISABLE_HAYSTACK\", False):\n models.signals.post_save.disconnect(self.handle_update, sender=Date)\n models.signals.post_save.disconnect(self.handle_access, sender=ResourceAccess)\n\n def handle_update(self, sender, instance, **kwargs):\n try:\n # resolve the BaseResource corresponding to the metadata element.\n newbase = instance.metadata.resource\n index_resource(self, newbase)\n except Exception as e:\n logger.exception(\"{} exception: {}\".format(type(instance), str(e)))\n\n def handle_access(self, sender, instance, **kwargs):\n try:\n newbase = instance.resource\n index_resource(self, newbase)\n except Exception as e:\n logger.exception(\"{} exception: {}\".format(type(instance), str(e)))\n\n\ndef index_resource(signal_processor, instance: BaseResource):\n if hasattr(instance, 'raccess') and hasattr(instance, 'metadata'):\n # work around for failure of super(BaseResource, instance) to work properly.\n # this always succeeds because this is a post-save object action.\n newbase = BaseResource.objects.get(pk=instance.pk)\n newsender = BaseResource\n using_backends = signal_processor.connection_router.for_write(instance=newbase)\n for using in using_backends:\n # if object is public/discoverable or becoming public/discoverable, index it\n # test whether the object should be exposed.\n if instance.show_in_discover:\n try:\n index = signal_processor.connections[using].get_unified_index().get_index(newsender)\n index.update_object(newbase, using=using)\n except NotHandled:\n logger.exception(\"Failure: changes to %s with short_id %s not added to Solr Index.\",\n str(type(instance)), newbase.short_id)\n\n # if object is private or becoming private, delete from index\n else: # not to be shown in discover\n try:\n index = signal_processor.connections[using].get_unified_index().get_index(newsender)\n index.remove_object(newbase, using=using)\n except NotHandled:\n logger.exception(\"Failure: delete of %s with short_id %s failed.\",\n str(type(instance)), newbase.short_id)\n", "path": "hs_core/hydro_realtime_signal_processor.py"}], "after_files": [{"content": "import logging\n\nfrom django.conf import settings\nfrom django.db import models\nfrom hs_core.models import Date, BaseResource\nfrom hs_access_control.models import ResourceAccess\nfrom haystack.exceptions import NotHandled\nfrom haystack.signals import BaseSignalProcessor\n\nlogger = logging.getLogger(__name__)\n\n\nclass HydroRealtimeSignalProcessor(BaseSignalProcessor):\n \"\"\"\n Notes:\n 1. We assume everytime metadata is updated the modified datetime is updated\n 2. 
ResourceAccess does not update the modified datetime (it is not scientific metadata)\n \"\"\"\n\n def setup(self):\n if not getattr(settings, \"DISABLE_HAYSTACK\", False):\n models.signals.post_save.connect(self.handle_update, sender=Date)\n models.signals.post_save.connect(self.handle_access, sender=ResourceAccess)\n models.signals.post_delete.connect(self.handle_delete, sender=BaseResource)\n\n def teardown(self):\n if not getattr(settings, \"DISABLE_HAYSTACK\", False):\n models.signals.post_save.disconnect(self.handle_update, sender=Date)\n models.signals.post_save.disconnect(self.handle_access, sender=ResourceAccess)\n models.signals.post_delete.disconnect(self.handle_delete, sender=BaseResource)\n\n def handle_update(self, sender, instance, **kwargs):\n try:\n # resolve the BaseResource corresponding to the metadata element.\n newbase = instance.metadata.resource\n index_resource(self, newbase)\n except Exception as e:\n logger.exception(\"{} exception: {}\".format(type(instance), str(e)))\n\n def handle_access(self, sender, instance, **kwargs):\n try:\n newbase = instance.resource\n index_resource(self, newbase)\n except Exception as e:\n logger.exception(\"{} exception: {}\".format(type(instance), str(e)))\n\n\ndef index_resource(signal_processor, instance: BaseResource):\n if hasattr(instance, 'raccess') and hasattr(instance, 'metadata'):\n # work around for failure of super(BaseResource, instance) to work properly.\n # this always succeeds because this is a post-save object action.\n newbase = BaseResource.objects.get(pk=instance.pk)\n newsender = BaseResource\n using_backends = signal_processor.connection_router.for_write(instance=newbase)\n for using in using_backends:\n # if object is public/discoverable or becoming public/discoverable, index it\n # test whether the object should be exposed.\n if instance.show_in_discover:\n try:\n index = signal_processor.connections[using].get_unified_index().get_index(newsender)\n index.update_object(newbase, using=using)\n except NotHandled:\n logger.exception(\"Failure: changes to %s with short_id %s not added to Solr Index.\",\n str(type(instance)), newbase.short_id)\n\n # if object is private or becoming private, delete from index\n else: # not to be shown in discover\n try:\n index = signal_processor.connections[using].get_unified_index().get_index(newsender)\n index.remove_object(newbase, using=using)\n except NotHandled:\n logger.exception(\"Failure: delete of %s with short_id %s failed.\",\n str(type(instance)), newbase.short_id)\n", "path": "hs_core/hydro_realtime_signal_processor.py"}]}
1,182
207
gh_patches_debug_17183
rasdani/github-patches
git_diff
mindsdb__mindsdb-957
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Cant get GUI if `storage_dir` is relative path in config If specify in config.json storage_dir as relative path, then `GET /index.html` return 404. With absolute paths all work fine. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `mindsdb/utilities/config.py` Content: ``` 1 import os 2 import json 3 import hashlib 4 import datetime 5 6 7 default_config = { 8 "log": { 9 "level": { 10 "console": "ERROR", 11 "file": "WARNING" 12 } 13 }, 14 "debug": False, 15 "integrations": {}, 16 "api": { 17 "http": { 18 "host": "127.0.0.1", 19 "port": "47334" 20 }, 21 "mysql": { 22 "host": "127.0.0.1", 23 "password": "", 24 "port": "47335", 25 "user": "mindsdb", 26 "database": "mindsdb", 27 "ssl": True 28 }, 29 "mongodb": { 30 "host": "127.0.0.1", 31 "port": "47336", 32 "database": "mindsdb" 33 } 34 } 35 } 36 37 38 class Config(object): 39 current_version = '1.3' 40 _config = {} 41 paths = { 42 'root': '', 43 'datasources': '', 44 'predictors': '', 45 'static': '', 46 'tmp': '', 47 'log': '', 48 'obsolete': { 49 'predictors': '', 50 'datasources': '' 51 } 52 } 53 versions = {} 54 55 def __init__(self, config_path): 56 self._config_path = None 57 self._config_hash = None 58 self._config = None 59 if isinstance(config_path, str): 60 self.config_path = config_path 61 self._read() 62 self._config_hash = self._gen_hash() 63 64 storage_dir = self._config['storage_dir'] 65 if os.path.isabs(storage_dir) is False: 66 storage_dir = os.path.normpath( 67 os.path.join( 68 os.path.dirname(config_path), 69 storage_dir 70 ) 71 ) 72 self.paths['root'] = storage_dir 73 self.paths['datasources'] = os.path.join(storage_dir, 'datasources') 74 self.paths['predictors'] = os.path.join(storage_dir, 'predictors') 75 self.paths['static'] = os.path.join(storage_dir, 'static') 76 self.paths['tmp'] = os.path.join(storage_dir, 'tmp') 77 self.paths['log'] = os.path.join(storage_dir, 'log') 78 self.paths['obsolete']['predictors'] = os.path.join(storage_dir, 'obsolete', 'predictors') 79 self.paths['obsolete']['datasources'] = os.path.join(storage_dir, 'obsolete', 'datasources') 80 81 self._read_versions_file(os.path.join(self.paths['root'], 'versions.json')) 82 else: 83 raise TypeError('Argument must be string representing a file path <Later on to be switched to file path and/or database connection info>') 84 85 def _read_versions_file(self, path): 86 if os.path.isfile(path): 87 with open(path, 'rt') as f: 88 self.versions = json.loads(f.read()) 89 90 def _migrate(self): 91 def m1_0(config): 92 if 'default_clickhouse' in config['integrations'] and 'type' not in config['integrations']['default_clickhouse']: 93 config['integrations']['default_clickhouse']['type'] = 'clickhouse' 94 if 'default_mariadb' in config['integrations'] and 'type' not in config['integrations']['default_mariadb']: 95 config['integrations']['default_mariadb']['type'] = 'mariadb' 96 if 'datasources' in config['api']['mysql']: 97 del config['api']['mysql']['datasources'] 98 config['config_version'] = '1.1' 99 return config 100 101 def m1_1(config): 102 import tempfile 103 import shutil 104 from pathlib import Path 105 106 ds_storage_path = Path(config['interface']['datastore']['storage_dir']) 107 mdb_storage_path = Path(config['interface']['mindsdb_native']['storage_dir']) 108 109 temp_dir_path = tempfile.mkdtemp() 110 111 if ds_storage_path.is_dir(): 112 
shutil.move( 113 str(ds_storage_path), 114 temp_dir_path 115 ) 116 117 ds_storage_path.mkdir(mode=0o777, exist_ok=True, parents=True) 118 119 if Path(temp_dir_path).joinpath('datastore').is_dir(): 120 shutil.move( 121 str(Path(temp_dir_path).joinpath('datastore')), 122 str(ds_storage_path.joinpath('datasources')) 123 ) 124 else: 125 ds_storage_path.joinpath('datasources').mkdir(mode=0o777, exist_ok=True) 126 127 if ds_storage_path == mdb_storage_path: 128 shutil.move( 129 str(Path(temp_dir_path)), 130 str(ds_storage_path.joinpath('predictors')) 131 ) 132 elif mdb_storage_path.is_dir(): 133 shutil.move( 134 str(mdb_storage_path), 135 str(ds_storage_path.joinpath('predictors')) 136 ) 137 else: 138 mdb_storage_path.joinpath('predictors').mkdir(mode=0o777, exist_ok=True) 139 140 ds_storage_path.joinpath('tmp').mkdir(mode=0o777, exist_ok=True) 141 ds_storage_path.joinpath('static').mkdir(mode=0o777, exist_ok=True) 142 143 if Path(temp_dir_path).is_dir(): 144 shutil.rmtree(temp_dir_path) 145 146 config['storage_dir'] = str(ds_storage_path) 147 del config['interface']['datastore']['storage_dir'] 148 del config['interface']['mindsdb_native']['storage_dir'] 149 config['config_version'] = '1.2' 150 return config 151 152 def m1_2(config): 153 ''' remove no longer needed fields 154 ''' 155 try: 156 del config['api']['mysql']['log'] 157 except Exception: 158 pass 159 160 try: 161 del config['interface'] 162 except Exception: 163 pass 164 165 if 'pip_path' in config and config['pip_path'] is None: 166 del config['pip_path'] 167 168 if 'python_interpreter' in config and config['python_interpreter'] is None: 169 del config['python_interpreter'] 170 171 config['config_version'] = '1.3' 172 return config 173 174 migrations = { 175 '1.0': m1_0, 176 '1.1': m1_1, 177 '1.2': m1_2 178 } 179 180 current_version = self._parse_version(self._config['config_version']) 181 target_version = self._parse_version(self.current_version) 182 while current_version < target_version: 183 str_version = '.'.join([str(x) for x in current_version]) 184 self._config = migrations[str_version](self._config) 185 current_version = self._parse_version(self._config['config_version']) 186 187 def _validate(self): 188 integrations = self._config.get('integrations', {}) 189 for key, value in integrations.items(): 190 if not isinstance(value, dict): 191 raise TypeError(f"Config error: integration '{key}' must be a json") 192 if 'type' not in integrations[key]: 193 raise KeyError(f"Config error: for integration '{key}' key 'type' must be specified") 194 195 storage_dir = self._config.get('storage_dir') 196 if storage_dir is None: 197 raise KeyError("'storage_dir' mandatory key in config") 198 199 def _parse_version(self, version): 200 if isinstance(version, str): 201 version = [int(x) for x in version.split('.')] 202 elif isinstance(version, int): 203 version = [version] 204 if len(version) == 1: 205 version.append(0) 206 return version 207 208 def _format(self): 209 ''' changing user input to formalised view 210 ''' 211 for integration in self._config.get('integrations', {}).values(): 212 password = integration.get('password') 213 password = '' if password is None else str(password) 214 integration['password'] = str(password) 215 216 password = self._config['api']['mysql'].get('password') 217 password = '' if password is None else str(password) 218 self._config['api']['mysql']['password'] = str(password) 219 220 def _merge_default_config(self): 221 def merge_key_recursive(target_dict, source_dict, key): 222 if key not in target_dict: 223 
target_dict[key] = source_dict[key] 224 elif isinstance(target_dict[key], dict) and isinstance(source_dict[key], dict): 225 for k in source_dict[key]: 226 merge_key_recursive(target_dict[key], source_dict[key], k) 227 228 for key in default_config: 229 merge_key_recursive(self._config, default_config, key) 230 231 def _read(self): 232 if isinstance(self.config_path, str) and os.path.isfile(self.config_path): 233 with open(self.config_path, 'r') as fp: 234 self._config = json.load(fp) 235 if self._parse_version(self._config['config_version']) < self._parse_version(self.current_version): 236 self._migrate() 237 self._save() 238 self._validate() 239 self._format() 240 self._merge_default_config() 241 else: 242 raise TypeError('`self.config_path` must be a string representing a local file path to a json config') 243 244 def _save(self): 245 with open(self.config_path, 'w') as fp: 246 json.dump(self._config, fp, indent=4, sort_keys=True) 247 248 def _gen_hash(self): 249 with open(self.config_path, 'rb') as fp: 250 return hashlib.md5(fp.read()).hexdigest() 251 252 def _set_updated(self, key): 253 # Only check this for dynamically generated keys, won't be needed once we switch to using a database here 254 if key in ['integrations']: 255 file_hash = self._gen_hash() 256 if file_hash != self._config_hash: 257 self._read() 258 self._config_hash = self._gen_hash() 259 260 def __getitem__(self, key): 261 self._set_updated(key) 262 return self._config[key] 263 264 def get(self, key, default=None): 265 self._set_updated(key) 266 return self._config.get(key, default) 267 268 def get_all(self): 269 return self._config 270 271 def set(self, key_chain, value, delete=False): 272 with open(self.config_path, 'r') as fp: 273 self._config = json.load(fp) 274 275 c = self._config 276 for i, k in enumerate(key_chain): 277 if k in c and i + 1 < len(key_chain): 278 c = c[k] 279 elif k not in c and i + 1 < len(key_chain): 280 c[k] = {} 281 c = c[k] 282 else: 283 if delete: 284 del c[k] 285 else: 286 c[k] = value 287 self._save() 288 289 # Higher level interface 290 def add_db_integration(self, name, dict): 291 dict['date_last_update'] = str(datetime.datetime.now()).split('.')[0] 292 if 'database_name' not in dict: 293 dict['database_name'] = name 294 if 'enabled' not in dict: 295 dict['enabled'] = True 296 297 self.set(['integrations', name], dict) 298 299 def modify_db_integration(self, name, dict): 300 old_dict = self._config['integrations'][name] 301 for k in old_dict: 302 if k not in dict: 303 dict[k] = old_dict[k] 304 305 self.add_db_integration(name, dict) 306 307 def remove_db_integration(self, name): 308 self.set(['integrations', name], None, True) 309 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/mindsdb/utilities/config.py b/mindsdb/utilities/config.py --- a/mindsdb/utilities/config.py +++ b/mindsdb/utilities/config.py @@ -57,7 +57,7 @@ self._config_hash = None self._config = None if isinstance(config_path, str): - self.config_path = config_path + self.config_path = os.path.abspath(config_path) self._read() self._config_hash = self._gen_hash() @@ -65,7 +65,7 @@ if os.path.isabs(storage_dir) is False: storage_dir = os.path.normpath( os.path.join( - os.path.dirname(config_path), + os.path.dirname(self.config_path), storage_dir ) )
{"golden_diff": "diff --git a/mindsdb/utilities/config.py b/mindsdb/utilities/config.py\n--- a/mindsdb/utilities/config.py\n+++ b/mindsdb/utilities/config.py\n@@ -57,7 +57,7 @@\n self._config_hash = None\n self._config = None\n if isinstance(config_path, str):\n- self.config_path = config_path\n+ self.config_path = os.path.abspath(config_path)\n self._read()\n self._config_hash = self._gen_hash()\n \n@@ -65,7 +65,7 @@\n if os.path.isabs(storage_dir) is False:\n storage_dir = os.path.normpath(\n os.path.join(\n- os.path.dirname(config_path),\n+ os.path.dirname(self.config_path),\n storage_dir\n )\n )\n", "issue": "Cant get GUI if `storage_dir` is relative path in config\nIf specify in config.json storage_dir as relative path, then `GET /index.html` return 404. With absolute paths all work fine.\n", "before_files": [{"content": "import os\nimport json\nimport hashlib\nimport datetime\n\n\ndefault_config = {\n \"log\": {\n \"level\": {\n \"console\": \"ERROR\",\n \"file\": \"WARNING\"\n }\n },\n \"debug\": False,\n \"integrations\": {},\n \"api\": {\n \"http\": {\n \"host\": \"127.0.0.1\",\n \"port\": \"47334\"\n },\n \"mysql\": {\n \"host\": \"127.0.0.1\",\n \"password\": \"\",\n \"port\": \"47335\",\n \"user\": \"mindsdb\",\n \"database\": \"mindsdb\",\n \"ssl\": True\n },\n \"mongodb\": {\n \"host\": \"127.0.0.1\",\n \"port\": \"47336\",\n \"database\": \"mindsdb\"\n }\n }\n}\n\n\nclass Config(object):\n current_version = '1.3'\n _config = {}\n paths = {\n 'root': '',\n 'datasources': '',\n 'predictors': '',\n 'static': '',\n 'tmp': '',\n 'log': '',\n 'obsolete': {\n 'predictors': '',\n 'datasources': ''\n }\n }\n versions = {}\n\n def __init__(self, config_path):\n self._config_path = None\n self._config_hash = None\n self._config = None\n if isinstance(config_path, str):\n self.config_path = config_path\n self._read()\n self._config_hash = self._gen_hash()\n\n storage_dir = self._config['storage_dir']\n if os.path.isabs(storage_dir) is False:\n storage_dir = os.path.normpath(\n os.path.join(\n os.path.dirname(config_path),\n storage_dir\n )\n )\n self.paths['root'] = storage_dir\n self.paths['datasources'] = os.path.join(storage_dir, 'datasources')\n self.paths['predictors'] = os.path.join(storage_dir, 'predictors')\n self.paths['static'] = os.path.join(storage_dir, 'static')\n self.paths['tmp'] = os.path.join(storage_dir, 'tmp')\n self.paths['log'] = os.path.join(storage_dir, 'log')\n self.paths['obsolete']['predictors'] = os.path.join(storage_dir, 'obsolete', 'predictors')\n self.paths['obsolete']['datasources'] = os.path.join(storage_dir, 'obsolete', 'datasources')\n\n self._read_versions_file(os.path.join(self.paths['root'], 'versions.json'))\n else:\n raise TypeError('Argument must be string representing a file path <Later on to be switched to file path and/or database connection info>')\n\n def _read_versions_file(self, path):\n if os.path.isfile(path):\n with open(path, 'rt') as f:\n self.versions = json.loads(f.read())\n\n def _migrate(self):\n def m1_0(config):\n if 'default_clickhouse' in config['integrations'] and 'type' not in config['integrations']['default_clickhouse']:\n config['integrations']['default_clickhouse']['type'] = 'clickhouse'\n if 'default_mariadb' in config['integrations'] and 'type' not in config['integrations']['default_mariadb']:\n config['integrations']['default_mariadb']['type'] = 'mariadb'\n if 'datasources' in config['api']['mysql']:\n del config['api']['mysql']['datasources']\n config['config_version'] = '1.1'\n return config\n\n def m1_1(config):\n 
import tempfile\n import shutil\n from pathlib import Path\n\n ds_storage_path = Path(config['interface']['datastore']['storage_dir'])\n mdb_storage_path = Path(config['interface']['mindsdb_native']['storage_dir'])\n\n temp_dir_path = tempfile.mkdtemp()\n\n if ds_storage_path.is_dir():\n shutil.move(\n str(ds_storage_path),\n temp_dir_path\n )\n\n ds_storage_path.mkdir(mode=0o777, exist_ok=True, parents=True)\n\n if Path(temp_dir_path).joinpath('datastore').is_dir():\n shutil.move(\n str(Path(temp_dir_path).joinpath('datastore')),\n str(ds_storage_path.joinpath('datasources'))\n )\n else:\n ds_storage_path.joinpath('datasources').mkdir(mode=0o777, exist_ok=True)\n\n if ds_storage_path == mdb_storage_path:\n shutil.move(\n str(Path(temp_dir_path)),\n str(ds_storage_path.joinpath('predictors'))\n )\n elif mdb_storage_path.is_dir():\n shutil.move(\n str(mdb_storage_path),\n str(ds_storage_path.joinpath('predictors'))\n )\n else:\n mdb_storage_path.joinpath('predictors').mkdir(mode=0o777, exist_ok=True)\n\n ds_storage_path.joinpath('tmp').mkdir(mode=0o777, exist_ok=True)\n ds_storage_path.joinpath('static').mkdir(mode=0o777, exist_ok=True)\n\n if Path(temp_dir_path).is_dir():\n shutil.rmtree(temp_dir_path)\n\n config['storage_dir'] = str(ds_storage_path)\n del config['interface']['datastore']['storage_dir']\n del config['interface']['mindsdb_native']['storage_dir']\n config['config_version'] = '1.2'\n return config\n\n def m1_2(config):\n ''' remove no longer needed fields\n '''\n try:\n del config['api']['mysql']['log']\n except Exception:\n pass\n\n try:\n del config['interface']\n except Exception:\n pass\n\n if 'pip_path' in config and config['pip_path'] is None:\n del config['pip_path']\n\n if 'python_interpreter' in config and config['python_interpreter'] is None:\n del config['python_interpreter']\n\n config['config_version'] = '1.3'\n return config\n\n migrations = {\n '1.0': m1_0,\n '1.1': m1_1,\n '1.2': m1_2\n }\n\n current_version = self._parse_version(self._config['config_version'])\n target_version = self._parse_version(self.current_version)\n while current_version < target_version:\n str_version = '.'.join([str(x) for x in current_version])\n self._config = migrations[str_version](self._config)\n current_version = self._parse_version(self._config['config_version'])\n\n def _validate(self):\n integrations = self._config.get('integrations', {})\n for key, value in integrations.items():\n if not isinstance(value, dict):\n raise TypeError(f\"Config error: integration '{key}' must be a json\")\n if 'type' not in integrations[key]:\n raise KeyError(f\"Config error: for integration '{key}' key 'type' must be specified\")\n\n storage_dir = self._config.get('storage_dir')\n if storage_dir is None:\n raise KeyError(\"'storage_dir' mandatory key in config\")\n\n def _parse_version(self, version):\n if isinstance(version, str):\n version = [int(x) for x in version.split('.')]\n elif isinstance(version, int):\n version = [version]\n if len(version) == 1:\n version.append(0)\n return version\n\n def _format(self):\n ''' changing user input to formalised view\n '''\n for integration in self._config.get('integrations', {}).values():\n password = integration.get('password')\n password = '' if password is None else str(password)\n integration['password'] = str(password)\n\n password = self._config['api']['mysql'].get('password')\n password = '' if password is None else str(password)\n self._config['api']['mysql']['password'] = str(password)\n\n def _merge_default_config(self):\n def 
merge_key_recursive(target_dict, source_dict, key):\n if key not in target_dict:\n target_dict[key] = source_dict[key]\n elif isinstance(target_dict[key], dict) and isinstance(source_dict[key], dict):\n for k in source_dict[key]:\n merge_key_recursive(target_dict[key], source_dict[key], k)\n\n for key in default_config:\n merge_key_recursive(self._config, default_config, key)\n\n def _read(self):\n if isinstance(self.config_path, str) and os.path.isfile(self.config_path):\n with open(self.config_path, 'r') as fp:\n self._config = json.load(fp)\n if self._parse_version(self._config['config_version']) < self._parse_version(self.current_version):\n self._migrate()\n self._save()\n self._validate()\n self._format()\n self._merge_default_config()\n else:\n raise TypeError('`self.config_path` must be a string representing a local file path to a json config')\n\n def _save(self):\n with open(self.config_path, 'w') as fp:\n json.dump(self._config, fp, indent=4, sort_keys=True)\n\n def _gen_hash(self):\n with open(self.config_path, 'rb') as fp:\n return hashlib.md5(fp.read()).hexdigest()\n\n def _set_updated(self, key):\n # Only check this for dynamically generated keys, won't be needed once we switch to using a database here\n if key in ['integrations']:\n file_hash = self._gen_hash()\n if file_hash != self._config_hash:\n self._read()\n self._config_hash = self._gen_hash()\n\n def __getitem__(self, key):\n self._set_updated(key)\n return self._config[key]\n\n def get(self, key, default=None):\n self._set_updated(key)\n return self._config.get(key, default)\n\n def get_all(self):\n return self._config\n\n def set(self, key_chain, value, delete=False):\n with open(self.config_path, 'r') as fp:\n self._config = json.load(fp)\n\n c = self._config\n for i, k in enumerate(key_chain):\n if k in c and i + 1 < len(key_chain):\n c = c[k]\n elif k not in c and i + 1 < len(key_chain):\n c[k] = {}\n c = c[k]\n else:\n if delete:\n del c[k]\n else:\n c[k] = value\n self._save()\n\n # Higher level interface\n def add_db_integration(self, name, dict):\n dict['date_last_update'] = str(datetime.datetime.now()).split('.')[0]\n if 'database_name' not in dict:\n dict['database_name'] = name\n if 'enabled' not in dict:\n dict['enabled'] = True\n\n self.set(['integrations', name], dict)\n\n def modify_db_integration(self, name, dict):\n old_dict = self._config['integrations'][name]\n for k in old_dict:\n if k not in dict:\n dict[k] = old_dict[k]\n\n self.add_db_integration(name, dict)\n\n def remove_db_integration(self, name):\n self.set(['integrations', name], None, True)\n", "path": "mindsdb/utilities/config.py"}], "after_files": [{"content": "import os\nimport json\nimport hashlib\nimport datetime\n\n\ndefault_config = {\n \"log\": {\n \"level\": {\n \"console\": \"ERROR\",\n \"file\": \"WARNING\"\n }\n },\n \"debug\": False,\n \"integrations\": {},\n \"api\": {\n \"http\": {\n \"host\": \"127.0.0.1\",\n \"port\": \"47334\"\n },\n \"mysql\": {\n \"host\": \"127.0.0.1\",\n \"password\": \"\",\n \"port\": \"47335\",\n \"user\": \"mindsdb\",\n \"database\": \"mindsdb\",\n \"ssl\": True\n },\n \"mongodb\": {\n \"host\": \"127.0.0.1\",\n \"port\": \"47336\",\n \"database\": \"mindsdb\"\n }\n }\n}\n\n\nclass Config(object):\n current_version = '1.3'\n _config = {}\n paths = {\n 'root': '',\n 'datasources': '',\n 'predictors': '',\n 'static': '',\n 'tmp': '',\n 'log': '',\n 'obsolete': {\n 'predictors': '',\n 'datasources': ''\n }\n }\n versions = {}\n\n def __init__(self, config_path):\n self._config_path = None\n 
self._config_hash = None\n self._config = None\n if isinstance(config_path, str):\n self.config_path = os.path.abspath(config_path)\n self._read()\n self._config_hash = self._gen_hash()\n\n storage_dir = self._config['storage_dir']\n if os.path.isabs(storage_dir) is False:\n storage_dir = os.path.normpath(\n os.path.join(\n os.path.dirname(self.config_path),\n storage_dir\n )\n )\n self.paths['root'] = storage_dir\n self.paths['datasources'] = os.path.join(storage_dir, 'datasources')\n self.paths['predictors'] = os.path.join(storage_dir, 'predictors')\n self.paths['static'] = os.path.join(storage_dir, 'static')\n self.paths['tmp'] = os.path.join(storage_dir, 'tmp')\n self.paths['log'] = os.path.join(storage_dir, 'log')\n self.paths['obsolete']['predictors'] = os.path.join(storage_dir, 'obsolete', 'predictors')\n self.paths['obsolete']['datasources'] = os.path.join(storage_dir, 'obsolete', 'datasources')\n\n self._read_versions_file(os.path.join(self.paths['root'], 'versions.json'))\n else:\n raise TypeError('Argument must be string representing a file path <Later on to be switched to file path and/or database connection info>')\n\n def _read_versions_file(self, path):\n if os.path.isfile(path):\n with open(path, 'rt') as f:\n self.versions = json.loads(f.read())\n\n def _migrate(self):\n def m1_0(config):\n if 'default_clickhouse' in config['integrations'] and 'type' not in config['integrations']['default_clickhouse']:\n config['integrations']['default_clickhouse']['type'] = 'clickhouse'\n if 'default_mariadb' in config['integrations'] and 'type' not in config['integrations']['default_mariadb']:\n config['integrations']['default_mariadb']['type'] = 'mariadb'\n if 'datasources' in config['api']['mysql']:\n del config['api']['mysql']['datasources']\n config['config_version'] = '1.1'\n return config\n\n def m1_1(config):\n import tempfile\n import shutil\n from pathlib import Path\n\n ds_storage_path = Path(config['interface']['datastore']['storage_dir'])\n mdb_storage_path = Path(config['interface']['mindsdb_native']['storage_dir'])\n\n temp_dir_path = tempfile.mkdtemp()\n\n if ds_storage_path.is_dir():\n shutil.move(\n str(ds_storage_path),\n temp_dir_path\n )\n\n ds_storage_path.mkdir(mode=0o777, exist_ok=True, parents=True)\n\n if Path(temp_dir_path).joinpath('datastore').is_dir():\n shutil.move(\n str(Path(temp_dir_path).joinpath('datastore')),\n str(ds_storage_path.joinpath('datasources'))\n )\n else:\n ds_storage_path.joinpath('datasources').mkdir(mode=0o777, exist_ok=True)\n\n if ds_storage_path == mdb_storage_path:\n shutil.move(\n str(Path(temp_dir_path)),\n str(ds_storage_path.joinpath('predictors'))\n )\n elif mdb_storage_path.is_dir():\n shutil.move(\n str(mdb_storage_path),\n str(ds_storage_path.joinpath('predictors'))\n )\n else:\n mdb_storage_path.joinpath('predictors').mkdir(mode=0o777, exist_ok=True)\n\n ds_storage_path.joinpath('tmp').mkdir(mode=0o777, exist_ok=True)\n ds_storage_path.joinpath('static').mkdir(mode=0o777, exist_ok=True)\n\n if Path(temp_dir_path).is_dir():\n shutil.rmtree(temp_dir_path)\n\n config['storage_dir'] = str(ds_storage_path)\n del config['interface']['datastore']['storage_dir']\n del config['interface']['mindsdb_native']['storage_dir']\n config['config_version'] = '1.2'\n return config\n\n def m1_2(config):\n ''' remove no longer needed fields\n '''\n try:\n del config['api']['mysql']['log']\n except Exception:\n pass\n\n try:\n del config['interface']\n except Exception:\n pass\n\n if 'pip_path' in config and config['pip_path'] is None:\n del 
config['pip_path']\n\n if 'python_interpreter' in config and config['python_interpreter'] is None:\n del config['python_interpreter']\n\n config['config_version'] = '1.3'\n return config\n\n migrations = {\n '1.0': m1_0,\n '1.1': m1_1,\n '1.2': m1_2\n }\n\n current_version = self._parse_version(self._config['config_version'])\n target_version = self._parse_version(self.current_version)\n while current_version < target_version:\n str_version = '.'.join([str(x) for x in current_version])\n self._config = migrations[str_version](self._config)\n current_version = self._parse_version(self._config['config_version'])\n\n def _validate(self):\n integrations = self._config.get('integrations', {})\n for key, value in integrations.items():\n if not isinstance(value, dict):\n raise TypeError(f\"Config error: integration '{key}' must be a json\")\n if 'type' not in integrations[key]:\n raise KeyError(f\"Config error: for integration '{key}' key 'type' must be specified\")\n\n storage_dir = self._config.get('storage_dir')\n if storage_dir is None:\n raise KeyError(\"'storage_dir' mandatory key in config\")\n\n def _parse_version(self, version):\n if isinstance(version, str):\n version = [int(x) for x in version.split('.')]\n elif isinstance(version, int):\n version = [version]\n if len(version) == 1:\n version.append(0)\n return version\n\n def _format(self):\n ''' changing user input to formalised view\n '''\n for integration in self._config.get('integrations', {}).values():\n password = integration.get('password')\n password = '' if password is None else str(password)\n integration['password'] = str(password)\n\n password = self._config['api']['mysql'].get('password')\n password = '' if password is None else str(password)\n self._config['api']['mysql']['password'] = str(password)\n\n def _merge_default_config(self):\n def merge_key_recursive(target_dict, source_dict, key):\n if key not in target_dict:\n target_dict[key] = source_dict[key]\n elif isinstance(target_dict[key], dict) and isinstance(source_dict[key], dict):\n for k in source_dict[key]:\n merge_key_recursive(target_dict[key], source_dict[key], k)\n\n for key in default_config:\n merge_key_recursive(self._config, default_config, key)\n\n def _read(self):\n if isinstance(self.config_path, str) and os.path.isfile(self.config_path):\n with open(self.config_path, 'r') as fp:\n self._config = json.load(fp)\n if self._parse_version(self._config['config_version']) < self._parse_version(self.current_version):\n self._migrate()\n self._save()\n self._validate()\n self._format()\n self._merge_default_config()\n else:\n raise TypeError('`self.config_path` must be a string representing a local file path to a json config')\n\n def _save(self):\n with open(self.config_path, 'w') as fp:\n json.dump(self._config, fp, indent=4, sort_keys=True)\n\n def _gen_hash(self):\n with open(self.config_path, 'rb') as fp:\n return hashlib.md5(fp.read()).hexdigest()\n\n def _set_updated(self, key):\n # Only check this for dynamically generated keys, won't be needed once we switch to using a database here\n if key in ['integrations']:\n file_hash = self._gen_hash()\n if file_hash != self._config_hash:\n self._read()\n self._config_hash = self._gen_hash()\n\n def __getitem__(self, key):\n self._set_updated(key)\n return self._config[key]\n\n def get(self, key, default=None):\n self._set_updated(key)\n return self._config.get(key, default)\n\n def get_all(self):\n return self._config\n\n def set(self, key_chain, value, delete=False):\n with open(self.config_path, 'r') as 
fp:\n self._config = json.load(fp)\n\n c = self._config\n for i, k in enumerate(key_chain):\n if k in c and i + 1 < len(key_chain):\n c = c[k]\n elif k not in c and i + 1 < len(key_chain):\n c[k] = {}\n c = c[k]\n else:\n if delete:\n del c[k]\n else:\n c[k] = value\n self._save()\n\n # Higher level interface\n def add_db_integration(self, name, dict):\n dict['date_last_update'] = str(datetime.datetime.now()).split('.')[0]\n if 'database_name' not in dict:\n dict['database_name'] = name\n if 'enabled' not in dict:\n dict['enabled'] = True\n\n self.set(['integrations', name], dict)\n\n def modify_db_integration(self, name, dict):\n old_dict = self._config['integrations'][name]\n for k in old_dict:\n if k not in dict:\n dict[k] = old_dict[k]\n\n self.add_db_integration(name, dict)\n\n def remove_db_integration(self, name):\n self.set(['integrations', name], None, True)\n", "path": "mindsdb/utilities/config.py"}]}
3,567
176
gh_patches_debug_8403
rasdani/github-patches
git_diff
pypa__pip-10507
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- 21.3 regression with legacy resolver Assuming the following project with an empty `pyproject.toml` and the following `setup.cfg`: ```ini [metadata] name = pkgb version = 1.0 [options] install_requires = wrapt ``` We get the following stack trace, using pip main branch, today: ```console $ pip install --use-deprecated=legacy-resolver -e ./pkgb Obtaining file:///home/me/tmp/brol/pkgb Installing build dependencies ... done Getting requirements to build wheel ... done Preparing wheel metadata ... done Requirement already satisfied: wrapt in /home/me/.virtualenvs/tempenv-49ea1126817e6/lib/python3.8/site-packages (from pkgb==1.0) (1.12.1) ERROR: Exception: Traceback (most recent call last): File "/home/me/pip/src/pip/_internal/cli/base_command.py", line 179, in exc_logging_wrapper status = run_func(*args) File "/home/me/pip/src/pip/_internal/cli/req_command.py", line 203, in wrapper return func(self, options, args) File "/home/me/pip/src/pip/_internal/commands/install.py", line 334, in run requirement_set = resolver.resolve( File "/home/me/pip/src/pip/_internal/resolution/legacy/resolver.py", line 181, in resolve discovered_reqs.extend(self._resolve_one(requirement_set, req)) File "/home/me/pip/src/pip/_internal/resolution/legacy/resolver.py", line 382, in _resolve_one _check_dist_requires_python( File "/home/me/pip/src/pip/_internal/resolution/legacy/resolver.py", line 75, in _check_dist_requires_python requires_python = str(dist.requires_python) File "/home/me/pip/src/pip/_vendor/pkg_resources/__init__.py", line 2816, in __getattr__ return getattr(self._provider, attr) AttributeError: 'PathMetadata' object has no attribute 'requires_python' ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/pip/_internal/distributions/installed.py` Content: ``` 1 from pip._internal.distributions.base import AbstractDistribution 2 from pip._internal.index.package_finder import PackageFinder 3 from pip._internal.metadata import BaseDistribution 4 5 6 class InstalledDistribution(AbstractDistribution): 7 """Represents an installed package. 8 9 This does not need any preparation as the required information has already 10 been computed. 11 """ 12 13 def get_metadata_distribution(self) -> BaseDistribution: 14 assert self.req.satisfied_by is not None, "not actually installed" 15 return self.req.satisfied_by 16 17 def prepare_distribution_metadata( 18 self, finder: PackageFinder, build_isolation: bool 19 ) -> None: 20 pass 21 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/src/pip/_internal/distributions/installed.py b/src/pip/_internal/distributions/installed.py --- a/src/pip/_internal/distributions/installed.py +++ b/src/pip/_internal/distributions/installed.py @@ -11,8 +11,10 @@ """ def get_metadata_distribution(self) -> BaseDistribution: + from pip._internal.metadata.pkg_resources import Distribution as _Dist + assert self.req.satisfied_by is not None, "not actually installed" - return self.req.satisfied_by + return _Dist(self.req.satisfied_by) def prepare_distribution_metadata( self, finder: PackageFinder, build_isolation: bool
{"golden_diff": "diff --git a/src/pip/_internal/distributions/installed.py b/src/pip/_internal/distributions/installed.py\n--- a/src/pip/_internal/distributions/installed.py\n+++ b/src/pip/_internal/distributions/installed.py\n@@ -11,8 +11,10 @@\n \"\"\"\n \n def get_metadata_distribution(self) -> BaseDistribution:\n+ from pip._internal.metadata.pkg_resources import Distribution as _Dist\n+\n assert self.req.satisfied_by is not None, \"not actually installed\"\n- return self.req.satisfied_by\n+ return _Dist(self.req.satisfied_by)\n \n def prepare_distribution_metadata(\n self, finder: PackageFinder, build_isolation: bool\n", "issue": "21.3 regression with legacy resolver\nAssuming the following project with an empty `pyproject.toml` and the following `setup.cfg`:\r\n\r\n```ini\r\n[metadata]\r\nname = pkgb\r\nversion = 1.0\r\n\r\n[options]\r\ninstall_requires =\r\n wrapt\r\n```\r\n\r\nWe get the following stack trace, using pip main branch, today:\r\n\r\n```console\r\n$ pip install --use-deprecated=legacy-resolver -e ./pkgb\r\nObtaining file:///home/me/tmp/brol/pkgb\r\n Installing build dependencies ... done\r\n Getting requirements to build wheel ... done\r\n Preparing wheel metadata ... done\r\nRequirement already satisfied: wrapt in /home/me/.virtualenvs/tempenv-49ea1126817e6/lib/python3.8/site-packages (from pkgb==1.0) (1.12.1)\r\nERROR: Exception:\r\nTraceback (most recent call last):\r\n File \"/home/me/pip/src/pip/_internal/cli/base_command.py\", line 179, in exc_logging_wrapper\r\n status = run_func(*args)\r\n File \"/home/me/pip/src/pip/_internal/cli/req_command.py\", line 203, in wrapper\r\n return func(self, options, args)\r\n File \"/home/me/pip/src/pip/_internal/commands/install.py\", line 334, in run\r\n requirement_set = resolver.resolve(\r\n File \"/home/me/pip/src/pip/_internal/resolution/legacy/resolver.py\", line 181, in resolve\r\n discovered_reqs.extend(self._resolve_one(requirement_set, req))\r\n File \"/home/me/pip/src/pip/_internal/resolution/legacy/resolver.py\", line 382, in _resolve_one\r\n _check_dist_requires_python(\r\n File \"/home/me/pip/src/pip/_internal/resolution/legacy/resolver.py\", line 75, in _check_dist_requires_python\r\n requires_python = str(dist.requires_python)\r\n File \"/home/me/pip/src/pip/_vendor/pkg_resources/__init__.py\", line 2816, in __getattr__\r\n return getattr(self._provider, attr)\r\nAttributeError: 'PathMetadata' object has no attribute 'requires_python'\r\n```\r\n\n", "before_files": [{"content": "from pip._internal.distributions.base import AbstractDistribution\nfrom pip._internal.index.package_finder import PackageFinder\nfrom pip._internal.metadata import BaseDistribution\n\n\nclass InstalledDistribution(AbstractDistribution):\n \"\"\"Represents an installed package.\n\n This does not need any preparation as the required information has already\n been computed.\n \"\"\"\n\n def get_metadata_distribution(self) -> BaseDistribution:\n assert self.req.satisfied_by is not None, \"not actually installed\"\n return self.req.satisfied_by\n\n def prepare_distribution_metadata(\n self, finder: PackageFinder, build_isolation: bool\n ) -> None:\n pass\n", "path": "src/pip/_internal/distributions/installed.py"}], "after_files": [{"content": "from pip._internal.distributions.base import AbstractDistribution\nfrom pip._internal.index.package_finder import PackageFinder\nfrom pip._internal.metadata import BaseDistribution\n\n\nclass InstalledDistribution(AbstractDistribution):\n \"\"\"Represents an installed package.\n\n This does not need any 
preparation as the required information has already\n been computed.\n \"\"\"\n\n def get_metadata_distribution(self) -> BaseDistribution:\n from pip._internal.metadata.pkg_resources import Distribution as _Dist\n\n assert self.req.satisfied_by is not None, \"not actually installed\"\n return _Dist(self.req.satisfied_by)\n\n def prepare_distribution_metadata(\n self, finder: PackageFinder, build_isolation: bool\n ) -> None:\n pass\n", "path": "src/pip/_internal/distributions/installed.py"}]}
914
152
gh_patches_debug_21164
rasdani/github-patches
git_diff
elastic__ecs-1528
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Support `match_only_text` type fallback for ES 6 artifacts The `match_only_text` field data type will be introduced in Elasticsearch 7.14 and an upcoming ECS version via an [RFC proposal](https://github.com/elastic/ecs/blob/master/rfcs/text/0023-match_only_text-data-type.md). Since `match_only_text` is a feature available in 7.14+, artifacts generated targeting ES 6.x will need to fall back to using the `text` data type. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `scripts/generators/es_template.py` Content: ``` 1 import copy 2 import json 3 import sys 4 5 from os.path import join 6 7 from generators import ecs_helpers 8 from schema.cleaner import field_or_multi_field_datatype_defaults 9 10 11 TYPE_FALLBACKS = { 12 'constant_keyword': 'keyword', 13 'wildcard': 'keyword', 14 'version': 'keyword' 15 } 16 17 # Composable Template 18 19 20 def generate(ecs_nested, ecs_version, out_dir, mapping_settings_file): 21 """This generates all artifacts for the composable template approach""" 22 all_component_templates(ecs_nested, ecs_version, out_dir) 23 component_names = component_name_convention(ecs_version, ecs_nested) 24 save_composable_template(ecs_version, component_names, out_dir, mapping_settings_file) 25 26 27 def save_composable_template(ecs_version, component_names, out_dir, mapping_settings_file): 28 """Generate the master sample composable template""" 29 template = { 30 "index_patterns": ["try-ecs-*"], 31 "composed_of": component_names, 32 "priority": 1, # Very low, as this is a sample template 33 "_meta": { 34 "ecs_version": ecs_version, 35 "description": "Sample composable template that includes all ECS fields" 36 }, 37 "template": { 38 "settings": { 39 "index": { 40 "mapping": { 41 "total_fields": { 42 "limit": 2000 43 } 44 } 45 } 46 }, 47 "mappings": mapping_settings(mapping_settings_file) 48 } 49 } 50 filename = join(out_dir, "elasticsearch/template.json") 51 save_json(filename, template) 52 53 54 def all_component_templates(ecs_nested, ecs_version, out_dir): 55 """Generate one component template per field set""" 56 component_dir = join(out_dir, 'elasticsearch/component') 57 ecs_helpers.make_dirs(component_dir) 58 59 for (fieldset_name, fieldset) in candidate_components(ecs_nested).items(): 60 field_mappings = {} 61 for (flat_name, field) in fieldset['fields'].items(): 62 name_parts = flat_name.split('.') 63 dict_add_nested(field_mappings, name_parts, entry_for(field)) 64 65 save_component_template(fieldset_name, ecs_version, component_dir, field_mappings) 66 67 68 def save_component_template(template_name, ecs_version, out_dir, field_mappings): 69 filename = join(out_dir, template_name) + ".json" 70 reference_url = "https://www.elastic.co/guide/en/ecs/current/ecs-{}.html".format(template_name) 71 72 template = { 73 'template': {'mappings': {'properties': field_mappings}}, 74 '_meta': { 75 'ecs_version': ecs_version, 76 'documentation': reference_url 77 } 78 } 79 save_json(filename, template) 80 81 82 def component_name_convention(ecs_version, ecs_nested): 83 version = ecs_version.replace('+', '-') 84 names = [] 85 for (fieldset_name, fieldset) in candidate_components(ecs_nested).items(): 86 names.append("ecs_{}_{}".format(version, fieldset_name.lower())) 87 return names 88 89 90 def candidate_components(ecs_nested): 91 """Returns same structure as ecs_nested, but skips all 
field sets with reusable.top_level: False""" 92 components = {} 93 for (fieldset_name, fieldset) in ecs_nested.items(): 94 if fieldset.get('reusable', None): 95 if not fieldset['reusable']['top_level']: 96 continue 97 components[fieldset_name] = fieldset 98 return components 99 100 101 # Legacy template 102 103 104 def generate_legacy(ecs_flat, ecs_version, out_dir, template_settings_file, mapping_settings_file): 105 """Generate the legacy index template""" 106 field_mappings = {} 107 for flat_name in sorted(ecs_flat): 108 field = ecs_flat[flat_name] 109 name_parts = flat_name.split('.') 110 dict_add_nested(field_mappings, name_parts, entry_for(field)) 111 112 mappings_section = mapping_settings(mapping_settings_file) 113 mappings_section['properties'] = field_mappings 114 115 generate_legacy_template_version(6, ecs_version, mappings_section, out_dir, template_settings_file) 116 generate_legacy_template_version(7, ecs_version, mappings_section, out_dir, template_settings_file) 117 118 119 def generate_legacy_template_version(es_version, ecs_version, mappings_section, out_dir, template_settings_file): 120 ecs_helpers.make_dirs(join(out_dir, 'elasticsearch', str(es_version))) 121 template = template_settings(es_version, ecs_version, mappings_section, template_settings_file) 122 123 filename = join(out_dir, "elasticsearch/{}/template.json".format(es_version)) 124 save_json(filename, template) 125 126 127 # Common helpers 128 129 130 def dict_add_nested(dct, name_parts, value): 131 current_nesting = name_parts[0] 132 rest_name_parts = name_parts[1:] 133 if len(rest_name_parts) > 0: 134 dct.setdefault(current_nesting, {}) 135 dct[current_nesting].setdefault('properties', {}) 136 137 dict_add_nested( 138 dct[current_nesting]['properties'], 139 rest_name_parts, 140 value) 141 142 else: 143 if current_nesting in dct and 'type' in value and 'object' == value['type']: 144 return 145 dct[current_nesting] = value 146 147 148 def entry_for(field): 149 field_entry = {'type': field['type']} 150 try: 151 if field['type'] == 'object' or field['type'] == 'nested': 152 if 'enabled' in field and not field['enabled']: 153 ecs_helpers.dict_copy_existing_keys(field, field_entry, ['enabled']) 154 # the index field is only valid for field types that are not object and nested 155 elif 'index' in field and not field['index']: 156 ecs_helpers.dict_copy_existing_keys(field, field_entry, ['index', 'doc_values']) 157 158 if field['type'] == 'keyword': 159 ecs_helpers.dict_copy_existing_keys(field, field_entry, ['ignore_above']) 160 elif field['type'] == 'constant_keyword': 161 ecs_helpers.dict_copy_existing_keys(field, field_entry, ['value']) 162 elif field['type'] == 'text': 163 ecs_helpers.dict_copy_existing_keys(field, field_entry, ['norms']) 164 elif field['type'] == 'alias': 165 ecs_helpers.dict_copy_existing_keys(field, field_entry, ['path']) 166 elif field['type'] == 'scaled_float': 167 ecs_helpers.dict_copy_existing_keys(field, field_entry, ['scaling_factor']) 168 169 if 'multi_fields' in field: 170 field_entry['fields'] = {} 171 for mf in field['multi_fields']: 172 mf_type = mf['type'] 173 mf_entry = {'type': mf_type} 174 if mf_type == 'keyword': 175 ecs_helpers.dict_copy_existing_keys(mf, mf_entry, ['normalizer', 'ignore_above']) 176 elif mf_type == 'text': 177 ecs_helpers.dict_copy_existing_keys(mf, mf_entry, ['norms']) 178 field_entry['fields'][mf['name']] = mf_entry 179 180 except KeyError as ex: 181 print("Exception {} occurred for field {}".format(ex, field)) 182 raise ex 183 return field_entry 184 185 
186 def mapping_settings(mapping_settings_file): 187 if mapping_settings_file: 188 with open(mapping_settings_file) as f: 189 mappings = json.load(f) 190 else: 191 mappings = default_mapping_settings() 192 return mappings 193 194 195 def template_settings(es_version, ecs_version, mappings_section, template_settings_file): 196 if template_settings_file: 197 with open(template_settings_file) as f: 198 template = json.load(f) 199 else: 200 template = default_template_settings(ecs_version) 201 202 if es_version == 6: 203 mappings_section = copy.deepcopy(mappings_section) 204 es6_type_fallback(mappings_section['properties']) 205 206 # error.stack_trace needs special handling to set 207 # index: false and doc_values: false if the field 208 # is present in the mappings 209 try: 210 error_stack_trace_mappings = mappings_section['properties']['error']['properties']['stack_trace'] 211 error_stack_trace_mappings.setdefault('index', False) 212 error_stack_trace_mappings.setdefault('doc_values', False) 213 except KeyError: 214 pass 215 216 template['mappings'] = {'_doc': mappings_section} 217 else: 218 template['mappings'] = mappings_section 219 220 # _meta can't be at template root in legacy templates, so moving back to mappings section 221 # if present 222 if '_meta' in template: 223 mappings_section['_meta'] = template.pop('_meta') 224 225 return template 226 227 228 def save_json(file, data): 229 open_mode = "wb" 230 if sys.version_info >= (3, 0): 231 open_mode = "w" 232 with open(file, open_mode) as jsonfile: 233 jsonfile.write(json.dumps(data, indent=2, sort_keys=True)) 234 235 236 def default_template_settings(ecs_version): 237 return { 238 "index_patterns": ["try-ecs-*"], 239 "_meta": {"version": ecs_version}, 240 "order": 1, 241 "settings": { 242 "index": { 243 "mapping": { 244 "total_fields": { 245 "limit": 10000 246 } 247 }, 248 "refresh_interval": "5s" 249 } 250 } 251 } 252 253 254 def default_mapping_settings(): 255 return { 256 "date_detection": False, 257 "dynamic_templates": [ 258 { 259 "strings_as_keyword": { 260 "mapping": { 261 "ignore_above": 1024, 262 "type": "keyword" 263 }, 264 "match_mapping_type": "string" 265 } 266 } 267 ] 268 } 269 270 271 def es6_type_fallback(mappings): 272 """ 273 Visits each leaf in mappings object and fallback to an 274 Elasticsearch 6.x supported type. 275 276 Since a field like `wildcard` won't have the same defaults as 277 a `keyword` field, we must add any missing defaults. 278 """ 279 280 for (name, details) in mappings.items(): 281 if 'type' in details: 282 fallback_type = TYPE_FALLBACKS.get(details['type']) 283 if fallback_type: 284 mappings[name]['type'] = fallback_type 285 field_or_multi_field_datatype_defaults(mappings[name]) 286 if 'properties' in details: 287 es6_type_fallback(details['properties']) 288 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scripts/generators/es_template.py b/scripts/generators/es_template.py --- a/scripts/generators/es_template.py +++ b/scripts/generators/es_template.py @@ -11,7 +11,8 @@ TYPE_FALLBACKS = { 'constant_keyword': 'keyword', 'wildcard': 'keyword', - 'version': 'keyword' + 'version': 'keyword', + 'match_only_text': 'text' } # Composable Template @@ -283,5 +284,13 @@ if fallback_type: mappings[name]['type'] = fallback_type field_or_multi_field_datatype_defaults(mappings[name]) + # support multi-fields + if 'fields' in details: + # potentially multiple multi-fields + for field_name, field_value in details['fields'].items(): + fallback_type = TYPE_FALLBACKS.get(field_value['type']) + if fallback_type: + mappings[name]['fields'][field_name]['type'] = fallback_type + field_or_multi_field_datatype_defaults(mappings[name]['fields'][field_name]) if 'properties' in details: es6_type_fallback(details['properties'])
{"golden_diff": "diff --git a/scripts/generators/es_template.py b/scripts/generators/es_template.py\n--- a/scripts/generators/es_template.py\n+++ b/scripts/generators/es_template.py\n@@ -11,7 +11,8 @@\n TYPE_FALLBACKS = {\n 'constant_keyword': 'keyword',\n 'wildcard': 'keyword',\n- 'version': 'keyword'\n+ 'version': 'keyword',\n+ 'match_only_text': 'text'\n }\n \n # Composable Template\n@@ -283,5 +284,13 @@\n if fallback_type:\n mappings[name]['type'] = fallback_type\n field_or_multi_field_datatype_defaults(mappings[name])\n+ # support multi-fields\n+ if 'fields' in details:\n+ # potentially multiple multi-fields\n+ for field_name, field_value in details['fields'].items():\n+ fallback_type = TYPE_FALLBACKS.get(field_value['type'])\n+ if fallback_type:\n+ mappings[name]['fields'][field_name]['type'] = fallback_type\n+ field_or_multi_field_datatype_defaults(mappings[name]['fields'][field_name])\n if 'properties' in details:\n es6_type_fallback(details['properties'])\n", "issue": "Support `match_only_text` type fallback for ES 6 artifacts\nThe `match_only_text` field data type will be introduced in Elasticsearch 7.14 and an upcoming ECS version via an [RFC proposal](https://github.com/elastic/ecs/blob/master/rfcs/text/0023-match_only_text-data-type.md).\r\n\r\nSince `match_only_text` is a feature available in 7.14+, artifacts generated targeting ES 6.x will need to fall back to using the `text` data type.\n", "before_files": [{"content": "import copy\nimport json\nimport sys\n\nfrom os.path import join\n\nfrom generators import ecs_helpers\nfrom schema.cleaner import field_or_multi_field_datatype_defaults\n\n\nTYPE_FALLBACKS = {\n 'constant_keyword': 'keyword',\n 'wildcard': 'keyword',\n 'version': 'keyword'\n}\n\n# Composable Template\n\n\ndef generate(ecs_nested, ecs_version, out_dir, mapping_settings_file):\n \"\"\"This generates all artifacts for the composable template approach\"\"\"\n all_component_templates(ecs_nested, ecs_version, out_dir)\n component_names = component_name_convention(ecs_version, ecs_nested)\n save_composable_template(ecs_version, component_names, out_dir, mapping_settings_file)\n\n\ndef save_composable_template(ecs_version, component_names, out_dir, mapping_settings_file):\n \"\"\"Generate the master sample composable template\"\"\"\n template = {\n \"index_patterns\": [\"try-ecs-*\"],\n \"composed_of\": component_names,\n \"priority\": 1, # Very low, as this is a sample template\n \"_meta\": {\n \"ecs_version\": ecs_version,\n \"description\": \"Sample composable template that includes all ECS fields\"\n },\n \"template\": {\n \"settings\": {\n \"index\": {\n \"mapping\": {\n \"total_fields\": {\n \"limit\": 2000\n }\n }\n }\n },\n \"mappings\": mapping_settings(mapping_settings_file)\n }\n }\n filename = join(out_dir, \"elasticsearch/template.json\")\n save_json(filename, template)\n\n\ndef all_component_templates(ecs_nested, ecs_version, out_dir):\n \"\"\"Generate one component template per field set\"\"\"\n component_dir = join(out_dir, 'elasticsearch/component')\n ecs_helpers.make_dirs(component_dir)\n\n for (fieldset_name, fieldset) in candidate_components(ecs_nested).items():\n field_mappings = {}\n for (flat_name, field) in fieldset['fields'].items():\n name_parts = flat_name.split('.')\n dict_add_nested(field_mappings, name_parts, entry_for(field))\n\n save_component_template(fieldset_name, ecs_version, component_dir, field_mappings)\n\n\ndef save_component_template(template_name, ecs_version, out_dir, field_mappings):\n filename = join(out_dir, 
template_name) + \".json\"\n reference_url = \"https://www.elastic.co/guide/en/ecs/current/ecs-{}.html\".format(template_name)\n\n template = {\n 'template': {'mappings': {'properties': field_mappings}},\n '_meta': {\n 'ecs_version': ecs_version,\n 'documentation': reference_url\n }\n }\n save_json(filename, template)\n\n\ndef component_name_convention(ecs_version, ecs_nested):\n version = ecs_version.replace('+', '-')\n names = []\n for (fieldset_name, fieldset) in candidate_components(ecs_nested).items():\n names.append(\"ecs_{}_{}\".format(version, fieldset_name.lower()))\n return names\n\n\ndef candidate_components(ecs_nested):\n \"\"\"Returns same structure as ecs_nested, but skips all field sets with reusable.top_level: False\"\"\"\n components = {}\n for (fieldset_name, fieldset) in ecs_nested.items():\n if fieldset.get('reusable', None):\n if not fieldset['reusable']['top_level']:\n continue\n components[fieldset_name] = fieldset\n return components\n\n\n# Legacy template\n\n\ndef generate_legacy(ecs_flat, ecs_version, out_dir, template_settings_file, mapping_settings_file):\n \"\"\"Generate the legacy index template\"\"\"\n field_mappings = {}\n for flat_name in sorted(ecs_flat):\n field = ecs_flat[flat_name]\n name_parts = flat_name.split('.')\n dict_add_nested(field_mappings, name_parts, entry_for(field))\n\n mappings_section = mapping_settings(mapping_settings_file)\n mappings_section['properties'] = field_mappings\n\n generate_legacy_template_version(6, ecs_version, mappings_section, out_dir, template_settings_file)\n generate_legacy_template_version(7, ecs_version, mappings_section, out_dir, template_settings_file)\n\n\ndef generate_legacy_template_version(es_version, ecs_version, mappings_section, out_dir, template_settings_file):\n ecs_helpers.make_dirs(join(out_dir, 'elasticsearch', str(es_version)))\n template = template_settings(es_version, ecs_version, mappings_section, template_settings_file)\n\n filename = join(out_dir, \"elasticsearch/{}/template.json\".format(es_version))\n save_json(filename, template)\n\n\n# Common helpers\n\n\ndef dict_add_nested(dct, name_parts, value):\n current_nesting = name_parts[0]\n rest_name_parts = name_parts[1:]\n if len(rest_name_parts) > 0:\n dct.setdefault(current_nesting, {})\n dct[current_nesting].setdefault('properties', {})\n\n dict_add_nested(\n dct[current_nesting]['properties'],\n rest_name_parts,\n value)\n\n else:\n if current_nesting in dct and 'type' in value and 'object' == value['type']:\n return\n dct[current_nesting] = value\n\n\ndef entry_for(field):\n field_entry = {'type': field['type']}\n try:\n if field['type'] == 'object' or field['type'] == 'nested':\n if 'enabled' in field and not field['enabled']:\n ecs_helpers.dict_copy_existing_keys(field, field_entry, ['enabled'])\n # the index field is only valid for field types that are not object and nested\n elif 'index' in field and not field['index']:\n ecs_helpers.dict_copy_existing_keys(field, field_entry, ['index', 'doc_values'])\n\n if field['type'] == 'keyword':\n ecs_helpers.dict_copy_existing_keys(field, field_entry, ['ignore_above'])\n elif field['type'] == 'constant_keyword':\n ecs_helpers.dict_copy_existing_keys(field, field_entry, ['value'])\n elif field['type'] == 'text':\n ecs_helpers.dict_copy_existing_keys(field, field_entry, ['norms'])\n elif field['type'] == 'alias':\n ecs_helpers.dict_copy_existing_keys(field, field_entry, ['path'])\n elif field['type'] == 'scaled_float':\n ecs_helpers.dict_copy_existing_keys(field, field_entry, 
['scaling_factor'])\n\n if 'multi_fields' in field:\n field_entry['fields'] = {}\n for mf in field['multi_fields']:\n mf_type = mf['type']\n mf_entry = {'type': mf_type}\n if mf_type == 'keyword':\n ecs_helpers.dict_copy_existing_keys(mf, mf_entry, ['normalizer', 'ignore_above'])\n elif mf_type == 'text':\n ecs_helpers.dict_copy_existing_keys(mf, mf_entry, ['norms'])\n field_entry['fields'][mf['name']] = mf_entry\n\n except KeyError as ex:\n print(\"Exception {} occurred for field {}\".format(ex, field))\n raise ex\n return field_entry\n\n\ndef mapping_settings(mapping_settings_file):\n if mapping_settings_file:\n with open(mapping_settings_file) as f:\n mappings = json.load(f)\n else:\n mappings = default_mapping_settings()\n return mappings\n\n\ndef template_settings(es_version, ecs_version, mappings_section, template_settings_file):\n if template_settings_file:\n with open(template_settings_file) as f:\n template = json.load(f)\n else:\n template = default_template_settings(ecs_version)\n\n if es_version == 6:\n mappings_section = copy.deepcopy(mappings_section)\n es6_type_fallback(mappings_section['properties'])\n\n # error.stack_trace needs special handling to set\n # index: false and doc_values: false if the field\n # is present in the mappings\n try:\n error_stack_trace_mappings = mappings_section['properties']['error']['properties']['stack_trace']\n error_stack_trace_mappings.setdefault('index', False)\n error_stack_trace_mappings.setdefault('doc_values', False)\n except KeyError:\n pass\n\n template['mappings'] = {'_doc': mappings_section}\n else:\n template['mappings'] = mappings_section\n\n # _meta can't be at template root in legacy templates, so moving back to mappings section\n # if present\n if '_meta' in template:\n mappings_section['_meta'] = template.pop('_meta')\n\n return template\n\n\ndef save_json(file, data):\n open_mode = \"wb\"\n if sys.version_info >= (3, 0):\n open_mode = \"w\"\n with open(file, open_mode) as jsonfile:\n jsonfile.write(json.dumps(data, indent=2, sort_keys=True))\n\n\ndef default_template_settings(ecs_version):\n return {\n \"index_patterns\": [\"try-ecs-*\"],\n \"_meta\": {\"version\": ecs_version},\n \"order\": 1,\n \"settings\": {\n \"index\": {\n \"mapping\": {\n \"total_fields\": {\n \"limit\": 10000\n }\n },\n \"refresh_interval\": \"5s\"\n }\n }\n }\n\n\ndef default_mapping_settings():\n return {\n \"date_detection\": False,\n \"dynamic_templates\": [\n {\n \"strings_as_keyword\": {\n \"mapping\": {\n \"ignore_above\": 1024,\n \"type\": \"keyword\"\n },\n \"match_mapping_type\": \"string\"\n }\n }\n ]\n }\n\n\ndef es6_type_fallback(mappings):\n \"\"\"\n Visits each leaf in mappings object and fallback to an\n Elasticsearch 6.x supported type.\n\n Since a field like `wildcard` won't have the same defaults as\n a `keyword` field, we must add any missing defaults.\n \"\"\"\n\n for (name, details) in mappings.items():\n if 'type' in details:\n fallback_type = TYPE_FALLBACKS.get(details['type'])\n if fallback_type:\n mappings[name]['type'] = fallback_type\n field_or_multi_field_datatype_defaults(mappings[name])\n if 'properties' in details:\n es6_type_fallback(details['properties'])\n", "path": "scripts/generators/es_template.py"}], "after_files": [{"content": "import copy\nimport json\nimport sys\n\nfrom os.path import join\n\nfrom generators import ecs_helpers\nfrom schema.cleaner import field_or_multi_field_datatype_defaults\n\n\nTYPE_FALLBACKS = {\n 'constant_keyword': 'keyword',\n 'wildcard': 'keyword',\n 'version': 'keyword',\n 
'match_only_text': 'text'\n}\n\n# Composable Template\n\n\ndef generate(ecs_nested, ecs_version, out_dir, mapping_settings_file):\n \"\"\"This generates all artifacts for the composable template approach\"\"\"\n all_component_templates(ecs_nested, ecs_version, out_dir)\n component_names = component_name_convention(ecs_version, ecs_nested)\n save_composable_template(ecs_version, component_names, out_dir, mapping_settings_file)\n\n\ndef save_composable_template(ecs_version, component_names, out_dir, mapping_settings_file):\n \"\"\"Generate the master sample composable template\"\"\"\n template = {\n \"index_patterns\": [\"try-ecs-*\"],\n \"composed_of\": component_names,\n \"priority\": 1, # Very low, as this is a sample template\n \"_meta\": {\n \"ecs_version\": ecs_version,\n \"description\": \"Sample composable template that includes all ECS fields\"\n },\n \"template\": {\n \"settings\": {\n \"index\": {\n \"mapping\": {\n \"total_fields\": {\n \"limit\": 2000\n }\n }\n }\n },\n \"mappings\": mapping_settings(mapping_settings_file)\n }\n }\n filename = join(out_dir, \"elasticsearch/template.json\")\n save_json(filename, template)\n\n\ndef all_component_templates(ecs_nested, ecs_version, out_dir):\n \"\"\"Generate one component template per field set\"\"\"\n component_dir = join(out_dir, 'elasticsearch/component')\n ecs_helpers.make_dirs(component_dir)\n\n for (fieldset_name, fieldset) in candidate_components(ecs_nested).items():\n field_mappings = {}\n for (flat_name, field) in fieldset['fields'].items():\n name_parts = flat_name.split('.')\n dict_add_nested(field_mappings, name_parts, entry_for(field))\n\n save_component_template(fieldset_name, ecs_version, component_dir, field_mappings)\n\n\ndef save_component_template(template_name, ecs_version, out_dir, field_mappings):\n filename = join(out_dir, template_name) + \".json\"\n reference_url = \"https://www.elastic.co/guide/en/ecs/current/ecs-{}.html\".format(template_name)\n\n template = {\n 'template': {'mappings': {'properties': field_mappings}},\n '_meta': {\n 'ecs_version': ecs_version,\n 'documentation': reference_url\n }\n }\n save_json(filename, template)\n\n\ndef component_name_convention(ecs_version, ecs_nested):\n version = ecs_version.replace('+', '-')\n names = []\n for (fieldset_name, fieldset) in candidate_components(ecs_nested).items():\n names.append(\"ecs_{}_{}\".format(version, fieldset_name.lower()))\n return names\n\n\ndef candidate_components(ecs_nested):\n \"\"\"Returns same structure as ecs_nested, but skips all field sets with reusable.top_level: False\"\"\"\n components = {}\n for (fieldset_name, fieldset) in ecs_nested.items():\n if fieldset.get('reusable', None):\n if not fieldset['reusable']['top_level']:\n continue\n components[fieldset_name] = fieldset\n return components\n\n\n# Legacy template\n\n\ndef generate_legacy(ecs_flat, ecs_version, out_dir, template_settings_file, mapping_settings_file):\n \"\"\"Generate the legacy index template\"\"\"\n field_mappings = {}\n for flat_name in sorted(ecs_flat):\n field = ecs_flat[flat_name]\n name_parts = flat_name.split('.')\n dict_add_nested(field_mappings, name_parts, entry_for(field))\n\n mappings_section = mapping_settings(mapping_settings_file)\n mappings_section['properties'] = field_mappings\n\n generate_legacy_template_version(6, ecs_version, mappings_section, out_dir, template_settings_file)\n generate_legacy_template_version(7, ecs_version, mappings_section, out_dir, template_settings_file)\n\n\ndef generate_legacy_template_version(es_version, 
ecs_version, mappings_section, out_dir, template_settings_file):\n ecs_helpers.make_dirs(join(out_dir, 'elasticsearch', str(es_version)))\n template = template_settings(es_version, ecs_version, mappings_section, template_settings_file)\n\n filename = join(out_dir, \"elasticsearch/{}/template.json\".format(es_version))\n save_json(filename, template)\n\n\n# Common helpers\n\n\ndef dict_add_nested(dct, name_parts, value):\n current_nesting = name_parts[0]\n rest_name_parts = name_parts[1:]\n if len(rest_name_parts) > 0:\n dct.setdefault(current_nesting, {})\n dct[current_nesting].setdefault('properties', {})\n\n dict_add_nested(\n dct[current_nesting]['properties'],\n rest_name_parts,\n value)\n\n else:\n if current_nesting in dct and 'type' in value and 'object' == value['type']:\n return\n dct[current_nesting] = value\n\n\ndef entry_for(field):\n field_entry = {'type': field['type']}\n try:\n if field['type'] == 'object' or field['type'] == 'nested':\n if 'enabled' in field and not field['enabled']:\n ecs_helpers.dict_copy_existing_keys(field, field_entry, ['enabled'])\n # the index field is only valid for field types that are not object and nested\n elif 'index' in field and not field['index']:\n ecs_helpers.dict_copy_existing_keys(field, field_entry, ['index', 'doc_values'])\n\n if field['type'] == 'keyword':\n ecs_helpers.dict_copy_existing_keys(field, field_entry, ['ignore_above'])\n elif field['type'] == 'constant_keyword':\n ecs_helpers.dict_copy_existing_keys(field, field_entry, ['value'])\n elif field['type'] == 'text':\n ecs_helpers.dict_copy_existing_keys(field, field_entry, ['norms'])\n elif field['type'] == 'alias':\n ecs_helpers.dict_copy_existing_keys(field, field_entry, ['path'])\n elif field['type'] == 'scaled_float':\n ecs_helpers.dict_copy_existing_keys(field, field_entry, ['scaling_factor'])\n\n if 'multi_fields' in field:\n field_entry['fields'] = {}\n for mf in field['multi_fields']:\n mf_type = mf['type']\n mf_entry = {'type': mf_type}\n if mf_type == 'keyword':\n ecs_helpers.dict_copy_existing_keys(mf, mf_entry, ['normalizer', 'ignore_above'])\n elif mf_type == 'text':\n ecs_helpers.dict_copy_existing_keys(mf, mf_entry, ['norms'])\n field_entry['fields'][mf['name']] = mf_entry\n\n except KeyError as ex:\n print(\"Exception {} occurred for field {}\".format(ex, field))\n raise ex\n return field_entry\n\n\ndef mapping_settings(mapping_settings_file):\n if mapping_settings_file:\n with open(mapping_settings_file) as f:\n mappings = json.load(f)\n else:\n mappings = default_mapping_settings()\n return mappings\n\n\ndef template_settings(es_version, ecs_version, mappings_section, template_settings_file):\n if template_settings_file:\n with open(template_settings_file) as f:\n template = json.load(f)\n else:\n template = default_template_settings(ecs_version)\n\n if es_version == 6:\n mappings_section = copy.deepcopy(mappings_section)\n es6_type_fallback(mappings_section['properties'])\n\n # error.stack_trace needs special handling to set\n # index: false and doc_values: false if the field\n # is present in the mappings\n try:\n error_stack_trace_mappings = mappings_section['properties']['error']['properties']['stack_trace']\n error_stack_trace_mappings.setdefault('index', False)\n error_stack_trace_mappings.setdefault('doc_values', False)\n except KeyError:\n pass\n\n template['mappings'] = {'_doc': mappings_section}\n else:\n template['mappings'] = mappings_section\n\n # _meta can't be at template root in legacy templates, so moving back to mappings section\n # if 
present\n if '_meta' in template:\n mappings_section['_meta'] = template.pop('_meta')\n\n return template\n\n\ndef save_json(file, data):\n open_mode = \"wb\"\n if sys.version_info >= (3, 0):\n open_mode = \"w\"\n with open(file, open_mode) as jsonfile:\n jsonfile.write(json.dumps(data, indent=2, sort_keys=True))\n\n\ndef default_template_settings(ecs_version):\n return {\n \"index_patterns\": [\"try-ecs-*\"],\n \"_meta\": {\"version\": ecs_version},\n \"order\": 1,\n \"settings\": {\n \"index\": {\n \"mapping\": {\n \"total_fields\": {\n \"limit\": 10000\n }\n },\n \"refresh_interval\": \"5s\"\n }\n }\n }\n\n\ndef default_mapping_settings():\n return {\n \"date_detection\": False,\n \"dynamic_templates\": [\n {\n \"strings_as_keyword\": {\n \"mapping\": {\n \"ignore_above\": 1024,\n \"type\": \"keyword\"\n },\n \"match_mapping_type\": \"string\"\n }\n }\n ]\n }\n\n\ndef es6_type_fallback(mappings):\n \"\"\"\n Visits each leaf in mappings object and fallback to an\n Elasticsearch 6.x supported type.\n\n Since a field like `wildcard` won't have the same defaults as\n a `keyword` field, we must add any missing defaults.\n \"\"\"\n\n for (name, details) in mappings.items():\n if 'type' in details:\n fallback_type = TYPE_FALLBACKS.get(details['type'])\n if fallback_type:\n mappings[name]['type'] = fallback_type\n field_or_multi_field_datatype_defaults(mappings[name])\n # support multi-fields\n if 'fields' in details:\n # potentially multiple multi-fields\n for field_name, field_value in details['fields'].items():\n fallback_type = TYPE_FALLBACKS.get(field_value['type'])\n if fallback_type:\n mappings[name]['fields'][field_name]['type'] = fallback_type\n field_or_multi_field_datatype_defaults(mappings[name]['fields'][field_name])\n if 'properties' in details:\n es6_type_fallback(details['properties'])\n", "path": "scripts/generators/es_template.py"}]}
3,275
261
gh_patches_debug_37678
rasdani/github-patches
git_diff
bokeh__bokeh-5176
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- ColorMapping - color values out of high/low ``` python data = [-1, 0, np.NaN, 1, 2] cmap = LinearColorMapper(palette=["red", "green"], low=0, high=1, nan_color=‘gray’, high_color=‘orange’, low_color=‘pink’) # expect: [‘pink’, ‘red’, ‘gray’, ‘green’, ‘orange’] ``` LogColorMapper should also receive this treatment. --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `bokeh/models/mappers.py` Content: ``` 1 """ Models for mapping values from one range or space to another. 2 3 """ 4 from __future__ import absolute_import 5 import warnings 6 7 from ..model import Model 8 from ..core.properties import abstract 9 from ..core.properties import Color, Enum, Seq, Either, String, Int, Float, Date, Datetime 10 from ..core.enums import Palette 11 from .. import palettes 12 13 14 @abstract 15 class ColorMapper(Model): 16 """ Base class for color mapper types. ``ColorMapper`` is not 17 generally useful to instantiate on its own. 18 19 """ 20 21 palette = Seq(Color, help=""" 22 A sequence of colors to use as the target palette for mapping. 23 24 This property can also be set as a ``String``, to the name of 25 any of the palettes shown in :ref:`bokeh.palettes`. 26 """).accepts(Enum(Palette), lambda pal: getattr(palettes, pal)) 27 28 nan_color = Color(default="gray", help=""" 29 Color to be used if data is NaN. Default: 'gray' 30 """) 31 32 def __init__(self, palette=None, **kwargs): 33 if palette is not None: 34 kwargs['palette'] = palette 35 super(ColorMapper, self).__init__(**kwargs) 36 37 38 class CategoricalColorMapper(ColorMapper): 39 """ Map categories to colors. Values that are passed to 40 this mapper that aren't in factors will be assigned the nan_color. 41 42 """ 43 44 factors = Either(Seq(String), Seq(Int), Seq(Float), Seq(Datetime), Seq(Date), help=""" 45 A sequence of factors / categories that map to the color palette. 46 """) 47 48 49 def __init__(self, **kwargs): 50 super(ColorMapper, self).__init__(**kwargs) 51 palette = self.palette 52 factors = self.factors 53 if palette and factors: 54 if len(palette) < len(factors): 55 extra_factors = factors[len(palette):] 56 warnings.warn("""Palette length does not match number of 57 factors. %s will be assigned to `nan_color` %s""" % (extra_factors, self.nan_color)) 58 59 60 @abstract 61 class ContinuousColorMapper(ColorMapper): 62 """ Base class for cotinuous color mapper types. ``ContinuousColorMapper`` is not 63 generally useful to instantiate on its own. 64 65 """ 66 67 low = Float(help=""" 68 The minimum value of the range to map into the palette. Values below 69 this are clamped to ``low``. 70 """) 71 72 high = Float(help=""" 73 The maximum value of the range to map into the palette. Values above 74 this are clamped to ``high``. 75 """) 76 77 78 class LinearColorMapper(ContinuousColorMapper): 79 """ Map numbers in a range [*low*, *high*] linearly into a 80 sequence of colors (a palette). 
81 82 For example, if the range is [0, 99] and the palette is 83 ``['red', 'green', 'blue']``, the values would be mapped as 84 follows:: 85 86 x < 0 : 'red' # values < low are clamped 87 0 >= x < 33 : 'red' 88 33 >= x < 66 : 'green' 89 66 >= x < 99 : 'blue' 90 99 >= x : 'blue' # values > high are clamped 91 92 """ 93 94 95 class LogColorMapper(ContinuousColorMapper): 96 """ Map numbers in a range [*low*, *high*] into a 97 sequence of colors (a palette) on a natural logarithm scale. 98 99 For example, if the range is [0, 25] and the palette is 100 ``['red', 'green', 'blue']``, the values would be mapped as 101 follows:: 102 103 x < 0 : 'red' # values < low are clamped 104 0 >= x < 2.72 : 'red' # math.e ** 1 105 2.72 >= x < 7.39 : 'green' # math.e ** 2 106 7.39 >= x < 20.09 : 'blue' # math.e ** 3 107 20.09 >= x : 'blue' # values > high are clamped 108 109 .. warning:: 110 The LogColorMapper only works for images with scalar values that are 111 non-negative. 112 113 """ 114 ``` Path: `examples/plotting/file/color_data_map.py` Content: ``` 1 import numpy as np 2 3 from bokeh.io import show 4 from bokeh.layouts import gridplot 5 from bokeh.models import ( 6 ColumnDataSource, 7 ColorBar, 8 LinearColorMapper, 9 LogColorMapper, 10 ) 11 from bokeh.palettes import Viridis3, Viridis256 12 from bokeh.plotting import figure 13 14 x = np.random.random(2500) * 100 15 y = np.random.normal(size=2500) * 2 + 5 16 source = ColumnDataSource(dict(x=x, y=y)) 17 opts = dict(x='x', line_color=None, source=source) 18 19 20 def make_plot(mapper, title): 21 p = figure(toolbar_location=None, tools='', title=title) 22 color_bar = ColorBar(color_mapper=mapper, location=(0, 0)) 23 p.circle( 24 x='x', y='y', 25 fill_color={'field': 'x', 'transform': mapper}, line_color=None, 26 source=source 27 ) 28 p.add_layout(color_bar, 'right') 29 return p 30 31 p1 = make_plot(LinearColorMapper(palette=Viridis256, low=0, high=100), title='Viridis256 - Linear') 32 p2 = make_plot(LogColorMapper(palette=Viridis256, low=0, high=100), title='Viridis256 - Log') 33 p3 = make_plot(LinearColorMapper(palette=Viridis3, low=0, high=100), title='Viridis3 - Linear') 34 p4 = make_plot(LogColorMapper(palette=Viridis3, low=0, high=100), title='Viridis3 - Log') 35 36 show(gridplot([p1, p2, p3, p4], ncols=2, plot_width=400, plot_height=300, toolbar_location=None)) 37 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/bokeh/models/mappers.py b/bokeh/models/mappers.py --- a/bokeh/models/mappers.py +++ b/bokeh/models/mappers.py @@ -74,6 +74,15 @@ this are clamped to ``high``. """) + low_color = Color(default=None, help=""" + Color to be used if data is lower than ``low`` value. If None, + values lower than ``low`` are mapped to the first color in the palette. + """) + + high_color = Color(default=None, help=""" + Color to be used if data is lower than ``high`` value. If None, + values lower than ``high`` are mapped to the last color in the palette. + """) class LinearColorMapper(ContinuousColorMapper): """ Map numbers in a range [*low*, *high*] linearly into a @@ -92,6 +101,7 @@ """ + class LogColorMapper(ContinuousColorMapper): """ Map numbers in a range [*low*, *high*] into a sequence of colors (a palette) on a natural logarithm scale. diff --git a/examples/plotting/file/color_data_map.py b/examples/plotting/file/color_data_map.py --- a/examples/plotting/file/color_data_map.py +++ b/examples/plotting/file/color_data_map.py @@ -11,13 +11,15 @@ from bokeh.palettes import Viridis3, Viridis256 from bokeh.plotting import figure -x = np.random.random(2500) * 100 +x = np.random.random(2500) * 140 - 20 y = np.random.normal(size=2500) * 2 + 5 source = ColumnDataSource(dict(x=x, y=y)) opts = dict(x='x', line_color=None, source=source) def make_plot(mapper, title): + mapper.low_color = 'blue' + mapper.high_color = 'red' p = figure(toolbar_location=None, tools='', title=title) color_bar = ColorBar(color_mapper=mapper, location=(0, 0)) p.circle( @@ -28,9 +30,9 @@ p.add_layout(color_bar, 'right') return p -p1 = make_plot(LinearColorMapper(palette=Viridis256, low=0, high=100), title='Viridis256 - Linear') -p2 = make_plot(LogColorMapper(palette=Viridis256, low=0, high=100), title='Viridis256 - Log') -p3 = make_plot(LinearColorMapper(palette=Viridis3, low=0, high=100), title='Viridis3 - Linear') -p4 = make_plot(LogColorMapper(palette=Viridis3, low=0, high=100), title='Viridis3 - Log') +p1 = make_plot(LinearColorMapper(palette=Viridis256, low=0, high=100), title='Viridis256 - Linear, low/high = blue/red') +p2 = make_plot(LogColorMapper(palette=Viridis256, low=0, high=100), title='Viridis256 - Log, low/high = blue/red') +p3 = make_plot(LinearColorMapper(palette=Viridis3, low=0, high=100), title='Viridis3 - Linear, low/high = blue/red') +p4 = make_plot(LogColorMapper(palette=Viridis3, low=0, high=100), title='Viridis3 - Log, low/high =, blue/red') show(gridplot([p1, p2, p3, p4], ncols=2, plot_width=400, plot_height=300, toolbar_location=None))
{"golden_diff": "diff --git a/bokeh/models/mappers.py b/bokeh/models/mappers.py\n--- a/bokeh/models/mappers.py\n+++ b/bokeh/models/mappers.py\n@@ -74,6 +74,15 @@\n this are clamped to ``high``.\n \"\"\")\n \n+ low_color = Color(default=None, help=\"\"\"\n+ Color to be used if data is lower than ``low`` value. If None,\n+ values lower than ``low`` are mapped to the first color in the palette.\n+ \"\"\")\n+\n+ high_color = Color(default=None, help=\"\"\"\n+ Color to be used if data is lower than ``high`` value. If None,\n+ values lower than ``high`` are mapped to the last color in the palette.\n+ \"\"\")\n \n class LinearColorMapper(ContinuousColorMapper):\n \"\"\" Map numbers in a range [*low*, *high*] linearly into a\n@@ -92,6 +101,7 @@\n \"\"\"\n \n \n+\n class LogColorMapper(ContinuousColorMapper):\n \"\"\" Map numbers in a range [*low*, *high*] into a\n sequence of colors (a palette) on a natural logarithm scale.\ndiff --git a/examples/plotting/file/color_data_map.py b/examples/plotting/file/color_data_map.py\n--- a/examples/plotting/file/color_data_map.py\n+++ b/examples/plotting/file/color_data_map.py\n@@ -11,13 +11,15 @@\n from bokeh.palettes import Viridis3, Viridis256\n from bokeh.plotting import figure\n \n-x = np.random.random(2500) * 100\n+x = np.random.random(2500) * 140 - 20\n y = np.random.normal(size=2500) * 2 + 5\n source = ColumnDataSource(dict(x=x, y=y))\n opts = dict(x='x', line_color=None, source=source)\n \n \n def make_plot(mapper, title):\n+ mapper.low_color = 'blue'\n+ mapper.high_color = 'red'\n p = figure(toolbar_location=None, tools='', title=title)\n color_bar = ColorBar(color_mapper=mapper, location=(0, 0))\n p.circle(\n@@ -28,9 +30,9 @@\n p.add_layout(color_bar, 'right')\n return p\n \n-p1 = make_plot(LinearColorMapper(palette=Viridis256, low=0, high=100), title='Viridis256 - Linear')\n-p2 = make_plot(LogColorMapper(palette=Viridis256, low=0, high=100), title='Viridis256 - Log')\n-p3 = make_plot(LinearColorMapper(palette=Viridis3, low=0, high=100), title='Viridis3 - Linear')\n-p4 = make_plot(LogColorMapper(palette=Viridis3, low=0, high=100), title='Viridis3 - Log')\n+p1 = make_plot(LinearColorMapper(palette=Viridis256, low=0, high=100), title='Viridis256 - Linear, low/high = blue/red')\n+p2 = make_plot(LogColorMapper(palette=Viridis256, low=0, high=100), title='Viridis256 - Log, low/high = blue/red')\n+p3 = make_plot(LinearColorMapper(palette=Viridis3, low=0, high=100), title='Viridis3 - Linear, low/high = blue/red')\n+p4 = make_plot(LogColorMapper(palette=Viridis3, low=0, high=100), title='Viridis3 - Log, low/high =, blue/red')\n \n show(gridplot([p1, p2, p3, p4], ncols=2, plot_width=400, plot_height=300, toolbar_location=None))\n", "issue": "ColorMapping - color values out of high/low\n``` python\ndata = [-1, 0, np.NaN, 1, 2]\ncmap = LinearColorMapper(palette=[\"red\", \"green\"], low=0, high=1, nan_color=\u2018gray\u2019, high_color=\u2018orange\u2019, low_color=\u2018pink\u2019)\n# expect: [\u2018pink\u2019, \u2018red\u2019, \u2018gray\u2019, \u2018green\u2019, \u2018orange\u2019]\n```\n\nLogColorMapper should also receive this treatment.\n\n", "before_files": [{"content": "\"\"\" Models for mapping values from one range or space to another.\n\n\"\"\"\nfrom __future__ import absolute_import\nimport warnings\n\nfrom ..model import Model\nfrom ..core.properties import abstract\nfrom ..core.properties import Color, Enum, Seq, Either, String, Int, Float, Date, Datetime\nfrom ..core.enums import Palette\nfrom .. 
import palettes\n\n\n@abstract\nclass ColorMapper(Model):\n \"\"\" Base class for color mapper types. ``ColorMapper`` is not\n generally useful to instantiate on its own.\n\n \"\"\"\n\n palette = Seq(Color, help=\"\"\"\n A sequence of colors to use as the target palette for mapping.\n\n This property can also be set as a ``String``, to the name of\n any of the palettes shown in :ref:`bokeh.palettes`.\n \"\"\").accepts(Enum(Palette), lambda pal: getattr(palettes, pal))\n\n nan_color = Color(default=\"gray\", help=\"\"\"\n Color to be used if data is NaN. Default: 'gray'\n \"\"\")\n\n def __init__(self, palette=None, **kwargs):\n if palette is not None:\n kwargs['palette'] = palette\n super(ColorMapper, self).__init__(**kwargs)\n\n\nclass CategoricalColorMapper(ColorMapper):\n \"\"\" Map categories to colors. Values that are passed to\n this mapper that aren't in factors will be assigned the nan_color.\n\n \"\"\"\n\n factors = Either(Seq(String), Seq(Int), Seq(Float), Seq(Datetime), Seq(Date), help=\"\"\"\n A sequence of factors / categories that map to the color palette.\n \"\"\")\n\n\n def __init__(self, **kwargs):\n super(ColorMapper, self).__init__(**kwargs)\n palette = self.palette\n factors = self.factors\n if palette and factors:\n if len(palette) < len(factors):\n extra_factors = factors[len(palette):]\n warnings.warn(\"\"\"Palette length does not match number of\nfactors. %s will be assigned to `nan_color` %s\"\"\" % (extra_factors, self.nan_color))\n\n\n@abstract\nclass ContinuousColorMapper(ColorMapper):\n \"\"\" Base class for cotinuous color mapper types. ``ContinuousColorMapper`` is not\n generally useful to instantiate on its own.\n\n \"\"\"\n\n low = Float(help=\"\"\"\n The minimum value of the range to map into the palette. Values below\n this are clamped to ``low``.\n \"\"\")\n\n high = Float(help=\"\"\"\n The maximum value of the range to map into the palette. Values above\n this are clamped to ``high``.\n \"\"\")\n\n\nclass LinearColorMapper(ContinuousColorMapper):\n \"\"\" Map numbers in a range [*low*, *high*] linearly into a\n sequence of colors (a palette).\n\n For example, if the range is [0, 99] and the palette is\n ``['red', 'green', 'blue']``, the values would be mapped as\n follows::\n\n x < 0 : 'red' # values < low are clamped\n 0 >= x < 33 : 'red'\n 33 >= x < 66 : 'green'\n 66 >= x < 99 : 'blue'\n 99 >= x : 'blue' # values > high are clamped\n\n \"\"\"\n\n\nclass LogColorMapper(ContinuousColorMapper):\n \"\"\" Map numbers in a range [*low*, *high*] into a\n sequence of colors (a palette) on a natural logarithm scale.\n\n For example, if the range is [0, 25] and the palette is\n ``['red', 'green', 'blue']``, the values would be mapped as\n follows::\n\n x < 0 : 'red' # values < low are clamped\n 0 >= x < 2.72 : 'red' # math.e ** 1\n 2.72 >= x < 7.39 : 'green' # math.e ** 2\n 7.39 >= x < 20.09 : 'blue' # math.e ** 3\n 20.09 >= x : 'blue' # values > high are clamped\n\n .. 
warning::\n The LogColorMapper only works for images with scalar values that are\n non-negative.\n\n \"\"\"\n", "path": "bokeh/models/mappers.py"}, {"content": "import numpy as np\n\nfrom bokeh.io import show\nfrom bokeh.layouts import gridplot\nfrom bokeh.models import (\n ColumnDataSource,\n ColorBar,\n LinearColorMapper,\n LogColorMapper,\n)\nfrom bokeh.palettes import Viridis3, Viridis256\nfrom bokeh.plotting import figure\n\nx = np.random.random(2500) * 100\ny = np.random.normal(size=2500) * 2 + 5\nsource = ColumnDataSource(dict(x=x, y=y))\nopts = dict(x='x', line_color=None, source=source)\n\n\ndef make_plot(mapper, title):\n p = figure(toolbar_location=None, tools='', title=title)\n color_bar = ColorBar(color_mapper=mapper, location=(0, 0))\n p.circle(\n x='x', y='y',\n fill_color={'field': 'x', 'transform': mapper}, line_color=None,\n source=source\n )\n p.add_layout(color_bar, 'right')\n return p\n\np1 = make_plot(LinearColorMapper(palette=Viridis256, low=0, high=100), title='Viridis256 - Linear')\np2 = make_plot(LogColorMapper(palette=Viridis256, low=0, high=100), title='Viridis256 - Log')\np3 = make_plot(LinearColorMapper(palette=Viridis3, low=0, high=100), title='Viridis3 - Linear')\np4 = make_plot(LogColorMapper(palette=Viridis3, low=0, high=100), title='Viridis3 - Log')\n\nshow(gridplot([p1, p2, p3, p4], ncols=2, plot_width=400, plot_height=300, toolbar_location=None))\n", "path": "examples/plotting/file/color_data_map.py"}], "after_files": [{"content": "\"\"\" Models for mapping values from one range or space to another.\n\n\"\"\"\nfrom __future__ import absolute_import\nimport warnings\n\nfrom ..model import Model\nfrom ..core.properties import abstract\nfrom ..core.properties import Color, Enum, Seq, Either, String, Int, Float, Date, Datetime\nfrom ..core.enums import Palette\nfrom .. import palettes\n\n\n@abstract\nclass ColorMapper(Model):\n \"\"\" Base class for color mapper types. ``ColorMapper`` is not\n generally useful to instantiate on its own.\n\n \"\"\"\n\n palette = Seq(Color, help=\"\"\"\n A sequence of colors to use as the target palette for mapping.\n\n This property can also be set as a ``String``, to the name of\n any of the palettes shown in :ref:`bokeh.palettes`.\n \"\"\").accepts(Enum(Palette), lambda pal: getattr(palettes, pal))\n\n nan_color = Color(default=\"gray\", help=\"\"\"\n Color to be used if data is NaN. Default: 'gray'\n \"\"\")\n\n def __init__(self, palette=None, **kwargs):\n if palette is not None:\n kwargs['palette'] = palette\n super(ColorMapper, self).__init__(**kwargs)\n\n\nclass CategoricalColorMapper(ColorMapper):\n \"\"\" Map categories to colors. Values that are passed to\n this mapper that aren't in factors will be assigned the nan_color.\n\n \"\"\"\n\n factors = Either(Seq(String), Seq(Int), Seq(Float), Seq(Datetime), Seq(Date), help=\"\"\"\n A sequence of factors / categories that map to the color palette.\n \"\"\")\n\n\n def __init__(self, **kwargs):\n super(ColorMapper, self).__init__(**kwargs)\n palette = self.palette\n factors = self.factors\n if palette and factors:\n if len(palette) < len(factors):\n extra_factors = factors[len(palette):]\n warnings.warn(\"\"\"Palette length does not match number of\nfactors. %s will be assigned to `nan_color` %s\"\"\" % (extra_factors, self.nan_color))\n\n\n@abstract\nclass ContinuousColorMapper(ColorMapper):\n \"\"\" Base class for cotinuous color mapper types. 
``ContinuousColorMapper`` is not\n generally useful to instantiate on its own.\n\n \"\"\"\n\n low = Float(help=\"\"\"\n The minimum value of the range to map into the palette. Values below\n this are clamped to ``low``.\n \"\"\")\n\n high = Float(help=\"\"\"\n The maximum value of the range to map into the palette. Values above\n this are clamped to ``high``.\n \"\"\")\n\n low_color = Color(default=None, help=\"\"\"\n Color to be used if data is lower than ``low`` value. If None,\n values lower than ``low`` are mapped to the first color in the palette.\n \"\"\")\n\n high_color = Color(default=None, help=\"\"\"\n Color to be used if data is lower than ``high`` value. If None,\n values lower than ``high`` are mapped to the last color in the palette.\n \"\"\")\n\nclass LinearColorMapper(ContinuousColorMapper):\n \"\"\" Map numbers in a range [*low*, *high*] linearly into a\n sequence of colors (a palette).\n\n For example, if the range is [0, 99] and the palette is\n ``['red', 'green', 'blue']``, the values would be mapped as\n follows::\n\n x < 0 : 'red' # values < low are clamped\n 0 >= x < 33 : 'red'\n 33 >= x < 66 : 'green'\n 66 >= x < 99 : 'blue'\n 99 >= x : 'blue' # values > high are clamped\n\n \"\"\"\n\n\n\nclass LogColorMapper(ContinuousColorMapper):\n \"\"\" Map numbers in a range [*low*, *high*] into a\n sequence of colors (a palette) on a natural logarithm scale.\n\n For example, if the range is [0, 25] and the palette is\n ``['red', 'green', 'blue']``, the values would be mapped as\n follows::\n\n x < 0 : 'red' # values < low are clamped\n 0 >= x < 2.72 : 'red' # math.e ** 1\n 2.72 >= x < 7.39 : 'green' # math.e ** 2\n 7.39 >= x < 20.09 : 'blue' # math.e ** 3\n 20.09 >= x : 'blue' # values > high are clamped\n\n .. warning::\n The LogColorMapper only works for images with scalar values that are\n non-negative.\n\n \"\"\"\n", "path": "bokeh/models/mappers.py"}, {"content": "import numpy as np\n\nfrom bokeh.io import show\nfrom bokeh.layouts import gridplot\nfrom bokeh.models import (\n ColumnDataSource,\n ColorBar,\n LinearColorMapper,\n LogColorMapper,\n)\nfrom bokeh.palettes import Viridis3, Viridis256\nfrom bokeh.plotting import figure\n\nx = np.random.random(2500) * 140 - 20\ny = np.random.normal(size=2500) * 2 + 5\nsource = ColumnDataSource(dict(x=x, y=y))\nopts = dict(x='x', line_color=None, source=source)\n\n\ndef make_plot(mapper, title):\n mapper.low_color = 'blue'\n mapper.high_color = 'red'\n p = figure(toolbar_location=None, tools='', title=title)\n color_bar = ColorBar(color_mapper=mapper, location=(0, 0))\n p.circle(\n x='x', y='y',\n fill_color={'field': 'x', 'transform': mapper}, line_color=None,\n source=source\n )\n p.add_layout(color_bar, 'right')\n return p\n\np1 = make_plot(LinearColorMapper(palette=Viridis256, low=0, high=100), title='Viridis256 - Linear, low/high = blue/red')\np2 = make_plot(LogColorMapper(palette=Viridis256, low=0, high=100), title='Viridis256 - Log, low/high = blue/red')\np3 = make_plot(LinearColorMapper(palette=Viridis3, low=0, high=100), title='Viridis3 - Linear, low/high = blue/red')\np4 = make_plot(LogColorMapper(palette=Viridis3, low=0, high=100), title='Viridis3 - Log, low/high =, blue/red')\n\nshow(gridplot([p1, p2, p3, p4], ncols=2, plot_width=400, plot_height=300, toolbar_location=None))\n", "path": "examples/plotting/file/color_data_map.py"}]}
2,055
866
gh_patches_debug_28320
rasdani/github-patches
git_diff
DataBiosphere__toil-3645
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- Add type hints to compatibility.py Adds type hints to src/toil/lib/compatibility.py so it can be checked under mypy during linting. Refers to #3568. ┆Issue is synchronized with this [Jira Task](https://ucsc-cgl.atlassian.net/browse/TOIL-921) ┆Issue Number: TOIL-921 --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `src/toil/lib/compatibility.py` Content: ``` 1 def compat_bytes(s): 2 return s.decode('utf-8') if isinstance(s, bytes) else s 3 ``` Path: `contrib/admin/mypy-with-ignore.py` Content: ``` 1 #!/usr/bin/env python3 2 """ 3 Runs mypy and ignores files that do not yet have passing type hints. 4 5 Does not type check test files (any path including "src/toil/test"). 6 """ 7 import os 8 import subprocess 9 import sys 10 11 os.environ['MYPYPATH'] = 'contrib/typeshed' 12 pkg_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')) # noqa 13 sys.path.insert(0, pkg_root) # noqa 14 15 from src.toil.lib.resources import glob # type: ignore 16 17 18 def main(): 19 all_files_to_check = [] 20 for d in ['dashboard', 'docker', 'docs', 'src']: 21 all_files_to_check += glob(glob_pattern='*.py', directoryname=os.path.join(pkg_root, d)) 22 23 # TODO: Remove these paths as typing is added and mypy conflicts are addressed 24 ignore_paths = [os.path.abspath(f) for f in [ 25 'docker/Dockerfile.py', 26 'docs/conf.py', 27 'docs/vendor/sphinxcontrib/fulltoc.py', 28 'docs/vendor/sphinxcontrib/__init__.py', 29 'src/toil/job.py', 30 'src/toil/leader.py', 31 'src/toil/statsAndLogging.py', 32 'src/toil/common.py', 33 'src/toil/realtimeLogger.py', 34 'src/toil/worker.py', 35 'src/toil/serviceManager.py', 36 'src/toil/toilState.py', 37 'src/toil/__init__.py', 38 'src/toil/resource.py', 39 'src/toil/deferred.py', 40 'src/toil/version.py', 41 'src/toil/wdl/utils.py', 42 'src/toil/wdl/wdl_types.py', 43 'src/toil/wdl/wdl_synthesis.py', 44 'src/toil/wdl/wdl_analysis.py', 45 'src/toil/wdl/wdl_functions.py', 46 'src/toil/wdl/toilwdl.py', 47 'src/toil/wdl/versions/draft2.py', 48 'src/toil/wdl/versions/v1.py', 49 'src/toil/wdl/versions/dev.py', 50 'src/toil/provisioners/clusterScaler.py', 51 'src/toil/provisioners/abstractProvisioner.py', 52 'src/toil/provisioners/gceProvisioner.py', 53 'src/toil/provisioners/__init__.py', 54 'src/toil/provisioners/node.py', 55 'src/toil/provisioners/aws/boto2Context.py', 56 'src/toil/provisioners/aws/awsProvisioner.py', 57 'src/toil/provisioners/aws/__init__.py', 58 'src/toil/batchSystems/slurm.py', 59 'src/toil/batchSystems/gridengine.py', 60 'src/toil/batchSystems/singleMachine.py', 61 'src/toil/batchSystems/abstractBatchSystem.py', 62 'src/toil/batchSystems/parasol.py', 63 'src/toil/batchSystems/kubernetes.py', 64 'src/toil/batchSystems/torque.py', 65 'src/toil/batchSystems/options.py', 66 'src/toil/batchSystems/registry.py', 67 'src/toil/batchSystems/lsf.py', 68 'src/toil/batchSystems/__init__.py', 69 'src/toil/batchSystems/abstractGridEngineBatchSystem.py', 70 'src/toil/batchSystems/lsfHelper.py', 71 'src/toil/batchSystems/htcondor.py', 72 'src/toil/batchSystems/mesos/batchSystem.py', 73 'src/toil/batchSystems/mesos/executor.py', 74 'src/toil/batchSystems/mesos/conftest.py', 75 'src/toil/batchSystems/mesos/__init__.py', 76 'src/toil/batchSystems/mesos/test/__init__.py', 77 'src/toil/cwl/conftest.py', 78 'src/toil/cwl/__init__.py', 79 'src/toil/cwl/cwltoil.py', 80 
'src/toil/fileStores/cachingFileStore.py', 81 'src/toil/fileStores/abstractFileStore.py', 82 'src/toil/fileStores/nonCachingFileStore.py', 83 'src/toil/fileStores/__init__.py', 84 'src/toil/jobStores/utils.py', 85 'src/toil/jobStores/abstractJobStore.py', 86 'src/toil/jobStores/conftest.py', 87 'src/toil/jobStores/fileJobStore.py', 88 'src/toil/jobStores/__init__.py', 89 'src/toil/jobStores/googleJobStore.py', 90 'src/toil/jobStores/aws/utils.py', 91 'src/toil/jobStores/aws/jobStore.py', 92 'src/toil/jobStores/aws/__init__.py', 93 'src/toil/utils/toilDebugFile.py', 94 # 'src/toil/utils/toilStatus.py', 95 'src/toil/utils/toilStats.py', 96 'src/toil/utils/__init__.py', 97 'src/toil/utils/toilLaunchCluster.py', 98 'src/toil/lib/memoize.py', 99 'src/toil/lib/throttle.py', 100 'src/toil/lib/humanize.py', 101 'src/toil/lib/compatibility.py', 102 'src/toil/lib/iterables.py', 103 'src/toil/lib/bioio.py', 104 'src/toil/lib/ec2.py', 105 'src/toil/lib/ec2nodes.py', 106 'src/toil/lib/expando.py', 107 'src/toil/lib/threading.py', 108 'src/toil/lib/exceptions.py', 109 'src/toil/lib/__init__.py', 110 'src/toil/lib/generatedEC2Lists.py', 111 'src/toil/lib/retry.py', 112 'src/toil/lib/objects.py', 113 'src/toil/lib/io.py', 114 'src/toil/lib/docker.py', 115 # 'src/toil/lib/encryption/_nacl.py', 116 'src/toil/lib/encryption/_dummy.py', 117 'src/toil/lib/encryption/conftest.py', 118 'src/toil/lib/encryption/__init__.py', 119 'src/toil/lib/aws/utils.py', 120 'src/toil/lib/aws/__init__.py' 121 ]] 122 123 filtered_files_to_check = [] 124 for file_path in all_files_to_check: 125 if file_path not in ignore_paths and 'src/toil/test' not in file_path: 126 filtered_files_to_check.append(file_path) 127 # follow-imports type checks pypi projects we don't control, so we skip it; why is this their default? 128 args = ['mypy', '--follow-imports=skip'] + filtered_files_to_check 129 p = subprocess.run(args=args, stdout=subprocess.PIPE) 130 result = p.stdout.decode() 131 print(result) 132 if 'Success: no issues found' not in result: 133 exit(1) 134 135 136 if __name__ == '__main__': 137 main() 138 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/contrib/admin/mypy-with-ignore.py b/contrib/admin/mypy-with-ignore.py --- a/contrib/admin/mypy-with-ignore.py +++ b/contrib/admin/mypy-with-ignore.py @@ -91,14 +91,12 @@ 'src/toil/jobStores/aws/jobStore.py', 'src/toil/jobStores/aws/__init__.py', 'src/toil/utils/toilDebugFile.py', - # 'src/toil/utils/toilStatus.py', 'src/toil/utils/toilStats.py', 'src/toil/utils/__init__.py', 'src/toil/utils/toilLaunchCluster.py', 'src/toil/lib/memoize.py', 'src/toil/lib/throttle.py', 'src/toil/lib/humanize.py', - 'src/toil/lib/compatibility.py', 'src/toil/lib/iterables.py', 'src/toil/lib/bioio.py', 'src/toil/lib/ec2.py', @@ -112,7 +110,6 @@ 'src/toil/lib/objects.py', 'src/toil/lib/io.py', 'src/toil/lib/docker.py', - # 'src/toil/lib/encryption/_nacl.py', 'src/toil/lib/encryption/_dummy.py', 'src/toil/lib/encryption/conftest.py', 'src/toil/lib/encryption/__init__.py', diff --git a/src/toil/lib/compatibility.py b/src/toil/lib/compatibility.py --- a/src/toil/lib/compatibility.py +++ b/src/toil/lib/compatibility.py @@ -1,2 +1,4 @@ -def compat_bytes(s): +from typing import Union + +def compat_bytes(s: Union[bytes, str]) -> str: return s.decode('utf-8') if isinstance(s, bytes) else s
{"golden_diff": "diff --git a/contrib/admin/mypy-with-ignore.py b/contrib/admin/mypy-with-ignore.py\n--- a/contrib/admin/mypy-with-ignore.py\n+++ b/contrib/admin/mypy-with-ignore.py\n@@ -91,14 +91,12 @@\n 'src/toil/jobStores/aws/jobStore.py',\n 'src/toil/jobStores/aws/__init__.py',\n 'src/toil/utils/toilDebugFile.py',\n- # 'src/toil/utils/toilStatus.py',\n 'src/toil/utils/toilStats.py',\n 'src/toil/utils/__init__.py',\n 'src/toil/utils/toilLaunchCluster.py',\n 'src/toil/lib/memoize.py',\n 'src/toil/lib/throttle.py',\n 'src/toil/lib/humanize.py',\n- 'src/toil/lib/compatibility.py',\n 'src/toil/lib/iterables.py',\n 'src/toil/lib/bioio.py',\n 'src/toil/lib/ec2.py',\n@@ -112,7 +110,6 @@\n 'src/toil/lib/objects.py',\n 'src/toil/lib/io.py',\n 'src/toil/lib/docker.py',\n- # 'src/toil/lib/encryption/_nacl.py',\n 'src/toil/lib/encryption/_dummy.py',\n 'src/toil/lib/encryption/conftest.py',\n 'src/toil/lib/encryption/__init__.py',\ndiff --git a/src/toil/lib/compatibility.py b/src/toil/lib/compatibility.py\n--- a/src/toil/lib/compatibility.py\n+++ b/src/toil/lib/compatibility.py\n@@ -1,2 +1,4 @@\n-def compat_bytes(s):\n+from typing import Union\n+\n+def compat_bytes(s: Union[bytes, str]) -> str:\n return s.decode('utf-8') if isinstance(s, bytes) else s\n", "issue": "Add type hints to compatibility.py\nAdds type hints to src/toil/lib/compatibility.py so it can be checked under mypy during linting.\n\nRefers to #3568.\n\n\u2506Issue is synchronized with this [Jira Task](https://ucsc-cgl.atlassian.net/browse/TOIL-921)\n\u2506Issue Number: TOIL-921\n\n", "before_files": [{"content": "def compat_bytes(s):\n return s.decode('utf-8') if isinstance(s, bytes) else s\n", "path": "src/toil/lib/compatibility.py"}, {"content": "#!/usr/bin/env python3\n\"\"\"\nRuns mypy and ignores files that do not yet have passing type hints.\n\nDoes not type check test files (any path including \"src/toil/test\").\n\"\"\"\nimport os\nimport subprocess\nimport sys\n\nos.environ['MYPYPATH'] = 'contrib/typeshed'\npkg_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')) # noqa\nsys.path.insert(0, pkg_root) # noqa\n\nfrom src.toil.lib.resources import glob # type: ignore\n\n\ndef main():\n all_files_to_check = []\n for d in ['dashboard', 'docker', 'docs', 'src']:\n all_files_to_check += glob(glob_pattern='*.py', directoryname=os.path.join(pkg_root, d))\n\n # TODO: Remove these paths as typing is added and mypy conflicts are addressed\n ignore_paths = [os.path.abspath(f) for f in [\n 'docker/Dockerfile.py',\n 'docs/conf.py',\n 'docs/vendor/sphinxcontrib/fulltoc.py',\n 'docs/vendor/sphinxcontrib/__init__.py',\n 'src/toil/job.py',\n 'src/toil/leader.py',\n 'src/toil/statsAndLogging.py',\n 'src/toil/common.py',\n 'src/toil/realtimeLogger.py',\n 'src/toil/worker.py',\n 'src/toil/serviceManager.py',\n 'src/toil/toilState.py',\n 'src/toil/__init__.py',\n 'src/toil/resource.py',\n 'src/toil/deferred.py',\n 'src/toil/version.py',\n 'src/toil/wdl/utils.py',\n 'src/toil/wdl/wdl_types.py',\n 'src/toil/wdl/wdl_synthesis.py',\n 'src/toil/wdl/wdl_analysis.py',\n 'src/toil/wdl/wdl_functions.py',\n 'src/toil/wdl/toilwdl.py',\n 'src/toil/wdl/versions/draft2.py',\n 'src/toil/wdl/versions/v1.py',\n 'src/toil/wdl/versions/dev.py',\n 'src/toil/provisioners/clusterScaler.py',\n 'src/toil/provisioners/abstractProvisioner.py',\n 'src/toil/provisioners/gceProvisioner.py',\n 'src/toil/provisioners/__init__.py',\n 'src/toil/provisioners/node.py',\n 'src/toil/provisioners/aws/boto2Context.py',\n 
'src/toil/provisioners/aws/awsProvisioner.py',\n 'src/toil/provisioners/aws/__init__.py',\n 'src/toil/batchSystems/slurm.py',\n 'src/toil/batchSystems/gridengine.py',\n 'src/toil/batchSystems/singleMachine.py',\n 'src/toil/batchSystems/abstractBatchSystem.py',\n 'src/toil/batchSystems/parasol.py',\n 'src/toil/batchSystems/kubernetes.py',\n 'src/toil/batchSystems/torque.py',\n 'src/toil/batchSystems/options.py',\n 'src/toil/batchSystems/registry.py',\n 'src/toil/batchSystems/lsf.py',\n 'src/toil/batchSystems/__init__.py',\n 'src/toil/batchSystems/abstractGridEngineBatchSystem.py',\n 'src/toil/batchSystems/lsfHelper.py',\n 'src/toil/batchSystems/htcondor.py',\n 'src/toil/batchSystems/mesos/batchSystem.py',\n 'src/toil/batchSystems/mesos/executor.py',\n 'src/toil/batchSystems/mesos/conftest.py',\n 'src/toil/batchSystems/mesos/__init__.py',\n 'src/toil/batchSystems/mesos/test/__init__.py',\n 'src/toil/cwl/conftest.py',\n 'src/toil/cwl/__init__.py',\n 'src/toil/cwl/cwltoil.py',\n 'src/toil/fileStores/cachingFileStore.py',\n 'src/toil/fileStores/abstractFileStore.py',\n 'src/toil/fileStores/nonCachingFileStore.py',\n 'src/toil/fileStores/__init__.py',\n 'src/toil/jobStores/utils.py',\n 'src/toil/jobStores/abstractJobStore.py',\n 'src/toil/jobStores/conftest.py',\n 'src/toil/jobStores/fileJobStore.py',\n 'src/toil/jobStores/__init__.py',\n 'src/toil/jobStores/googleJobStore.py',\n 'src/toil/jobStores/aws/utils.py',\n 'src/toil/jobStores/aws/jobStore.py',\n 'src/toil/jobStores/aws/__init__.py',\n 'src/toil/utils/toilDebugFile.py',\n # 'src/toil/utils/toilStatus.py',\n 'src/toil/utils/toilStats.py',\n 'src/toil/utils/__init__.py',\n 'src/toil/utils/toilLaunchCluster.py',\n 'src/toil/lib/memoize.py',\n 'src/toil/lib/throttle.py',\n 'src/toil/lib/humanize.py',\n 'src/toil/lib/compatibility.py',\n 'src/toil/lib/iterables.py',\n 'src/toil/lib/bioio.py',\n 'src/toil/lib/ec2.py',\n 'src/toil/lib/ec2nodes.py',\n 'src/toil/lib/expando.py',\n 'src/toil/lib/threading.py',\n 'src/toil/lib/exceptions.py',\n 'src/toil/lib/__init__.py',\n 'src/toil/lib/generatedEC2Lists.py',\n 'src/toil/lib/retry.py',\n 'src/toil/lib/objects.py',\n 'src/toil/lib/io.py',\n 'src/toil/lib/docker.py',\n # 'src/toil/lib/encryption/_nacl.py',\n 'src/toil/lib/encryption/_dummy.py',\n 'src/toil/lib/encryption/conftest.py',\n 'src/toil/lib/encryption/__init__.py',\n 'src/toil/lib/aws/utils.py',\n 'src/toil/lib/aws/__init__.py'\n ]]\n\n filtered_files_to_check = []\n for file_path in all_files_to_check:\n if file_path not in ignore_paths and 'src/toil/test' not in file_path:\n filtered_files_to_check.append(file_path)\n # follow-imports type checks pypi projects we don't control, so we skip it; why is this their default?\n args = ['mypy', '--follow-imports=skip'] + filtered_files_to_check\n p = subprocess.run(args=args, stdout=subprocess.PIPE)\n result = p.stdout.decode()\n print(result)\n if 'Success: no issues found' not in result:\n exit(1)\n\n\nif __name__ == '__main__':\n main()\n", "path": "contrib/admin/mypy-with-ignore.py"}], "after_files": [{"content": "from typing import Union\n\ndef compat_bytes(s: Union[bytes, str]) -> str:\n return s.decode('utf-8') if isinstance(s, bytes) else s\n", "path": "src/toil/lib/compatibility.py"}, {"content": "#!/usr/bin/env python3\n\"\"\"\nRuns mypy and ignores files that do not yet have passing type hints.\n\nDoes not type check test files (any path including \"src/toil/test\").\n\"\"\"\nimport os\nimport subprocess\nimport sys\n\nos.environ['MYPYPATH'] = 'contrib/typeshed'\npkg_root = 
os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')) # noqa\nsys.path.insert(0, pkg_root) # noqa\n\nfrom src.toil.lib.resources import glob # type: ignore\n\n\ndef main():\n all_files_to_check = []\n for d in ['dashboard', 'docker', 'docs', 'src']:\n all_files_to_check += glob(glob_pattern='*.py', directoryname=os.path.join(pkg_root, d))\n\n # TODO: Remove these paths as typing is added and mypy conflicts are addressed\n ignore_paths = [os.path.abspath(f) for f in [\n 'docker/Dockerfile.py',\n 'docs/conf.py',\n 'docs/vendor/sphinxcontrib/fulltoc.py',\n 'docs/vendor/sphinxcontrib/__init__.py',\n 'src/toil/job.py',\n 'src/toil/leader.py',\n 'src/toil/statsAndLogging.py',\n 'src/toil/common.py',\n 'src/toil/realtimeLogger.py',\n 'src/toil/worker.py',\n 'src/toil/serviceManager.py',\n 'src/toil/toilState.py',\n 'src/toil/__init__.py',\n 'src/toil/resource.py',\n 'src/toil/deferred.py',\n 'src/toil/version.py',\n 'src/toil/wdl/utils.py',\n 'src/toil/wdl/wdl_types.py',\n 'src/toil/wdl/wdl_synthesis.py',\n 'src/toil/wdl/wdl_analysis.py',\n 'src/toil/wdl/wdl_functions.py',\n 'src/toil/wdl/toilwdl.py',\n 'src/toil/wdl/versions/draft2.py',\n 'src/toil/wdl/versions/v1.py',\n 'src/toil/wdl/versions/dev.py',\n 'src/toil/provisioners/clusterScaler.py',\n 'src/toil/provisioners/abstractProvisioner.py',\n 'src/toil/provisioners/gceProvisioner.py',\n 'src/toil/provisioners/__init__.py',\n 'src/toil/provisioners/node.py',\n 'src/toil/provisioners/aws/boto2Context.py',\n 'src/toil/provisioners/aws/awsProvisioner.py',\n 'src/toil/provisioners/aws/__init__.py',\n 'src/toil/batchSystems/slurm.py',\n 'src/toil/batchSystems/gridengine.py',\n 'src/toil/batchSystems/singleMachine.py',\n 'src/toil/batchSystems/abstractBatchSystem.py',\n 'src/toil/batchSystems/parasol.py',\n 'src/toil/batchSystems/kubernetes.py',\n 'src/toil/batchSystems/torque.py',\n 'src/toil/batchSystems/options.py',\n 'src/toil/batchSystems/registry.py',\n 'src/toil/batchSystems/lsf.py',\n 'src/toil/batchSystems/__init__.py',\n 'src/toil/batchSystems/abstractGridEngineBatchSystem.py',\n 'src/toil/batchSystems/lsfHelper.py',\n 'src/toil/batchSystems/htcondor.py',\n 'src/toil/batchSystems/mesos/batchSystem.py',\n 'src/toil/batchSystems/mesos/executor.py',\n 'src/toil/batchSystems/mesos/conftest.py',\n 'src/toil/batchSystems/mesos/__init__.py',\n 'src/toil/batchSystems/mesos/test/__init__.py',\n 'src/toil/cwl/conftest.py',\n 'src/toil/cwl/__init__.py',\n 'src/toil/cwl/cwltoil.py',\n 'src/toil/fileStores/cachingFileStore.py',\n 'src/toil/fileStores/abstractFileStore.py',\n 'src/toil/fileStores/nonCachingFileStore.py',\n 'src/toil/fileStores/__init__.py',\n 'src/toil/jobStores/utils.py',\n 'src/toil/jobStores/abstractJobStore.py',\n 'src/toil/jobStores/conftest.py',\n 'src/toil/jobStores/fileJobStore.py',\n 'src/toil/jobStores/__init__.py',\n 'src/toil/jobStores/googleJobStore.py',\n 'src/toil/jobStores/aws/utils.py',\n 'src/toil/jobStores/aws/jobStore.py',\n 'src/toil/jobStores/aws/__init__.py',\n 'src/toil/utils/toilDebugFile.py',\n 'src/toil/utils/toilStats.py',\n 'src/toil/utils/__init__.py',\n 'src/toil/utils/toilLaunchCluster.py',\n 'src/toil/lib/memoize.py',\n 'src/toil/lib/throttle.py',\n 'src/toil/lib/humanize.py',\n 'src/toil/lib/iterables.py',\n 'src/toil/lib/bioio.py',\n 'src/toil/lib/ec2.py',\n 'src/toil/lib/ec2nodes.py',\n 'src/toil/lib/expando.py',\n 'src/toil/lib/threading.py',\n 'src/toil/lib/exceptions.py',\n 'src/toil/lib/__init__.py',\n 'src/toil/lib/generatedEC2Lists.py',\n 'src/toil/lib/retry.py',\n 
'src/toil/lib/objects.py',\n 'src/toil/lib/io.py',\n 'src/toil/lib/docker.py',\n 'src/toil/lib/encryption/_dummy.py',\n 'src/toil/lib/encryption/conftest.py',\n 'src/toil/lib/encryption/__init__.py',\n 'src/toil/lib/aws/utils.py',\n 'src/toil/lib/aws/__init__.py'\n ]]\n\n filtered_files_to_check = []\n for file_path in all_files_to_check:\n if file_path not in ignore_paths and 'src/toil/test' not in file_path:\n filtered_files_to_check.append(file_path)\n # follow-imports type checks pypi projects we don't control, so we skip it; why is this their default?\n args = ['mypy', '--follow-imports=skip'] + filtered_files_to_check\n p = subprocess.run(args=args, stdout=subprocess.PIPE)\n result = p.stdout.decode()\n print(result)\n if 'Success: no issues found' not in result:\n exit(1)\n\n\nif __name__ == '__main__':\n main()\n", "path": "contrib/admin/mypy-with-ignore.py"}]}
2,178
412
gh_patches_debug_19762
rasdani/github-patches
git_diff
scrapy__scrapy-4676
We are currently solving the following issue within our repository. Here is the issue text: --- BEGIN ISSUE --- startproject only allows creating the project on an existing folder if the path is specified with an invalid module name ``` (venv) [adrian@afonsox temporal]$ mkdir existing_folder (venv) [adrian@afonsox temporal]$ scrapy startproject existing_folder Error: Module 'existing_folder' already exists (venv) [adrian@afonsox temporal]$ cd existing_folder && scrapy startproject existing_folder . New Scrapy project 'existing_folder', using template directory '/home/adrian/temporal/venv/lib/python3.8/site-packages/scrapy/templates/project', created in: /home/adrian/temporal/existing_folder You can start your first spider with: cd . scrapy genspider example example.com ``` --- END ISSUE --- Below are some code segments, each from a relevant file. One or more of these files may contain bugs. --- BEGIN FILES --- Path: `scrapy/commands/startproject.py` Content: ``` 1 import re 2 import os 3 import string 4 from importlib import import_module 5 from os.path import join, exists, abspath 6 from shutil import ignore_patterns, move, copy2, copystat 7 from stat import S_IWUSR as OWNER_WRITE_PERMISSION 8 9 import scrapy 10 from scrapy.commands import ScrapyCommand 11 from scrapy.utils.template import render_templatefile, string_camelcase 12 from scrapy.exceptions import UsageError 13 14 15 TEMPLATES_TO_RENDER = ( 16 ('scrapy.cfg',), 17 ('${project_name}', 'settings.py.tmpl'), 18 ('${project_name}', 'items.py.tmpl'), 19 ('${project_name}', 'pipelines.py.tmpl'), 20 ('${project_name}', 'middlewares.py.tmpl'), 21 ) 22 23 IGNORE = ignore_patterns('*.pyc', '__pycache__', '.svn') 24 25 26 def _make_writable(path): 27 current_permissions = os.stat(path).st_mode 28 os.chmod(path, current_permissions | OWNER_WRITE_PERMISSION) 29 30 31 class Command(ScrapyCommand): 32 33 requires_project = False 34 default_settings = {'LOG_ENABLED': False, 35 'SPIDER_LOADER_WARN_ONLY': True} 36 37 def syntax(self): 38 return "<project_name> [project_dir]" 39 40 def short_desc(self): 41 return "Create new project" 42 43 def _is_valid_name(self, project_name): 44 def _module_exists(module_name): 45 try: 46 import_module(module_name) 47 return True 48 except ImportError: 49 return False 50 51 if not re.search(r'^[_a-zA-Z]\w*$', project_name): 52 print('Error: Project names must begin with a letter and contain' 53 ' only\nletters, numbers and underscores') 54 elif _module_exists(project_name): 55 print(f'Error: Module {project_name!r} already exists') 56 else: 57 return True 58 return False 59 60 def _copytree(self, src, dst): 61 """ 62 Since the original function always creates the directory, to resolve 63 the issue a new function had to be created. It's a simple copy and 64 was reduced for this case. 
65 66 More info at: 67 https://github.com/scrapy/scrapy/pull/2005 68 """ 69 ignore = IGNORE 70 names = os.listdir(src) 71 ignored_names = ignore(src, names) 72 73 if not os.path.exists(dst): 74 os.makedirs(dst) 75 76 for name in names: 77 if name in ignored_names: 78 continue 79 80 srcname = os.path.join(src, name) 81 dstname = os.path.join(dst, name) 82 if os.path.isdir(srcname): 83 self._copytree(srcname, dstname) 84 else: 85 copy2(srcname, dstname) 86 _make_writable(dstname) 87 88 copystat(src, dst) 89 _make_writable(dst) 90 91 def run(self, args, opts): 92 if len(args) not in (1, 2): 93 raise UsageError() 94 95 project_name = args[0] 96 project_dir = args[0] 97 98 if len(args) == 2: 99 project_dir = args[1] 100 101 if exists(join(project_dir, 'scrapy.cfg')): 102 self.exitcode = 1 103 print(f'Error: scrapy.cfg already exists in {abspath(project_dir)}') 104 return 105 106 if not self._is_valid_name(project_name): 107 self.exitcode = 1 108 return 109 110 self._copytree(self.templates_dir, abspath(project_dir)) 111 move(join(project_dir, 'module'), join(project_dir, project_name)) 112 for paths in TEMPLATES_TO_RENDER: 113 path = join(*paths) 114 tplfile = join(project_dir, string.Template(path).substitute(project_name=project_name)) 115 render_templatefile(tplfile, project_name=project_name, ProjectName=string_camelcase(project_name)) 116 print(f"New Scrapy project '{project_name}', using template directory " 117 f"'{self.templates_dir}', created in:") 118 print(f" {abspath(project_dir)}\n") 119 print("You can start your first spider with:") 120 print(f" cd {project_dir}") 121 print(" scrapy genspider example example.com") 122 123 @property 124 def templates_dir(self): 125 return join( 126 self.settings['TEMPLATES_DIR'] or join(scrapy.__path__[0], 'templates'), 127 'project' 128 ) 129 ``` --- END FILES --- Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks. Here is an example: ```diff diff --git a/examples/server_async.py b/examples/server_async.py --- a/examples/server_async.py +++ b/examples/server_async.py @@ -313,4 +313,4 @@ if __name__ == "__main__": - asyncio.run(run_async_server("."), debug=True) + asyncio.run(run_async_server(), debug=True) diff --git a/examples/server_sync.py b/examples/server_sync.py --- a/examples/server_sync.py +++ b/examples/server_sync.py @@ -313,5 +313,5 @@ if __name__ == "__main__": - server = run_sync_server(".") + server = run_sync_server() server.shutdown() ```
diff --git a/scrapy/commands/startproject.py b/scrapy/commands/startproject.py --- a/scrapy/commands/startproject.py +++ b/scrapy/commands/startproject.py @@ -1,7 +1,7 @@ import re import os import string -from importlib import import_module +from importlib.util import find_spec from os.path import join, exists, abspath from shutil import ignore_patterns, move, copy2, copystat from stat import S_IWUSR as OWNER_WRITE_PERMISSION @@ -42,11 +42,8 @@ def _is_valid_name(self, project_name): def _module_exists(module_name): - try: - import_module(module_name) - return True - except ImportError: - return False + spec = find_spec(module_name) + return spec is not None and spec.loader is not None if not re.search(r'^[_a-zA-Z]\w*$', project_name): print('Error: Project names must begin with a letter and contain'
{"golden_diff": "diff --git a/scrapy/commands/startproject.py b/scrapy/commands/startproject.py\n--- a/scrapy/commands/startproject.py\n+++ b/scrapy/commands/startproject.py\n@@ -1,7 +1,7 @@\n import re\n import os\n import string\n-from importlib import import_module\n+from importlib.util import find_spec\n from os.path import join, exists, abspath\n from shutil import ignore_patterns, move, copy2, copystat\n from stat import S_IWUSR as OWNER_WRITE_PERMISSION\n@@ -42,11 +42,8 @@\n \n def _is_valid_name(self, project_name):\n def _module_exists(module_name):\n- try:\n- import_module(module_name)\n- return True\n- except ImportError:\n- return False\n+ spec = find_spec(module_name)\n+ return spec is not None and spec.loader is not None\n \n if not re.search(r'^[_a-zA-Z]\\w*$', project_name):\n print('Error: Project names must begin with a letter and contain'\n", "issue": "startproject only allows creating the project on an existing folder if the path is specified with an invalid module name\n```\r\n(venv) [adrian@afonsox temporal]$ mkdir existing_folder\r\n(venv) [adrian@afonsox temporal]$ scrapy startproject existing_folder\r\nError: Module 'existing_folder' already exists\r\n(venv) [adrian@afonsox temporal]$ cd existing_folder && scrapy startproject existing_folder .\r\nNew Scrapy project 'existing_folder', using template directory '/home/adrian/temporal/venv/lib/python3.8/site-packages/scrapy/templates/project', created in:\r\n /home/adrian/temporal/existing_folder\r\n\r\nYou can start your first spider with:\r\n cd .\r\n scrapy genspider example example.com\r\n```\n", "before_files": [{"content": "import re\nimport os\nimport string\nfrom importlib import import_module\nfrom os.path import join, exists, abspath\nfrom shutil import ignore_patterns, move, copy2, copystat\nfrom stat import S_IWUSR as OWNER_WRITE_PERMISSION\n\nimport scrapy\nfrom scrapy.commands import ScrapyCommand\nfrom scrapy.utils.template import render_templatefile, string_camelcase\nfrom scrapy.exceptions import UsageError\n\n\nTEMPLATES_TO_RENDER = (\n ('scrapy.cfg',),\n ('${project_name}', 'settings.py.tmpl'),\n ('${project_name}', 'items.py.tmpl'),\n ('${project_name}', 'pipelines.py.tmpl'),\n ('${project_name}', 'middlewares.py.tmpl'),\n)\n\nIGNORE = ignore_patterns('*.pyc', '__pycache__', '.svn')\n\n\ndef _make_writable(path):\n current_permissions = os.stat(path).st_mode\n os.chmod(path, current_permissions | OWNER_WRITE_PERMISSION)\n\n\nclass Command(ScrapyCommand):\n\n requires_project = False\n default_settings = {'LOG_ENABLED': False,\n 'SPIDER_LOADER_WARN_ONLY': True}\n\n def syntax(self):\n return \"<project_name> [project_dir]\"\n\n def short_desc(self):\n return \"Create new project\"\n\n def _is_valid_name(self, project_name):\n def _module_exists(module_name):\n try:\n import_module(module_name)\n return True\n except ImportError:\n return False\n\n if not re.search(r'^[_a-zA-Z]\\w*$', project_name):\n print('Error: Project names must begin with a letter and contain'\n ' only\\nletters, numbers and underscores')\n elif _module_exists(project_name):\n print(f'Error: Module {project_name!r} already exists')\n else:\n return True\n return False\n\n def _copytree(self, src, dst):\n \"\"\"\n Since the original function always creates the directory, to resolve\n the issue a new function had to be created. 
It's a simple copy and\n was reduced for this case.\n\n More info at:\n https://github.com/scrapy/scrapy/pull/2005\n \"\"\"\n ignore = IGNORE\n names = os.listdir(src)\n ignored_names = ignore(src, names)\n\n if not os.path.exists(dst):\n os.makedirs(dst)\n\n for name in names:\n if name in ignored_names:\n continue\n\n srcname = os.path.join(src, name)\n dstname = os.path.join(dst, name)\n if os.path.isdir(srcname):\n self._copytree(srcname, dstname)\n else:\n copy2(srcname, dstname)\n _make_writable(dstname)\n\n copystat(src, dst)\n _make_writable(dst)\n\n def run(self, args, opts):\n if len(args) not in (1, 2):\n raise UsageError()\n\n project_name = args[0]\n project_dir = args[0]\n\n if len(args) == 2:\n project_dir = args[1]\n\n if exists(join(project_dir, 'scrapy.cfg')):\n self.exitcode = 1\n print(f'Error: scrapy.cfg already exists in {abspath(project_dir)}')\n return\n\n if not self._is_valid_name(project_name):\n self.exitcode = 1\n return\n\n self._copytree(self.templates_dir, abspath(project_dir))\n move(join(project_dir, 'module'), join(project_dir, project_name))\n for paths in TEMPLATES_TO_RENDER:\n path = join(*paths)\n tplfile = join(project_dir, string.Template(path).substitute(project_name=project_name))\n render_templatefile(tplfile, project_name=project_name, ProjectName=string_camelcase(project_name))\n print(f\"New Scrapy project '{project_name}', using template directory \"\n f\"'{self.templates_dir}', created in:\")\n print(f\" {abspath(project_dir)}\\n\")\n print(\"You can start your first spider with:\")\n print(f\" cd {project_dir}\")\n print(\" scrapy genspider example example.com\")\n\n @property\n def templates_dir(self):\n return join(\n self.settings['TEMPLATES_DIR'] or join(scrapy.__path__[0], 'templates'),\n 'project'\n )\n", "path": "scrapy/commands/startproject.py"}], "after_files": [{"content": "import re\nimport os\nimport string\nfrom importlib.util import find_spec\nfrom os.path import join, exists, abspath\nfrom shutil import ignore_patterns, move, copy2, copystat\nfrom stat import S_IWUSR as OWNER_WRITE_PERMISSION\n\nimport scrapy\nfrom scrapy.commands import ScrapyCommand\nfrom scrapy.utils.template import render_templatefile, string_camelcase\nfrom scrapy.exceptions import UsageError\n\n\nTEMPLATES_TO_RENDER = (\n ('scrapy.cfg',),\n ('${project_name}', 'settings.py.tmpl'),\n ('${project_name}', 'items.py.tmpl'),\n ('${project_name}', 'pipelines.py.tmpl'),\n ('${project_name}', 'middlewares.py.tmpl'),\n)\n\nIGNORE = ignore_patterns('*.pyc', '__pycache__', '.svn')\n\n\ndef _make_writable(path):\n current_permissions = os.stat(path).st_mode\n os.chmod(path, current_permissions | OWNER_WRITE_PERMISSION)\n\n\nclass Command(ScrapyCommand):\n\n requires_project = False\n default_settings = {'LOG_ENABLED': False,\n 'SPIDER_LOADER_WARN_ONLY': True}\n\n def syntax(self):\n return \"<project_name> [project_dir]\"\n\n def short_desc(self):\n return \"Create new project\"\n\n def _is_valid_name(self, project_name):\n def _module_exists(module_name):\n spec = find_spec(module_name)\n return spec is not None and spec.loader is not None\n\n if not re.search(r'^[_a-zA-Z]\\w*$', project_name):\n print('Error: Project names must begin with a letter and contain'\n ' only\\nletters, numbers and underscores')\n elif _module_exists(project_name):\n print('Error: Module %r already exists' % project_name)\n else:\n return True\n return False\n\n def _copytree(self, src, dst):\n \"\"\"\n Since the original function always creates the directory, to resolve\n the issue a 
new function had to be created. It's a simple copy and\n was reduced for this case.\n\n More info at:\n https://github.com/scrapy/scrapy/pull/2005\n \"\"\"\n ignore = IGNORE\n names = os.listdir(src)\n ignored_names = ignore(src, names)\n\n if not os.path.exists(dst):\n os.makedirs(dst)\n\n for name in names:\n if name in ignored_names:\n continue\n\n srcname = os.path.join(src, name)\n dstname = os.path.join(dst, name)\n if os.path.isdir(srcname):\n self._copytree(srcname, dstname)\n else:\n copy2(srcname, dstname)\n _make_writable(dstname)\n\n copystat(src, dst)\n _make_writable(dst)\n\n def run(self, args, opts):\n if len(args) not in (1, 2):\n raise UsageError()\n\n project_name = args[0]\n project_dir = args[0]\n\n if len(args) == 2:\n project_dir = args[1]\n\n if exists(join(project_dir, 'scrapy.cfg')):\n self.exitcode = 1\n print('Error: scrapy.cfg already exists in %s' % abspath(project_dir))\n return\n\n if not self._is_valid_name(project_name):\n self.exitcode = 1\n return\n\n self._copytree(self.templates_dir, abspath(project_dir))\n move(join(project_dir, 'module'), join(project_dir, project_name))\n for paths in TEMPLATES_TO_RENDER:\n path = join(*paths)\n tplfile = join(project_dir, string.Template(path).substitute(project_name=project_name))\n render_templatefile(tplfile, project_name=project_name, ProjectName=string_camelcase(project_name))\n print(\"New Scrapy project '%s', using template directory '%s', \"\n \"created in:\" % (project_name, self.templates_dir))\n print(\" %s\\n\" % abspath(project_dir))\n print(\"You can start your first spider with:\")\n print(\" cd %s\" % project_dir)\n print(\" scrapy genspider example example.com\")\n\n @property\n def templates_dir(self):\n return join(\n self.settings['TEMPLATES_DIR'] or join(scrapy.__path__[0], 'templates'),\n 'project'\n )\n", "path": "scrapy/commands/startproject.py"}]}
1,632
227